repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
diogovk/ansible
|
refs/heads/devel
|
v1/ansible/runner/lookup_plugins/lines.py
|
176
|
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from ansible import utils, errors
class LookupModule(object):
    """Ansible lookup plugin: run each term as a shell command and return
    the lines of its standard output."""

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, terms, inject=None, **kwargs):
        """Execute every term as a shell command (cwd = basedir).

        Raises AnsibleError when any command exits non-zero.
        """
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        collected = []
        for command in terms:
            proc = subprocess.Popen(command, cwd=self.basedir, shell=True,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
            stdout, _ = proc.communicate()
            if proc.returncode != 0:
                raise errors.AnsibleError("lookup_plugin.lines(%s) returned %d" % (command, proc.returncode))
            collected.extend(stdout.splitlines())
        return collected
|
loopCM/chromium
|
refs/heads/trunk
|
tools/PRESUBMIT.py
|
35
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for bisect trybot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
import imp
def _ExamineBisectConfigFile(input_api, output_api):
    """Return the local path of a bisect config file with content, else None.

    A change touching run-bisect-perf-regression.cfg is only acceptable when
    every field of its config dict is empty; any truthy value (or a file that
    cannot be loaded at all) flags the file.
    """
    for affected in input_api.AffectedFiles():
        if not affected.LocalPath().endswith('run-bisect-perf-regression.cfg'):
            continue
        try:
            cfg = imp.load_source('config', 'run-bisect-perf-regression.cfg')
            for _, value in cfg.config.iteritems():
                if value:
                    return affected.LocalPath()
        except (IOError, AttributeError, TypeError):
            # Unreadable / malformed config is treated as an offending change.
            return affected.LocalPath()
    return None
def _CheckNoChangesToBisectConfigFile(input_api, output_api):
    """Emit a presubmit error when the bisect config file has content."""
    offending_path = _ExamineBisectConfigFile(input_api, output_api)
    if not offending_path:
        return []
    return [output_api.PresubmitError(
        'The bisection config file should only contain a config dict with '
        'empty fields. Changes to this file should never be submitted.',
        items=[offending_path])]

def CommonChecks(input_api, output_api):
    """Checks shared by the upload and commit hooks."""
    return list(_CheckNoChangesToBisectConfigFile(input_api, output_api))

def CheckChangeOnUpload(input_api, output_api):
    return CommonChecks(input_api, output_api)

def CheckChangeOnCommit(input_api, output_api):
    return CommonChecks(input_api, output_api)
|
Dandandan/wikiprogramming
|
refs/heads/master
|
jsrepl/extern/python/unclosured/lib/python2.7/lib2to3/fixes/fix_operator.py
|
326
|
"""Fixer for operator functions.
operator.isCallable(obj) -> hasattr(obj, '__call__')
operator.sequenceIncludes(obj) -> operator.contains(obj)
operator.isSequenceType(obj) -> isinstance(obj, collections.Sequence)
operator.isMappingType(obj) -> isinstance(obj, collections.Mapping)
operator.isNumberType(obj) -> isinstance(obj, numbers.Number)
operator.repeat(obj, n) -> operator.mul(obj, n)
operator.irepeat(obj, n) -> operator.imul(obj, n)
"""
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call, Name, String, touch_import
def invocation(s):
    """Decorator factory: tag the wrapped function with the human-readable
    call form *s*, stored on the function as ``invocation``."""
    def annotate(f):
        f.invocation = s
        return f
    return annotate
class FixOperator(fixer_base.BaseFix):
    """2to3 fixer rewriting deprecated ``operator`` helpers (see module
    docstring for the full mapping)."""
    BM_compatible = True
    order = "pre"

    # Pattern fragment matching any of the deprecated method names.
    methods = """
              method=('isCallable'|'sequenceIncludes'
                     |'isSequenceType'|'isMappingType'|'isNumberType'
                     |'repeat'|'irepeat')
              """
    obj = "'(' obj=any ')'"
    # Match both the qualified form (operator.repeat(x)) and the bare form
    # (repeat(x)); only the qualified form is rewritten, the bare form warns.
    PATTERN = """
              power< module='operator'
                trailer< '.' %(methods)s > trailer< %(obj)s > >
              |
              power< %(methods)s trailer< %(obj)s > >
              """ % dict(methods=methods, obj=obj)

    def transform(self, node, results):
        # Dispatch to the matching _<method> handler; returning None leaves
        # the node untouched.
        method = self._check_method(node, results)
        if method is not None:
            return method(node, results)

    @invocation("operator.contains(%s)")
    def _sequenceIncludes(self, node, results):
        return self._handle_rename(node, results, u"contains")

    @invocation("hasattr(%s, '__call__')")
    def _isCallable(self, node, results):
        # operator.isCallable(obj) -> hasattr(obj, '__call__')
        obj = results["obj"]
        args = [obj.clone(), String(u", "), String(u"'__call__'")]
        return Call(Name(u"hasattr"), args, prefix=node.prefix)

    @invocation("operator.mul(%s)")
    def _repeat(self, node, results):
        return self._handle_rename(node, results, u"mul")

    @invocation("operator.imul(%s)")
    def _irepeat(self, node, results):
        return self._handle_rename(node, results, u"imul")

    @invocation("isinstance(%s, collections.Sequence)")
    def _isSequenceType(self, node, results):
        return self._handle_type2abc(node, results, u"collections", u"Sequence")

    @invocation("isinstance(%s, collections.Mapping)")
    def _isMappingType(self, node, results):
        return self._handle_type2abc(node, results, u"collections", u"Mapping")

    @invocation("isinstance(%s, numbers.Number)")
    def _isNumberType(self, node, results):
        return self._handle_type2abc(node, results, u"numbers", u"Number")

    def _handle_rename(self, node, results, name):
        # In-place rename of the matched method leaf (e.g. repeat -> mul).
        method = results["method"][0]
        method.value = name
        method.changed()

    def _handle_type2abc(self, node, results, module, abc):
        # Replace the call with isinstance(obj, module.abc), adding the
        # needed import to the file being fixed.
        touch_import(None, module, node)
        obj = results["obj"]
        args = [obj.clone(), String(u", " + u".".join([module, abc]))]
        return Call(Name(u"isinstance"), args, prefix=node.prefix)

    def _check_method(self, node, results):
        # Resolve the handler by method name. When the bare (unqualified)
        # form matched, do not rewrite -- emit a warning suggesting the
        # handler's tagged invocation string instead.
        method = getattr(self, "_" + results["method"][0].value.encode("ascii"))
        if callable(method):
            if "module" in results:
                return method
            else:
                sub = (unicode(results["obj"]),)
                invocation_str = unicode(method.invocation) % sub
                self.warning(node, u"You should use '%s' here." % invocation_str)
        return None
|
dwf/numpy
|
refs/heads/master
|
numpy/core/tests/test_indexerrors.py
|
7
|
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_raises, assert_equal, assert_
import sys
class TestIndexErrors(TestCase):
    '''Tests to exercise indexerrors not covered by other tests.'''

    def test_arraytypes_fasttake(self):
        'take from a 0-length dimension'
        x = np.empty((2, 3, 0, 4))
        # Any index into the empty axis is out of bounds.
        assert_raises(IndexError, x.take, [0], axis=2)
        assert_raises(IndexError, x.take, [1], axis=2)

    def test_take_from_object(self):
        # Check exception taking from object array
        d = np.zeros(5, dtype=object)
        assert_raises(IndexError, d.take, [6])

        # Check exception taking from 0-d array
        d = np.zeros((5, 0), dtype=object)
        assert_raises(IndexError, d.take, [1], axis=1)
        assert_raises(IndexError, d.take, [0], axis=1)
        assert_raises(IndexError, d.take, [0])

    def test_multiindex_exceptions(self):
        # item()/itemset() with out-of-bounds flat and multi-indices.
        a = np.empty(5, dtype=object)
        assert_raises(IndexError, a.item, 20)
        a = np.empty((5, 0), dtype=object)
        assert_raises(IndexError, a.item, (0, 0))

        a = np.empty(5, dtype=object)
        assert_raises(IndexError, a.itemset, 20, 0)
        a = np.empty((5, 0), dtype=object)
        assert_raises(IndexError, a.itemset, (0, 0), 0)

    def test_put_exceptions(self):
        # put() with a flat index past the end, for regular, object and
        # zero-sized arrays.
        a = np.zeros((5, 5))
        assert_raises(IndexError, a.put, 100, 0)
        a = np.zeros((5, 5), dtype=object)
        assert_raises(IndexError, a.put, 100, 0)
        a = np.zeros((5, 5, 0))
        assert_raises(IndexError, a.put, 100, 0)
        a = np.zeros((5, 5, 0), dtype=object)
        assert_raises(IndexError, a.put, 100, 0)

    def test_iterators_exceptions(self):
        "cases in iterators.c"
        def assign(obj, ind, val):
            obj[ind] = val

        # Out-of-bounds multi-indices, with and without None (newaxis).
        a = np.zeros([1, 2, 3])
        assert_raises(IndexError, lambda: a[0, 5, None, 2])
        assert_raises(IndexError, lambda: a[0, 5, 0, 2])
        assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
        assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1))

        a = np.zeros([1, 0, 3])
        assert_raises(IndexError, lambda: a[0, 0, None, 2])
        assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))

        # flat-iterator indexing with int, 0-d array and 1-element array
        # indices, on both regular and zero-sized arrays.
        a = np.zeros([1, 2, 3])
        assert_raises(IndexError, lambda: a.flat[10])
        assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
        a = np.zeros([1, 0, 3])
        assert_raises(IndexError, lambda: a.flat[10])
        assert_raises(IndexError, lambda: assign(a.flat, 10, 5))

        a = np.zeros([1, 2, 3])
        assert_raises(IndexError, lambda: a.flat[np.array(10)])
        assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
        a = np.zeros([1, 0, 3])
        assert_raises(IndexError, lambda: a.flat[np.array(10)])
        assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))

        a = np.zeros([1, 2, 3])
        assert_raises(IndexError, lambda: a.flat[np.array([10])])
        assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
        a = np.zeros([1, 0, 3])
        assert_raises(IndexError, lambda: a.flat[np.array([10])])
        assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))

    def test_mapping(self):
        "cases from mapping.c"
        def assign(obj, ind, val):
            obj[ind] = val

        a = np.zeros((0, 10))
        assert_raises(IndexError, lambda: a[12])

        a = np.zeros((3, 5))
        assert_raises(IndexError, lambda: a[(10, 20)])
        assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
        a = np.zeros((3, 0))
        assert_raises(IndexError, lambda: a[(1, 0)])
        assert_raises(IndexError, lambda: assign(a, (1, 0), 1))

        a = np.zeros((10,))
        assert_raises(IndexError, lambda: assign(a, 10, 1))
        a = np.zeros((0,))
        assert_raises(IndexError, lambda: assign(a, 10, 1))

        # Fancy index containing an out-of-bounds entry.
        a = np.zeros((3, 5))
        assert_raises(IndexError, lambda: a[(1, [1, 20])])
        assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
        a = np.zeros((3, 0))
        assert_raises(IndexError, lambda: a[(1, [0, 1])])
        assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))

    def test_methods(self):
        "cases from methods.c"
        a = np.zeros((3, 3))
        assert_raises(IndexError, lambda: a.item(100))
        assert_raises(IndexError, lambda: a.itemset(100, 1))
        a = np.zeros((0, 3))
        assert_raises(IndexError, lambda: a.item(100))
        assert_raises(IndexError, lambda: a.itemset(100, 1))

if __name__ == "__main__":
    run_module_suite()
|
rancherio/cattle-cli
|
refs/heads/master
|
cattle.py
|
2
|
#!/usr/bin/env python
import time
import gdapi
import os
from gdapi import * # NOQA
DEFAULT_TIMEOUT = 45
class Client(gdapi.Client):
    """gdapi client extended with helpers that block until an object has
    finished its state transition."""

    def __init__(self, *args, **kw):
        super(Client, self).__init__(*args, **kw)

    def wait_success(self, obj, timeout=-1):
        """Wait for *obj* to stop transitioning; raise ClientApiError if the
        transition ended in anything other than success."""
        obj = self.wait_transitioning(obj, timeout)
        if obj.transitioning != 'no':
            raise gdapi.ClientApiError(obj.transitioningMessage)
        return obj

    def wait_transitioning(self, obj, timeout=-1, sleep=0.01):
        """Poll *obj* until it stops transitioning, with exponential backoff
        capped at 2 seconds between reloads.

        :param timeout: seconds to wait; -1 means the module default.
        :raises Exception: when the timeout elapses first.
        """
        timeout = _get_timeout(timeout)
        started = time.time()
        obj = self.reload(obj)
        while obj.transitioning == 'yes':
            time.sleep(sleep)
            sleep = min(sleep * 2, 2)
            obj = self.reload(obj)
            elapsed = time.time() - started
            if elapsed > timeout:
                raise Exception('Timeout waiting for [{}:{}] to be done after {} seconds'.format(obj.type, obj.id, elapsed))
        return obj
def _get_timeout(timeout):
    """Map the -1 sentinel to the module-wide default timeout."""
    return DEFAULT_TIMEOUT if timeout == -1 else timeout
def from_env(prefix='CATTLE_', **kw):
    """Build a Client from ``CATTLE_*`` environment variables."""
    return gdapi.from_env(prefix=prefix, factory=Client, **kw)

def _main():
    # Fall back to a local cattle server when no URL is configured.
    os.environ.setdefault('CATTLE_URL', 'http://localhost:8080/v1')
    gdapi._main()

if __name__ == '__main__':
    _main()
|
lovexiaov/SandwichApp
|
refs/heads/master
|
.eggs/py2app-0.9-py2.7.egg/py2app/bootstrap/virtualenv_site_packages.py
|
10
|
def _site_packages(prefix, real_prefix, global_site_packages):
    """Register a virtualenv's site-packages directories inside a py2app bundle.

    Adds the virtualenv's site-packages (and, for framework builds, the
    per-user ``~/Library/Python`` site-packages) to sys.path via
    ``site.addsitedir``, optionally followed by the base interpreter's global
    site-packages.

    :param prefix: sys.prefix of the virtual environment.
    :param real_prefix: prefix of the interpreter the virtualenv was made from.
    :param global_site_packages: when true, also add the global site-packages
        (placed after the virtualenv's, matching virtualenv's own behavior).
    """
    import site, sys, os
    # Fix: dropped the dead local `prefixes = [sys.prefix]` -- it was assigned
    # and never used.
    paths = []
    paths.append(os.path.join(prefix, 'lib', 'python' + sys.version[:3],
        'site-packages'))
    if os.path.join('.framework', '') in os.path.join(prefix, ''):
        # Framework builds also search the per-user site-packages.
        home = os.environ.get('HOME')
        if home:
            paths.append(os.path.join(home, 'Library', 'Python',
                sys.version[:3], 'site-packages'))

    # Work around for a misfeature in setuptools: easy_install.pth places
    # site-packages way to early on sys.path and that breaks py2app bundles.
    # NOTE: this is hacks into an undocumented feature of setuptools and
    # might stop to work without warning.
    sys.__egginsert = len(sys.path)

    for path in paths:
        site.addsitedir(path)

    # Ensure that the global site packages get placed on sys.path after
    # the site packages from the virtual environment (this functionality
    # is also in virtualenv)
    sys.__egginsert = len(sys.path)

    if global_site_packages:
        site.addsitedir(os.path.join(real_prefix, 'lib', 'python' + sys.version[:3],
            'site-packages'))
|
loafbaker/django_ecommerce2
|
refs/heads/master
|
orders/__init__.py
|
2
|
# Point Django at the app's custom AppConfig (the pre-Django-3.2 convention).
default_app_config = 'orders.apps.OrdersConfig'
|
iceman1989/Check_mk
|
refs/heads/master
|
doc/treasures/Event_Console/message_to_syslog.py
|
6
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# This Script enables the sending of messages to a upd syslog server
# like the integrated syslogserver of mkeventd.
#
# Bastian Kuhn, bk@mathias-kettner.de
import time
import socket
import sys

# Usage check: five positional arguments are required.
if len(sys.argv) < 6:
    print 'This script sends a message via upd to a syslogserver'
    print 'Usage: %s SYSLOGSERVER HOSTNAME PRIO APPLICATION "MESSAGE"' % sys.argv[0]
    sys.exit()

host = sys.argv[1]          # syslog server to send to
event_host = sys.argv[2]    # hostname reported inside the syslog line
prio = sys.argv[3]          # syslog priority value (the <PRI> field)
application = sys.argv[4]   # application/tag name
message = sys.argv[5]       # message text
port = 514                  # standard syslog UDP port

# Send one BSD-syslog-style line ("<PRI>TIMESTAMP HOST TAG: MESSAGE") over UDP.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((host, port))
timestamp = time.strftime("%b %d %H:%M:%S", time.localtime(time.time()))
sock.send("<%s>%s %s %s: %s\n" % (prio, timestamp, event_host, application, message))
sock.close()
|
odootr/odoo
|
refs/heads/8.0
|
addons/point_of_sale/report/__init__.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pos_users_product
import account_statement
import pos_receipt
import pos_invoice
import pos_lines
import pos_details
import pos_payment_report
import pos_report
import pos_order_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
smlng/RIOT
|
refs/heads/master
|
tests/gnrc_ipv6_nib_6ln/tests/01-run.py
|
59
|
#!/usr/bin/env python3
# Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
# Copyright (C) 2016 Takuo Yonezawa <Yonezawa-T2@mail.dnp.co.jp>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
    """Expect the application under test to report that all tests passed."""
    child.expect(r"OK \(\d+ tests\)")

if __name__ == "__main__":
    sys.exit(run(testfunc))
|
jernsthausen/datesplitter
|
refs/heads/master
|
lib/python2.7/site-packages/setuptools/tests/test_find_packages.py
|
151
|
"""Tests for setuptools.find_packages()."""
import os
import sys
import shutil
import tempfile
import platform
import pytest
import setuptools
from setuptools import find_packages
find_420_packages = setuptools.PEP420PackageFinder.find
# modeled after CPython's test.support.can_symlink
# modeled after CPython's test.support.can_symlink
def can_symlink():
    """Return True when the OS supports symlinks (probed once, then cached)."""
    probe_target = tempfile.mktemp()
    probe_link = probe_target + "can_symlink"
    try:
        os.symlink(probe_target, probe_link)
    except (OSError, NotImplementedError, AttributeError):
        result = False
    else:
        result = True
        os.remove(probe_link)
    # Cache the answer so later calls skip the filesystem probe.
    globals().update(can_symlink=lambda: result)
    return result

def has_symlink():
    """True when symlinks work and are not known-broken on this platform."""
    bad_symlink = (
        # Windows symlink directory detection is broken on Python 3.2
        platform.system() == 'Windows' and sys.version_info[:2] == (3, 2)
    )
    return can_symlink() and not bad_symlink
class TestFindPackages:
    """Tests for setuptools.find_packages() and the PEP 420 finder, run
    against a scratch package tree built per test."""

    def setup_method(self, method):
        # Fresh temporary project tree for every test.
        self.dist_dir = tempfile.mkdtemp()
        self._make_pkg_structure()

    def teardown_method(self, method):
        shutil.rmtree(self.dist_dir)

    def _make_pkg_structure(self):
        """Make basic package structure.

        dist/
            docs/
                conf.py
            pkg/
                __pycache__/
                nspkg/
                    mod.py
                subpkg/
                    assets/
                        asset
                    __init__.py
            setup.py
        """
        self.docs_dir = self._mkdir('docs', self.dist_dir)
        self._touch('conf.py', self.docs_dir)
        self.pkg_dir = self._mkdir('pkg', self.dist_dir)
        self._mkdir('__pycache__', self.pkg_dir)
        self.ns_pkg_dir = self._mkdir('nspkg', self.pkg_dir)
        self._touch('mod.py', self.ns_pkg_dir)
        self.sub_pkg_dir = self._mkdir('subpkg', self.pkg_dir)
        self.asset_dir = self._mkdir('assets', self.sub_pkg_dir)
        self._touch('asset', self.asset_dir)
        self._touch('__init__.py', self.sub_pkg_dir)
        self._touch('setup.py', self.dist_dir)

    def _mkdir(self, path, parent_dir=None):
        # Create (and return the path of) a directory, optionally nested.
        if parent_dir:
            path = os.path.join(parent_dir, path)
        os.mkdir(path)
        return path

    def _touch(self, path, dir_=None):
        # Create an empty file and return its path.
        if dir_:
            path = os.path.join(dir_, path)
        fp = open(path, 'w')
        fp.close()
        return path

    def test_regular_package(self):
        self._touch('__init__.py', self.pkg_dir)
        packages = find_packages(self.dist_dir)
        assert packages == ['pkg', 'pkg.subpkg']

    def test_exclude(self):
        self._touch('__init__.py', self.pkg_dir)
        packages = find_packages(self.dist_dir, exclude=('pkg.*',))
        assert packages == ['pkg']

    def test_include_excludes_other(self):
        """
        If include is specified, other packages should be excluded.
        """
        self._touch('__init__.py', self.pkg_dir)
        alt_dir = self._mkdir('other_pkg', self.dist_dir)
        self._touch('__init__.py', alt_dir)
        packages = find_packages(self.dist_dir, include=['other_pkg'])
        assert packages == ['other_pkg']

    def test_dir_with_dot_is_skipped(self):
        # A directory with a dot in its name cannot be a package.
        shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
        data_dir = self._mkdir('some.data', self.pkg_dir)
        self._touch('__init__.py', data_dir)
        self._touch('file.dat', data_dir)
        packages = find_packages(self.dist_dir)
        assert 'pkg.some.data' not in packages

    def test_dir_with_packages_in_subdir_is_excluded(self):
        """
        Ensure that a package in a non-package such as build/pkg/__init__.py
        is excluded.
        """
        build_dir = self._mkdir('build', self.dist_dir)
        build_pkg_dir = self._mkdir('pkg', build_dir)
        self._touch('__init__.py', build_pkg_dir)
        packages = find_packages(self.dist_dir)
        assert 'build.pkg' not in packages

    @pytest.mark.skipif(not has_symlink(), reason='Symlink support required')
    def test_symlinked_packages_are_included(self):
        """
        A symbolically-linked directory should be treated like any other
        directory when matched as a package.

        Create a link from lpkg -> pkg.
        """
        self._touch('__init__.py', self.pkg_dir)
        linked_pkg = os.path.join(self.dist_dir, 'lpkg')
        os.symlink('pkg', linked_pkg)
        assert os.path.isdir(linked_pkg)
        packages = find_packages(self.dist_dir)
        assert 'lpkg' in packages

    def _assert_packages(self, actual, expected):
        # Order-insensitive comparison of package lists.
        assert set(actual) == set(expected)

    def test_pep420_ns_package(self):
        packages = find_420_packages(
            self.dist_dir, include=['pkg*'], exclude=['pkg.subpkg.assets'])
        self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])

    def test_pep420_ns_package_no_includes(self):
        packages = find_420_packages(
            self.dist_dir, exclude=['pkg.subpkg.assets'])
        self._assert_packages(packages, ['docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg'])

    def test_pep420_ns_package_no_includes_or_excludes(self):
        packages = find_420_packages(self.dist_dir)
        expected = [
            'docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg', 'pkg.subpkg.assets']
        self._assert_packages(packages, expected)

    def test_regular_package_with_nested_pep420_ns_packages(self):
        self._touch('__init__.py', self.pkg_dir)
        packages = find_420_packages(
            self.dist_dir, exclude=['docs', 'pkg.subpkg.assets'])
        self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])

    def test_pep420_ns_package_no_non_package_dirs(self):
        shutil.rmtree(self.docs_dir)
        shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
        packages = find_420_packages(self.dist_dir)
        self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
|
tiborsimko/invenio-webhooks
|
refs/heads/master
|
invenio_webhooks/errors.py
|
2
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Webhook errors."""
from __future__ import absolute_import
class WebhooksError(Exception):
"""General webhook error."""
class ReceiverDoesNotExist(WebhooksError):
"""Raised when receiver does not exist."""
class InvalidPayload(WebhooksError):
"""Raised when the payload is invalid."""
class InvalidSignature(WebhooksError):
"""Raised when the signature does not match."""
|
guorendong/iridium-browser-ubuntu
|
refs/heads/ubuntu/precise
|
third_party/trace-viewer/third_party/tvcm/third_party/rjsmin/bench/main.py
|
12
|
#!/usr/bin/env python
# -*- coding: ascii -*-
r"""
=================================
Benchmark jsmin implementations
=================================
Benchmark jsmin implementations.
:Copyright:
Copyright 2011 - 2014
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Usage::
python -mbench.main [-c COUNT] [-p file] jsfile ...
-c COUNT number of runs per jsfile and minifier. Defaults to 10.
-p file File to write the benchmark results in (pickled)
"""
# The docstring and author contain escaped non-ASCII (e.g. \xe9); decode the
# escapes so the displayed strings carry the real characters.
if __doc__:
    __doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = "1.0.0"
import sys as _sys
import time as _time
# Notes about minifiers that could not be loaded (reported in the results).
import_notes = []

class jsmins(object):
    # Not a real class: a namespace gathering every importable jsmin
    # implementation as attributes named ``p_NN_<name>``. bench() discovers
    # them via dir() by the ``p_`` prefix; each exposes a ``.jsmin`` callable.
    from bench import jsmin as p_01_simple_port
    if _sys.version_info >= (2, 4):
        from bench import jsmin_2_0_9 as p_02_jsmin_2_0_9
    else:
        import_notes.append(
            "jsmin_2_0_9 available for python 2.4 and later..."
        )
        print(import_notes[-1])
    try:
        import slimit as _slimit_0_8_1
    except (ImportError, SyntaxError):
        import_notes.append("slimit_0_8_1 could not be imported")
        print(import_notes[-1])
    else:
        # Wrap slimit's minify in holder objects exposing the common
        # ``.jsmin`` attribute (plain and name-mangling variants).
        class p_03_slimit_0_8_1(object):
            pass
        p_03_slimit_0_8_1 = p_03_slimit_0_8_1()
        p_03_slimit_0_8_1.jsmin = _slimit_0_8_1.minify

        class p_04_slimit_0_8_1_mangle(object):
            pass
        p_04_slimit_0_8_1_mangle = p_04_slimit_0_8_1_mangle()
        p_04_slimit_0_8_1_mangle.jsmin = \
            lambda x, s=_slimit_0_8_1: s.minify(x, True)
    import rjsmin as p_05_rjsmin
    try:
        import _rjsmin as p_06__rjsmin
    except ImportError:
        import_notes.append("_rjsmin (C-Port) not available")
        print(import_notes[-1])

# Force the pure-python implementation into the p_05 slot; the C port (when
# available) is benchmarked separately as p_06.
jsmins.p_05_rjsmin.jsmin = jsmins.p_05_rjsmin._make_jsmin(
    python_only=True
)

print("Python Release: %s" % ".".join(map(str, _sys.version_info[:3])))
print("")
def slurp(filename):
    """Read and return the entire contents of *filename*."""
    with open(filename) as handle:
        return handle.read()
def print_(*value, **kwargs):
    """print()-alike for py2/py3: concatenate *value* and write it to
    ``kwargs['file']`` (default stdout), terminated by ``kwargs['end']``
    (default newline)."""
    stream = kwargs.get('file') or _sys.stdout
    stream.write(''.join(value) + kwargs.get('end', '\n'))
def bench(filenames, count):
    """
    Benchmark the minifiers with given javascript samples

    :Parameters:
      `filenames` : sequence
        List of filenames

      `count` : ``int``
        Number of runs per js file and minifier

    :Exceptions:
      - `RuntimeError` : empty filenames sequence
    """
    if not filenames:
        raise RuntimeError("Missing files to benchmark")
    # Python 2/3 compatibility shims for names removed in py3.
    try:
        xrange
    except NameError:
        xrange = range
    try:
        cmp
    except NameError:
        cmp = lambda a, b: (a > b) - (a < b)

    # Discover the minifier slots on the jsmins namespace (p_NN_<name>).
    ports = [item for item in dir(jsmins) if item.startswith('p_')]
    ports.sort()
    space = max(map(len, ports)) - 4
    ports = [(item[5:], getattr(jsmins, item).jsmin) for item in ports]
    flush = _sys.stdout.flush

    struct = []
    inputs = [(filename, slurp(filename)) for filename in filenames]
    for filename, script in inputs:
        print_("Benchmarking %r..." % filename, end=" ")
        flush()
        # First pass: run each minifier once; None marks a failure.
        outputs = []
        for _, jsmin in ports:
            try:
                outputs.append(jsmin(script))
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                outputs.append(None)
        struct.append(dict(
            filename=filename,
            sizes=[
                (item is not None and len(item) or None) for item in outputs
            ],
            size=len(script),
            messages=[],
            times=[],
        ))
        print_("(%.1f KiB)" % (struct[-1]['size'] / 1024.0,))
        flush()
        # Second pass: time each minifier that succeeded above.
        times = []
        for idx, (name, jsmin) in enumerate(ports):
            if outputs[idx] is None:
                print_("  FAILED %s" % (name,))
                struct[-1]['times'].append((name, None))
            else:
                print_("  Timing %s%s... (%5.1f KiB %s)" % (
                    name,
                    " " * (space - len(name)),
                    len(outputs[idx]) / 1024.0,
                    idx == 0 and '*' or ['=', '>', '<'][
                        cmp(len(outputs[idx]), len(outputs[0]))
                    ],
                ), end=" ")
                flush()
                # Grow the repetition count until a run takes >= 10 ms, to
                # keep the measurement above timer noise.
                xcount = count
                while True:
                    counted = [None for _ in xrange(xcount)]
                    start = _time.time()
                    for _ in counted:
                        jsmin(script)
                    end = _time.time()
                    result = (end - start) * 1000
                    if result < 10:  # avoid measuring within the error range
                        xcount *= 10
                        continue
                    times.append(result / xcount)
                    break
                print_("%8.2f ms" % times[-1], end=" ")
                flush()
                if len(times) <= 1:
                    print_()
                else:
                    # Speed factor relative to every previously timed port.
                    print_("(factor: %s)" % (', '.join([
                        '%.2f' % (timed / times[-1]) for timed in times[:-1]
                    ])))
                struct[-1]['times'].append((name, times[-1]))
        flush()
        print_()
    return struct
def main(argv=None):
    """Main entry point: parse options, run the benchmark, optionally pickle.

    Recognized options:
      -h, --help   print usage and exit
      -c COUNT     number of runs per js file and minifier (default: 10)
      -p FILE      write the pickled results into FILE

    :param argv: argument list; defaults to sys.argv[1:].
    """
    import getopt as _getopt
    import os as _os
    import pickle as _pickle

    if argv is None:
        argv = _sys.argv[1:]
    try:
        opts, args = _getopt.getopt(argv, "hc:p:", ["help"])
    except _getopt.GetoptError:
        # BUGFIX: was `except getopt.GetoptError` -- the module is imported
        # as `_getopt`, so a parse error raised NameError instead of being
        # handled. Also use the module's own print_ helper (py2/py3 safe)
        # instead of the py2-only `print >>` statement.
        e = _sys.exc_info()[0](_sys.exc_info()[1])
        print_("%s\nTry %s -mbench.main --help" % (
            e,
            _os.path.basename(_sys.executable),
        ), file=_sys.stderr)
        _sys.exit(2)

    count, pickle = 10, None
    for key, value in opts:
        if key in ("-h", "--help"):
            print_("%s -mbench.main [-c count] [-p file] cssfile ..." % (
                _os.path.basename(_sys.executable),
            ), file=_sys.stderr)
            _sys.exit(0)
        elif key == '-c':
            count = int(value)
        elif key == '-p':
            pickle = str(value)

    struct = bench(args, count)
    if pickle:
        # Protocol 0 keeps the result file ASCII / maximally portable.
        fp = open(pickle, 'wb')
        try:
            fp.write(_pickle.dumps((
                ".".join(map(str, _sys.version_info[:3])),
                import_notes,
                struct,
            ), 0))
        finally:
            fp.close()

if __name__ == '__main__':
    main()
|
viz-dev/viz
|
refs/heads/master
|
qa/rpc-tests/create_cache.py
|
97
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helper script to create the cache
# (see BitcoinTestFramework.setup_chain)
#
from test_framework.test_framework import BitcoinTestFramework
class CreateCache(BitcoinTestFramework):
    """Framework invocation whose only effect is setup_chain's cache creation."""

    def __init__(self):
        super().__init__()
        # Test network and test nodes are not required:
        self.num_nodes = 0
        self.nodes = []

    def setup_network(self):
        """No network needed -- cache creation happens in setup_chain."""
        pass

    def run_test(self):
        """Nothing to test; only the chain cache matters."""
        pass

if __name__ == '__main__':
    CreateCache().main()
|
OpenBounds/Processing
|
refs/heads/master
|
validate.py
|
1
|
#!/usr/bin/env python3
import json
import logging
import re
import sys
import click
import jsonschema
import utils
@click.command()
@click.argument("schema", type=click.File("r"), required=True)
@click.argument("jsonfiles", type=click.Path(exists=True), required=True)
def validate(schema, jsonfiles):
    """Validate a JSON files against a JSON schema.
    \b
    SCHEMA: JSON schema to validate against. Required.
    JSONFILE: JSON files to validate. Required.
    """
    schema = json.loads(schema.read())
    all_valid = True
    for path in utils.get_files(jsonfiles):
        with open(path) as handle:
            # Unparseable JSON aborts immediately; schema violations are
            # collected and reported, failing the run at the end.
            try:
                document = json.loads(handle.read())
            except ValueError:
                logging.error("Error loading json file " + path)
                raise Exception("Invalid json file")
            try:
                jsonschema.validate(document, schema)
            except Exception as exc:
                all_valid = False
                logging.error("Error validating file " + path)
                logging.error(str(exc))
    if not all_valid:
        sys.exit(-1)

if __name__ == "__main__":
    validate()
|
justyns/home-assistant
|
refs/heads/dev
|
tests/components/test_zone.py
|
12
|
"""Test zone component."""
import unittest
from homeassistant.components import zone
from tests.common import get_test_home_assistant
class TestComponentZone(unittest.TestCase):
    """Test the zone component."""

    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    def test_setup(self):
        """Test setup: zone attributes mirror the config values."""
        info = {
            'name': 'Test Zone',
            'latitude': 32.880837,
            'longitude': -117.237561,
            'radius': 250,
            'passive': True
        }
        assert zone.setup(self.hass, {
            'zone': info
        })

        state = self.hass.states.get('zone.test_zone')
        assert info['name'] == state.name
        assert info['latitude'] == state.attributes['latitude']
        assert info['longitude'] == state.attributes['longitude']
        assert info['radius'] == state.attributes['radius']
        assert info['passive'] == state.attributes['passive']

    def test_active_zone_skips_passive_zones(self):
        """Test active and passive zones."""
        # A passive zone containing the point must not be returned as active.
        assert zone.setup(self.hass, {
            'zone': [
                {
                    'name': 'Passive Zone',
                    'latitude': 32.880600,
                    'longitude': -117.237561,
                    'radius': 250,
                    'passive': True
                },
            ]
        })
        active = zone.active_zone(self.hass, 32.880600, -117.237561)
        assert active is None

        # A non-passive zone containing the point is returned.
        assert zone.setup(self.hass, {
            'zone': [
                {
                    'name': 'Active Zone',
                    'latitude': 32.880800,
                    'longitude': -117.237561,
                    'radius': 500,
                },
            ]
        })
        active = zone.active_zone(self.hass, 32.880700, -117.237561)
        assert 'zone.active_zone' == active.entity_id

    def test_active_zone_prefers_smaller_zone_if_same_distance(self):
        """Test zone size preferences."""
        latitude = 32.880600
        longitude = -117.237561
        assert zone.setup(self.hass, {
            'zone': [
                {
                    'name': 'Small Zone',
                    'latitude': latitude,
                    'longitude': longitude,
                    'radius': 250,
                },
                {
                    'name': 'Big Zone',
                    'latitude': latitude,
                    'longitude': longitude,
                    'radius': 500,
                },
            ]
        })

        active = zone.active_zone(self.hass, latitude, longitude)
        assert 'zone.small_zone' == active.entity_id

        # An even smaller zone added later wins the tie-break.
        assert zone.setup(self.hass, {
            'zone': [
                {
                    'name': 'Smallest Zone',
                    'latitude': latitude,
                    'longitude': longitude,
                    'radius': 50,
                },
            ]
        })

        active = zone.active_zone(self.hass, latitude, longitude)
        assert 'zone.smallest_zone' == active.entity_id

    def test_in_zone_works_for_passive_zones(self):
        """Test working in passive zones."""
        # in_zone (unlike active_zone) does consider passive zones.
        latitude = 32.880600
        longitude = -117.237561
        assert zone.setup(self.hass, {
            'zone': [
                {
                    'name': 'Passive Zone',
                    'latitude': latitude,
                    'longitude': longitude,
                    'radius': 250,
                    'passive': True
                },
            ]
        })

        assert zone.in_zone(self.hass.states.get('zone.passive_zone'),
                            latitude, longitude)
|
the-it/WS_THEbotIT
|
refs/heads/main
|
archive/online/2018/convert_deprecated_re_templates.py
|
1
|
# successful processed on 2018-06-22
# successful processed on 2018-06-21
import traceback
from pywikibot import Site, Page
from service.ws_re.scanner import ERROTask
from service.ws_re.template import ReDatenException
from service.ws_re.template.re_page import RePage
from tools.bots.pi import OneTimeBot
from tools.petscan import PetScan
from tools.template_finder import TemplateFinder
from tools.template_handler import TemplateHandler
class ConvertDeprecatedReTemplates(OneTimeBot):
    """One-time bot that rewrites deprecated RE templates into the plain
    "REDaten" template on German Wikisource article pages."""
    # Deprecated template names this bot searches for and rewrites.
    _templates = ("RENachtrag/Platzhalter", "RENachtrag", "REDaten/Platzhalter")
    @staticmethod
    def convert_re_nachtrag(template: str):
        """Convert a RENachtrag template into REDaten with NACHTRAG=ON."""
        template_nachtrag = TemplateHandler(template)
        template_daten = TemplateHandler()
        new_list = template_nachtrag.get_parameterlist()
        new_list.append({"key": "NACHTRAG", "value": "ON"})
        template_daten.update_parameters(new_list)
        template_daten.set_title("REDaten")
        return template_daten.get_str()
    @staticmethod
    def _gemeinfrei_todesjahr(argument_list: list):
        """Replace a GEMEINFREI argument with an equivalent TODESJAHR one.

        Mutates and returns *argument_list* in place, keeping the
        parameter's original position.
        """
        for idx, item in enumerate(argument_list):
            if item["key"] == "GEMEINFREI":
                year = item["value"]
                del argument_list[idx]
                if year:
                    try:
                        # Death year is derived as public-domain year minus 71
                        # — presumably the 70-year PD term plus one; TODO confirm.
                        argument_list.insert(idx,
                                             {"key": "TODESJAHR", "value": str(int(year) - 71)})
                    except ValueError:
                        raise ValueError("year is strange")
                else:
                    # Empty GEMEINFREI value: use the placeholder year 3333.
                    argument_list.insert(idx, {"key": "TODESJAHR", "value": "3333"})
        return argument_list
    def convert_re_platzhalter(self, template: str):
        """Convert a REDaten/Platzhalter template into plain REDaten."""
        template_platzhalter = TemplateHandler(template)
        template_daten = TemplateHandler()
        new_list = template_platzhalter.get_parameterlist()
        new_list = self._gemeinfrei_todesjahr(new_list)
        template_daten.update_parameters(new_list)
        template_daten.set_title("REDaten")
        return template_daten.get_str()
    def convert_re_nachtrag_platzhalter(self, template: str):
        """Convert RENachtrag/Platzhalter: REDaten plus NACHTRAG=ON."""
        template_platzhalter = TemplateHandler(template)
        template_daten = TemplateHandler()
        new_list = template_platzhalter.get_parameterlist()
        new_list.append({"key": "NACHTRAG", "value": "ON"})
        new_list = self._gemeinfrei_todesjahr(new_list)
        template_daten.update_parameters(new_list)
        template_daten.set_title("REDaten")
        return template_daten.get_str()
    def convert_all(self, article_text: str):
        """Rewrite every deprecated template occurrence in *article_text*.

        For each template name, repeatedly converts the first occurrence
        and splices the converted text back in place until no occurrence
        remains, then returns the fully converted article text.
        """
        for template in self._templates:
            position = TemplateFinder(article_text).get_positions(template)
            while position:
                length_article = len(article_text)
                if template == "RENachtrag/Platzhalter":
                    convert_func = self.convert_re_nachtrag_platzhalter
                elif template == "RENachtrag":
                    convert_func = self.convert_re_nachtrag
                else:
                    convert_func = self.convert_re_platzhalter
                # Character span of the first occurrence in the current text.
                start = position[0]["pos"][0]
                end = position[0]["pos"][1]
                pre_article_text = article_text
                article_text = convert_func(position[0]["text"])
                # Re-attach the unchanged text before and after the template.
                if start > 0:
                    article_text = pre_article_text[0:start] + article_text
                if end < length_article:
                    article_text = article_text + pre_article_text[end:]
                # Re-scan: positions shift after every replacement.
                position = TemplateFinder(article_text).get_positions(template)
        return article_text
    def search_pages(self):  # pragma: no cover
        """Return all main-namespace lemmas using any deprecated template."""
        searcher = PetScan()
        for template in self._templates:
            searcher.add_any_template(template)
        searcher.add_namespace(0)
        self.logger.info(str(searcher))
        lemmas = searcher.run()
        self.logger.info(f"{len(lemmas)} to process.")
        return lemmas
    def task(self):  # pragma: no cover
        """Convert and save every affected page.

        Failures are collected via ERROTask instead of aborting the run.
        Returns False when pages with deprecated templates remain (so the
        bot is considered unfinished), True otherwise.
        """
        error_task = ERROTask(wiki=self.wiki, debug=False, logger=self.logger)
        for lemma in self.search_pages():
            page = Page(self.wiki, lemma["title"])
            temp_text = page.text
            try:
                temp_text = self.convert_all(temp_text)
                page.text = temp_text
                re_page = RePage(page)
                # In debug mode nothing is written back to the wiki.
                if not self.debug:
                    re_page.save("Entfernen veralteter Vorlagen.")
            except (ReDatenException, ValueError):
                error = traceback.format_exc().splitlines()[-1]
                error_task.task(lemma["title"], error)
        error_task.finish_task()
        if self.search_pages():
            return False
        return True
if __name__ == "__main__":  # pragma: no cover
    # Run against German Wikisource; debug=True means no pages are saved.
    WIKI = Site(code="de", fam="wikisource", user="THEbotIT")
    with ConvertDeprecatedReTemplates(wiki=WIKI, debug=True) as bot:
        bot.run()
|
nickhand/nbodykit
|
refs/heads/master
|
nbodykit/source/catalog/array.py
|
1
|
from nbodykit.base.catalog import CatalogSource
from nbodykit.utils import is_structured_array
from nbodykit import CurrentMPIComm
from astropy.table import Table
import numpy
class ArrayCatalog(CatalogSource):
    """
    A CatalogSource initialized from an in-memory :obj:`dict`,
    structured :class:`numpy.ndarray`, or :class:`astropy.table.Table`.
    Parameters
    ----------
    data : obj:`dict`, :class:`numpy.ndarray`, :class:`astropy.table.Table`
        a dictionary, structured ndarray, or astropy Table; items are
        interpreted as the columns of the catalog; the length of any item is
        used as the size of the catalog.
    comm : MPI Communicator, optional
        the MPI communicator instance; default (``None``) sets to the
        current communicator
    **kwargs :
        additional keywords to store as meta-data in :attr:`attrs`
    """
    @CurrentMPIComm.enable
    def __init__(self, data, comm=None, **kwargs):
        # convert astropy Tables to structured numpy arrays
        if isinstance(data, Table):
            data = data.as_array()
        # check for structured data
        if not isinstance(data, dict):
            if not is_structured_array(data):
                raise ValueError(("input data to ArrayCatalog must have a "
                                  "structured data type with fields"))
        # NOTE: comm must be assigned before the collective gather below.
        self.comm = comm
        self._source = data
        # compute the data type
        # (sorted field names, so the dtype is identical on every rank)
        if hasattr(data, 'dtype'):
            keys = sorted(data.dtype.names)
        else:
            keys = sorted(data.keys())
        dtype = numpy.dtype([(key, (data[key].dtype, data[key].shape[1:])) for key in keys])
        self._dtype = dtype
        # verify data types are the same
        # (collective call: every rank sends its dtype; rank 0 compares)
        dtypes = self.comm.gather(dtype, root=0)
        if self.comm.rank == 0:
            if any(dt != dtypes[0] for dt in dtypes):
                raise ValueError("mismatch between dtypes across ranks in Array")
        # the local size
        # (every column must have the same length as the first one)
        self._size = len(self._source[keys[0]])
        for key in keys:
            if len(self._source[key]) != self._size:
                raise ValueError("column `%s` and column `%s` has different size" % (keys[0], key))
        # update the meta-data
        self.attrs.update(kwargs)
        CatalogSource.__init__(self, comm=comm)
    @property
    def hardcolumns(self):
        """
        The union of the columns in the file and any transformed columns.
        """
        defaults = CatalogSource.hardcolumns.fget(self)
        return list(self._dtype.names) + defaults
    def get_hardcolumn(self, col):
        """
        Return a column from the underlying data array/dict.
        Columns are returned as dask arrays.
        """
        if col in self._dtype.names:
            return self.make_column(self._source[col])
        else:
            # fall back to default/transformed columns defined on the base class
            return CatalogSource.get_hardcolumn(self, col)
|
GunoH/intellij-community
|
refs/heads/master
|
python/helpers/py2only/docutils/utils/math/math2html.py
|
106
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# math2html: convert LaTeX equations to HTML output.
#
# Copyright (C) 2009-2011 Alex Fernández
#
# Released under the terms of the `2-Clause BSD license'_, in short:
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
# Based on eLyXer: convert LyX source files to HTML output.
# http://elyxer.nongnu.org/
# --end--
# Alex 20101110
# eLyXer standalone formula conversion to HTML.
import sys
class Trace(object):
  "A tracing helper: leveled messages to stdout/stderr."

  # Global output switches shared by all call sites.
  debugmode = False
  quietmode = False
  showlinesmode = False

  # Optional text prepended to messages when showlinesmode is on.
  prefix = None

  @classmethod
  def debug(cls, message):
    "Show a debug message (only in debug mode and when not quiet)."
    if not Trace.debugmode or Trace.quietmode:
      return
    Trace.show(message, sys.stdout)

  @classmethod
  def message(cls, message):
    "Show a trace message unless in quiet mode."
    if Trace.quietmode:
      return
    if Trace.prefix and Trace.showlinesmode:
      message = Trace.prefix + message
    Trace.show(message, sys.stdout)

  @classmethod
  def error(cls, message):
    "Show an error message on stderr, marked with '* '."
    message = '* ' + message
    if Trace.prefix and Trace.showlinesmode:
      message = Trace.prefix + message
    Trace.show(message, sys.stderr)

  @classmethod
  def fatal(cls, message):
    "Show an error message and terminate."
    Trace.error('FATAL: ' + message)
    # sys.exit instead of the site-provided exit() builtin: exit() is only
    # guaranteed to exist when the site module is loaded.
    sys.exit(-1)

  @classmethod
  def show(cls, message, channel):
    "Write a message plus a trailing newline to the given channel."
    if sys.version_info < (3, 0):
      # Python 2 file objects expect encoded bytes.
      message = message.encode('utf-8')
    channel.write(message + '\n')
import os.path
import sys
class BibStylesConfig(object):
  "Configuration class from elyxer.config file"

  # One dictionary per bibliography style.  Keys are BibTeX entry types
  # (u'@article', ...) plus u'cite' (inline citation form) and u'default'
  # (fallback for unknown entry types); values are output templates with
  # $-placeholders substituted from the BibTeX fields.
  abbrvnat = {
      u'@article':u'$authors. $title. <i>$journal</i>,{ {$volume:}$pages,} $month $year.{ doi: $doi.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'cite':u'$surname($year)',
      u'default':u'$authors. <i>$title</i>. $publisher, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  alpha = {
      u'@article':u'$authors. $title.{ <i>$journal</i>{, {$volume}{($number)}}{: $pages}{, $year}.}{ <a href="$url">$url</a>.}{ <a href="$filename">$filename</a>.}{ $note.}',
      u'cite':u'$Sur$YY',
      u'default':u'$authors. $title.{ <i>$journal</i>,} $year.{ <a href="$url">$url</a>.}{ <a href="$filename">$filename</a>.}{ $note.}',
      }
  authordate2 = {
      u'@article':u'$authors. $year. $title. <i>$journal</i>, <b>$volume</b>($number), $pages.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@book':u'$authors. $year. <i>$title</i>. $publisher.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'cite':u'$surname, $year',
      u'default':u'$authors. $year. <i>$title</i>. $publisher.{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  default = {
      u'@article':u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@book':u'{$authors: }<i>$title</i>{ ($editor, ed.)}.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@booklet':u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@conference':u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@inbook':u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@incollection':u'$authors: <i>$title</i>{ in <i>$booktitle</i>{ ($editor, ed.)}}.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@inproceedings':u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@manual':u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@mastersthesis':u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@misc':u'$authors: <i>$title</i>.{{ $publisher,}{ $howpublished,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@phdthesis':u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@proceedings':u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@techreport':u'$authors: <i>$title</i>, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@unpublished':u'$authors: “$title”, <i>$journal</i>, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'cite':u'$index',
      u'default':u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  # Fallback values used when a tag is missing from an entry.
  defaulttags = {
      u'YY':u'??', u'authors':u'', u'surname':u'',
      }
  ieeetr = {
      u'@article':u'$authors, “$title”, <i>$journal</i>, vol. $volume, no. $number, pp. $pages, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@book':u'$authors, <i>$title</i>. $publisher, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'cite':u'$index',
      u'default':u'$authors, “$title”. $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  plain = {
      u'@article':u'$authors. $title.{ <i>$journal</i>{, {$volume}{($number)}}{:$pages}{, $year}.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@book':u'$authors. <i>$title</i>. $publisher,{ $month} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@incollection':u'$authors. $title.{ In <i>$booktitle</i> {($editor, ed.)}.} $publisher,{ $month} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      u'@inproceedings':u'$authors. $title. { <i>$booktitle</i>{, {$volume}{($number)}}{:$pages}{, $year}.}{ URL <a href="$url">$url</a>.}{ $note.}',
      u'cite':u'$index',
      u'default':u'{$authors. }$title.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  vancouver = {
      u'@article':u'$authors. $title. <i>$journal</i>, $year{;{<b>$volume</b>}{($number)}{:$pages}}.{ URL: <a href="$url">$url</a>.}{ $note.}',
      u'@book':u'$authors. $title. {$publisher, }$year.{ URL: <a href="$url">$url</a>.}{ $note.}',
      u'cite':u'$index',
      u'default':u'$authors. $title; {$publisher, }$year.{ $howpublished.}{ URL: <a href="$url">$url</a>.}{ $note.}',
      }
class BibTeXConfig(object):
  "Configuration class from elyxer.config file"

  # Literal textual replacements applied to BibTeX field contents
  # (e.g. TeX double dash to em dash).
  replaced = {
      u'--':u'—', u'..':u'.',
      }
class ContainerConfig(object):
  "Configuration class from elyxer.config file"

  # For each container type, the token that closes it in the LyX source.
  endings = {
      u'Align':u'\\end_layout', u'BarredText':u'\\bar',
      u'BoldText':u'\\series', u'Cell':u'</cell',
      u'ChangeDeleted':u'\\change_unchanged',
      u'ChangeInserted':u'\\change_unchanged', u'ColorText':u'\\color',
      u'EmphaticText':u'\\emph', u'Hfill':u'\\hfill', u'Inset':u'\\end_inset',
      u'Layout':u'\\end_layout', u'LyXFooter':u'\\end_document',
      u'LyXHeader':u'\\end_header', u'Row':u'</row', u'ShapedText':u'\\shape',
      u'SizeText':u'\\size', u'StrikeOut':u'\\strikeout',
      u'TextFamily':u'\\family', u'VersalitasText':u'\\noun',
      }
  # Container classes relevant for extracting plain text: which ones are
  # allowed as-is, cloned, or recursed into.
  extracttext = {
      u'allowed':[u'StringContainer',u'Constant',u'FormulaConstant',],
      u'cloned':[u'',],
      u'extracted':[u'PlainLayout',u'TaggedText',u'Align',u'Caption',u'TextFamily',u'EmphaticText',u'VersalitasText',u'BarredText',u'SizeText',u'ColorText',u'LangLine',u'Formula',u'Bracket',u'RawText',u'BibTag',u'FormulaNumber',u'AlphaCommand',u'EmptyCommand',u'OneParamFunction',u'SymbolFunction',u'TextFunction',u'FontFunction',u'CombiningFunction',u'DecoratingFunction',u'FormulaSymbol',u'BracketCommand',u'TeXCode',],
      }
  # For each opening command, the command that ends the corresponding block.
  startendings = {
      u'\\begin_deeper':u'\\end_deeper', u'\\begin_inset':u'\\end_inset',
      u'\\begin_layout':u'\\end_layout',
      }
  # Map from a line prefix in the LyX source to the name of the parser
  # container class that handles it.
  starts = {
      u'':u'StringContainer', u'#LyX':u'BlackBox', u'</lyxtabular':u'BlackBox',
      u'<cell':u'Cell', u'<column':u'Column', u'<row':u'Row',
      u'\\align':u'Align', u'\\bar':u'BarredText',
      u'\\bar default':u'BlackBox', u'\\bar no':u'BlackBox',
      u'\\begin_body':u'BlackBox', u'\\begin_deeper':u'DeeperList',
      u'\\begin_document':u'BlackBox', u'\\begin_header':u'LyXHeader',
      u'\\begin_inset Argument':u'ShortTitle',
      u'\\begin_inset Box':u'BoxInset', u'\\begin_inset Branch':u'Branch',
      u'\\begin_inset Caption':u'Caption',
      u'\\begin_inset CommandInset bibitem':u'BiblioEntry',
      u'\\begin_inset CommandInset bibtex':u'BibTeX',
      u'\\begin_inset CommandInset citation':u'BiblioCitation',
      u'\\begin_inset CommandInset href':u'URL',
      u'\\begin_inset CommandInset include':u'IncludeInset',
      u'\\begin_inset CommandInset index_print':u'PrintIndex',
      u'\\begin_inset CommandInset label':u'Label',
      u'\\begin_inset CommandInset line':u'LineInset',
      u'\\begin_inset CommandInset nomencl_print':u'PrintNomenclature',
      u'\\begin_inset CommandInset nomenclature':u'NomenclatureEntry',
      u'\\begin_inset CommandInset ref':u'Reference',
      u'\\begin_inset CommandInset toc':u'TableOfContents',
      u'\\begin_inset ERT':u'ERT', u'\\begin_inset Flex':u'FlexInset',
      u'\\begin_inset Flex Chunkref':u'NewfangledChunkRef',
      u'\\begin_inset Flex Marginnote':u'SideNote',
      u'\\begin_inset Flex Sidenote':u'SideNote',
      u'\\begin_inset Flex URL':u'FlexURL', u'\\begin_inset Float':u'Float',
      u'\\begin_inset FloatList':u'ListOf', u'\\begin_inset Foot':u'Footnote',
      u'\\begin_inset Formula':u'Formula',
      u'\\begin_inset FormulaMacro':u'FormulaMacro',
      u'\\begin_inset Graphics':u'Image',
      u'\\begin_inset Index':u'IndexReference',
      u'\\begin_inset Info':u'InfoInset',
      u'\\begin_inset LatexCommand bibitem':u'BiblioEntry',
      u'\\begin_inset LatexCommand bibtex':u'BibTeX',
      u'\\begin_inset LatexCommand cite':u'BiblioCitation',
      u'\\begin_inset LatexCommand citealt':u'BiblioCitation',
      u'\\begin_inset LatexCommand citep':u'BiblioCitation',
      u'\\begin_inset LatexCommand citet':u'BiblioCitation',
      u'\\begin_inset LatexCommand htmlurl':u'URL',
      u'\\begin_inset LatexCommand index':u'IndexReference',
      u'\\begin_inset LatexCommand label':u'Label',
      u'\\begin_inset LatexCommand nomenclature':u'NomenclatureEntry',
      u'\\begin_inset LatexCommand prettyref':u'Reference',
      u'\\begin_inset LatexCommand printindex':u'PrintIndex',
      u'\\begin_inset LatexCommand printnomenclature':u'PrintNomenclature',
      u'\\begin_inset LatexCommand ref':u'Reference',
      u'\\begin_inset LatexCommand tableofcontents':u'TableOfContents',
      u'\\begin_inset LatexCommand url':u'URL',
      u'\\begin_inset LatexCommand vref':u'Reference',
      u'\\begin_inset Marginal':u'SideNote',
      u'\\begin_inset Newline':u'NewlineInset',
      u'\\begin_inset Newpage':u'NewPageInset', u'\\begin_inset Note':u'Note',
      u'\\begin_inset OptArg':u'ShortTitle',
      u'\\begin_inset Phantom':u'PhantomText',
      u'\\begin_inset Quotes':u'QuoteContainer',
      u'\\begin_inset Tabular':u'Table', u'\\begin_inset Text':u'InsetText',
      u'\\begin_inset VSpace':u'VerticalSpace', u'\\begin_inset Wrap':u'Wrap',
      u'\\begin_inset listings':u'Listing', u'\\begin_inset space':u'Space',
      u'\\begin_layout':u'Layout', u'\\begin_layout Abstract':u'Abstract',
      u'\\begin_layout Author':u'Author',
      u'\\begin_layout Bibliography':u'Bibliography',
      u'\\begin_layout Chunk':u'NewfangledChunk',
      u'\\begin_layout Description':u'Description',
      u'\\begin_layout Enumerate':u'ListItem',
      u'\\begin_layout Itemize':u'ListItem', u'\\begin_layout List':u'List',
      u'\\begin_layout LyX-Code':u'LyXCode',
      u'\\begin_layout Plain':u'PlainLayout',
      u'\\begin_layout Standard':u'StandardLayout',
      u'\\begin_layout Title':u'Title', u'\\begin_preamble':u'LyXPreamble',
      u'\\change_deleted':u'ChangeDeleted',
      u'\\change_inserted':u'ChangeInserted',
      u'\\change_unchanged':u'BlackBox', u'\\color':u'ColorText',
      u'\\color inherit':u'BlackBox', u'\\color none':u'BlackBox',
      u'\\emph default':u'BlackBox', u'\\emph off':u'BlackBox',
      u'\\emph on':u'EmphaticText', u'\\emph toggle':u'EmphaticText',
      u'\\end_body':u'LyXFooter', u'\\family':u'TextFamily',
      u'\\family default':u'BlackBox', u'\\family roman':u'BlackBox',
      u'\\hfill':u'Hfill', u'\\labelwidthstring':u'BlackBox',
      u'\\lang':u'LangLine', u'\\length':u'InsetLength',
      u'\\lyxformat':u'LyXFormat', u'\\lyxline':u'LyXLine',
      u'\\newline':u'Newline', u'\\newpage':u'NewPage',
      u'\\noindent':u'BlackBox', u'\\noun default':u'BlackBox',
      u'\\noun off':u'BlackBox', u'\\noun on':u'VersalitasText',
      u'\\paragraph_spacing':u'BlackBox', u'\\series bold':u'BoldText',
      u'\\series default':u'BlackBox', u'\\series medium':u'BlackBox',
      u'\\shape':u'ShapedText', u'\\shape default':u'BlackBox',
      u'\\shape up':u'BlackBox', u'\\size':u'SizeText',
      u'\\size normal':u'BlackBox', u'\\start_of_appendix':u'StartAppendix',
      u'\\strikeout default':u'BlackBox', u'\\strikeout on':u'StrikeOut',
      }
  # Prefix that marks a LyX command line.
  string = {
      u'startcommand':u'\\',
      }
  # Header lines that introduce a table.
  table = {
      u'headers':[u'<lyxtabular',u'<features',],
      }
class EscapeConfig(object):
  "Configuration class from elyxer.config file"

  # Generic character replacements applied to text (TeX-isms to
  # typographic characters).
  chars = {
      u'\n':u'', u' -- ':u' — ', u'\'':u'’', u'---':u'—', u'`':u'‘',
      }
  # LyX special-character commands and their textual replacements.
  commands = {
      u'\\InsetSpace \\space{}':u' ', u'\\InsetSpace \\thinspace{}':u' ',
      u'\\InsetSpace ~':u' ', u'\\SpecialChar \\-':u'',
      u'\\SpecialChar \\@.':u'.', u'\\SpecialChar \\ldots{}':u'…',
      u'\\SpecialChar \\menuseparator':u' ▷ ',
      u'\\SpecialChar \\nobreakdash-':u'-', u'\\SpecialChar \\slash{}':u'/',
      u'\\SpecialChar \\textcompwordmark{}':u'', u'\\backslash':u'\\',
      }
  # HTML escaping of markup-significant characters.  These must map to the
  # named character references; the previous identity mapping
  # (u'&' -> u'&', u'<' -> u'<', u'>' -> u'>') made escaping a no-op and
  # produced invalid HTML for text containing these characters.
  entities = {
      u'&':u'&amp;', u'<':u'&lt;', u'>':u'&gt;',
      }
  # XHTML self-closing syntax to plain HTML.
  html = {
      u'/>':u'>',
      }
  # NOTE(review): the keys below appear to be distinct Unicode space
  # characters that render identically here — possibly mangled by a
  # re-encoding of this file; verify against upstream before relying on them.
  iso885915 = {
      u' ':u' ', u' ':u' ', u' ':u' ',
      }
  nonunicode = {
      u' ':u' ',
      }
class FormulaConfig(object):
"Configuration class from elyxer.config file"
alphacommands = {
u'\\AA':u'Å', u'\\AE':u'Æ',
u'\\AmS':u'<span class="versalitas">AmS</span>', u'\\DH':u'Ð',
u'\\L':u'Ł', u'\\O':u'Ø', u'\\OE':u'Œ', u'\\TH':u'Þ', u'\\aa':u'å',
u'\\ae':u'æ', u'\\alpha':u'α', u'\\beta':u'β', u'\\delta':u'δ',
u'\\dh':u'ð', u'\\epsilon':u'ϵ', u'\\eta':u'η', u'\\gamma':u'γ',
u'\\i':u'ı', u'\\imath':u'ı', u'\\iota':u'ι', u'\\j':u'ȷ',
u'\\jmath':u'ȷ', u'\\kappa':u'κ', u'\\l':u'ł', u'\\lambda':u'λ',
u'\\mu':u'μ', u'\\nu':u'ν', u'\\o':u'ø', u'\\oe':u'œ', u'\\omega':u'ω',
u'\\phi':u'φ', u'\\pi':u'π', u'\\psi':u'ψ', u'\\rho':u'ρ',
u'\\sigma':u'σ', u'\\ss':u'ß', u'\\tau':u'τ', u'\\textcrh':u'ħ',
u'\\th':u'þ', u'\\theta':u'θ', u'\\upsilon':u'υ', u'\\varDelta':u'∆',
u'\\varGamma':u'Γ', u'\\varLambda':u'Λ', u'\\varOmega':u'Ω',
u'\\varPhi':u'Φ', u'\\varPi':u'Π', u'\\varPsi':u'Ψ', u'\\varSigma':u'Σ',
u'\\varTheta':u'Θ', u'\\varUpsilon':u'Υ', u'\\varXi':u'Ξ',
u'\\varepsilon':u'ε', u'\\varkappa':u'ϰ', u'\\varphi':u'φ',
u'\\varpi':u'ϖ', u'\\varrho':u'ϱ', u'\\varsigma':u'ς',
u'\\vartheta':u'ϑ', u'\\xi':u'ξ', u'\\zeta':u'ζ',
}
array = {
u'begin':u'\\begin', u'cellseparator':u'&', u'end':u'\\end',
u'rowseparator':u'\\\\',
}
bigbrackets = {
u'(':[u'⎛',u'⎜',u'⎝',], u')':[u'⎞',u'⎟',u'⎠',], u'[':[u'⎡',u'⎢',u'⎣',],
u']':[u'⎤',u'⎥',u'⎦',], u'{':[u'⎧',u'⎪',u'⎨',u'⎩',], u'|':[u'|',],
u'}':[u'⎫',u'⎪',u'⎬',u'⎭',], u'∥':[u'∥',],
}
bigsymbols = {
u'∑':[u'⎲',u'⎳',], u'∫':[u'⌠',u'⌡',],
}
bracketcommands = {
u'\\left':u'span class="symbol"',
u'\\left.':u'<span class="leftdot"></span>',
u'\\middle':u'span class="symbol"', u'\\right':u'span class="symbol"',
u'\\right.':u'<span class="rightdot"></span>',
}
combiningfunctions = {
u'\\"':u'̈', u'\\\'':u'́', u'\\^':u'̂', u'\\`':u'̀', u'\\acute':u'́',
u'\\bar':u'̄', u'\\breve':u'̆', u'\\c':u'̧', u'\\check':u'̌',
u'\\dddot':u'⃛', u'\\ddot':u'̈', u'\\dot':u'̇', u'\\grave':u'̀',
u'\\hat':u'̂', u'\\mathring':u'̊', u'\\overleftarrow':u'⃖',
u'\\overrightarrow':u'⃗', u'\\r':u'̊', u'\\s':u'̩',
u'\\textcircled':u'⃝', u'\\textsubring':u'̥', u'\\tilde':u'̃',
u'\\v':u'̌', u'\\vec':u'⃗', u'\\~':u'̃',
}
commands = {
u'\\ ':u' ', u'\\!':u'', u'\\#':u'#', u'\\$':u'$', u'\\%':u'%',
u'\\&':u'&', u'\\,':u' ', u'\\:':u' ', u'\\;':u' ',
u'\\APLdownarrowbox':u'⍗', u'\\APLleftarrowbox':u'⍇',
u'\\APLrightarrowbox':u'⍈', u'\\APLuparrowbox':u'⍐', u'\\Box':u'□',
u'\\Bumpeq':u'≎', u'\\CIRCLE':u'●', u'\\Cap':u'⋒', u'\\CheckedBox':u'☑',
u'\\Circle':u'○', u'\\Coloneqq':u'⩴', u'\\Corresponds':u'≙',
u'\\Cup':u'⋓', u'\\Delta':u'Δ', u'\\Diamond':u'◇', u'\\Downarrow':u'⇓',
u'\\EUR':u'€', u'\\Game':u'⅁', u'\\Gamma':u'Γ', u'\\Im':u'ℑ',
u'\\Join':u'⨝', u'\\LEFTCIRCLE':u'◖', u'\\LEFTcircle':u'◐',
u'\\Lambda':u'Λ', u'\\Leftarrow':u'⇐', u'\\Lleftarrow':u'⇚',
u'\\Longleftarrow':u'⟸', u'\\Longleftrightarrow':u'⟺',
u'\\Longrightarrow':u'⟹', u'\\Lsh':u'↰', u'\\Mapsfrom':u'⇐|',
u'\\Mapsto':u'|⇒', u'\\Omega':u'Ω', u'\\P':u'¶', u'\\Phi':u'Φ',
u'\\Pi':u'Π', u'\\Pr':u'Pr', u'\\Psi':u'Ψ', u'\\RIGHTCIRCLE':u'◗',
u'\\RIGHTcircle':u'◑', u'\\Re':u'ℜ', u'\\Rrightarrow':u'⇛',
u'\\Rsh':u'↱', u'\\S':u'§', u'\\Sigma':u'Σ', u'\\Square':u'☐',
u'\\Subset':u'⋐', u'\\Supset':u'⋑', u'\\Theta':u'Θ', u'\\Uparrow':u'⇑',
u'\\Updownarrow':u'⇕', u'\\Upsilon':u'Υ', u'\\Vdash':u'⊩',
u'\\Vert':u'∥', u'\\Vvdash':u'⊪', u'\\XBox':u'☒', u'\\Xi':u'Ξ',
u'\\Yup':u'⅄', u'\\\\':u'<br/>', u'\\_':u'_', u'\\aleph':u'ℵ',
u'\\amalg':u'∐', u'\\angle':u'∠', u'\\aquarius':u'♒',
u'\\arccos':u'arccos', u'\\arcsin':u'arcsin', u'\\arctan':u'arctan',
u'\\arg':u'arg', u'\\aries':u'♈', u'\\ast':u'∗', u'\\asymp':u'≍',
u'\\backepsilon':u'∍', u'\\backprime':u'‵', u'\\backsimeq':u'⋍',
u'\\backslash':u'\\', u'\\barwedge':u'⊼', u'\\because':u'∵',
u'\\beth':u'ℶ', u'\\between':u'≬', u'\\bigcap':u'∩', u'\\bigcirc':u'○',
u'\\bigcup':u'∪', u'\\bigodot':u'⊙', u'\\bigoplus':u'⊕',
u'\\bigotimes':u'⊗', u'\\bigsqcup':u'⊔', u'\\bigstar':u'★',
u'\\bigtriangledown':u'▽', u'\\bigtriangleup':u'△', u'\\biguplus':u'⊎',
u'\\bigvee':u'∨', u'\\bigwedge':u'∧', u'\\blacklozenge':u'⧫',
u'\\blacksmiley':u'☻', u'\\blacksquare':u'■', u'\\blacktriangle':u'▲',
u'\\blacktriangledown':u'▼', u'\\blacktriangleright':u'▶', u'\\bot':u'⊥',
u'\\bowtie':u'⋈', u'\\box':u'▫', u'\\boxdot':u'⊡', u'\\bullet':u'•',
u'\\bumpeq':u'≏', u'\\cancer':u'♋', u'\\cap':u'∩', u'\\capricornus':u'♑',
u'\\cdot':u'⋅', u'\\cdots':u'⋯', u'\\centerdot':u'∙',
u'\\checkmark':u'✓', u'\\chi':u'χ', u'\\circ':u'○', u'\\circeq':u'≗',
u'\\circledR':u'®', u'\\circledast':u'⊛', u'\\circledcirc':u'⊚',
u'\\circleddash':u'⊝', u'\\clubsuit':u'♣', u'\\coloneqq':u'≔',
u'\\complement':u'∁', u'\\cong':u'≅', u'\\coprod':u'∐',
u'\\copyright':u'©', u'\\cos':u'cos', u'\\cosh':u'cosh', u'\\cot':u'cot',
u'\\coth':u'coth', u'\\csc':u'csc', u'\\cup':u'∪',
u'\\curvearrowleft':u'↶', u'\\curvearrowright':u'↷', u'\\dag':u'†',
u'\\dagger':u'†', u'\\daleth':u'ℸ', u'\\dashleftarrow':u'⇠',
u'\\dashv':u'⊣', u'\\ddag':u'‡', u'\\ddagger':u'‡', u'\\ddots':u'⋱',
u'\\deg':u'deg', u'\\det':u'det', u'\\diagdown':u'╲', u'\\diagup':u'╱',
u'\\diamond':u'◇', u'\\diamondsuit':u'♦', u'\\dim':u'dim', u'\\div':u'÷',
u'\\divideontimes':u'⋇', u'\\dotdiv':u'∸', u'\\doteq':u'≐',
u'\\doteqdot':u'≑', u'\\dotplus':u'∔', u'\\dots':u'…',
u'\\doublebarwedge':u'⌆', u'\\downarrow':u'↓', u'\\downdownarrows':u'⇊',
u'\\downharpoonleft':u'⇃', u'\\downharpoonright':u'⇂', u'\\earth':u'♁',
u'\\ell':u'ℓ', u'\\emptyset':u'∅', u'\\eqcirc':u'≖', u'\\eqcolon':u'≕',
u'\\eqsim':u'≂', u'\\euro':u'€', u'\\exists':u'∃', u'\\exp':u'exp',
u'\\fallingdotseq':u'≒', u'\\female':u'♀', u'\\flat':u'♭',
u'\\forall':u'∀', u'\\frown':u'⌢', u'\\frownie':u'☹', u'\\gcd':u'gcd',
u'\\gemini':u'♊', u'\\geq)':u'≥', u'\\geqq':u'≧', u'\\geqslant':u'≥',
u'\\gets':u'←', u'\\gg':u'≫', u'\\ggg':u'⋙', u'\\gimel':u'ℷ',
u'\\gneqq':u'≩', u'\\gnsim':u'⋧', u'\\gtrdot':u'⋗', u'\\gtreqless':u'⋚',
u'\\gtreqqless':u'⪌', u'\\gtrless':u'≷', u'\\gtrsim':u'≳',
u'\\guillemotleft':u'«', u'\\guillemotright':u'»', u'\\hbar':u'ℏ',
u'\\heartsuit':u'♥', u'\\hfill':u'<span class="hfill"> </span>',
u'\\hom':u'hom', u'\\hookleftarrow':u'↩', u'\\hookrightarrow':u'↪',
u'\\hslash':u'ℏ', u'\\idotsint':u'<span class="bigsymbol">∫⋯∫</span>',
u'\\iiint':u'<span class="bigsymbol">∭</span>',
u'\\iint':u'<span class="bigsymbol">∬</span>', u'\\imath':u'ı',
u'\\inf':u'inf', u'\\infty':u'∞', u'\\invneg':u'⌐', u'\\jmath':u'ȷ',
u'\\jupiter':u'♃', u'\\ker':u'ker', u'\\land':u'∧',
u'\\landupint':u'<span class="bigsymbol">∱</span>', u'\\langle':u'⟨',
u'\\lbrace':u'{', u'\\lbrace)':u'{', u'\\lbrack':u'[', u'\\lceil':u'⌈',
u'\\ldots':u'…', u'\\leadsto':u'⇝', u'\\leftarrow)':u'←',
u'\\leftarrowtail':u'↢', u'\\leftarrowtobar':u'⇤',
u'\\leftharpoondown':u'↽', u'\\leftharpoonup':u'↼',
u'\\leftleftarrows':u'⇇', u'\\leftleftharpoons':u'⥢', u'\\leftmoon':u'☾',
u'\\leftrightarrow':u'↔', u'\\leftrightarrows':u'⇆',
u'\\leftrightharpoons':u'⇋', u'\\leftthreetimes':u'⋋', u'\\leo':u'♌',
u'\\leq)':u'≤', u'\\leqq':u'≦', u'\\leqslant':u'≤', u'\\lessdot':u'⋖',
u'\\lesseqgtr':u'⋛', u'\\lesseqqgtr':u'⪋', u'\\lessgtr':u'≶',
u'\\lesssim':u'≲', u'\\lfloor':u'⌊', u'\\lg':u'lg', u'\\lhd':u'⊲',
u'\\libra':u'♎', u'\\lightning':u'↯', u'\\liminf':u'liminf',
u'\\limsup':u'limsup', u'\\ll':u'≪', u'\\lll':u'⋘', u'\\ln':u'ln',
u'\\lneqq':u'≨', u'\\lnot':u'¬', u'\\lnsim':u'⋦', u'\\log':u'log',
u'\\longleftarrow':u'⟵', u'\\longleftrightarrow':u'⟷',
u'\\longmapsto':u'⟼', u'\\longrightarrow':u'⟶', u'\\looparrowleft':u'↫',
u'\\looparrowright':u'↬', u'\\lor':u'∨', u'\\lozenge':u'◊',
u'\\ltimes':u'⋉', u'\\lyxlock':u'', u'\\male':u'♂', u'\\maltese':u'✠',
u'\\mapsfrom':u'↤', u'\\mapsto':u'↦', u'\\mathcircumflex':u'^',
u'\\max':u'max', u'\\measuredangle':u'∡', u'\\mercury':u'☿',
u'\\mho':u'℧', u'\\mid':u'∣', u'\\min':u'min', u'\\models':u'⊨',
u'\\mp':u'∓', u'\\multimap':u'⊸', u'\\nLeftarrow':u'⇍',
u'\\nLeftrightarrow':u'⇎', u'\\nRightarrow':u'⇏', u'\\nVDash':u'⊯',
u'\\nabla':u'∇', u'\\napprox':u'≉', u'\\natural':u'♮', u'\\ncong':u'≇',
u'\\nearrow':u'↗', u'\\neg':u'¬', u'\\neg)':u'¬', u'\\neptune':u'♆',
u'\\nequiv':u'≢', u'\\newline':u'<br/>', u'\\nexists':u'∄',
u'\\ngeqslant':u'≱', u'\\ngtr':u'≯', u'\\ngtrless':u'≹', u'\\ni':u'∋',
u'\\ni)':u'∋', u'\\nleftarrow':u'↚', u'\\nleftrightarrow':u'↮',
u'\\nleqslant':u'≰', u'\\nless':u'≮', u'\\nlessgtr':u'≸', u'\\nmid':u'∤',
u'\\nolimits':u'', u'\\nonumber':u'', u'\\not':u'¬', u'\\not<':u'≮',
u'\\not=':u'≠', u'\\not>':u'≯', u'\\notbackslash':u'⍀', u'\\notin':u'∉',
u'\\notni':u'∌', u'\\notslash':u'⌿', u'\\nparallel':u'∦',
u'\\nprec':u'⊀', u'\\nrightarrow':u'↛', u'\\nsim':u'≁', u'\\nsimeq':u'≄',
u'\\nsqsubset':u'⊏̸', u'\\nsubseteq':u'⊈', u'\\nsucc':u'⊁',
u'\\nsucccurlyeq':u'⋡', u'\\nsupset':u'⊅', u'\\nsupseteq':u'⊉',
u'\\ntriangleleft':u'⋪', u'\\ntrianglelefteq':u'⋬',
u'\\ntriangleright':u'⋫', u'\\ntrianglerighteq':u'⋭', u'\\nvDash':u'⊭',
u'\\nvdash':u'⊬', u'\\nwarrow':u'↖', u'\\odot':u'⊙',
u'\\officialeuro':u'€', u'\\oiiint':u'<span class="bigsymbol">∰</span>',
u'\\oiint':u'<span class="bigsymbol">∯</span>',
u'\\oint':u'<span class="bigsymbol">∮</span>',
u'\\ointclockwise':u'<span class="bigsymbol">∲</span>',
u'\\ointctrclockwise':u'<span class="bigsymbol">∳</span>',
u'\\ominus':u'⊖', u'\\oplus':u'⊕', u'\\oslash':u'⊘', u'\\otimes':u'⊗',
u'\\owns':u'∋', u'\\parallel':u'∥', u'\\partial':u'∂', u'\\perp':u'⊥',
u'\\pisces':u'♓', u'\\pitchfork':u'⋔', u'\\pluto':u'♇', u'\\pm':u'±',
u'\\pointer':u'➪', u'\\pounds':u'£', u'\\prec':u'≺',
u'\\preccurlyeq':u'≼', u'\\preceq':u'≼', u'\\precsim':u'≾',
u'\\prime':u'′', u'\\prompto':u'∝', u'\\qquad':u' ', u'\\quad':u' ',
u'\\quarternote':u'♩', u'\\rangle':u'⟩', u'\\rbrace':u'}',
u'\\rbrace)':u'}', u'\\rbrack':u']', u'\\rceil':u'⌉', u'\\rfloor':u'⌋',
u'\\rhd':u'⊳', u'\\rightarrow)':u'→', u'\\rightarrowtail':u'↣',
u'\\rightarrowtobar':u'⇥', u'\\rightharpoondown':u'⇁',
u'\\rightharpoonup':u'⇀', u'\\rightharpooondown':u'⇁',
u'\\rightharpooonup':u'⇀', u'\\rightleftarrows':u'⇄',
u'\\rightleftharpoons':u'⇌', u'\\rightmoon':u'☽',
u'\\rightrightarrows':u'⇉', u'\\rightrightharpoons':u'⥤',
u'\\rightthreetimes':u'⋌', u'\\risingdotseq':u'≓', u'\\rtimes':u'⋊',
u'\\sagittarius':u'♐', u'\\saturn':u'♄', u'\\scorpio':u'♏',
u'\\searrow':u'↘', u'\\sec':u'sec', u'\\setminus':u'∖', u'\\sharp':u'♯',
u'\\simeq':u'≃', u'\\sin':u'sin', u'\\sinh':u'sinh', u'\\slash':u'∕',
u'\\smile':u'⌣', u'\\smiley':u'☺', u'\\spadesuit':u'♠',
u'\\sphericalangle':u'∢', u'\\sqcap':u'⊓', u'\\sqcup':u'⊔',
u'\\sqsubset':u'⊏', u'\\sqsubseteq':u'⊑', u'\\sqsupset':u'⊐',
u'\\sqsupseteq':u'⊒', u'\\square':u'□', u'\\star':u'⋆',
u'\\subseteqq':u'⫅', u'\\subsetneqq':u'⫋', u'\\succ':u'≻',
u'\\succcurlyeq':u'≽', u'\\succeq':u'≽', u'\\succnsim':u'⋩',
u'\\succsim':u'≿', u'\\sun':u'☼', u'\\sup':u'sup', u'\\supseteqq':u'⫆',
u'\\supsetneqq':u'⫌', u'\\surd':u'√', u'\\swarrow':u'↙', u'\\tan':u'tan',
u'\\tanh':u'tanh', u'\\taurus':u'♉', u'\\textasciicircum':u'^',
u'\\textasciitilde':u'~', u'\\textbackslash':u'\\',
u'\\textcopyright':u'©\'', u'\\textdegree':u'°', u'\\textellipsis':u'…',
u'\\textemdash':u'—', u'\\textendash':u'—', u'\\texteuro':u'€',
u'\\textgreater':u'>', u'\\textless':u'<', u'\\textordfeminine':u'ª',
u'\\textordmasculine':u'º', u'\\textquotedblleft':u'“',
u'\\textquotedblright':u'”', u'\\textquoteright':u'’',
u'\\textregistered':u'®', u'\\textrightarrow':u'→',
u'\\textsection':u'§', u'\\texttrademark':u'™',
u'\\texttwosuperior':u'²', u'\\textvisiblespace':u' ',
u'\\therefore':u'∴', u'\\top':u'⊤', u'\\triangle':u'△',
u'\\triangleleft':u'⊲', u'\\trianglelefteq':u'⊴', u'\\triangleq':u'≜',
u'\\triangleright':u'▷', u'\\trianglerighteq':u'⊵',
u'\\twoheadleftarrow':u'↞', u'\\twoheadrightarrow':u'↠',
u'\\twonotes':u'♫', u'\\udot':u'⊍', u'\\unlhd':u'⊴', u'\\unrhd':u'⊵',
u'\\unrhl':u'⊵', u'\\uparrow':u'↑', u'\\updownarrow':u'↕',
u'\\upharpoonleft':u'↿', u'\\upharpoonright':u'↾', u'\\uplus':u'⊎',
u'\\upuparrows':u'⇈', u'\\uranus':u'♅', u'\\vDash':u'⊨',
u'\\varclubsuit':u'♧', u'\\vardiamondsuit':u'♦', u'\\varheartsuit':u'♥',
u'\\varnothing':u'∅', u'\\varspadesuit':u'♤', u'\\vdash':u'⊢',
u'\\vdots':u'⋮', u'\\vee':u'∨', u'\\vee)':u'∨', u'\\veebar':u'⊻',
u'\\vert':u'∣', u'\\virgo':u'♍', u'\\wedge':u'∧', u'\\wedge)':u'∧',
u'\\wp':u'℘', u'\\wr':u'≀', u'\\yen':u'¥', u'\\{':u'{', u'\\|':u'∥',
u'\\}':u'}',
}
decoratedcommand = {
}
decoratingfunctions = {
u'\\overleftarrow':u'⟵', u'\\overrightarrow':u'⟶', u'\\widehat':u'^',
}
endings = {
u'bracket':u'}', u'complex':u'\\]', u'endafter':u'}',
u'endbefore':u'\\end{', u'squarebracket':u']',
}
environments = {
u'align':[u'r',u'l',], u'eqnarray':[u'r',u'c',u'l',],
u'gathered':[u'l',u'l',],
}
fontfunctions = {
u'\\boldsymbol':u'b', u'\\mathbb':u'span class="blackboard"',
u'\\mathbb{A}':u'𝔸', u'\\mathbb{B}':u'𝔹', u'\\mathbb{C}':u'ℂ',
u'\\mathbb{D}':u'𝔻', u'\\mathbb{E}':u'𝔼', u'\\mathbb{F}':u'𝔽',
u'\\mathbb{G}':u'𝔾', u'\\mathbb{H}':u'ℍ', u'\\mathbb{J}':u'𝕁',
u'\\mathbb{K}':u'𝕂', u'\\mathbb{L}':u'𝕃', u'\\mathbb{N}':u'ℕ',
u'\\mathbb{O}':u'𝕆', u'\\mathbb{P}':u'ℙ', u'\\mathbb{Q}':u'ℚ',
u'\\mathbb{R}':u'ℝ', u'\\mathbb{S}':u'𝕊', u'\\mathbb{T}':u'𝕋',
u'\\mathbb{W}':u'𝕎', u'\\mathbb{Z}':u'ℤ', u'\\mathbf':u'b',
u'\\mathcal':u'span class="scriptfont"', u'\\mathcal{B}':u'ℬ',
u'\\mathcal{E}':u'ℰ', u'\\mathcal{F}':u'ℱ', u'\\mathcal{H}':u'ℋ',
u'\\mathcal{I}':u'ℐ', u'\\mathcal{L}':u'ℒ', u'\\mathcal{M}':u'ℳ',
u'\\mathcal{R}':u'ℛ', u'\\mathfrak':u'span class="fraktur"',
u'\\mathfrak{C}':u'ℭ', u'\\mathfrak{F}':u'𝔉', u'\\mathfrak{H}':u'ℌ',
u'\\mathfrak{I}':u'ℑ', u'\\mathfrak{R}':u'ℜ', u'\\mathfrak{Z}':u'ℨ',
u'\\mathit':u'i', u'\\mathring{A}':u'Å', u'\\mathring{U}':u'Ů',
u'\\mathring{a}':u'å', u'\\mathring{u}':u'ů', u'\\mathring{w}':u'ẘ',
u'\\mathring{y}':u'ẙ', u'\\mathrm':u'span class="mathrm"',
u'\\mathscr':u'span class="scriptfont"', u'\\mathscr{B}':u'ℬ',
u'\\mathscr{E}':u'ℰ', u'\\mathscr{F}':u'ℱ', u'\\mathscr{H}':u'ℋ',
u'\\mathscr{I}':u'ℐ', u'\\mathscr{L}':u'ℒ', u'\\mathscr{M}':u'ℳ',
u'\\mathscr{R}':u'ℛ', u'\\mathsf':u'span class="mathsf"',
u'\\mathtt':u'tt',
}
hybridfunctions = {
u'\\binom':[u'{$1}{$2}',u'f2{(}f0{f1{$1}f1{$2}}f2{)}',u'span class="binom"',u'span class="binomstack"',u'span class="bigsymbol"',],
u'\\boxed':[u'{$1}',u'f0{$1}',u'span class="boxed"',],
u'\\cfrac':[u'[$p!]{$1}{$2}',u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}',u'span class="fullfraction"',u'span class="numerator align-$p"',u'span class="denominator"',u'span class="ignored"',],
u'\\color':[u'{$p!}{$1}',u'f0{$1}',u'span style="color: $p;"',],
u'\\colorbox':[u'{$p!}{$1}',u'f0{$1}',u'span class="colorbox" style="background: $p;"',],
u'\\dbinom':[u'{$1}{$2}',u'(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})',u'span class="binomial"',u'span class="binomrow"',u'span class="binomcell"',],
u'\\dfrac':[u'{$1}{$2}',u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}',u'span class="fullfraction"',u'span class="numerator"',u'span class="denominator"',u'span class="ignored"',],
u'\\displaystyle':[u'{$1}',u'f0{$1}',u'span class="displaystyle"',],
u'\\fbox':[u'{$1}',u'f0{$1}',u'span class="fbox"',],
u'\\fboxrule':[u'{$p!}',u'f0{}',u'ignored',],
u'\\fboxsep':[u'{$p!}',u'f0{}',u'ignored',],
u'\\fcolorbox':[u'{$p!}{$q!}{$1}',u'f0{$1}',u'span class="boxed" style="border-color: $p; background: $q;"',],
u'\\frac':[u'{$1}{$2}',u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}',u'span class="fraction"',u'span class="numerator"',u'span class="denominator"',u'span class="ignored"',],
u'\\framebox':[u'[$p!][$q!]{$1}',u'f0{$1}',u'span class="framebox align-$q" style="width: $p;"',],
u'\\href':[u'[$o]{$u!}{$t!}',u'f0{$t}',u'a href="$u"',],
u'\\hspace':[u'{$p!}',u'f0{ }',u'span class="hspace" style="width: $p;"',],
u'\\leftroot':[u'{$p!}',u'f0{ }',u'span class="leftroot" style="width: $p;px"',],
u'\\nicefrac':[u'{$1}{$2}',u'f0{f1{$1}⁄f2{$2}}',u'span class="fraction"',u'sup class="numerator"',u'sub class="denominator"',u'span class="ignored"',],
u'\\parbox':[u'[$p!]{$w!}{$1}',u'f0{1}',u'div class="Boxed" style="width: $w;"',],
u'\\raisebox':[u'{$p!}{$1}',u'f0{$1.font}',u'span class="raisebox" style="vertical-align: $p;"',],
u'\\renewenvironment':[u'{$1!}{$2!}{$3!}',u'',],
u'\\rule':[u'[$v!]{$w!}{$h!}',u'f0/',u'hr class="line" style="width: $w; height: $h;"',],
u'\\scriptscriptstyle':[u'{$1}',u'f0{$1}',u'span class="scriptscriptstyle"',],
u'\\scriptstyle':[u'{$1}',u'f0{$1}',u'span class="scriptstyle"',],
u'\\sqrt':[u'[$0]{$1}',u'f0{f1{$0}f2{√}f4{(}f3{$1}f4{)}}',u'span class="sqrt"',u'sup class="root"',u'span class="radical"',u'span class="root"',u'span class="ignored"',],
u'\\stackrel':[u'{$1}{$2}',u'f0{f1{$1}f2{$2}}',u'span class="stackrel"',u'span class="upstackrel"',u'span class="downstackrel"',],
u'\\tbinom':[u'{$1}{$2}',u'(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})',u'span class="binomial"',u'span class="binomrow"',u'span class="binomcell"',],
u'\\textcolor':[u'{$p!}{$1}',u'f0{$1}',u'span style="color: $p;"',],
u'\\textstyle':[u'{$1}',u'f0{$1}',u'span class="textstyle"',],
u'\\unit':[u'[$0]{$1}',u'$0f0{$1.font}',u'span class="unit"',],
u'\\unitfrac':[u'[$0]{$1}{$2}',u'$0f0{f1{$1.font}⁄f2{$2.font}}',u'span class="fraction"',u'sup class="unit"',u'sub class="unit"',],
u'\\uproot':[u'{$p!}',u'f0{ }',u'span class="uproot" style="width: $p;px"',],
u'\\url':[u'{$u!}',u'f0{$u}',u'a href="$u"',],
u'\\vspace':[u'{$p!}',u'f0{ }',u'span class="vspace" style="height: $p;"',],
}
hybridsizes = {
u'\\binom':u'$1+$2', u'\\cfrac':u'$1+$2', u'\\dbinom':u'$1+$2+1',
u'\\dfrac':u'$1+$2', u'\\frac':u'$1+$2', u'\\tbinom':u'$1+$2+1',
}
labelfunctions = {
u'\\label':u'a name="#"',
}
limitcommands = {
u'\\int':u'∫', u'\\intop':u'∫', u'\\lim':u'lim', u'\\prod':u'∏',
u'\\smallint':u'∫', u'\\sum':u'∑',
}
# TODO: setting for simple enlarged vs. piecewise symbols
for key in (u'\\int', u'\\intop', u'\\prod', u'\\sum'):
limitcommands[key] = '<span class="symbol">%s</span>' % limitcommands[key]
misccommands = {
u'\\limits':u'LimitPreviousCommand', u'\\newcommand':u'MacroDefinition',
u'\\renewcommand':u'MacroDefinition',
u'\\setcounter':u'SetCounterFunction', u'\\tag':u'FormulaTag',
u'\\tag*':u'FormulaTag',
}
modified = {
u'\n':u'', u' ':u'', u'$':u'', u'&':u' ', u'\'':u'’', u'+':u' + ',
u',':u', ', u'-':u' − ', u'/':u' ⁄ ', u'<':u' < ', u'=':u' = ',
u'>':u' > ', u'@':u'', u'~':u'',
}
onefunctions = {
u'\\Big':u'span class="bigsymbol"', u'\\Bigg':u'span class="hugesymbol"',
u'\\bar':u'span class="bar"', u'\\begin{array}':u'span class="arraydef"',
u'\\big':u'span class="symbol"', u'\\bigg':u'span class="largesymbol"',
u'\\bigl':u'span class="bigsymbol"', u'\\bigr':u'span class="bigsymbol"',
u'\\centering':u'span class="align-center"',
u'\\ensuremath':u'span class="ensuremath"',
u'\\hphantom':u'span class="phantom"',
u'\\noindent':u'span class="noindent"',
u'\\overbrace':u'span class="overbrace"',
u'\\overline':u'span class="overline"',
u'\\phantom':u'span class="phantom"',
u'\\underbrace':u'span class="underbrace"', u'\\underline':u'u',
u'\\vphantom':u'span class="phantom"',
}
spacedcommands = {
u'\\Leftrightarrow':u'⇔', u'\\Rightarrow':u'⇒', u'\\approx':u'≈',
u'\\dashrightarrow':u'⇢', u'\\equiv':u'≡', u'\\ge':u'≥', u'\\geq':u'≥',
u'\\implies':u' ⇒ ', u'\\in':u'∈', u'\\le':u'≤', u'\\leftarrow':u'←',
u'\\leq':u'≤', u'\\ne':u'≠', u'\\neq':u'≠', u'\\not\\in':u'∉',
u'\\propto':u'∝', u'\\rightarrow':u'→', u'\\rightsquigarrow':u'⇝',
u'\\sim':u'~', u'\\subset':u'⊂', u'\\subseteq':u'⊆', u'\\supset':u'⊃',
u'\\supseteq':u'⊇', u'\\times':u'×', u'\\to':u'→',
}
starts = {
u'beginafter':u'}', u'beginbefore':u'\\begin{', u'bracket':u'{',
u'command':u'\\', u'comment':u'%', u'complex':u'\\[', u'simple':u'$',
u'squarebracket':u'[', u'unnumbered':u'*',
}
symbolfunctions = {
u'^':u'sup', u'_':u'sub',
}
textfunctions = {
u'\\mbox':u'span class="mbox"', u'\\text':u'span class="text"',
u'\\textbf':u'b', u'\\textipa':u'span class="textipa"', u'\\textit':u'i',
u'\\textnormal':u'span class="textnormal"',
u'\\textrm':u'span class="textrm"',
u'\\textsc':u'span class="versalitas"',
u'\\textsf':u'span class="textsf"', u'\\textsl':u'i', u'\\texttt':u'tt',
u'\\textup':u'span class="normal"',
}
unmodified = {
u'characters':[u'.',u'*',u'€',u'(',u')',u'[',u']',u':',u'·',u'!',u';',u'|',u'§',u'"',],
}
urls = {
u'googlecharts':u'http://chart.googleapis.com/chart?cht=tx&chl=',
}
class GeneralConfig(object):
    "Configuration class from elyxer.config file"
    # Release metadata for this eLyXer build: release date, highest LyX file
    # format supported, and the version number. Read by Options.showversion(),
    # showhardversion(), showversiondate() and showlyxformat().
    version = {
        u'date':u'2011-06-27', u'lyxformat':u'413', u'number':u'1.2.3',
    }
class HeaderConfig(object):
    "Configuration class from elyxer.config file"
    # LyX header commands keyed by a symbolic name; used when scanning the
    # document preamble section of a .lyx file.
    parameters = {
        u'beginpreamble':u'\\begin_preamble', u'branch':u'\\branch',
        u'documentclass':u'\\textclass', u'endbranch':u'\\end_branch',
        u'endpreamble':u'\\end_preamble', u'language':u'\\language',
        u'lstset':u'\\lstset', u'outputchanges':u'\\output_changes',
        u'paragraphseparation':u'\\paragraph_separation',
        u'pdftitle':u'\\pdf_title', u'secnumdepth':u'\\secnumdepth',
        u'tocdepth':u'\\tocdepth',
    }
    # LyX document classes grouped into two broad families, article-like
    # and book-like.
    styles = {
        u'article':[u'article',u'aastex',u'aapaper',u'acmsiggraph',u'sigplanconf',u'achemso',u'amsart',u'apa',u'arab-article',u'armenian-article',u'article-beamer',u'chess',u'dtk',u'elsarticle',u'heb-article',u'IEEEtran',u'iopart',u'kluwer',u'scrarticle-beamer',u'scrartcl',u'extarticle',u'paper',u'mwart',u'revtex4',u'spie',u'svglobal3',u'ltugboat',u'agu-dtd',u'jgrga',u'agums',u'entcs',u'egs',u'ijmpc',u'ijmpd',u'singlecol-new',u'doublecol-new',u'isprs',u'tarticle',u'jsarticle',u'jarticle',u'jss',u'literate-article',u'siamltex',u'cl2emult',u'llncs',u'svglobal',u'svjog',u'svprobth',],
        u'book':[u'book',u'amsbook',u'scrbook',u'extbook',u'tufte-book',u'report',u'extreport',u'scrreprt',u'memoir',u'tbook',u'jsbook',u'jbook',u'mwbk',u'svmono',u'svmult',u'treport',u'jreport',u'mwrep',],
    }
class ImageConfig(object):
    "Configuration class from elyxer.config file"
    # Shell command templates for external image converters; the $-prefixed
    # placeholders are substituted before the command is run, and bracketed
    # segments are optional parts.
    converters = {
        u'imagemagick':u'convert[ -density $scale][ -define $format:use-cropbox=true] "$input" "$output"',
        u'inkscape':u'inkscape "$input" --export-png="$output"',
    }
    # Cropbox format name passed to the converter, keyed by input extension.
    cropboxformats = {
        u'.eps':u'ps', u'.pdf':u'pdf', u'.ps':u'ps',
    }
    # Default output image format plus the extensions treated as vector
    # graphics (which need conversion for browser display).
    formats = {
        u'default':u'.png', u'vector':[u'.svg',u'.eps',],
    }
class LayoutConfig(object):
    "Configuration class from elyxer.config file"
    # Container class names that may be grouped together inside a layout.
    groupable = {
        u'allowed':[u'StringContainer',u'Constant',u'TaggedText',u'Align',u'TextFamily',u'EmphaticText',u'VersalitasText',u'BarredText',u'SizeText',u'ColorText',u'LangLine',u'Formula',],
    }
class NewfangleConfig(object):
    "Configuration class from elyxer.config file"
    # Markers used by the newfangle literate-programming support to locate
    # chunk references inside listings.
    constants = {
        u'chunkref':u'chunkref{', u'endcommand':u'}', u'endmark':u'>',
        u'startcommand':u'\\', u'startmark':u'=<',
    }
class NumberingConfig(object):
    "Configuration class from elyxer.config file"
    # Layout names that receive arabic ("ordered") or roman numbering.
    layouts = {
        u'ordered':[u'Chapter',u'Section',u'Subsection',u'Subsubsection',u'Paragraph',],
        u'roman':[u'Part',u'Book',],
    }
    # Footnote marker symbols, used in order when symbol footnotes are active.
    sequence = {
        u'symbols':[u'*',u'**',u'†',u'‡',u'§',u'§§',u'¶',u'¶¶',u'#',u'##',],
    }
class StyleConfig(object):
    "Configuration class from elyxer.config file"
    # Horizontal space commands mapped to their HTML replacements.
    # NOTE(review): several values look like special Unicode space characters
    # (en/em/thin spaces) rather than ASCII spaces — preserve them verbatim.
    hspaces = {
        u'\\enskip{}':u' ', u'\\hfill{}':u'<span class="hfill"> </span>',
        u'\\hspace*{\\fill}':u' ', u'\\hspace*{}':u'', u'\\hspace{}':u' ',
        u'\\negthinspace{}':u'', u'\\qquad{}':u' ', u'\\quad{}':u' ',
        u'\\space{}':u' ', u'\\thinspace{}':u' ', u'~':u' ',
    }
    # Quote glyphs keyed by a two/three letter code: language initial +
    # l(eft)/r(ight) + d(ouble)/s(ingle).
    quotes = {
        u'ald':u'»', u'als':u'›', u'ard':u'«', u'ars':u'‹', u'eld':u'“',
        u'els':u'‘', u'erd':u'”', u'ers':u'’', u'fld':u'«',
        u'fls':u'‹', u'frd':u'»', u'frs':u'›', u'gld':u'„', u'gls':u'‚',
        u'grd':u'“', u'grs':u'‘', u'pld':u'„', u'pls':u'‚', u'prd':u'”',
        u'prs':u'’', u'sld':u'”', u'srd':u'”',
    }
    # Templates for cross-reference text: @ = number, # = page, $ = name,
    # ¶ = formatted label, ↕ = link target marker.
    referenceformats = {
        u'eqref':u'(@↕)', u'formatted':u'¶↕', u'nameref':u'$↕', u'pageref':u'#↕',
        u'ref':u'@↕', u'vpageref':u'on-page#↕', u'vref':u'@on-page#↕',
    }
    # Size suffixes that are ignored when parsing lengths.
    size = {
        u'ignoredtexts':[u'col',u'text',u'line',u'page',u'theight',u'pheight',],
    }
    # Vertical space commands mapped to spacer <div> elements.
    vspaces = {
        u'bigskip':u'<div class="bigskip"> </div>',
        u'defskip':u'<div class="defskip"> </div>',
        u'medskip':u'<div class="medskip"> </div>',
        u'smallskip':u'<div class="smallskip"> </div>',
        u'vfill':u'<div class="vfill"> </div>',
    }
class TOCConfig(object):
    "Configuration class from elyxer.config file"
    # ContainerExtractor configuration for pulling plain text out of a
    # container when building TOC entries (see ContainerExtractor).
    extractplain = {
        u'allowed':[u'StringContainer',u'Constant',u'TaggedText',u'Align',u'TextFamily',u'EmphaticText',u'VersalitasText',u'BarredText',u'SizeText',u'ColorText',u'LangLine',u'Formula',],
        u'cloned':[u'',], u'extracted':[u'',],
    }
    # ContainerExtractor configuration for extracting a title.
    extracttitle = {
        u'allowed':[u'StringContainer',u'Constant',u'Space',],
        u'cloned':[u'TextFamily',u'EmphaticText',u'VersalitasText',u'BarredText',u'SizeText',u'ColorText',u'LangLine',u'Formula',],
        u'extracted':[u'PlainLayout',u'TaggedText',u'Align',u'Caption',u'StandardLayout',u'FlexInset',],
    }
class TagConfig(object):
    "Configuration class from elyxer.config file"
    # HTML tag for barred (underlined) text variants.
    barred = {
        u'under':u'u',
    }
    # HTML tags for font family changes.
    family = {
        u'sans':u'span class="sans"', u'typewriter':u'tt',
    }
    # HTML tags for LyX flex (character-style) insets.
    flex = {
        u'CharStyle:Code':u'span class="code"',
        u'CharStyle:MenuItem':u'span class="menuitem"',
        u'Code':u'span class="code"', u'MenuItem':u'span class="menuitem"',
        u'Noun':u'span class="noun"', u'Strong':u'span class="strong"',
    }
    # Layout names whose consecutive paragraphs are grouped into one element.
    group = {
        u'layouts':[u'Quotation',u'Quote',],
    }
    # HTML tags for LyX layouts; 'h?' is a placeholder resolved elsewhere
    # from the configured section numbering depth.
    layouts = {
        u'Center':u'div', u'Chapter':u'h?', u'Date':u'h2', u'Paragraph':u'div',
        u'Part':u'h1', u'Quotation':u'blockquote', u'Quote':u'blockquote',
        u'Section':u'h?', u'Subsection':u'h?', u'Subsubsection':u'h?',
    }
    # HTML list tags for LyX list layouts.
    listitems = {
        u'Enumerate':u'ol', u'Itemize':u'ul',
    }
    # HTML tags for note insets; empty string means the note is dropped.
    notes = {
        u'Comment':u'', u'Greyedout':u'span class="greyedout"', u'Note':u'',
    }
    # HTML tags for font shape changes.
    shaped = {
        u'italic':u'i', u'slanted':u'i', u'smallcaps':u'span class="versalitas"',
    }
class TranslationConfig(object):
    "Configuration class from elyxer.config file"
    # Default (English) strings for generated text; a translation layer may
    # replace these per document language.
    constants = {
        u'Appendix':u'Appendix', u'Book':u'Book', u'Chapter':u'Chapter',
        u'Paragraph':u'Paragraph', u'Part':u'Part', u'Section':u'Section',
        u'Subsection':u'Subsection', u'Subsubsection':u'Subsubsection',
        u'abstract':u'Abstract', u'bibliography':u'Bibliography',
        u'figure':u'figure', u'float-algorithm':u'Algorithm ',
        u'float-figure':u'Figure ', u'float-listing':u'Listing ',
        u'float-table':u'Table ', u'float-tableau':u'Tableau ',
        u'footnotes':u'Footnotes', u'generated-by':u'Document generated by ',
        u'generated-on':u' on ', u'index':u'Index',
        u'jsmath-enable':u'Please enable JavaScript on your browser.',
        u'jsmath-requires':u' requires JavaScript to correctly process the mathematics on this page. ',
        u'jsmath-warning':u'Warning: ', u'list-algorithm':u'List of Algorithms',
        u'list-figure':u'List of Figures', u'list-table':u'List of Tables',
        u'list-tableau':u'List of Tableaux', u'main-page':u'Main page',
        u'next':u'Next', u'nomenclature':u'Nomenclature',
        u'on-page':u' on page ', u'prev':u'Prev', u'references':u'References',
        u'toc':u'Table of Contents', u'toc-for':u'Contents for ', u'up':u'Up',
    }
    # Babel language names mapped to ISO 639-1 codes for the HTML lang tag.
    languages = {
        u'american':u'en', u'british':u'en', u'deutsch':u'de', u'dutch':u'nl',
        u'english':u'en', u'french':u'fr', u'ngerman':u'de', u'spanish':u'es',
    }
class CommandLineParser(object):
    """A parser for runtime options.

    Reads "--key value", "--key=value", "--flag" (boolean) and
    '--key "quoted value"' style arguments, storing each recognized value
    as an attribute on the options object passed to the constructor.
    """

    def __init__(self, options):
        # object (or class) whose attributes define the known options and
        # their default values; parsed values are set back onto it
        self.options = options

    def parseoptions(self, args):
        "Parse command line options; return an error string or None on success."
        if len(args) == 0:
            return None
        while len(args) > 0 and args[0].startswith('--'):
            key, value = self.readoption(args)
            if not key:
                return 'Option ' + value + ' not recognized'
            if not value:
                return 'Option ' + key + ' needs a value'
            setattr(self.options, key, value)
        return None

    def readoption(self, args):
        "Read the key and value for an option; consumes the used arguments."
        arg = args[0][2:]
        del args[0]
        if '=' in arg:
            key = self.readequalskey(arg, args)
        else:
            # hyphens are stripped so "--split-part" maps to "splitpart"
            key = arg.replace('-', '')
        if not hasattr(self.options, key):
            # unknown option: signal it by returning (None, offending key)
            return None, key
        current = getattr(self.options, key)
        if isinstance(current, bool):
            # boolean flags take no value; their presence means True
            return key, True
        # read value
        if len(args) == 0:
            return key, None
        if args[0].startswith('"'):
            initial = args[0]
            del args[0]
            return key, self.readquoted(args, initial)
        value = args[0]
        del args[0]
        if isinstance(current, list):
            # list options accumulate across repeated occurrences
            current.append(value)
            return key, current
        return key, value

    def readquoted(self, args, initial):
        "Read a value between quotes, joining arguments split on spaces."
        value = initial[1:]
        while len(args) > 0 and not args[0].endswith('"') and not args[0].startswith('--'):
            value += ' ' + args[0]
            del args[0]
        if len(args) == 0 or args[0].startswith('--'):
            # unterminated quote: report failure
            return None
        # Append the closing fragment without its trailing quote and consume
        # it. The original code concatenated a *list slice* (args[0:-1]) to a
        # string, raising TypeError for every quoted value, and never removed
        # the consumed argument.
        value += ' ' + args[0][:-1]
        del args[0]
        return value

    def readequalskey(self, arg, args):
        "Read a key of the form key=value; pushes the value back onto args."
        # NOTE(review): unlike readoption(), hyphens are not stripped from
        # keys given in --key=value form — confirm whether that is intended.
        split = arg.split('=', 1)
        key = split[0]
        value = split[1]
        args.insert(0, value)
        return key
class Options(object):
    """A set of runtime options.

    All options are class attributes so that any part of the program can
    consult them as Options.<name>; CommandLineParser writes parsed values
    back onto this class.
    """
    instance = None
    # program invocation path, taken from args[0] in parseoptions()
    location = None
    nocopy = False
    copyright = False
    debug = False
    quiet = False
    version = False
    hardversion = False
    versiondate = False
    html = False
    help = False
    showlines = True
    unicode = False
    iso885915 = False
    css = []
    title = None
    directory = None
    destdirectory = None
    toc = False
    toctarget = ''
    tocfor = None
    forceformat = None
    lyxformat = False
    target = None
    splitpart = None
    memory = True
    lowmem = False
    nobib = False
    converter = 'imagemagick'
    raw = False
    jsmath = None
    mathjax = None
    nofooter = False
    simplemath = False
    template = None
    noconvert = False
    notoclabels = False
    # footnote presentation flags; parsefootnotes() keeps them consistent
    letterfoot = True
    numberfoot = False
    symbolfoot = False
    hoverfoot = True
    marginfoot = False
    endfoot = False
    supfoot = True
    alignfoot = False
    footnotes = None
    imageformat = None
    copyimages = False
    googlecharts = False
    embedcss = []
    branches = dict()

    def parseoptions(self, args):
        "Parse command line options"
        Options.location = args[0]
        del args[0]
        parser = CommandLineParser(Options)
        result = parser.parseoptions(args)
        if result:
            Trace.error(result)
            self.usage()
        self.processoptions()

    def processoptions(self):
        "Process all options parsed."
        if Options.help:
            self.usage()
        if Options.version:
            self.showversion()
        if Options.hardversion:
            self.showhardversion()
        if Options.versiondate:
            self.showversiondate()
        if Options.lyxformat:
            self.showlyxformat()
        if Options.splitpart:
            try:
                Options.splitpart = int(Options.splitpart)
                if Options.splitpart <= 0:
                    Trace.error('--splitpart requires a number bigger than zero')
                    self.usage()
            except ValueError:
                # Catch only the int() conversion failure. The previous bare
                # "except:" also swallowed the SystemExit raised by usage()
                # above, so "--splitpart 0" printed a second, misleading
                # "needs a numeric argument" error before exiting.
                Trace.error('--splitpart needs a numeric argument, not ' + Options.splitpart)
                self.usage()
        if Options.lowmem or Options.toc or Options.tocfor:
            Options.memory = False
        self.parsefootnotes()
        if Options.forceformat and not Options.imageformat:
            Options.imageformat = Options.forceformat
        if Options.imageformat == 'copy':
            Options.copyimages = True
        if Options.css == []:
            Options.css = ['http://elyxer.nongnu.org/lyx.css']
        if Options.html:
            Options.simplemath = True
        if Options.toc and not Options.tocfor:
            Trace.error('Option --toc is deprecated; use --tocfor "page" instead')
            Options.tocfor = Options.toctarget
        if Options.nocopy:
            Trace.error('Option --nocopy is deprecated; it is no longer needed')
        # set in Trace if necessary: option "xyz" drives Trace.xyzmode
        for param in dir(Trace):
            if param.endswith('mode'):
                setattr(Trace, param, getattr(self, param[:-4]))

    def usage(self):
        "Show correct usage"
        Trace.error('Usage: ' + os.path.basename(Options.location) + ' [options] [filein] [fileout]')
        Trace.error('Convert LyX input file "filein" to HTML file "fileout".')
        Trace.error('If filein (or fileout) is not given use standard input (or output).')
        Trace.error('Main program of the eLyXer package (http://elyxer.nongnu.org/).')
        self.showoptions()

    def parsefootnotes(self):
        "Parse footnotes options."
        if not Options.footnotes:
            return
        Options.marginfoot = False
        Options.letterfoot = False
        options = Options.footnotes.split(',')
        for option in options:
            footoption = option + 'foot'
            if hasattr(Options, footoption):
                setattr(Options, footoption, True)
            else:
                Trace.error('Unknown footnotes option: ' + option)
        # guarantee at least one placement and one marker style is active
        if not Options.endfoot and not Options.marginfoot and not Options.hoverfoot:
            Options.hoverfoot = True
        if not Options.numberfoot and not Options.symbolfoot:
            Options.letterfoot = True

    def showoptions(self):
        "Show all possible options"
        Trace.error('  Common options:')
        Trace.error('    --help:                 show this online help')
        Trace.error('    --quiet:                disables all runtime messages')
        Trace.error('')
        Trace.error('  Advanced options:')
        Trace.error('    --debug:                enable debugging messages (for developers)')
        Trace.error('    --version:              show version number and release date')
        Trace.error('    --lyxformat:            return the highest LyX version supported')
        Trace.error('  Options for HTML output:')
        Trace.error('    --title "title":        set the generated page title')
        Trace.error('    --css "file.css":       use a custom CSS file')
        Trace.error('    --embedcss "file.css":  embed styles from elyxer.a CSS file into the output')
        Trace.error('    --html:                 output HTML 4.0 instead of the default XHTML')
        Trace.error('    --unicode:              full Unicode output')
        Trace.error('    --iso885915:            output a document with ISO-8859-15 encoding')
        Trace.error('    --nofooter:             remove the footer "generated by eLyXer"')
        Trace.error('    --simplemath:           do not generate fancy math constructions')
        Trace.error('  Options for image output:')
        Trace.error('    --directory "img_dir":  look for images in the specified directory')
        Trace.error('    --destdirectory "dest": put converted images into this directory')
        Trace.error('    --imageformat ".ext":   image output format, or "copy" to copy images')
        Trace.error('    --noconvert:            do not convert images, use in original locations')
        Trace.error('    --converter "inkscape": use an alternative program to convert images')
        Trace.error('  Options for footnote display:')
        Trace.error('    --numberfoot:           mark footnotes with numbers instead of letters')
        Trace.error('    --symbolfoot:           mark footnotes with symbols (*, **...)')
        Trace.error('    --hoverfoot:            show footnotes as hovering text (default)')
        Trace.error('    --marginfoot:           show footnotes on the page margin')
        Trace.error('    --endfoot:              show footnotes at the end of the page')
        Trace.error('    --supfoot:              use superscript for footnote markers (default)')
        Trace.error('    --alignfoot:            use aligned text for footnote markers')
        Trace.error('    --footnotes "options":  specify several comma-separated footnotes options')
        Trace.error('      Available options are: "number", "symbol", "hover", "margin", "end",')
        Trace.error('        "sup", "align"')
        Trace.error('  Advanced output options:')
        Trace.error('    --splitpart "depth":    split the resulting webpage at the given depth')
        Trace.error('    --tocfor "page":        generate a TOC that points to the given page')
        Trace.error('    --target "frame":       make all links point to the given frame')
        Trace.error('    --notoclabels:          omit the part labels in the TOC, such as Chapter')
        Trace.error('    --lowmem:               do the conversion on the fly (conserve memory)')
        Trace.error('    --raw:                  generate HTML without header or footer.')
        Trace.error('    --jsmath "URL":         use jsMath from elyxer.the given URL to display equations')
        Trace.error('    --mathjax "URL":        use MathJax from elyxer.the given URL to display equations')
        Trace.error('    --googlecharts:         use Google Charts to generate formula images')
        Trace.error('    --template "file":      use a template, put everything in <!--$content-->')
        Trace.error('    --copyright:            add a copyright notice at the bottom')
        Trace.error('  Deprecated options:')
        Trace.error('    --toc:                  (deprecated) create a table of contents')
        Trace.error('    --toctarget "page":     (deprecated) generate a TOC for the given page')
        Trace.error('    --nocopy:               (deprecated) maintained for backwards compatibility')
        sys.exit()

    def showversion(self):
        "Return the current eLyXer version string"
        string = 'eLyXer version ' + GeneralConfig.version['number']
        string += ' (' + GeneralConfig.version['date'] + ')'
        Trace.error(string)
        sys.exit()

    def showhardversion(self):
        "Return just the version string"
        Trace.message(GeneralConfig.version['number'])
        sys.exit()

    def showversiondate(self):
        "Return just the version date"
        Trace.message(GeneralConfig.version['date'])
        sys.exit()

    def showlyxformat(self):
        "Return just the lyxformat parameter"
        Trace.message(GeneralConfig.version['lyxformat'])
        sys.exit()
class BranchOptions(object):
    "A set of options for a branch"

    def __init__(self, name):
        self.name = name
        # every branch starts with a default (white) background color
        self.options = {'color':'#ffffff'}

    def set(self, key, value):
        "Set a branch option"
        prefix = ContainerConfig.string['startcommand']
        if not key.startswith(prefix):
            Trace.error('Invalid branch option ' + key)
            return
        self.options[key.replace(prefix, '')] = value

    def isselected(self):
        "Return if the branch is selected"
        return self.options.get('selected') == '1'

    def __unicode__(self):
        "String representation"
        return 'options for ' + self.name + ': ' + unicode(self.options)
import urllib
class Cloner(object):
    """An object used to clone other objects.

    The cloned class must support construction with no arguments.
    """

    @classmethod
    def clone(cls, original):
        "Return an exact (freshly constructed) copy of an object."
        return cls.create(original.__class__)

    @classmethod
    def create(cls, type):
        "Create an object of a given class."
        instance = type.__new__(type)
        instance.__init__()
        return instance
class ContainerExtractor(object):
    """A class to extract certain containers.

    The config parameter is a map containing three lists: allowed, cloned
    and extracted. Each of the three is a list of class names for
    containers. Allowed containers are included as is into the result.
    Cloned containers are cloned and placed into the result. Extracted
    containers are looked into. All other containers are silently ignored.
    """

    def __init__(self, config):
        self.allowed = config['allowed']
        self.cloned = config['cloned']
        self.extracted = config['extracted']

    def extract(self, container):
        "Extract a group of selected containers from a container."
        # renamed from "list": the original shadowed the builtin
        found = []
        locate = lambda c: c.__class__.__name__ in self.allowed + self.cloned
        recursive = lambda c: c.__class__.__name__ in self.extracted
        process = lambda c: self.process(c, found)
        container.recursivesearch(locate, recursive, process)
        return found

    def process(self, container, found):
        "Add allowed containers, clone cloned containers and add the clone."
        name = container.__class__.__name__
        if name in self.allowed:
            found.append(container)
        elif name in self.cloned:
            found.append(self.safeclone(container))
        else:
            Trace.error('Unknown container class ' + name)

    def safeclone(self, container):
        "Return a new container with contents only in a safe list, recursively."
        clone = Cloner.clone(container)
        clone.output = container.output
        clone.contents = self.extract(container)
        return clone
class Parser(object):
    """A generic parser for LyX file constructs.

    Subclasses override parse(); this base class provides header and
    parameter parsing shared by all of them.
    """

    def __init__(self):
        # line number where the parsed container begins (set by parseheader)
        self.begin = 0
        # parameters collected by parseparameter() / parsexml()
        self.parameters = dict()

    def parseheader(self, reader):
        "Parse the header line and remember where the container starts."
        header = reader.currentline().split()
        reader.nextline()
        self.begin = reader.linenumber
        return header

    def parseparameter(self, reader):
        "Parse a single parameter line, either XML-style or 'key value'."
        if reader.currentline().strip().startswith('<'):
            key, value = self.parsexml(reader)
            self.parameters[key] = value
            return
        split = reader.currentline().strip().split(' ', 1)
        reader.nextline()
        if len(split) == 0:
            return
        key = split[0]
        if len(split) == 1:
            # a bare key acts as a boolean flag
            self.parameters[key] = True
            return
        if not '"' in split[1]:
            self.parameters[key] = split[1].strip()
            return
        # quoted value: keep only the text between the first pair of quotes
        doublesplit = split[1].split('"')
        self.parameters[key] = doublesplit[1]

    def parsexml(self, reader):
        "Parse a parameter in xml form: <param attr1=value...>"
        strip = reader.currentline().strip()
        reader.nextline()
        if not strip.endswith('>'):
            Trace.error('XML parameter ' + strip + ' should be <...>')
        split = strip[1:-1].split()
        if len(split) == 0:
            Trace.error('Empty XML parameter <>')
            return None, None
        key = split[0]
        del split[0]
        if len(split) == 0:
            return key, dict()
        attrs = dict()
        for attr in split:
            if not '=' in attr:
                Trace.error('Erroneous attribute for ' + key + ': ' + attr)
                attr += '="0"'
            # split only on the first '=' so quoted values that themselves
            # contain '=' are preserved intact (plain split corrupted them)
            parts = attr.split('=', 1)
            attrkey = parts[0]
            value = parts[1].split('"')[1]
            attrs[attrkey] = value
        return key, attrs

    def parseending(self, reader, process):
        "Call process() on each line until the current ending is found."
        # self.ending is defined by subclasses (usually from ContainerConfig)
        if not self.ending:
            Trace.error('No ending for ' + unicode(self))
            return
        while not reader.currentline().startswith(self.ending):
            process()

    def parsecontainer(self, reader, contents):
        "Create the next container from the reader and append it to contents."
        # self.factory and self.parent are injected externally by the
        # container machinery before parse() is invoked — not set in __init__
        container = self.factory.createcontainer(reader)
        if container:
            container.parent = self.parent
            contents.append(container)

    def __unicode__(self):
        "Return a description"
        return self.__class__.__name__ + ' (' + unicode(self.begin) + ')'
class LoneCommand(Parser):
    "A parser for just one command line"

    def parse(self, reader):
        "Read nothing"
        # the whole command was already consumed by parseheader(); there is
        # no body, so the container has no contents
        return []
class TextParser(Parser):
    "A parser for a command and a bit of text"
    # class-level stack of pending endings, shared across all nested
    # TextParser instances; parse() pushes and isending() pops
    stack = []

    def __init__(self, container):
        Parser.__init__(self)
        self.ending = None
        # look up the ending token for this container type, if it has one
        if container.__class__.__name__ in ContainerConfig.endings:
            self.ending = ContainerConfig.endings[container.__class__.__name__]
        self.endings = []

    def parse(self, reader):
        "Parse lines as long as they are text"
        TextParser.stack.append(self.ending)
        # accept any pending outer ending, plus layout/inset endings and our own
        self.endings = TextParser.stack + [ContainerConfig.endings['Layout'],
            ContainerConfig.endings['Inset'], self.ending]
        contents = []
        while not self.isending(reader):
            self.parsecontainer(reader, contents)
        return contents

    def isending(self, reader):
        "Check if text is ending"
        current = reader.currentline().split()
        if len(current) == 0:
            return False
        if current[0] in self.endings:
            # pop the matched ending from the shared stack; if it is not
            # there the stack is out of sync, so reset it completely
            if current[0] in TextParser.stack:
                TextParser.stack.remove(current[0])
            else:
                TextParser.stack = []
            return True
        return False
class ExcludingParser(Parser):
    "A parser that excludes the final line"

    def parse(self, reader):
        "Parse everything up to (and excluding) the final line"
        contents = []
        def readone():
            "Parse a single container and collect it."
            self.parsecontainer(reader, contents)
        self.parseending(reader, readone)
        return contents
class BoundedParser(ExcludingParser):
    "A parser bound by a final line"

    def parse(self, reader):
        "Parse everything, including the final line"
        parsed = ExcludingParser.parse(self, reader)
        # consume the bounding line itself before returning
        reader.nextline()
        return parsed
class BoundedDummy(Parser):
    "A bound parser that ignores everything"

    def parse(self, reader):
        "Parse the contents of the container"
        def skipline():
            "Discard the current line."
            reader.nextline()
        self.parseending(reader, skipline)
        # also discard the bounding line itself
        reader.nextline()
        return []
class StringParser(Parser):
    "Parses just a string"

    def parseheader(self, reader):
        "Do nothing, just take note"
        self.begin = reader.linenumber + 1
        return []

    def parse(self, reader):
        "Parse a single line"
        line = reader.currentline()
        reader.nextline()
        return line
class InsetParser(BoundedParser):
    "Parses a LyX inset"

    def parse(self, reader):
        "Parse inset parameters into a dictionary"
        start = ContainerConfig.string['startcommand']
        # consume parameter lines until a blank line or a nested command
        current = reader.currentline()
        while current != '' and not current.startswith(start):
            self.parseparameter(reader)
            current = reader.currentline()
        return BoundedParser.parse(self, reader)
class ContainerOutput(object):
    "The generic HTML output for a container."

    def gethtml(self, container):
        "Show an error."
        # subclasses must override; reaching this means a container was left
        # with the abstract base output
        Trace.error('gethtml() not implemented for ' + unicode(self))

    def isempty(self):
        "Decide if the output is empty: by default, not empty."
        return False
class EmptyOutput(ContainerOutput):
    # Output for containers that contribute no HTML at all.

    def gethtml(self, container):
        "Return empty HTML code."
        return []

    def isempty(self):
        "This output is particularly empty."
        return True
class FixedOutput(ContainerOutput):
    "Fixed output"

    def gethtml(self, container):
        "Return constant HTML code"
        # the container carries its ready-made HTML in the "html" attribute
        return container.html
class ContentsOutput(ContainerOutput):
    "Outputs the contents converted to HTML"

    def gethtml(self, container):
        "Return the HTML code"
        pieces = []
        if container.contents == None:
            return pieces
        for element in container.contents:
            if not hasattr(element, 'gethtml'):
                # an element without gethtml() cannot be rendered; report it
                # and return what has been accumulated so far
                Trace.error('No html in ' + element.__class__.__name__ + ': ' + unicode(element))
                return pieces
            pieces += element.gethtml()
        return pieces
class TaggedOutput(ContentsOutput):
    "Outputs an HTML tag surrounding the contents."
    # class-level defaults; settag()/setbreaklines() override per instance
    tag = None
    breaklines = False
    empty = False

    def settag(self, tag, breaklines=False, empty=False):
        "Set the value for the tag and other attributes."
        self.tag = tag
        if breaklines:
            self.breaklines = breaklines
        if empty:
            self.empty = empty
        return self

    def setbreaklines(self, breaklines):
        "Set the value for breaklines."
        self.breaklines = breaklines
        return self

    def gethtml(self, container):
        "Return the HTML code."
        if self.empty:
            # empty elements render as a single self-closing tag
            return [self.selfclosing(container)]
        html = [self.open(container)]
        html += ContentsOutput.gethtml(self, container)
        html.append(self.close(container))
        return html

    def open(self, container):
        "Get opening line."
        if not self.checktag():
            return ''
        open = '<' + self.tag + '>'
        if self.breaklines:
            return open + '\n'
        return open

    def close(self, container):
        "Get closing line."
        if not self.checktag():
            return ''
        # self.tag may include attributes; only the tag name is closed
        close = '</' + self.tag.split()[0] + '>'
        if self.breaklines:
            return '\n' + close + '\n'
        return close

    def selfclosing(self, container):
        "Get self-closing line."
        if not self.checktag():
            return ''
        selfclosing = '<' + self.tag + '/>'
        if self.breaklines:
            return selfclosing + '\n'
        return selfclosing

    def checktag(self):
        "Check that the tag is valid."
        if not self.tag:
            # report on self: the original referenced an undefined name
            # ("container"), so a missing tag crashed with NameError instead
            # of being reported and skipped
            Trace.error('No tag in ' + unicode(self))
            return False
        if self.tag == '':
            return False
        return True
class FilteredOutput(ContentsOutput):
"Returns the output in the contents, but filtered:"
"some strings are replaced by others."
def __init__(self):
"Initialize the filters."
self.filters = []
def addfilter(self, original, replacement):
"Add a new filter: replace the original by the replacement."
self.filters.append((original, replacement))
def gethtml(self, container):
"Return the HTML code"
result = []
html = ContentsOutput.gethtml(self, container)
for line in html:
result.append(self.filter(line))
return result
def filter(self, line):
"Filter a single line with all available filters."
for original, replacement in self.filters:
if original in line:
line = line.replace(original, replacement)
return line
class StringOutput(ContainerOutput):
"Returns a bare string as output"
def gethtml(self, container):
"Return a bare string"
return [container.string]
import sys
import codecs
class LineReader(object):
"Reads a file line by line"
def __init__(self, filename):
if isinstance(filename, file):
self.file = filename
else:
self.file = codecs.open(filename, 'rU', 'utf-8')
self.linenumber = 1
self.lastline = None
self.current = None
self.mustread = True
self.depleted = False
try:
self.readline()
except UnicodeDecodeError:
# try compressed file
import gzip
self.file = gzip.open(filename, 'rb')
self.readline()
def setstart(self, firstline):
"Set the first line to read."
for i in range(firstline):
self.file.readline()
self.linenumber = firstline
def setend(self, lastline):
"Set the last line to read."
self.lastline = lastline
def currentline(self):
"Get the current line"
if self.mustread:
self.readline()
return self.current
def nextline(self):
"Go to next line"
if self.depleted:
Trace.fatal('Read beyond file end')
self.mustread = True
def readline(self):
"Read a line from elyxer.file"
self.current = self.file.readline()
if not isinstance(self.file, codecs.StreamReaderWriter):
self.current = self.current.decode('utf-8')
if len(self.current) == 0:
self.depleted = True
self.current = self.current.rstrip('\n\r')
self.linenumber += 1
self.mustread = False
Trace.prefix = 'Line ' + unicode(self.linenumber) + ': '
if self.linenumber % 1000 == 0:
Trace.message('Parsing')
def finished(self):
"Find out if the file is finished"
if self.lastline and self.linenumber == self.lastline:
return True
if self.mustread:
self.readline()
return self.depleted
def close(self):
self.file.close()
class LineWriter(object):
"Writes a file as a series of lists"
file = False
def __init__(self, filename):
if isinstance(filename, file):
self.file = filename
self.filename = None
else:
self.filename = filename
def write(self, strings):
"Write a list of strings"
for string in strings:
if not isinstance(string, basestring):
Trace.error('Not a string: ' + unicode(string) + ' in ' + unicode(strings))
return
self.writestring(string)
def writestring(self, string):
"Write a string"
if not self.file:
self.file = codecs.open(self.filename, 'w', "utf-8")
if self.file == sys.stdout and sys.version_info < (3,0):
string = string.encode('utf-8')
self.file.write(string)
def writeline(self, line):
"Write a line to file"
self.writestring(line + '\n')
def close(self):
self.file.close()
class Globable(object):
"""A bit of text which can be globbed (lumped together in bits).
Methods current(), skipcurrent(), checkfor() and isout() have to be
implemented by subclasses."""
leavepending = False
def __init__(self):
self.endinglist = EndingList()
def checkbytemark(self):
"Check for a Unicode byte mark and skip it."
if self.finished():
return
if ord(self.current()) == 0xfeff:
self.skipcurrent()
def isout(self):
"Find out if we are out of the position yet."
Trace.error('Unimplemented isout()')
return True
def current(self):
"Return the current character."
Trace.error('Unimplemented current()')
return ''
def checkfor(self, string):
"Check for the given string in the current position."
Trace.error('Unimplemented checkfor()')
return False
def finished(self):
"Find out if the current text has finished."
if self.isout():
if not self.leavepending:
self.endinglist.checkpending()
return True
return self.endinglist.checkin(self)
def skipcurrent(self):
"Return the current character and skip it."
Trace.error('Unimplemented skipcurrent()')
return ''
def glob(self, currentcheck):
"Glob a bit of text that satisfies a check on the current char."
glob = ''
while not self.finished() and currentcheck():
glob += self.skipcurrent()
return glob
def globalpha(self):
"Glob a bit of alpha text"
return self.glob(lambda: self.current().isalpha())
def globnumber(self):
"Glob a row of digits."
return self.glob(lambda: self.current().isdigit())
def isidentifier(self):
"Return if the current character is alphanumeric or _."
if self.current().isalnum() or self.current() == '_':
return True
return False
def globidentifier(self):
"Glob alphanumeric and _ symbols."
return self.glob(self.isidentifier)
def isvalue(self):
"Return if the current character is a value character:"
"not a bracket or a space."
if self.current().isspace():
return False
if self.current() in '{}()':
return False
return True
def globvalue(self):
"Glob a value: any symbols but brackets."
return self.glob(self.isvalue)
def skipspace(self):
"Skip all whitespace at current position."
return self.glob(lambda: self.current().isspace())
def globincluding(self, magicchar):
"Glob a bit of text up to (including) the magic char."
glob = self.glob(lambda: self.current() != magicchar) + magicchar
self.skip(magicchar)
return glob
def globexcluding(self, excluded):
"Glob a bit of text up until (excluding) any excluded character."
return self.glob(lambda: self.current() not in excluded)
def pushending(self, ending, optional = False):
"Push a new ending to the bottom"
self.endinglist.add(ending, optional)
def popending(self, expected = None):
"Pop the ending found at the current position"
if self.isout() and self.leavepending:
return expected
ending = self.endinglist.pop(self)
if expected and expected != ending:
Trace.error('Expected ending ' + expected + ', got ' + ending)
self.skip(ending)
return ending
def nextending(self):
"Return the next ending in the queue."
nextending = self.endinglist.findending(self)
if not nextending:
return None
return nextending.ending
class EndingList(object):
"A list of position endings"
def __init__(self):
self.endings = []
def add(self, ending, optional = False):
"Add a new ending to the list"
self.endings.append(PositionEnding(ending, optional))
def pickpending(self, pos):
"Pick any pending endings from a parse position."
self.endings += pos.endinglist.endings
def checkin(self, pos):
"Search for an ending"
if self.findending(pos):
return True
return False
def pop(self, pos):
"Remove the ending at the current position"
if pos.isout():
Trace.error('No ending out of bounds')
return ''
ending = self.findending(pos)
if not ending:
Trace.error('No ending at ' + pos.current())
return ''
for each in reversed(self.endings):
self.endings.remove(each)
if each == ending:
return each.ending
elif not each.optional:
Trace.error('Removed non-optional ending ' + each)
Trace.error('No endings left')
return ''
def findending(self, pos):
"Find the ending at the current position"
if len(self.endings) == 0:
return None
for index, ending in enumerate(reversed(self.endings)):
if ending.checkin(pos):
return ending
if not ending.optional:
return None
return None
def checkpending(self):
"Check if there are any pending endings"
if len(self.endings) != 0:
Trace.error('Pending ' + unicode(self) + ' left open')
def __unicode__(self):
"Printable representation"
string = 'endings ['
for ending in self.endings:
string += unicode(ending) + ','
if len(self.endings) > 0:
string = string[:-1]
return string + ']'
class PositionEnding(object):
"An ending for a parsing position"
def __init__(self, ending, optional):
self.ending = ending
self.optional = optional
def checkin(self, pos):
"Check for the ending"
return pos.checkfor(self.ending)
def __unicode__(self):
"Printable representation"
string = 'Ending ' + self.ending
if self.optional:
string += ' (optional)'
return string
class Position(Globable):
"""A position in a text to parse.
Including those in Globable, functions to implement by subclasses are:
skip(), identifier(), extract(), isout() and current()."""
def __init__(self):
Globable.__init__(self)
def skip(self, string):
"Skip a string"
Trace.error('Unimplemented skip()')
def identifier(self):
"Return an identifier for the current position."
Trace.error('Unimplemented identifier()')
return 'Error'
def extract(self, length):
"Extract the next string of the given length, or None if not enough text,"
"without advancing the parse position."
Trace.error('Unimplemented extract()')
return None
def checkfor(self, string):
"Check for a string at the given position."
return string == self.extract(len(string))
def checkforlower(self, string):
"Check for a string in lower case."
extracted = self.extract(len(string))
if not extracted:
return False
return string.lower() == self.extract(len(string)).lower()
def skipcurrent(self):
"Return the current character and skip it."
current = self.current()
self.skip(current)
return current
def next(self):
"Advance the position and return the next character."
self.skipcurrent()
return self.current()
def checkskip(self, string):
"Check for a string at the given position; if there, skip it"
if not self.checkfor(string):
return False
self.skip(string)
return True
def error(self, message):
"Show an error message and the position identifier."
Trace.error(message + ': ' + self.identifier())
class TextPosition(Position):
"A parse position based on a raw text."
def __init__(self, text):
"Create the position from elyxer.some text."
Position.__init__(self)
self.pos = 0
self.text = text
self.checkbytemark()
def skip(self, string):
"Skip a string of characters."
self.pos += len(string)
def identifier(self):
"Return a sample of the remaining text."
length = 30
if self.pos + length > len(self.text):
length = len(self.text) - self.pos
return '*' + self.text[self.pos:self.pos + length] + '*'
def isout(self):
"Find out if we are out of the text yet."
return self.pos >= len(self.text)
def current(self):
"Return the current character, assuming we are not out."
return self.text[self.pos]
def extract(self, length):
"Extract the next string of the given length, or None if not enough text."
if self.pos + length > len(self.text):
return None
return self.text[self.pos : self.pos + length]
class FilePosition(Position):
"A parse position based on an underlying file."
def __init__(self, filename):
"Create the position from a file."
Position.__init__(self)
self.reader = LineReader(filename)
self.pos = 0
self.checkbytemark()
def skip(self, string):
"Skip a string of characters."
length = len(string)
while self.pos + length > len(self.reader.currentline()):
length -= len(self.reader.currentline()) - self.pos + 1
self.nextline()
self.pos += length
def currentline(self):
"Get the current line of the underlying file."
return self.reader.currentline()
def nextline(self):
"Go to the next line."
self.reader.nextline()
self.pos = 0
def linenumber(self):
"Return the line number of the file."
return self.reader.linenumber + 1
def identifier(self):
"Return the current line and line number in the file."
before = self.reader.currentline()[:self.pos - 1]
after = self.reader.currentline()[self.pos:]
return 'line ' + unicode(self.getlinenumber()) + ': ' + before + '*' + after
def isout(self):
"Find out if we are out of the text yet."
if self.pos > len(self.reader.currentline()):
if self.pos > len(self.reader.currentline()) + 1:
Trace.error('Out of the line ' + self.reader.currentline() + ': ' + unicode(self.pos))
self.nextline()
return self.reader.finished()
def current(self):
"Return the current character, assuming we are not out."
if self.pos == len(self.reader.currentline()):
return '\n'
if self.pos > len(self.reader.currentline()):
Trace.error('Out of the line ' + self.reader.currentline() + ': ' + unicode(self.pos))
return '*'
return self.reader.currentline()[self.pos]
def extract(self, length):
"Extract the next string of the given length, or None if not enough text."
if self.pos + length > len(self.reader.currentline()):
return None
return self.reader.currentline()[self.pos : self.pos + length]
class Container(object):
"A container for text and objects in a lyx file"
partkey = None
parent = None
begin = None
def __init__(self):
self.contents = list()
def process(self):
"Process contents"
pass
def gethtml(self):
"Get the resulting HTML"
html = self.output.gethtml(self)
if isinstance(html, basestring):
Trace.error('Raw string ' + html)
html = [html]
return self.escapeall(html)
def escapeall(self, lines):
"Escape all lines in an array according to the output options."
result = []
for line in lines:
if Options.html:
line = self.escape(line, EscapeConfig.html)
if Options.iso885915:
line = self.escape(line, EscapeConfig.iso885915)
line = self.escapeentities(line)
elif not Options.unicode:
line = self.escape(line, EscapeConfig.nonunicode)
result.append(line)
return result
def escape(self, line, replacements = EscapeConfig.entities):
"Escape a line with replacements from elyxer.a map"
pieces = replacements.keys()
# do them in order
pieces.sort()
for piece in pieces:
if piece in line:
line = line.replace(piece, replacements[piece])
return line
def escapeentities(self, line):
"Escape all Unicode characters to HTML entities."
result = ''
pos = TextPosition(line)
while not pos.finished():
if ord(pos.current()) > 128:
codepoint = hex(ord(pos.current()))
if codepoint == '0xd835':
codepoint = hex(ord(pos.next()) + 0xf800)
result += '&#' + codepoint[1:] + ';'
else:
result += pos.current()
pos.skipcurrent()
return result
def searchall(self, type):
"Search for all embedded containers of a given type"
list = []
self.searchprocess(type, lambda container: list.append(container))
return list
def searchremove(self, type):
"Search for all containers of a type and remove them"
list = self.searchall(type)
for container in list:
container.parent.contents.remove(container)
return list
def searchprocess(self, type, process):
"Search for elements of a given type and process them"
self.locateprocess(lambda container: isinstance(container, type), process)
def locateprocess(self, locate, process):
"Search for all embedded containers and process them"
for container in self.contents:
container.locateprocess(locate, process)
if locate(container):
process(container)
def recursivesearch(self, locate, recursive, process):
"Perform a recursive search in the container."
for container in self.contents:
if recursive(container):
container.recursivesearch(locate, recursive, process)
if locate(container):
process(container)
def extracttext(self):
"Extract all text from elyxer.allowed containers."
result = ''
constants = ContainerExtractor(ContainerConfig.extracttext).extract(self)
for constant in constants:
result += constant.string
return result
def group(self, index, group, isingroup):
"Group some adjoining elements into a group"
if index >= len(self.contents):
return
if hasattr(self.contents[index], 'grouped'):
return
while index < len(self.contents) and isingroup(self.contents[index]):
self.contents[index].grouped = True
group.contents.append(self.contents[index])
self.contents.pop(index)
self.contents.insert(index, group)
def remove(self, index):
"Remove a container but leave its contents"
container = self.contents[index]
self.contents.pop(index)
while len(container.contents) > 0:
self.contents.insert(index, container.contents.pop())
def tree(self, level = 0):
"Show in a tree"
Trace.debug(" " * level + unicode(self))
for container in self.contents:
container.tree(level + 1)
def getparameter(self, name):
"Get the value of a parameter, if present."
if not name in self.parameters:
return None
return self.parameters[name]
def getparameterlist(self, name):
"Get the value of a comma-separated parameter as a list."
paramtext = self.getparameter(name)
if not paramtext:
return []
return paramtext.split(',')
def hasemptyoutput(self):
"Check if the parent's output is empty."
current = self.parent
while current:
if current.output.isempty():
return True
current = current.parent
return False
def __unicode__(self):
"Get a description"
if not self.begin:
return self.__class__.__name__
return self.__class__.__name__ + '@' + unicode(self.begin)
class BlackBox(Container):
"A container that does not output anything"
def __init__(self):
self.parser = LoneCommand()
self.output = EmptyOutput()
self.contents = []
class LyXFormat(BlackBox):
"Read the lyxformat command"
def process(self):
"Show warning if version < 276"
version = int(self.header[1])
if version < 276:
Trace.error('Warning: unsupported old format version ' + str(version))
if version > int(GeneralConfig.version['lyxformat']):
Trace.error('Warning: unsupported new format version ' + str(version))
class StringContainer(Container):
"A container for a single string"
parsed = None
def __init__(self):
self.parser = StringParser()
self.output = StringOutput()
self.string = ''
def process(self):
"Replace special chars from elyxer.the contents."
if self.parsed:
self.string = self.replacespecial(self.parsed)
self.parsed = None
def replacespecial(self, line):
"Replace all special chars from elyxer.a line"
replaced = self.escape(line, EscapeConfig.entities)
replaced = self.changeline(replaced)
if ContainerConfig.string['startcommand'] in replaced and len(replaced) > 1:
# unprocessed commands
if self.begin:
message = 'Unknown command at ' + unicode(self.begin) + ': '
else:
message = 'Unknown command: '
Trace.error(message + replaced.strip())
return replaced
def changeline(self, line):
line = self.escape(line, EscapeConfig.chars)
if not ContainerConfig.string['startcommand'] in line:
return line
line = self.escape(line, EscapeConfig.commands)
return line
def extracttext(self):
"Return all text."
return self.string
def __unicode__(self):
"Return a printable representation."
result = 'StringContainer'
if self.begin:
result += '@' + unicode(self.begin)
ellipsis = '...'
if len(self.string.strip()) <= 15:
ellipsis = ''
return result + ' (' + self.string.strip()[:15] + ellipsis + ')'
class Constant(StringContainer):
"A constant string"
def __init__(self, text):
self.contents = []
self.string = text
self.output = StringOutput()
def __unicode__(self):
return 'Constant: ' + self.string
class TaggedText(Container):
"Text inside a tag"
output = None
def __init__(self):
self.parser = TextParser(self)
self.output = TaggedOutput()
def complete(self, contents, tag, breaklines=False):
"Complete the tagged text and return it"
self.contents = contents
self.output.tag = tag
self.output.breaklines = breaklines
return self
def constant(self, text, tag, breaklines=False):
"Complete the tagged text with a constant"
constant = Constant(text)
return self.complete([constant], tag, breaklines)
def __unicode__(self):
"Return a printable representation."
if not hasattr(self.output, 'tag'):
return 'Emtpy tagged text'
if not self.output.tag:
return 'Tagged <unknown tag>'
return 'Tagged <' + self.output.tag + '>'
class DocumentParameters(object):
"Global parameters for the document."
pdftitle = None
indentstandard = False
tocdepth = 10
startinglevel = 0
maxdepth = 10
language = None
bibliography = None
outputchanges = False
displaymode = False
class FormulaParser(Parser):
"Parses a formula"
def parseheader(self, reader):
"See if the formula is inlined"
self.begin = reader.linenumber + 1
type = self.parsetype(reader)
if not type:
reader.nextline()
type = self.parsetype(reader)
if not type:
Trace.error('Unknown formula type in ' + reader.currentline().strip())
return ['unknown']
return [type]
def parsetype(self, reader):
"Get the formula type from the first line."
if reader.currentline().find(FormulaConfig.starts['simple']) >= 0:
return 'inline'
if reader.currentline().find(FormulaConfig.starts['complex']) >= 0:
return 'block'
if reader.currentline().find(FormulaConfig.starts['unnumbered']) >= 0:
return 'block'
if reader.currentline().find(FormulaConfig.starts['beginbefore']) >= 0:
return 'numbered'
return None
def parse(self, reader):
"Parse the formula until the end"
formula = self.parseformula(reader)
while not reader.currentline().startswith(self.ending):
stripped = reader.currentline().strip()
if len(stripped) > 0:
Trace.error('Unparsed formula line ' + stripped)
reader.nextline()
reader.nextline()
return formula
def parseformula(self, reader):
"Parse the formula contents"
simple = FormulaConfig.starts['simple']
if simple in reader.currentline():
rest = reader.currentline().split(simple, 1)[1]
if simple in rest:
# formula is $...$
return self.parsesingleliner(reader, simple, simple)
# formula is multiline $...$
return self.parsemultiliner(reader, simple, simple)
if FormulaConfig.starts['complex'] in reader.currentline():
# formula of the form \[...\]
return self.parsemultiliner(reader, FormulaConfig.starts['complex'],
FormulaConfig.endings['complex'])
beginbefore = FormulaConfig.starts['beginbefore']
beginafter = FormulaConfig.starts['beginafter']
if beginbefore in reader.currentline():
if reader.currentline().strip().endswith(beginafter):
current = reader.currentline().strip()
endsplit = current.split(beginbefore)[1].split(beginafter)
startpiece = beginbefore + endsplit[0] + beginafter
endbefore = FormulaConfig.endings['endbefore']
endafter = FormulaConfig.endings['endafter']
endpiece = endbefore + endsplit[0] + endafter
return startpiece + self.parsemultiliner(reader, startpiece, endpiece) + endpiece
Trace.error('Missing ' + beginafter + ' in ' + reader.currentline())
return ''
begincommand = FormulaConfig.starts['command']
beginbracket = FormulaConfig.starts['bracket']
if begincommand in reader.currentline() and beginbracket in reader.currentline():
endbracket = FormulaConfig.endings['bracket']
return self.parsemultiliner(reader, beginbracket, endbracket)
Trace.error('Formula beginning ' + reader.currentline() + ' is unknown')
return ''
def parsesingleliner(self, reader, start, ending):
"Parse a formula in one line"
line = reader.currentline().strip()
if not start in line:
Trace.error('Line ' + line + ' does not contain formula start ' + start)
return ''
if not line.endswith(ending):
Trace.error('Formula ' + line + ' does not end with ' + ending)
return ''
index = line.index(start)
rest = line[index + len(start):-len(ending)]
reader.nextline()
return rest
def parsemultiliner(self, reader, start, ending):
"Parse a formula in multiple lines"
formula = ''
line = reader.currentline()
if not start in line:
Trace.error('Line ' + line.strip() + ' does not contain formula start ' + start)
return ''
index = line.index(start)
line = line[index + len(start):].strip()
while not line.endswith(ending):
formula += line + '\n'
reader.nextline()
line = reader.currentline()
formula += line[:-len(ending)]
reader.nextline()
return formula
class MacroParser(FormulaParser):
"A parser for a formula macro."
def parseheader(self, reader):
"See if the formula is inlined"
self.begin = reader.linenumber + 1
return ['inline']
def parse(self, reader):
"Parse the formula until the end"
formula = self.parsemultiliner(reader, self.parent.start, self.ending)
reader.nextline()
return formula
class FormulaBit(Container):
"A bit of a formula"
type = None
size = 1
original = ''
def __init__(self):
"The formula bit type can be 'alpha', 'number', 'font'."
self.contents = []
self.output = ContentsOutput()
def setfactory(self, factory):
"Set the internal formula factory."
self.factory = factory
return self
def add(self, bit):
"Add any kind of formula bit already processed"
self.contents.append(bit)
self.original += bit.original
bit.parent = self
def skiporiginal(self, string, pos):
"Skip a string and add it to the original formula"
self.original += string
if not pos.checkskip(string):
Trace.error('String ' + string + ' not at ' + pos.identifier())
def computesize(self):
"Compute the size of the bit as the max of the sizes of all contents."
if len(self.contents) == 0:
return 1
self.size = max([element.size for element in self.contents])
return self.size
def clone(self):
"Return a copy of itself."
return self.factory.parseformula(self.original)
def __unicode__(self):
"Get a string representation"
return self.__class__.__name__ + ' read in ' + self.original
class TaggedBit(FormulaBit):
"A tagged string in a formula"
def constant(self, constant, tag):
"Set the constant and the tag"
self.output = TaggedOutput().settag(tag)
self.add(FormulaConstant(constant))
return self
def complete(self, contents, tag, breaklines = False):
"Set the constant and the tag"
self.contents = contents
self.output = TaggedOutput().settag(tag, breaklines)
return self
def selfcomplete(self, tag):
"Set the self-closing tag, no contents (as in <hr/>)."
self.output = TaggedOutput().settag(tag, empty = True)
return self
class FormulaConstant(Constant):
"A constant string in a formula"
def __init__(self, string):
"Set the constant string"
Constant.__init__(self, string)
self.original = string
self.size = 1
self.type = None
def computesize(self):
"Compute the size of the constant: always 1."
return self.size
def clone(self):
"Return a copy of itself."
return FormulaConstant(self.original)
def __unicode__(self):
"Return a printable representation."
return 'Formula constant: ' + self.string
class RawText(FormulaBit):
"A bit of text inside a formula"
def detect(self, pos):
"Detect a bit of raw text"
return pos.current().isalpha()
def parsebit(self, pos):
"Parse alphabetic text"
alpha = pos.globalpha()
self.add(FormulaConstant(alpha))
self.type = 'alpha'
class FormulaSymbol(FormulaBit):
"A symbol inside a formula"
modified = FormulaConfig.modified
unmodified = FormulaConfig.unmodified['characters']
def detect(self, pos):
"Detect a symbol"
if pos.current() in FormulaSymbol.unmodified:
return True
if pos.current() in FormulaSymbol.modified:
return True
return False
def parsebit(self, pos):
"Parse the symbol"
if pos.current() in FormulaSymbol.unmodified:
self.addsymbol(pos.current(), pos)
return
if pos.current() in FormulaSymbol.modified:
self.addsymbol(FormulaSymbol.modified[pos.current()], pos)
return
Trace.error('Symbol ' + pos.current() + ' not found')
def addsymbol(self, symbol, pos):
"Add a symbol"
self.skiporiginal(pos.current(), pos)
self.contents.append(FormulaConstant(symbol))
class FormulaNumber(FormulaBit):
"A string of digits in a formula"
def detect(self, pos):
"Detect a digit"
return pos.current().isdigit()
def parsebit(self, pos):
"Parse a bunch of digits"
digits = pos.glob(lambda: pos.current().isdigit())
self.add(FormulaConstant(digits))
self.type = 'number'
class Comment(FormulaBit):
"A LaTeX comment: % to the end of the line."
start = FormulaConfig.starts['comment']
def detect(self, pos):
"Detect the %."
return pos.current() == self.start
def parsebit(self, pos):
"Parse to the end of the line."
self.original += pos.globincluding('\n')
class WhiteSpace(FormulaBit):
"Some white space inside a formula."
def detect(self, pos):
"Detect the white space."
return pos.current().isspace()
def parsebit(self, pos):
"Parse all whitespace."
self.original += pos.skipspace()
def __unicode__(self):
"Return a printable representation."
return 'Whitespace: *' + self.original + '*'
class Bracket(FormulaBit):
"A {} bracket inside a formula"
start = FormulaConfig.starts['bracket']
ending = FormulaConfig.endings['bracket']
def __init__(self):
"Create a (possibly literal) new bracket"
FormulaBit.__init__(self)
self.inner = None
def detect(self, pos):
"Detect the start of a bracket"
return pos.checkfor(self.start)
def parsebit(self, pos):
"Parse the bracket"
self.parsecomplete(pos, self.innerformula)
return self
def parsetext(self, pos):
"Parse a text bracket"
self.parsecomplete(pos, self.innertext)
return self
def parseliteral(self, pos):
"Parse a literal bracket"
self.parsecomplete(pos, self.innerliteral)
return self
def parsecomplete(self, pos, innerparser):
"Parse the start and end marks"
if not pos.checkfor(self.start):
Trace.error('Bracket should start with ' + self.start + ' at ' + pos.identifier())
return None
self.skiporiginal(self.start, pos)
pos.pushending(self.ending)
innerparser(pos)
self.original += pos.popending(self.ending)
self.computesize()
def innerformula(self, pos):
"Parse a whole formula inside the bracket"
while not pos.finished():
self.add(self.factory.parseany(pos))
def innertext(self, pos):
"Parse some text inside the bracket, following textual rules."
specialchars = FormulaConfig.symbolfunctions.keys()
specialchars.append(FormulaConfig.starts['command'])
specialchars.append(FormulaConfig.starts['bracket'])
specialchars.append(Comment.start)
while not pos.finished():
if pos.current() in specialchars:
self.add(self.factory.parseany(pos))
if pos.checkskip(' '):
self.original += ' '
else:
self.add(FormulaConstant(pos.skipcurrent()))
def innerliteral(self, pos):
"Parse a literal inside the bracket, which does not generate HTML."
self.literal = ''
while not pos.finished() and not pos.current() == self.ending:
if pos.current() == self.start:
self.parseliteral(pos)
else:
self.literal += pos.skipcurrent()
self.original += self.literal
class SquareBracket(Bracket):
"A [] bracket inside a formula"
start = FormulaConfig.starts['squarebracket']
ending = FormulaConfig.endings['squarebracket']
def clone(self):
"Return a new square bracket with the same contents."
bracket = SquareBracket()
bracket.contents = self.contents
return bracket
class MathsProcessor(object):
"A processor for a maths construction inside the FormulaProcessor."
def process(self, contents, index):
"Process an element inside a formula."
Trace.error('Unimplemented process() in ' + unicode(self))
def __unicode__(self):
"Return a printable description."
return 'Maths processor ' + self.__class__.__name__
class FormulaProcessor(object):
"A processor specifically for formulas."
processors = []
def process(self, bit):
"Process the contents of every formula bit, recursively."
self.processcontents(bit)
self.processinsides(bit)
self.traversewhole(bit)
def processcontents(self, bit):
"Process the contents of a formula bit."
if not isinstance(bit, FormulaBit):
return
bit.process()
for element in bit.contents:
self.processcontents(element)
def processinsides(self, bit):
"Process the insides (limits, brackets) in a formula bit."
if not isinstance(bit, FormulaBit):
return
for index, element in enumerate(bit.contents):
for processor in self.processors:
processor.process(bit.contents, index)
# continue with recursive processing
self.processinsides(element)
def traversewhole(self, formula):
"Traverse over the contents to alter variables and space units."
last = None
for bit, contents in self.traverse(formula):
if bit.type == 'alpha':
self.italicize(bit, contents)
elif bit.type == 'font' and last and last.type == 'number':
bit.contents.insert(0, FormulaConstant(u' '))
last = bit
def traverse(self, bit):
"Traverse a formula and yield a flattened structure of (bit, list) pairs."
for element in bit.contents:
if hasattr(element, 'type') and element.type:
yield (element, bit.contents)
elif isinstance(element, FormulaBit):
for pair in self.traverse(element):
yield pair
def italicize(self, bit, contents):
"Italicize the given bit of text."
index = contents.index(bit)
contents[index] = TaggedBit().complete([bit], 'i')
class Formula(Container):
  "A LaTeX formula"
  def __init__(self):
    self.parser = FormulaParser()
    self.output = TaggedOutput().settag('span class="formula"')
  def process(self):
    "Convert the formula to tags"
    if self.header[0] == 'inline':
      DocumentParameters.displaymode = False
    else:
      # block formulas get display mode and their own div
      DocumentParameters.displaymode = True
      self.output.settag('div class="formula"', True)
    # dispatch on the output method chosen in the command-line options
    if Options.jsmath:
      self.jsmath()
    elif Options.mathjax:
      self.mathjax()
    elif Options.googlecharts:
      self.googlecharts()
    else:
      self.classic()
  def jsmath(self):
    "Make the contents for jsMath."
    if self.header[0] != 'inline':
      self.output = TaggedOutput().settag('div class="math"')
    else:
      self.output = TaggedOutput().settag('span class="math"')
    # jsMath renders the raw LaTeX on the client side
    self.contents = [Constant(self.parsed)]
  def mathjax(self):
    "Make the contents for MathJax."
    self.output.tag = 'span class="MathJax_Preview"'
    tag = 'script type="math/tex'
    if self.header[0] != 'inline':
      tag += ';mode=display'
    self.contents = [TaggedText().constant(self.parsed, tag + '"', True)]
  def googlecharts(self):
    "Make the contents using Google Charts http://code.google.com/apis/chart/."
    url = FormulaConfig.urls['googlecharts'] + urllib.quote_plus(self.parsed)
    img = '<img class="chart" src="' + url + '" alt="' + self.parsed + '"/>'
    self.contents = [Constant(img)]
  def classic(self):
    "Make the contents using classic output generation with XHTML and CSS."
    whole = FormulaFactory().parseformula(self.parsed)
    FormulaProcessor().process(whole)
    whole.parent = self
    self.contents = [whole]
  def parse(self, pos):
    "Parse using a parse position instead of self.parser."
    # accepted delimiters: $$...$$, $...$, \(...\), \[...\]
    if pos.checkskip('$$'):
      self.parsedollarblock(pos)
    elif pos.checkskip('$'):
      self.parsedollarinline(pos)
    elif pos.checkskip('\\('):
      self.parseinlineto(pos, '\\)')
    elif pos.checkskip('\\['):
      self.parseblockto(pos, '\\]')
    else:
      pos.error('Unparseable formula')
    self.process()
    return self
  def parsedollarinline(self, pos):
    "Parse a $...$ formula."
    self.header = ['inline']
    self.parsedollar(pos)
  def parsedollarblock(self, pos):
    "Parse a $$...$$ formula."
    self.header = ['block']
    self.parsedollar(pos)
    # the first closing $ was consumed by parsedollar; require the second
    if not pos.checkskip('$'):
      pos.error('Formula should be $$...$$, but last $ is missing.')
  def parsedollar(self, pos):
    "Parse to the next $."
    pos.pushending('$')
    self.parsed = pos.globexcluding('$')
    pos.popending('$')
  def parseinlineto(self, pos, limit):
    "Parse a \\(...\\) formula."
    self.header = ['inline']
    self.parseupto(pos, limit)
  def parseblockto(self, pos, limit):
    "Parse a \\[...\\] formula."
    self.header = ['block']
    self.parseupto(pos, limit)
  def parseupto(self, pos, limit):
    "Parse a formula that ends with the given command."
    pos.pushending(limit)
    self.parsed = pos.glob(lambda: True)
    pos.popending(limit)
  def __unicode__(self):
    "Return a printable representation."
    if self.partkey and self.partkey.number:
      return 'Formula (' + self.partkey.number + ')'
    return 'Unnumbered formula'
class WholeFormula(FormulaBit):
  "Parse a complete formula."
  def detect(self, pos):
    "Anything short of the end of the formula can be parsed."
    return not pos.finished()
  def parsebit(self, pos):
    "Parse the formula bit by bit until the end."
    while not pos.finished():
      self.add(self.factory.parseany(pos))
class FormulaFactory(object):
  "Construct bits of formula"
  # bit types will be appended later
  types = [FormulaSymbol, RawText, FormulaNumber, Bracket, Comment, WhiteSpace]
  skippedtypes = [Comment, WhiteSpace]
  # True while a macro is being defined; silences unknown-command errors
  defining = False
  def __init__(self):
    "Initialize the map of instances."
    self.instances = dict()
  def detecttype(self, type, pos):
    "Detect a bit of a given type."
    if pos.finished():
      return False
    return self.instance(type).detect(pos)
  def instance(self, type):
    "Get an instance of the given type."
    # instances are cached; parsetype() clears an entry once it is consumed
    if not type in self.instances or not self.instances[type]:
      self.instances[type] = self.create(type)
    return self.instances[type]
  def create(self, type):
    "Create a new formula bit of the given type."
    return Cloner.create(type).setfactory(self)
  def clearskipped(self, pos):
    "Clear any skipped types."
    while not pos.finished():
      if not self.skipany(pos):
        return
    return
  def skipany(self, pos):
    "Skip any skipped types."
    for type in self.skippedtypes:
      if self.instance(type).detect(pos):
        return self.parsetype(type, pos)
    return None
  def parseany(self, pos):
    "Parse any formula bit at the current location."
    for type in self.types + self.skippedtypes:
      if self.detecttype(type, pos):
        return self.parsetype(type, pos)
    Trace.error('Unrecognized formula at ' + pos.identifier())
    return FormulaConstant(pos.skipcurrent())
  def parsetype(self, type, pos):
    "Parse the given type and return it."
    bit = self.instance(type)
    # invalidate the cached instance: it is being filled in right now
    self.instances[type] = None
    returnedbit = bit.parsebit(pos)
    if returnedbit:
      return returnedbit.setfactory(self)
    return bit
  def parseformula(self, formula):
    "Parse a string of text that contains a whole formula."
    pos = TextPosition(formula)
    whole = self.create(WholeFormula)
    if whole.detect(pos):
      whole.parsebit(pos)
      return whole
    # no formula found
    if not pos.finished():
      Trace.error('Unknown formula at: ' + pos.identifier())
      whole.add(TaggedBit().constant(formula, 'span class="unknown"'))
    return whole
import unicodedata
import gettext
class Translator(object):
  "Reads the configuration file and tries to find a translation."
  "Otherwise falls back to the messages in the config file."
  # singleton, assigned right after the class definition
  instance = None
  def translate(cls, key):
    "Get the translated message for a key."
    return cls.instance.getmessage(key)
  translate = classmethod(translate)
  def __init__(self):
    self.translation = None
    # the translation lookup is deferred until the first message is requested
    self.first = True
  def findtranslation(self):
    "Find the translation for the document language."
    self.langcodes = None
    if not DocumentParameters.language:
      Trace.error('No language in document')
      return
    if not DocumentParameters.language in TranslationConfig.languages:
      Trace.error('Unknown language ' + DocumentParameters.language)
      return
    # English is the default language: nothing to translate
    if TranslationConfig.languages[DocumentParameters.language] == 'en':
      return
    langcodes = [TranslationConfig.languages[DocumentParameters.language]]
    try:
      self.translation = gettext.translation('elyxer', None, langcodes)
    except IOError:
      Trace.error('No translation for ' + unicode(langcodes))
  def getmessage(self, key):
    "Get the translated message for the given key."
    if self.first:
      self.findtranslation()
      self.first = False
    message = self.getuntranslated(key)
    if not self.translation:
      return message
    try:
      message = self.translation.ugettext(message)
    except IOError:
      pass
    return message
  def getuntranslated(self, key):
    "Get the untranslated message."
    if not key in TranslationConfig.constants:
      Trace.error('Cannot translate ' + key)
      return key
    return TranslationConfig.constants[key]
Translator.instance = Translator()
class NumberCounter(object):
  "A counter for numbers (by default)."
  "The mode can be changed to return letters, roman numbers..."
  name = None
  value = None
  mode = None
  master = None
  letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
  symbols = NumberingConfig.sequence['symbols']
  romannumerals = [
      ('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100),
      ('XC', 90), ('L', 50), ('XL', 40), ('X', 10), ('IX', 9), ('V', 5),
      ('IV', 4), ('I', 1)
      ]
  def __init__(self, name):
    "Give a name to the counter."
    self.name = name
  def setmode(self, mode):
    "Set the counter mode. Can be changed at runtime."
    self.mode = mode
    return self
  def init(self, value):
    "Set an initial value."
    self.value = value
  def gettext(self):
    "Get the current value as a text string."
    return unicode(self.value)
  def getletter(self):
    "Get the current value as a letter."
    return self.getsequence(self.letters)
  def getsymbol(self):
    "Get the current value as a symbol."
    return self.getsequence(self.symbols)
  def getsequence(self, sequence):
    "Get the current value from a sequence, wrapping around its length."
    return sequence[(self.value - 1) % len(sequence)]
  def getroman(self):
    "Get the current value as a roman number."
    result = ''
    number = self.value
    for numeral, value in self.romannumerals:
      if number >= value:
        # explicit floor division: identical to / on Python 2 ints, but
        # keeps the repeat count an integer if run under Python 3
        result += numeral * (number // value)
        number = number % value
    return result
  def getvalue(self):
    "Get the current value as configured in the current mode."
    if not self.mode or self.mode in ['text', '1']:
      return self.gettext()
    if self.mode == 'A':
      return self.getletter()
    if self.mode == 'a':
      return self.getletter().lower()
    if self.mode == 'I':
      return self.getroman()
    if self.mode == '*':
      return self.getsymbol()
    Trace.error('Unknown counter mode ' + self.mode)
    return self.gettext()
  def getnext(self):
    "Increase the current value and get the next value as configured."
    if not self.value:
      self.value = 0
    self.value += 1
    return self.getvalue()
  def reset(self):
    "Reset the counter."
    self.value = 0
  def __unicode__(self):
    "Return a printable representation."
    result = 'Counter ' + self.name
    if self.mode:
      result += ' in mode ' + self.mode
    return result
class DependentCounter(NumberCounter):
  "A counter that is subordinate to another counter (its master)."
  def setmaster(self, master):
    "Attach the master counter and remember its current value."
    self.master = master
    self.last = self.master.getvalue()
    return self
  def getnext(self):
    "Advance the counter, restarting it whenever the master has moved on."
    if self.last != self.master.getvalue():
      self.reset()
    result = NumberCounter.getnext(self)
    self.last = self.master.getvalue()
    return result
  def getvalue(self):
    "Return the combined value: master.dependent."
    return self.master.getvalue() + '.' + NumberCounter.getvalue(self)
class NumberGenerator(object):
  "A number generator for unique sequences and hierarchical structures. Used in:"
  " * ordered part numbers: Chapter 3, Section 5.3."
  " * unique part numbers: Footnote 15, Bibliography cite [15]."
  " * chaptered part numbers: Figure 3.15, Equation (8.3)."
  " * unique roman part numbers: Part I, Book IV."
  # singleton generators, assigned after the class definitions below
  chaptered = None
  generator = None
  romanlayouts = [x.lower() for x in NumberingConfig.layouts['roman']]
  orderedlayouts = [x.lower() for x in NumberingConfig.layouts['ordered']]
  # shared map of counter name (lowercased type) -> NumberCounter
  counters = dict()
  appendix = None
  def deasterisk(self, type):
    "Remove the possible asterisk in a layout type."
    return type.replace('*', '')
  def isunique(self, type):
    "Find out if the layout type corresponds to a unique part."
    return self.isroman(type)
  def isroman(self, type):
    "Find out if the layout type should have roman numeration."
    return self.deasterisk(type).lower() in self.romanlayouts
  def isinordered(self, type):
    "Find out if the layout type corresponds to an (un)ordered part."
    return self.deasterisk(type).lower() in self.orderedlayouts
  def isnumbered(self, type):
    "Find out if the type for a layout corresponds to a numbered layout."
    # asterisked types are never numbered; roman types always are
    if '*' in type:
      return False
    if self.isroman(type):
      return True
    if not self.isinordered(type):
      return False
    if self.getlevel(type) > DocumentParameters.maxdepth:
      return False
    return True
  def isunordered(self, type):
    "Find out if the type contains an asterisk, basically."
    return '*' in type
  def getlevel(self, type):
    "Get the level that corresponds to a layout type."
    if self.isunique(type):
      return 0
    if not self.isinordered(type):
      Trace.error('Unknown layout type ' + type)
      return 0
    type = self.deasterisk(type).lower()
    level = self.orderedlayouts.index(type) + 1
    return level - DocumentParameters.startinglevel
  def getparttype(self, type):
    "Obtain the type for the part: without the asterisk, "
    "and switched to Appendix if necessary."
    if NumberGenerator.appendix and self.getlevel(type) == 1:
      return 'Appendix'
    return self.deasterisk(type)
  def generate(self, type):
    "Generate a number for a layout type."
    "Unique part types such as Part or Book generate roman numbers: Part I."
    "Ordered part types return dot-separated tuples: Chapter 5, Subsection 2.3.5."
    "Everything else generates unique numbers: Bibliography [1]."
    "Each invocation results in a new number."
    return self.getcounter(type).getnext()
  def getcounter(self, type):
    "Get the counter for the given type."
    type = type.lower()
    if not type in self.counters:
      self.counters[type] = self.create(type)
    return self.counters[type]
  def create(self, type):
    "Create a counter for the given type."
    # numbered layouts below the top level depend on the layout above them
    if self.isnumbered(type) and self.getlevel(type) > 1:
      index = self.orderedlayouts.index(type)
      above = self.orderedlayouts[index - 1]
      master = self.getcounter(above)
      return self.createdependent(type, master)
    counter = NumberCounter(type)
    if self.isroman(type):
      counter.setmode('I')
    return counter
  def getdependentcounter(self, type, master):
    "Get (or create) a counter of the given type that depends on another."
    if not type in self.counters or not self.counters[type].master:
      self.counters[type] = self.createdependent(type, master)
    return self.counters[type]
  def createdependent(self, type, master):
    "Create a dependent counter given the master."
    return DependentCounter(type).setmaster(master)
  def startappendix(self):
    "Start appendices here."
    firsttype = self.orderedlayouts[DocumentParameters.startinglevel]
    counter = self.getcounter(firsttype)
    # appendices are numbered with uppercase letters, restarting from A
    counter.setmode('A').reset()
    NumberGenerator.appendix = True
class ChapteredGenerator(NumberGenerator):
  "Generates numbers prefixed by the chapter, as in Chapter.Number."
  "Used in equations, figures: Equation (5.3), figure 8.15."
  def generate(self, type):
    "Produce a number tied to the current chapter. "
    "Article classes have no chapters, so a plain unique number is used."
    if DocumentParameters.startinglevel > 0:
      return NumberGenerator.generator.generate(type)
    chaptercounter = self.getcounter('Chapter')
    return self.getdependentcounter(type, chaptercounter).getnext()
NumberGenerator.chaptered = ChapteredGenerator()
NumberGenerator.generator = NumberGenerator()
class ContainerSize(object):
  "Stores and manipulates the size of a container."
  width = None
  height = None
  maxwidth = None
  maxheight = None
  scale = None
  def set(self, width = None, height = None):
    "Set the width and/or the height."
    self.setvalue('width', width)
    self.setvalue('height', height)
    return self
  def setmax(self, maxwidth = None, maxheight = None):
    "Set the maximum width and/or maximum height."
    self.setvalue('maxwidth', maxwidth)
    self.setvalue('maxheight', maxheight)
    return self
  def readparameters(self, container):
    "Read the size parameters off a container."
    for name in ['width', 'height', 'scale']:
      self.setparameter(container, name)
    self.checkvalidheight(container)
    return self
  def setparameter(self, container, name):
    "Read one size parameter from a container and store it when present."
    self.setvalue(name, container.getparameter(name))
  def setvalue(self, name, value):
    "Store a parameter value, but only when it is valid."
    cleaned = self.processparameter(value)
    if cleaned:
      setattr(self, name, cleaned)
  def checkvalidheight(self, container):
    "Erase the height parameter when it is not valid."
    special = container.getparameter('height_special')
    if special == 'totalheight' and self.height and self.extractnumber(self.height) == '1':
      # 1 * totalheight is just the natural height: drop it
      self.height = None
  def processparameter(self, value):
    "Validate and clean up a parameter value; return None when invalid."
    if not value:
      return None
    if self.extractnumber(value) == '0':
      return None
    for ignored in StyleConfig.size['ignoredtexts']:
      if ignored in value:
        value = value.replace(ignored, '')
    return value
  def extractnumber(self, text):
    "Extract the leading (possibly decimal) number from the given text."
    result = ''
    seendot = False
    for char in text:
      if char.isdigit():
        result += char
      elif char == '.' and not seendot:
        result += char
        seendot = True
      else:
        break
    return result
  def checkimage(self, width, height):
    "Check image dimensions, setting sizes from them when possible."
    if width:
      self.maxwidth = unicode(width) + 'px'
      if self.scale and not self.width:
        self.width = self.scalevalue(width)
    if height:
      self.maxheight = unicode(height) + 'px'
      if self.scale and not self.height:
        self.height = self.scalevalue(height)
    # with only one dimension fixed, let the browser compute the other
    if self.width and not self.height:
      self.height = 'auto'
    if self.height and not self.width:
      self.width = 'auto'
  def scalevalue(self, value):
    "Apply the image scale to a dimension and return it as unicode pixels."
    return unicode(int(value * int(self.scale) / 100)) + 'px'
  def removepercentwidth(self):
    "Remove a percent width if present and return it, to set at figure level."
    if not self.width or not '%' in self.width:
      return None
    width = self.width
    self.width = None
    if self.height == 'auto':
      self.height = None
    return width
  def addstyle(self, container):
    "Append a style attribute with the stored sizes to the output tag."
    if not isinstance(container.output, TaggedOutput):
      Trace.error('No tag to add style, in ' + unicode(container))
    if not self.width and not self.height and not self.maxwidth and not self.maxheight:
      # no size information at all: nothing to emit
      return
    pieces = [self.styleparameter(name) for name in ['width', 'maxwidth', 'height', 'maxheight']]
    tag = ' style="' + ''.join(pieces)
    if tag[-1] == ' ':
      tag = tag[:-1]
    tag += '"'
    container.output.tag += tag
  def styleparameter(self, name):
    "Build the CSS fragment for a single parameter."
    value = getattr(self, name)
    if not value:
      return ''
    return name.replace('max', 'max-') + ': ' + value + '; '
class QuoteContainer(Container):
  "A container that renders a pretty quote."
  def __init__(self):
    self.parser = BoundedParser()
    self.output = FixedOutput()
  def process(self):
    "Look up the quote type and emit the corresponding character."
    self.type = self.header[2]
    if self.type in StyleConfig.quotes:
      self.html = [StyleConfig.quotes[self.type]]
      return
    Trace.error('Quote type ' + self.type + ' not found')
    self.html = ['"']
class LyXLine(Container):
  "A horizontal line inserted by LyX."
  def __init__(self):
    self.parser = LoneCommand()
    self.output = FixedOutput()
  def process(self):
    "Output a horizontal rule."
    self.html = ['<hr class="line" />']
class EmphaticText(TaggedText):
  "Text rendered with emphasis (italics)."
  def process(self):
    "Emit an italics tag."
    self.output.tag = 'i'
class ShapedText(TaggedText):
  "Text with a shape applied (italic, slanted...)."
  def process(self):
    "Map the shape in the header to its output tag."
    self.type = self.header[1]
    if self.type in TagConfig.shaped:
      self.output.tag = TagConfig.shaped[self.type]
      return
    Trace.error('Unrecognized shape ' + self.header[1])
    self.output.tag = 'span'
class VersalitasText(TaggedText):
  "Text in versalitas (small caps)."
  def process(self):
    "Wrap the text in a versalitas span."
    self.output.tag = 'span class="versalitas"'
class ColorText(TaggedText):
  "Text rendered in a color."
  def process(self):
    "Use the color name as a CSS class."
    self.color = self.header[1]
    self.output.tag = 'span class="' + self.color + '"'
class SizeText(TaggedText):
  "Text rendered in a given size."
  def process(self):
    "Use the size name as a CSS class."
    self.size = self.header[1]
    self.output.tag = 'span class="' + self.size + '"'
class BoldText(TaggedText):
  "Text in bold face."
  def process(self):
    "Emit a bold tag."
    self.output.tag = 'b'
class TextFamily(TaggedText):
  "A bit of text from a different font family (typewriter, sans serif...)."
  def process(self):
    "Parse the family type and map it to its output tag."
    self.type = self.header[1]
    if not self.type in TagConfig.family:
      # bug fix: used to concatenate the builtin type() instead of self.type,
      # which crashed the error message with a TypeError
      Trace.error('Unrecognized family ' + self.type)
      self.output.tag = 'span'
      return
    self.output.tag = TagConfig.family[self.type]
class Hfill(TaggedText):
  "Horizontal fill: stretches to take the available space."
  def process(self):
    "Emit a span with the hfill class."
    self.output.tag = 'span class="hfill"'
class BarredText(TaggedText):
  "Text decorated with a bar somewhere (underline, strikeout...)."
  def process(self):
    "Map the bar type to its output tag."
    self.type = self.header[1]
    if self.type in TagConfig.barred:
      self.output.tag = TagConfig.barred[self.type]
      return
    Trace.error('Unknown bar type ' + self.type)
    self.output.tag = 'span'
class LangLine(BlackBox):
  "A line holding language information; swallowed from the output."
  def process(self):
    "Remember the language for later use."
    self.lang = self.header[1]
class InsetLength(BlackBox):
  "A length measure found inside an inset; produces no output itself."
  def process(self):
    "Store the length value."
    self.length = self.header[1]
class Space(Container):
  "A horizontal space of one of several types."
  def __init__(self):
    self.parser = InsetParser()
    self.output = FixedOutput()
  def process(self):
    "Translate the space type to HTML, sizing it when a length is given."
    self.type = self.header[2]
    if self.type not in StyleConfig.hspaces:
      Trace.error('Unknown space type ' + self.type)
      self.html = [' ']
      return
    self.html = [StyleConfig.hspaces[self.type]]
    length = self.getlength()
    if not length:
      return
    # a measured space becomes a styled span instead of fixed output
    self.output = TaggedOutput().settag('span class="hspace"', False)
    ContainerSize().set(length).addstyle(self)
  def getlength(self):
    "Extract the space length from the contents, if any."
    if self.contents and isinstance(self.contents[0], InsetLength):
      return self.contents[0].length
    return None
class VerticalSpace(Container):
  "An inset containing a vertical space."
  def __init__(self):
    self.parser = InsetParser()
    self.output = FixedOutput()
  def process(self):
    "Known space types map to fixed HTML; others become a sized div."
    self.type = self.header[2]
    if self.type in StyleConfig.vspaces:
      self.html = [StyleConfig.vspaces[self.type]]
      return
    self.output = TaggedOutput().settag('div class="vspace" style="height: ' + self.type + ';"', True)
class Align(Container):
  "A bit of text with a particular alignment."
  def __init__(self):
    self.parser = ExcludingParser()
    self.output = TaggedOutput().setbreaklines(True)
  def process(self):
    "Use the alignment as a CSS class on a div."
    self.output.tag = 'div class="' + self.header[1] + '"'
class Newline(Container):
  "A line break."
  def __init__(self):
    self.parser = LoneCommand()
    self.output = FixedOutput()
  def process(self):
    "Emit the HTML line break."
    self.html = ['<br/>\n']
class NewPage(Newline):
  "A page break, rendered as an empty paragraph."
  def process(self):
    "Emit a paragraph containing just a line break."
    self.html = ['<p><br/>\n</p>\n']
class Separator(Container):
  "A separator string which is invisible to extracttext()."
  def __init__(self, constant):
    "Build the separator around the given constant string."
    self.output = FixedOutput()
    self.contents = []
    self.html = [constant]
class StrikeOut(TaggedText):
  "Text that has been struck out."
  def process(self):
    "Emit the strike tag."
    self.output.tag = 'strike'
class StartAppendix(BlackBox):
  "A marker that starts the appendices at this point:"
  "every chapter from here on becomes an appendix."
  def process(self):
    "Switch the number generator to appendix (letter) numbering."
    NumberGenerator.generator.startappendix()
class Link(Container):
  "A link to another part of the document."
  anchor = None
  url = None
  type = None
  page = None
  target = None
  destination = None
  title = None
  def __init__(self):
    "Initialize the link; add a window target if one is configured."
    self.contents = []
    self.parser = InsetParser()
    self.output = LinkOutput()
    if Options.target:
      self.target = Options.target
  def complete(self, text, anchor = None, url = None, type = None, title = None):
    "Fill in the link text plus any attributes that were supplied."
    self.contents = [Constant(text)]
    if anchor:
      self.anchor = anchor
    if url:
      self.url = url
    if type:
      self.type = type
    if title:
      self.title = title
    return self
  def computedestination(self):
    "Derive this link's URL from its destination link, when present."
    if not self.destination:
      return
    self.url = ''
    if self.destination.anchor:
      self.url = '#' + self.destination.anchor
    if self.destination.page:
      self.url = self.destination.page + self.url
  def setmutualdestination(self, destination):
    "Make this link and the given destination point at each other."
    self.destination = destination
    destination.destination = self
  def __unicode__(self):
    "Return a printable representation."
    description = 'Link'
    if self.anchor:
      description += ' #' + self.anchor
    if self.url:
      description += ' to ' + self.url
    return description
class URL(Link):
  "A clickable URL read from parameters."
  def process(self):
    "Assemble the URL from the target and type parameters."
    target = self.escape(self.getparameter('target'))
    type = self.getparameter('type')
    if type:
      self.url = self.escape(type) + target
    else:
      self.url = target
    name = self.getparameter('name')
    if not name:
      name = target
    self.contents = [Constant(name)]
class FlexURL(URL):
  "A flexible URL taken from the contents."
  def process(self):
    "Use the extracted text as the URL."
    self.url = self.extracttext()
class LinkOutput(ContainerOutput):
  "Outputs a link, pointing to a destination or acting as an anchor."
  def gethtml(self, link):
    "Build the anchor tag for the link and produce its HTML."
    type = link.type
    if not type:
      type = link.__class__.__name__
    tag = 'a class="' + type + '"'
    if link.anchor:
      tag += ' name="' + link.anchor + '"'
    if link.destination:
      # resolve the URL from the destination before emitting it
      link.computedestination()
    if link.url:
      tag += ' href="' + link.url + '"'
    if link.target:
      tag += ' target="' + link.target + '"'
    if link.title:
      tag += ' title="' + link.title + '"'
    return TaggedOutput().settag(tag).gethtml(link)
class Postprocessor(object):
  "Postprocess a container keeping some context"
  # stage classes; filled in elsewhere in the program
  stages = []
  def __init__(self):
    self.stages = StageDict(Postprocessor.stages, self)
    self.current = None
    self.last = None
  def postprocess(self, next):
    "Postprocess a container and its contents."
    # acts as a one-element pipeline: each call returns the element received
    # on the previous call, postprocessed with "next" as lookahead
    self.postrecursive(self.current)
    result = self.postcurrent(next)
    self.last = self.current
    self.current = next
    return result
  def postrecursive(self, container):
    "Postprocess the container contents recursively"
    if not hasattr(container, 'contents'):
      return
    if len(container.contents) == 0:
      return
    # containers may opt out by setting postprocess to a false value
    if hasattr(container, 'postprocess'):
      if not container.postprocess:
        return
    postprocessor = Postprocessor()
    contents = []
    for element in container.contents:
      post = postprocessor.postprocess(element)
      if post:
        contents.append(post)
    # two rounds to empty the pipeline
    for i in range(2):
      post = postprocessor.postprocess(None)
      if post:
        contents.append(post)
    container.contents = contents
  def postcurrent(self, next):
    "Postprocess the current element taking into account next and last."
    stage = self.stages.getstage(self.current)
    if not stage:
      return self.current
    return stage.postprocess(self.last, self.current, next)
class StageDict(object):
  "A dictionary mapping container classes to postprocessing stages."
  def __init__(self, classes, postprocessor):
    "Instantiate a stage for each class and index them by processed class."
    instances = self.instantiate(classes, postprocessor)
    self.stagedict = dict([(x.processedclass, x) for x in instances])
  def instantiate(self, classes, postprocessor):
    "Create one stage instance per class, wiring in the postprocessor."
    stages = [x.__new__(x) for x in classes]
    for stage in stages:
      stage.__init__()
      stage.postprocessor = postprocessor
    return stages
  def getstage(self, element):
    "Find the stage for an element's class, or None if there is none."
    return self.stagedict.get(element.__class__, None)
class Label(Link):
  "A label to be referenced"
  # global registry of label key -> Label
  names = dict()
  # last numbered layout seen, used when the label has no numbered ancestor
  lastlayout = None
  def __init__(self):
    Link.__init__(self)
    self.lastnumbered = None
  def process(self):
    "Process a label container."
    key = self.getparameter('name')
    self.create(' ', key)
    self.lastnumbered = Label.lastlayout
  def create(self, text, key, type = 'Label'):
    "Create the label for a given key."
    self.key = key
    self.complete(text, anchor = key, type = type)
    Label.names[key] = self
    if key in Reference.references:
      # resolve any forward references already pointing at this key
      for reference in Reference.references[key]:
        reference.destination = self
    return self
  def findpartkey(self):
    "Get the part key for the latest numbered container seen."
    numbered = self.numbered(self)
    if numbered and numbered.partkey:
      return numbered.partkey
    return ''
  def numbered(self, container):
    "Get the numbered container for the label."
    # walk up the parents until one carries a part key
    if container.partkey:
      return container
    if not container.parent:
      if self.lastnumbered:
        return self.lastnumbered
      return None
    return self.numbered(container.parent)
  def __unicode__(self):
    "Return a printable representation."
    if not hasattr(self, 'key'):
      return 'Unnamed label'
    return 'Label ' + self.key
class Reference(Link):
  "A reference to a label."
  # global registry of label key -> list of references to it
  references = dict()
  key = 'none'
  def process(self):
    "Read the reference and set the arrow."
    self.key = self.getparameter('reference')
    if self.key in Label.names:
      # backward reference: the label has already been seen
      self.direction = u'↑'
      label = Label.names[self.key]
    else:
      # forward reference: create a provisional label to point at
      self.direction = u'↓'
      label = Label().complete(' ', self.key, 'preref')
    self.destination = label
    self.formatcontents()
    if not self.key in Reference.references:
      Reference.references[self.key] = []
    Reference.references[self.key].append(self)
  def formatcontents(self):
    "Format the reference contents."
    formatkey = self.getparameter('LatexCommand')
    if not formatkey:
      formatkey = 'ref'
    self.formatted = u'↕'
    if formatkey in StyleConfig.referenceformats:
      self.formatted = StyleConfig.referenceformats[formatkey]
    else:
      Trace.error('Unknown reference format ' + formatkey)
    # substitute the template placeholders one by one
    self.replace(u'↕', self.direction)
    self.replace('#', '1')
    self.replace('on-page', Translator.translate('on-page'))
    partkey = self.destination.findpartkey()
    # only if partkey and partkey.number are not null, send partkey.number
    self.replace('@', partkey and partkey.number)
    self.replace(u'¶', partkey and partkey.tocentry)
    if not '$' in self.formatted or not partkey or not partkey.titlecontents:
      # no $ placeholder or no title available: emit the text as is
      if '$' in self.formatted:
        Trace.error('No title in ' + unicode(partkey))
      self.contents = [Constant(self.formatted)]
      return
    # replace every $ placeholder with the part title contents
    pieces = self.formatted.split('$')
    self.contents = [Constant(pieces[0])]
    for piece in pieces[1:]:
      self.contents += partkey.titlecontents
      self.contents.append(Constant(piece))
  def replace(self, key, value):
    "Replace a key in the format template with a value."
    if not key in self.formatted:
      return
    if not value:
      value = ''
    self.formatted = self.formatted.replace(key, value)
  def __unicode__(self):
    "Return a printable representation."
    return 'Reference ' + self.key
class FormulaCommand(FormulaBit):
  "A LaTeX command inside a formula"
  # command bit types (CommandBit subclasses); appended elsewhere
  types = []
  start = FormulaConfig.starts['command']
  commandmap = None
  def detect(self, pos):
    "Find the current command."
    return pos.checkfor(FormulaCommand.start)
  def parsebit(self, pos):
    "Parse the command, trying every known command type."
    command = self.extractcommand(pos)
    bit = self.parsewithcommand(command, pos)
    if bit:
      return bit
    if command.startswith('\\up') or command.startswith('\\Up'):
      upgreek = self.parseupgreek(command, pos)
      if upgreek:
        return upgreek
    if not self.factory.defining:
      Trace.error('Unknown command ' + command)
    self.output = TaggedOutput().settag('span class="unknown"')
    self.add(FormulaConstant(command))
    return None
  def parsewithcommand(self, command, pos):
    "Parse the command type once we have the command."
    for type in FormulaCommand.types:
      if command in type.commandmap:
        return self.parsecommandtype(command, type, pos)
    return None
  def parsecommandtype(self, command, type, pos):
    "Parse a given command type."
    bit = self.factory.create(type)
    bit.setcommand(command)
    returned = bit.parsebit(pos)
    if returned:
      return returned
    return bit
  def extractcommand(self, pos):
    "Extract the command from the current position."
    if not pos.checkskip(FormulaCommand.start):
      pos.error('Missing command start ' + FormulaCommand.start)
      return
    if pos.finished():
      return self.emptycommand(pos)
    if pos.current().isalpha():
      # alpha command: the backslash plus a run of letters
      command = FormulaCommand.start + pos.globalpha()
      # skip mark of short command
      pos.checkskip('*')
      return command
    # symbol command: the backslash plus a single character
    return FormulaCommand.start + pos.skipcurrent()
  def emptycommand(self, pos):
    """Check for an empty command: look for command disguised as ending.
    Special case against '{ \{ \} }' situation."""
    command = ''
    if not pos.isout():
      ending = pos.nextending()
      if ending and pos.checkskip(ending):
        command = ending
    return FormulaCommand.start + command
  def parseupgreek(self, command, pos):
    "Parse an upgreek-package Greek letter: \\upalpha, \\Upgamma..."
    if len(command) < 4:
      return None
    if command.startswith('\\up'):
      upcommand = '\\' + command[3:]
    elif command.startswith('\\Up'):
      # bug fix: this branch used to test pos.checkskip('\\Up'), inspecting
      # the parse position (already past the command) instead of the
      # extracted command, so \Up... commands were never recognized
      upcommand = '\\' + command[3:4].upper() + command[4:]
    else:
      Trace.error('Impossible upgreek command: ' + command)
      return
    upgreek = self.parsewithcommand(upcommand, pos)
    if upgreek:
      upgreek.type = 'font'
    return upgreek
class CommandBit(FormulaCommand):
  "A formula bit that includes a command"
  def setcommand(self, command):
    "Set the command in the bit"
    self.command = command
    if self.commandmap:
      self.original += command
      # look up the HTML translation for the command
      self.translated = self.commandmap[self.command]
  def parseparameter(self, pos):
    "Parse a parameter at the current position"
    self.factory.clearskipped(pos)
    if pos.finished():
      return None
    parameter = self.factory.parseany(pos)
    self.add(parameter)
    return parameter
  def parsesquare(self, pos):
    "Parse a square bracket"
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(SquareBracket, pos):
      return None
    bracket = self.factory.parsetype(SquareBracket, pos)
    self.add(bracket)
    return bracket
  def parseliteral(self, pos):
    "Parse a literal bracket."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(Bracket, pos):
      # no opening bracket: fall back to a bare literal value, if any
      if not pos.isvalue():
        Trace.error('No literal parameter found at: ' + pos.identifier())
        return None
      return pos.globvalue()
    bracket = Bracket().setfactory(self.factory)
    self.add(bracket.parseliteral(pos))
    return bracket.literal
  def parsesquareliteral(self, pos):
    "Parse a square bracket literally."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(SquareBracket, pos):
      return None
    bracket = SquareBracket().setfactory(self.factory)
    self.add(bracket.parseliteral(pos))
    return bracket.literal
  def parsetext(self, pos):
    "Parse a text parameter."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(Bracket, pos):
      Trace.error('No text parameter for ' + self.command)
      return None
    bracket = Bracket().setfactory(self.factory).parsetext(pos)
    self.add(bracket)
    return bracket
class EmptyCommand(CommandBit):
  "A command taking no parameters; outputs its translated form directly."
  commandmap = FormulaConfig.commands
  def parsebit(self, pos):
    "Nothing to read from the position: emit the translation as a constant."
    constant = FormulaConstant(self.translated)
    self.contents = [constant]
class SpacedCommand(CommandBit):
  "A parameterless command rendered with math spacing around it."
  commandmap = FormulaConfig.spacedcommands
  def parsebit(self, pos):
    "Surround the translated symbol with spaces and store it as the contents."
    spaced = u' ' + self.translated + u' '
    self.contents = [FormulaConstant(spaced)]
class AlphaCommand(EmptyCommand):
  "A command without parameters whose result is alphabetical."
  commandmap = FormulaConfig.alphacommands
  def parsebit(self, pos):
    "Parse the command and set type to alpha"
    EmptyCommand.parsebit(self, pos)
    # alpha type makes the spacing logic treat the result like a letter
    self.type = 'alpha'
class OneParamFunction(CommandBit):
  "A function taking exactly one parameter."
  commandmap = FormulaConfig.onefunctions
  # set to True once the function has been reduced to a single character
  simplified = False
  def parsebit(self, pos):
    "Tag the output with the translated tag and read the single parameter."
    tagged = TaggedOutput().settag(self.translated)
    self.output = tagged
    self.parseparameter(pos)
    self.simplifyifpossible()
  def simplifyifpossible(self):
    "Replace the whole function by a single mapped character, if one exists."
    replacement = self.commandmap.get(self.original)
    if replacement is not None:
      self.output = FixedOutput()
      self.html = [replacement]
      self.simplified = True
class SymbolFunction(CommandBit):
  "A function introduced by a bare symbol (such as _ or ^) instead of a \\command."
  commandmap = FormulaConfig.symbolfunctions
  def detect(self, pos):
    "True when the current character is one of the known symbols."
    return pos.current() in SymbolFunction.commandmap
  def parsebit(self, pos):
    "Consume the symbol and then parse its single parameter."
    symbol = pos.current()
    self.setcommand(symbol)
    pos.skip(symbol)
    self.output = TaggedOutput().settag(self.translated)
    self.parseparameter(pos)
class TextFunction(CommandBit):
  "A function whose single parameter is read as plain text."
  commandmap = FormulaConfig.textfunctions
  def parsebit(self, pos):
    "Tag the output and read one text parameter."
    tagged = TaggedOutput().settag(self.translated)
    self.output = tagged
    self.parsetext(pos)
  def process(self):
    "Mark as font so no extra math spacing is applied."
    self.type = 'font'
class LabelFunction(CommandBit):
  "A function that acts as a label"
  commandmap = FormulaConfig.labelfunctions
  def parsebit(self, pos):
    "Parse a literal parameter"
    # the label key, e.g. \label{eq:first} -> 'eq:first'
    self.key = self.parseliteral(pos)
  def process(self):
    "Add an anchor with the label contents."
    self.type = 'font'
    self.label = Label().create(' ', self.key, type = 'eqnumber')
    self.contents = [self.label]
    # store as a Label so we know it's been seen
    Label.names[self.key] = self.label
class FontFunction(OneParamFunction):
  "A one-parameter function that switches the font."
  commandmap = FormulaConfig.fontfunctions
  def process(self):
    "Mark as font and attempt the single-character simplification."
    self.type = 'font'
    self.simplifyifpossible()
# Register the command parsers with the factory, and the concrete command
# classes with the FormulaCommand dispatcher.
FormulaFactory.types += [FormulaCommand, SymbolFunction]
FormulaCommand.types = [
    AlphaCommand, EmptyCommand, OneParamFunction, FontFunction, LabelFunction,
    TextFunction, SpacedCommand,
    ]
class BigSymbol(object):
  "Generates the display pieces for a big symbol (integral, sum...)."
  symbols = FormulaConfig.bigsymbols
  def __init__(self, symbol):
    "Remember which symbol to generate."
    self.symbol = symbol
  def getpieces(self):
    "Return the list of pieces that make up the symbol."
    # unknown symbols, and symbols shown small, render as themselves
    if self.symbol not in self.symbols or self.smalllimit():
      return [self.symbol]
    return self.symbols[self.symbol]
  def smalllimit(self):
    "Decide if the symbol should be a small, one-line rendering."
    if not DocumentParameters.displaymode:
      return True
    if len(self.symbols[self.symbol]) == 1:
      return True
    return Options.simplemath
class BigBracket(BigSymbol):
  """A big bracket generator: renders a delimiter spanning a number of rows.

  Brackets with configured pieces are drawn as a vertical array of those
  pieces; anything else falls back to a single symbol."""
  def __init__(self, size, bracket, alignment='l'):
    "Set the size (number of rows), the bracket character and its alignment."
    self.size = size
    self.original = bracket
    self.alignment = alignment
    self.pieces = None
    if bracket in FormulaConfig.bigbrackets:
      self.pieces = FormulaConfig.bigbrackets[bracket]
  def getpiece(self, index):
    "Return the nth piece for the bracket."
    # dispatch on the number of configured pieces: getpiece1/getpiece3/getpiece4
    function = getattr(self, 'getpiece' + unicode(len(self.pieces)))
    return function(index)
  def getpiece1(self, index):
    "Return the only piece for a single-piece bracket."
    return self.pieces[0]
  def getpiece3(self, index):
    "Get the nth piece for a 3-piece bracket: parenthesis or square bracket."
    if index == 0:
      return self.pieces[0]
    if index == self.size - 1:
      return self.pieces[-1]
    return self.pieces[1]
  def getpiece4(self, index):
    "Get the nth piece for a 4-piece bracket: curly bracket."
    if index == 0:
      return self.pieces[0]
    if index == self.size - 1:
      return self.pieces[3]
    # floor division: plain '/' yields a float under Python 3, so the middle
    # piece would never match; '//' behaves identically for ints in Python 2
    if index == (self.size - 1) // 2:
      return self.pieces[2]
    return self.pieces[1]
  def getcell(self, index):
    "Get the bracket piece at the index as an array cell."
    piece = self.getpiece(index)
    span = 'span class="bracket align-' + self.alignment + '"'
    return TaggedBit().constant(piece, span)
  def getcontents(self):
    "Get the bracket as an array of rows, or as a single bracket for size 1."
    if self.size == 1 or not self.pieces:
      return self.getsinglebracket()
    rows = []
    for index in range(self.size):
      cell = self.getcell(index)
      rows.append(TaggedBit().complete([cell], 'span class="arrayrow"'))
    return [TaggedBit().complete(rows, 'span class="array"')]
  def getsinglebracket(self):
    "Return the bracket as a single sign; '.' is the invisible bracket."
    if self.original == '.':
      return [TaggedBit().constant('', 'span class="emptydot"')]
    return [TaggedBit().constant(self.original, 'span class="symbol"')]
class FormulaEquation(CommandBit):
  "A simple numbered equation."
  piece = 'equation'
  def parsebit(self, pos):
    "Parse the array"
    self.output = ContentsOutput()
    # an equation is just a whole formula parsed up to the current ending
    self.add(self.factory.parsetype(WholeFormula, pos))
class FormulaCell(FormulaCommand):
  "An array cell inside a row"
  def setalignment(self, alignment):
    "Set the alignment ('l', 'c' or 'r') and the output tag; return self."
    self.alignment = alignment
    self.output = TaggedOutput().settag('span class="arraycell align-' + alignment +'"', True)
    return self
  def parsebit(self, pos):
    "Parse the cell contents as a whole formula; empty cells parse to nothing."
    self.factory.clearskipped(pos)
    if pos.finished():
      return
    self.add(self.factory.parsetype(WholeFormula, pos))
class FormulaRow(FormulaCommand):
  "An array row inside an array"
  cellseparator = FormulaConfig.array['cellseparator']
  def setalignments(self, alignments):
    "Remember the column alignments for the whole row; return self."
    self.alignments = alignments
    self.output = TaggedOutput().settag('span class="arrayrow"', True)
    return self
  def parsebit(self, pos):
    "Parse a whole row"
    index = 0
    # each cell ends at the (optional) cell separator, typically '&'
    pos.pushending(self.cellseparator, optional=True)
    while not pos.finished():
      cell = self.createcell(index)
      cell.parsebit(pos)
      self.add(cell)
      index += 1
      # swallow the separator before the next cell
      pos.checkskip(self.cellseparator)
    if len(self.contents) == 0:
      # a row without cells produces no output at all
      self.output = EmptyOutput()
  def createcell(self, index):
    "Create the cell that corresponds to the given index."
    # alignments repeat cyclically when there are more cells than alignments
    alignment = self.alignments[index % len(self.alignments)]
    return self.factory.create(FormulaCell).setalignment(alignment)
class MultiRowFormula(CommandBit):
  "A formula with multiple rows."
  def parserows(self, pos):
    "Parse all rows, finish when no more row ends"
    self.rows = []
    first = True
    for row in self.iteraterows(pos):
      if first:
        first = False
      else:
        # intersparse empty rows
        self.addempty()
      # the generator only yields the row; parsing happens here, before the
      # generator resumes and pops the row separator
      row.parsebit(pos)
      self.addrow(row)
    self.size = len(self.rows)
  def iteraterows(self, pos):
    "Iterate over all rows, end when no more row ends"
    rowseparator = FormulaConfig.array['rowseparator']
    while True:
      # each row ends at the row separator (usually \\), which is optional
      pos.pushending(rowseparator, True)
      row = self.factory.create(FormulaRow)
      yield row.setalignments(self.alignments)
      if pos.checkfor(rowseparator):
        self.original += pos.popending(rowseparator)
      else:
        return
  def addempty(self):
    "Add an empty row."
    row = self.factory.create(FormulaRow).setalignments(self.alignments)
    # mirror the cell count of the last parsed row with blank cells
    for index, originalcell in enumerate(self.rows[-1].contents):
      cell = row.createcell(index)
      cell.add(FormulaConstant(u' '))
      row.add(cell)
    self.addrow(row)
  def addrow(self, row):
    "Add a row to the contents and to the list of rows."
    self.rows.append(row)
    self.add(row)
class FormulaArray(MultiRowFormula):
  "An array environment within a formula."
  piece = 'array'
  def parsebit(self, pos):
    "Read the alignments, then every row of the array."
    self.output = TaggedOutput().settag('span class="array"', False)
    self.parsealignments(pos)
    self.parserows(pos)
  def parsealignments(self, pos):
    "Read the optional vertical alignment [..] and the column alignments {..}."
    self.valign = 'c'
    vertical = self.parsesquareliteral(pos)
    if vertical:
      self.valign = vertical
    # one column alignment per character, e.g. 'lcr'
    self.alignments = [letter for letter in self.parseliteral(pos)]
class FormulaMatrix(MultiRowFormula):
  "A matrix: an array where every column is centered."
  piece = 'matrix'
  def parsebit(self, pos):
    "Parse all the rows with a single centered alignment."
    self.output = TaggedOutput().settag('span class="array"', False)
    self.valign = 'c'
    self.alignments = ['c']
    self.parserows(pos)
class FormulaCases(MultiRowFormula):
  "A cases statement"
  piece = 'cases'
  def parsebit(self, pos):
    "Parse the cases"
    self.output = ContentsOutput()
    # two left-aligned columns: the value and its condition
    self.alignments = ['l', 'l']
    self.parserows(pos)
    for row in self.contents:
      for cell in row.contents:
        cell.output.settag('span class="case align-l"', True)
        cell.contents.append(FormulaConstant(u' '))
    # wrap all rows and prepend a big left curly brace of matching height
    array = TaggedBit().complete(self.contents, 'span class="bracketcases"', True)
    brace = BigBracket(len(self.contents), '{', 'l')
    self.contents = brace.getcontents() + [array]
class EquationEnvironment(MultiRowFormula):
  "A \\begin{}...\\end equation environment with rows and cells."
  def parsebit(self, pos):
    "Look up the configured alignments for the environment, then parse rows."
    self.output = TaggedOutput().settag('span class="environment"', False)
    # starred environments share the alignments of the unstarred form
    environment = self.piece.replace('*', '')
    if environment not in FormulaConfig.environments:
      Trace.error('Unknown equation environment ' + self.piece)
      self.alignments = ['l']
    else:
      self.alignments = FormulaConfig.environments[environment]
    self.parserows(pos)
class BeginCommand(CommandBit):
  "A \\begin{}...\\end command and what it entails (array, cases, aligned)"
  commandmap = {FormulaConfig.array['begin']:''}
  types = [FormulaEquation, FormulaArray, FormulaCases, FormulaMatrix]
  def parsebit(self, pos):
    "Parse the begin command"
    command = self.parseliteral(pos)
    bit = self.findbit(command)
    # everything up to the matching \end{command} belongs to the environment
    ending = FormulaConfig.array['end'] + '{' + command + '}'
    pos.pushending(ending)
    bit.parsebit(pos)
    self.add(bit)
    self.original += pos.popending(ending)
    self.size = bit.size
  def findbit(self, piece):
    "Find the command bit corresponding to the \\begin{piece}"
    for type in BeginCommand.types:
      if piece.replace('*', '') == type.piece:
        return self.factory.create(type)
    # unknown environments fall back to a generic equation environment
    bit = self.factory.create(EquationEnvironment)
    bit.piece = piece
    return bit
# Make \begin{} environments available to the formula command parser.
FormulaCommand.types += [BeginCommand]
class CombiningFunction(OneParamFunction):
  "A function that attaches a Unicode combining character to the next letter."
  commandmap = FormulaConfig.combiningfunctions
  def parsebit(self, pos):
    "Read the character to decorate and append the combining mark after it."
    self.type = 'alpha'
    combining = self.translated
    parameter = self.parsesingleparameter(pos)
    if not parameter:
      Trace.error('Empty parameter for combining function ' + self.command)
    elif len(parameter.extracttext()) != 1:
      Trace.error('Applying combining function ' + self.command + ' to invalid string "' + parameter.extracttext() + '"')
    # the combining character follows the character it modifies
    self.contents.append(Constant(combining))
  def parsesingleparameter(self, pos):
    "Parse a bracketed or command parameter, falling back to a single letter."
    self.factory.clearskipped(pos)
    if pos.finished():
      Trace.error('Error while parsing single parameter at ' + pos.identifier())
      return None
    if self.factory.detecttype(Bracket, pos):
      return self.parseparameter(pos)
    if self.factory.detecttype(FormulaCommand, pos):
      return self.parseparameter(pos)
    letter = FormulaConstant(pos.skipcurrent())
    self.add(letter)
    return letter
class DecoratingFunction(OneParamFunction):
  "A function that decorates some bit of text"
  commandmap = FormulaConfig.decoratingfunctions
  def parsebit(self, pos):
    "Parse a decorating function"
    self.type = 'alpha'
    symbol = self.translated
    self.symbol = TaggedBit().constant(symbol, 'span class="symbolover"')
    # NOTE(review): parseparameter() can return None when the position is
    # finished; the .output assignment two lines below would then raise.
    # Confirm callers never invoke this at the end of a formula.
    self.parameter = self.parseparameter(pos)
    self.output = TaggedOutput().settag('span class="withsymbol"')
    self.contents.insert(0, self.symbol)
    self.parameter.output = TaggedOutput().settag('span class="undersymbol"')
    self.simplifyifpossible()
class LimitCommand(EmptyCommand):
  "A command that accepts limits above and below, in display mode."
  commandmap = FormulaConfig.limitcommands
  def parsebit(self, pos):
    "Output every piece of the big symbol as a tagged limit constant."
    self.output = TaggedOutput().settag('span class="limits"')
    pieces = BigSymbol(self.translated).getpieces()
    bits = [TaggedBit().constant(piece, 'span class="limit"') for piece in pieces]
    self.contents.extend(bits)
class LimitPreviousCommand(LimitCommand):
  "A command that applies limits to the preceding element."
  commandmap = None
  def parsebit(self, pos):
    "Nothing to parse: just tag the output and discard skipped whitespace."
    self.output = TaggedOutput().settag('span class="limits"')
    self.factory.clearskipped(pos)
  def __unicode__(self):
    "Return a printable representation."
    return 'Limit previous command'
class LimitsProcessor(MathsProcessor):
  "A processor for limits inside an element."
  def process(self, contents, index):
    "Process the limits for an element."
    if Options.simplemath:
      return
    if self.checklimits(contents, index):
      self.modifylimits(contents, index)
    # two consecutive scripts (e.g. _a^b) are merged into one vertical stack
    if self.checkscript(contents, index) and self.checkscript(contents, index + 1):
      self.modifyscripts(contents, index)
  def checklimits(self, contents, index):
    "Check if the current position has a limits command."
    # limits are only placed above/below in display mode
    if not DocumentParameters.displaymode:
      return False
    if self.checkcommand(contents, index + 1, LimitPreviousCommand):
      self.limitsahead(contents, index)
      return False
    if not isinstance(contents[index], LimitCommand):
      return False
    return self.checkscript(contents, index + 1)
  def limitsahead(self, contents, index):
    "Limit the current element based on the next."
    # move a clone into the following LimitPreviousCommand and silence this one
    contents[index + 1].add(contents[index].clone())
    contents[index].output = EmptyOutput()
  def modifylimits(self, contents, index):
    "Modify a limits commands so that the limits appear above and below."
    limited = contents[index]
    subscript = self.getlimit(contents, index + 1)
    limited.contents.append(subscript)
    if self.checkscript(contents, index + 1):
      superscript = self.getlimit(contents, index + 1)
    else:
      # no superscript present: insert a blank one to keep the layout stable
      superscript = TaggedBit().constant(u' ', 'sup class="limit"')
    limited.contents.insert(0, superscript)
  def getlimit(self, contents, index):
    "Get the limit for a limits command."
    limit = self.getscript(contents, index)
    limit.output.tag = limit.output.tag.replace('script', 'limit')
    return limit
  def modifyscripts(self, contents, index):
    "Modify the super- and subscript to appear vertically aligned."
    subscript = self.getscript(contents, index)
    # subscript removed so instead of index + 1 we get index again
    superscript = self.getscript(contents, index)
    scripts = TaggedBit().complete([superscript, subscript], 'span class="scripts"')
    contents.insert(index, scripts)
  def checkscript(self, contents, index):
    "Check if the current element is a sub- or superscript."
    return self.checkcommand(contents, index, SymbolFunction)
  def checkcommand(self, contents, index, type):
    "Check for the given type as the current element."
    if len(contents) <= index:
      return False
    return isinstance(contents[index], type)
  def getscript(self, contents, index):
    "Get the sub- or superscript, removing it from the contents."
    bit = contents[index]
    bit.output.tag += ' class="script"'
    del contents[index]
    return bit
class BracketCommand(OneParamFunction):
  "A command that defines a bracket, such as \\left( or \\right]."
  commandmap = FormulaConfig.bracketcommands
  def parsebit(self, pos):
    "Parse the bracket parameter just like any one-parameter function."
    OneParamFunction.parsebit(self, pos)
  def create(self, direction, character):
    "Build a synthetic bracket for the given direction and character."
    self.original = character
    self.command = '\\' + direction
    self.contents = [FormulaConstant(character)]
    return self
class BracketProcessor(MathsProcessor):
  "A processor for bracket commands."
  def process(self, contents, index):
    "Convert the bracket using Unicode pieces, if possible."
    if Options.simplemath:
      return
    if self.checkleft(contents, index):
      return self.processleft(contents, index)
  def processleft(self, contents, index):
    "Process a left bracket: resize it and its matching right bracket."
    rightindex = self.findright(contents, index + 1)
    if not rightindex:
      return
    # both delimiters grow to the tallest element between them
    size = self.findmax(contents, index, rightindex)
    self.resize(contents[index], size)
    self.resize(contents[rightindex], size)
  def checkleft(self, contents, index):
    "Check if the command at the given index is left."
    return self.checkdirection(contents[index], '\\left')
  def checkright(self, contents, index):
    "Check if the command at the given index is right."
    return self.checkdirection(contents[index], '\\right')
  def checkdirection(self, bit, command):
    "Check if the given bit is the desired bracket command."
    if not isinstance(bit, BracketCommand):
      return False
    return bit.command == command
  def findright(self, contents, index):
    "Find the matching right bracket starting at the given index, or None."
    # depth counts nested \left...\right pairs
    depth = 1
    while index < len(contents):
      if self.checkleft(contents, index):
        depth += 1
      if self.checkright(contents, index):
        depth -= 1
      if depth == 0:
        # found the match for the original left bracket
        return index
      index += 1
    return None
  def findmax(self, contents, leftindex, rightindex):
    "Find the max size of the contents between the two given indices."
    sliced = contents[leftindex:rightindex]
    return max([element.size for element in sliced])
  def resize(self, command, size):
    "Resize a bracket command to the given size."
    character = command.extracttext()
    alignment = command.command.replace('\\', '')
    bracket = BigBracket(size, character, alignment)
    command.output = ContentsOutput()
    command.contents = bracket.getcontents()
# Register the decorated command types and the post-processors for them.
FormulaCommand.types += [
    DecoratingFunction, CombiningFunction, LimitCommand, BracketCommand,
    ]
FormulaProcessor.processors += [
    LimitsProcessor(), BracketProcessor(),
    ]
class ParameterDefinition(object):
  "The definition of a parameter in a hybrid function."
  "[] parameters are optional, {} parameters are mandatory."
  "Each parameter has a one-character name, like {$1} or {$p}."
  "A parameter that ends in ! like {$p!} is a literal."
  "Example: [$1]{$p!} reads an optional parameter $1 and a literal mandatory parameter p."
  parambrackets = [('[', ']'), ('{', '}')]
  def __init__(self):
    # one-character parameter name, e.g. '1' or 'p'
    self.name = None
    # True when the parameter is read literally (name ends in !)
    self.literal = False
    # True for [] parameters, False for {} parameters
    self.optional = False
    # the parsed value (a formula bit), filled in by read()
    self.value = None
    # the raw string value, only for literal parameters
    self.literalvalue = None
  def parse(self, pos):
    "Parse a parameter definition: [$0], {$x}, {$1!}..."
    for (opening, closing) in ParameterDefinition.parambrackets:
      if pos.checkskip(opening):
        if opening == '[':
          self.optional = True
        if not pos.checkskip('$'):
          Trace.error('Wrong parameter name, did you mean $' + pos.current() + '?')
          return None
        self.name = pos.skipcurrent()
        if pos.checkskip('!'):
          self.literal = True
        if not pos.checkskip(closing):
          Trace.error('Wrong parameter closing ' + pos.skipcurrent())
          return None
        return self
    Trace.error('Wrong character in parameter template: ' + pos.skipcurrent())
    return None
  def read(self, pos, function):
    "Read the parameter itself using the definition."
    if self.literal:
      if self.optional:
        self.literalvalue = function.parsesquareliteral(pos)
      else:
        self.literalvalue = function.parseliteral(pos)
      if self.literalvalue:
        self.value = FormulaConstant(self.literalvalue)
    elif self.optional:
      self.value = function.parsesquare(pos)
    else:
      self.value = function.parseparameter(pos)
  def __unicode__(self):
    "Return a printable representation."
    result = 'param ' + self.name
    if self.value:
      result += ': ' + unicode(self.value)
    else:
      result += ' (empty)'
    return result
class ParameterFunction(CommandBit):
  """A function with a variable number of parameters defined in a template.
  The parameters are defined as a parameter definition."""
  def readparams(self, readtemplate, pos):
    "Read the params according to the template."
    self.params = dict()
    for paramdef in self.paramdefs(readtemplate):
      paramdef.read(pos, self)
      # parameters are keyed by '$' + name, e.g. '$1' or '$p'
      self.params['$' + paramdef.name] = paramdef
  def paramdefs(self, readtemplate):
    "Yield each param definition found in the template."
    pos = TextPosition(readtemplate)
    while not pos.finished():
      paramdef = ParameterDefinition().parse(pos)
      if paramdef:
        yield paramdef
  def getparam(self, name):
    "Get a parameter as parsed, or None when it is not present."
    if not name in self.params:
      return None
    return self.params[name]
  def getvalue(self, name):
    "Get the value of a parameter, or None when it is not present."
    # guard against missing parameters, consistently with getliteralvalue();
    # the old code dereferenced the result of getparam() unconditionally
    param = self.getparam(name)
    if not param:
      return None
    return param.value
  def getliteralvalue(self, name):
    "Get the literal value of a parameter, or None."
    param = self.getparam(name)
    if not param or not param.literalvalue:
      return None
    return param.literalvalue
class HybridFunction(ParameterFunction):
  """
  A parameter function where the output is also defined using a template.
  The template can use a number of functions; each function has an associated
  tag.
  Example: [f0{$1},span class="fbox"] defines a function f0 which corresponds
  to a span of class fbox, yielding <span class="fbox">$1</span>.
  Literal parameters can be used in tags definitions:
    [f0{$1},span style="color: $p;"]
  yields <span style="color: $p;">$1</span>, where $p is a literal parameter.
  Sizes can be specified in hybridsizes, e.g. adding parameter sizes. By
  default the resulting size is the max of all arguments. Sizes are used
  to generate the right parameters.
  A function followed by a single / is output as a self-closing XHTML tag:
    [f0/,hr]
  will generate <hr/>.
  """
  commandmap = FormulaConfig.hybridfunctions
  def parsebit(self, pos):
    "Parse a function with [] and {} parameters"
    readtemplate = self.translated[0]
    writetemplate = self.translated[1]
    self.readparams(readtemplate, pos)
    self.contents = self.writeparams(writetemplate)
    self.computehybridsize()
  def writeparams(self, writetemplate):
    "Write all params according to the template"
    return self.writepos(TextPosition(writetemplate))
  def writepos(self, pos):
    "Write all params as read in the parse position."
    result = []
    while not pos.finished():
      if pos.checkskip('$'):
        param = self.writeparam(pos)
        if param:
          result.append(param)
      elif pos.checkskip('f'):
        function = self.writefunction(pos)
        if function:
          function.type = None
          result.append(function)
      elif pos.checkskip('('):
        result.append(self.writebracket('left', '('))
      elif pos.checkskip(')'):
        result.append(self.writebracket('right', ')'))
      else:
        # any other character is copied into the output verbatim
        result.append(FormulaConstant(pos.skipcurrent()))
    return result
  def writeparam(self, pos):
    "Write a single param of the form $0, $x..."
    name = '$' + pos.skipcurrent()
    if not name in self.params:
      Trace.error('Unknown parameter ' + name)
      return None
    if not self.params[name]:
      return None
    if pos.checkskip('.'):
      # '$p.type' sets the type of the resulting value
      self.params[name].value.type = pos.globalpha()
    return self.params[name].value
  def writefunction(self, pos):
    "Write a single function f0,...,fn."
    tag = self.readtag(pos)
    if not tag:
      return None
    if pos.checkskip('/'):
      # self-closing XHTML tag, such as <hr/>
      return TaggedBit().selfcomplete(tag)
    if not pos.checkskip('{'):
      Trace.error('Function should be defined in {}')
      return None
    pos.pushending('}')
    contents = self.writepos(pos)
    pos.popending()
    if len(contents) == 0:
      return None
    return TaggedBit().complete(contents, tag)
  def readtag(self, pos):
    "Get the tag corresponding to the given index. Does parameter substitution."
    if not pos.current().isdigit():
      Trace.error('Function should be f0,...,f9: f' + pos.current())
      return None
    index = int(pos.skipcurrent())
    # tags start at position 2 of the translation, so the last valid index is
    # len - 3. The previous check used '>' and let 2 + index == len through,
    # which raised IndexError below instead of reporting the error.
    if 2 + index >= len(self.translated):
      Trace.error('Function f' + unicode(index) + ' is not defined')
      return None
    tag = self.translated[2 + index]
    if not '$' in tag:
      return tag
    # substitute literal parameters, e.g. $p, inside the tag definition
    for variable in self.params:
      if variable in tag:
        param = self.params[variable]
        if not param.literal:
          Trace.error('Parameters in tag ' + tag + ' should be literal: {' + variable + '!}')
          continue
        if param.literalvalue:
          value = param.literalvalue
        else:
          value = ''
        tag = tag.replace(variable, value)
    return tag
  def writebracket(self, direction, character):
    "Return a new bracket looking at the given direction."
    return self.factory.create(BracketCommand).create(direction, character)
  def computehybridsize(self):
    "Compute the size of the hybrid function."
    if not self.command in HybridSize.configsizes:
      self.computesize()
      return
    self.size = HybridSize().getsize(self)
    # set the size in all elements at first level
    for element in self.contents:
      element.size = self.size
class HybridSize(object):
  "The size associated with a hybrid function."
  configsizes = FormulaConfig.hybridsizes
  def getsize(self, function):
    "Read the size for a function and parse it."
    sizestring = self.configsizes[function.command]
    # substitute each parameter name with the computed size of its value
    for name in function.params:
      if name in sizestring:
        size = function.params[name].value.computesize()
        sizestring = sizestring.replace(name, unicode(size))
    if '$' in sizestring:
      Trace.error('Unconverted variable in hybrid size: ' + sizestring)
      return 1
    # the size expression comes from the internal configuration (not from
    # user input), so eval() is acceptable here
    return eval(sizestring)
# Register hybrid functions with the command dispatcher.
FormulaCommand.types += [HybridFunction]
class HeaderParser(Parser):
  "Parses the LyX header"
  def parse(self, reader):
    "Parse header parameters into a dictionary, return the preamble."
    contents = []
    self.parseending(reader, lambda: self.parseline(reader, contents))
    # skip last line
    reader.nextline()
    return contents
  def parseline(self, reader, contents):
    "Parse a single line as a parameter or as a start"
    line = reader.currentline()
    if line.startswith(HeaderConfig.parameters['branch']):
      self.parsebranch(reader)
      return
    elif line.startswith(HeaderConfig.parameters['lstset']):
      # global listings parameters
      LstParser().parselstset(reader)
      return
    elif line.startswith(HeaderConfig.parameters['beginpreamble']):
      contents.append(self.factory.createcontainer(reader))
      return
    # no match
    self.parseparameter(reader)
  def parsebranch(self, reader):
    "Parse all branch definitions."
    branch = reader.currentline().split()[1]
    reader.nextline()
    # a nested header parser reads the branch block up to its end marker
    subparser = HeaderParser().complete(HeaderConfig.parameters['endbranch'])
    subparser.parse(reader)
    options = BranchOptions(branch)
    for key in subparser.parameters:
      options.set(key, subparser.parameters[key])
    Options.branches[branch] = options
  def complete(self, ending):
    "Complete the parser with the given ending."
    self.ending = ending
    return self
class PreambleParser(Parser):
  "A parser for the LyX preamble."
  # the collected preamble lines, shared by all instances
  preamble = []
  def parse(self, reader):
    "Parse the full preamble with all statements."
    self.ending = HeaderConfig.parameters['endpreamble']
    self.parseending(reader, lambda: self.parsepreambleline(reader))
    return []
  def parsepreambleline(self, reader):
    "Parse a single preamble line."
    PreambleParser.preamble.append(reader.currentline())
    reader.nextline()
class LstParser(object):
  "Parse global and local lstparams."
  # global listings parameters, shared by all instances
  globalparams = dict()
  def parselstset(self, reader):
    "Parse a declaration of lstparams in lstset."
    paramtext = self.extractlstset(reader)
    if paramtext is None:
      # extractlstset already logged the error
      return
    if not '{' in paramtext:
      Trace.error('Missing opening bracket in lstset: ' + paramtext)
      return
    lefttext = paramtext.split('{')[1]
    croppedtext = lefttext[:-1]
    # split into individual key=value parameters: parselstparams() expects a
    # list; the old code passed the raw string, which iterated it character
    # by character and produced an empty dict plus one error per character
    LstParser.globalparams = self.parselstparams(croppedtext.split(','))
  def extractlstset(self, reader):
    "Extract the global lstset parameters; None when the closing } is missing."
    paramtext = ''
    while not reader.finished():
      paramtext += reader.currentline()
      reader.nextline()
      if paramtext.endswith('}'):
        return paramtext
    Trace.error('Could not find end of \\lstset settings; aborting')
  def parsecontainer(self, container):
    "Parse some lstparams from elyxer.a container."
    container.lstparams = LstParser.globalparams.copy()
    paramlist = container.getparameterlist('lstparams')
    container.lstparams.update(self.parselstparams(paramlist))
  def parselstparams(self, paramlist):
    "Process a list of key=value listing parameters into a dictionary."
    paramdict = dict()
    for param in paramlist:
      if not '=' in param:
        if len(param.strip()) > 0:
          Trace.error('Invalid listing parameter ' + param)
      else:
        # only the first '=' separates key from value
        key, value = param.split('=', 1)
        paramdict[key] = value
    return paramdict
class MacroDefinition(CommandBit):
  "A function that defines a new command (a macro)."
  # maps macro names to their definitions, shared by all instances
  macros = dict()
  def parsebit(self, pos):
    "Parse the function that defines the macro."
    self.output = EmptyOutput()
    self.parameternumber = 0
    self.defaults = []
    # tell the factory we are inside a definition so #n parameters parse
    self.factory.defining = True
    self.parseparameters(pos)
    self.factory.defining = False
    Trace.debug('New command ' + self.newcommand + ' (' + \
        unicode(self.parameternumber) + ' parameters)')
    self.macros[self.newcommand] = self
  def parseparameters(self, pos):
    "Parse all optional parameters (number of parameters, default values)"
    "and the mandatory definition."
    self.newcommand = self.parsenewcommand(pos)
    # parse number of parameters
    literal = self.parsesquareliteral(pos)
    if literal:
      self.parameternumber = int(literal)
    # parse all default values
    bracket = self.parsesquare(pos)
    while bracket:
      self.defaults.append(bracket)
      bracket = self.parsesquare(pos)
    # parse mandatory definition
    self.definition = self.parseparameter(pos)
  def parsenewcommand(self, pos):
    "Parse the name of the new command."
    self.factory.clearskipped(pos)
    if self.factory.detecttype(Bracket, pos):
      return self.parseliteral(pos)
    if self.factory.detecttype(FormulaCommand, pos):
      return self.factory.create(FormulaCommand).extractcommand(pos)
    Trace.error('Unknown formula bit in defining function at ' + pos.identifier())
    return 'unknown'
  def instantiate(self):
    "Return an instance of the macro."
    return self.definition.clone()
class MacroParameter(FormulaBit):
  "A parameter inside a macro definition, of the form #n."
  def detect(self, pos):
    "A macro parameter starts with a hash sign."
    return pos.checkfor('#')
  def parsebit(self, pos):
    "Read the #n parameter and remember its number."
    if not pos.checkskip('#'):
      Trace.error('Missing parameter start #.')
      return
    self.number = int(pos.skipcurrent())
    original = '#' + unicode(self.number)
    self.original = original
    self.contents = [TaggedBit().constant(original, 'span class="unknown"')]
class MacroFunction(CommandBit):
  "A function that was defined using a macro."
  commandmap = MacroDefinition.macros
  def parsebit(self, pos):
    "Parse the macro arguments and instantiate the macro with them."
    self.output = FilteredOutput()
    self.values = []
    macro = self.translated
    self.parseparameters(pos, macro)
    self.completemacro(macro)
  def parseparameters(self, pos, macro):
    "Parse as many parameters as are needed."
    self.parseoptional(pos, list(macro.defaults))
    self.parsemandatory(pos, macro.parameternumber - len(macro.defaults))
    if len(self.values) < macro.parameternumber:
      Trace.error('Missing parameters in macro ' + unicode(self))
  def parseoptional(self, pos, defaults):
    "Parse optional parameters, filling in defaults for those not given."
    optional = []
    while self.factory.detecttype(SquareBracket, pos):
      optional.append(self.parsesquare(pos))
      if len(optional) > len(defaults):
        break
    for value in optional:
      # consume defaults front-to-back so the first optional argument pairs
      # with the first default; the old pop() from the end paired them in
      # reverse order and raised IndexError when there were more brackets
      # than defaults
      if len(defaults) > 0:
        default = defaults.pop(0)
      else:
        default = value
      if len(value.contents) > 0:
        self.values.append(value)
      else:
        # an empty [] falls back to the default value
        self.values.append(default)
    # defaults that were never overridden become values as well
    self.values += defaults
  def parsemandatory(self, pos, number):
    "Parse a number of mandatory parameters."
    for index in range(number):
      parameter = self.parsemacroparameter(pos, number - index)
      if not parameter:
        return
      self.values.append(parameter)
  def parsemacroparameter(self, pos, remaining):
    """Parse a macro parameter: a bracket, or a single letter.
    A running number with as many digits as remaining parameters is read
    as separate one-digit parameters (e.g. 12 reads as {1}{2})."""
    self.factory.clearskipped(pos)
    if pos.finished():
      return None
    if self.factory.detecttype(FormulaNumber, pos):
      return self.parsenumbers(pos, remaining)
    return self.parseparameter(pos)
  def parsenumbers(self, pos, remaining):
    """Parse the remaining parameters as a running number.
    For example, 12 would be {1}{2}."""
    number = self.factory.parsetype(FormulaNumber, pos)
    if len(number.original) != remaining:
      return number
    for digit in number.original:
      value = self.factory.create(FormulaNumber)
      value.add(FormulaConstant(digit))
      # NOTE(review): this stores the whole FormulaNumber object in .type;
      # possibly number.type was intended — kept as is to preserve behavior
      value.type = number
      self.values.append(value)
    return None
  def completemacro(self, macro):
    "Complete the macro with the parameters read."
    self.contents = [macro.instantiate()]
    replaced = [False] * len(self.values)
    for parameter in self.searchall(MacroParameter):
      index = parameter.number - 1
      if index >= len(self.values):
        Trace.error('Macro parameter index out of bounds: ' + unicode(index))
        return
      replaced[index] = True
      parameter.contents = [self.values[index].clone()]
    # values whose #n never appeared in the definition become output filters
    for index in range(len(self.values)):
      if not replaced[index]:
        self.addfilter(index, self.values[index])
  def addfilter(self, index, value):
    "Add a filter for the given parameter number and parameter value."
    original = '#' + unicode(index + 1)
    # render the value actually passed for this index; the old code always
    # rendered self.values[0] regardless of the index parameter
    html = ''.join(value.gethtml())
    self.output.addfilter(original, html)
class FormulaMacro(Formula):
  "A math macro defined in an inset."
  def __init__(self):
    "Use a macro parser and produce no output of its own."
    self.output = EmptyOutput()
    self.parser = MacroParser()
  def __unicode__(self):
    "Return a printable representation."
    return 'Math macro'
# Register the macro machinery with the generic formula parsers.
FormulaFactory.types += [ MacroParameter ]

FormulaCommand.types += [
    MacroFunction,
    ]
def math2html(formula):
    "Convert some TeX math to HTML."
    # Parse, post-process, then render the formula tree as HTML.
    parsed = FormulaFactory().parseformula(formula)
    FormulaProcessor().process(parsed)
    parsed.process()
    return ''.join(parsed.gethtml())
def main():
    "Main function, called if invoked from the command line."
    # parseoptions() presumably removes the program name and any options
    # from args in place, leaving only the formula — TODO confirm in
    # Options.parseoptions.
    args = sys.argv
    Options().parseoptions(args)
    if len(args) != 1:
        Trace.error('Usage: math2html.py escaped_string')
        exit()
    result = math2html(args[0])
    Trace.message(result)

if __name__ == '__main__':
    main()
|
team-ferret/pip-in-toto
|
refs/heads/master
|
pip/_vendor/html5lib/_trie/__init__.py
|
456
|
from __future__ import absolute_import, division, unicode_literals

# The pure-Python Trie is always importable and is the fallback.
from .py import Trie as PyTrie

Trie = PyTrie
# pylint:disable=wrong-import-position
# Prefer the datrie-backed implementation when that optional dependency
# is installed; otherwise silently keep the pure-Python one.
try:
    from .datrie import Trie as DATrie
except ImportError:
    pass
else:
    Trie = DATrie
# pylint:enable=wrong-import-position
|
nikolas/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/contrib/gis/db/backends/spatialite/introspection.py
|
401
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass that extends `base_data_types_reverse` so that every OGC
    geometry column type maps back to a Django `GeometryField`.
    """
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    base_data_types_reverse.update(dict.fromkeys(
        ('point', 'linestring', 'polygon',
         'multipoint', 'multilinestring', 'multipolygon',
         'geometrycollection'),
        'GeometryField'))
class SpatiaLiteIntrospection(DatabaseIntrospection):
    # Geometry-aware lookup dict so reverse introspection maps SpatiaLite
    # geometry columns onto GeometryField.
    data_types_reverse = GeoFlexibleFieldLookupDict()

    def get_geometry_type(self, table_name, geo_col):
        """Return (field_type, field_params) for the geometry column
        `geo_col` of `table_name`, using SpatiaLite's `geometry_columns`
        metadata table.

        Raises Exception when the column has no metadata entry.
        """
        cursor = self.connection.cursor()
        try:
            # Querying the `geometry_columns` table to get additional metadata.
            cursor.execute('SELECT "coord_dimension", "srid", "type" '
                           'FROM "geometry_columns" '
                           'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                           (table_name, geo_col))
            row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry column for "%s"."%s"' %
                                (table_name, geo_col))
            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django
            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                field_params['srid'] = srid
            # NOTE(review): `basestring` is Python 2 only.  `dim` appears to
            # be either an int or a string like 'XYZ', where a 'Z' marks
            # three-dimensional geometries — confirm against SpatiaLite docs.
            if isinstance(dim, basestring) and 'Z' in dim:
                field_params['dim'] = 3
        finally:
            # Always release the cursor; on success fall through to return.
            cursor.close()
        return field_type, field_params
|
adviti/melange
|
refs/heads/master
|
thirdparty/google_appengine/google/appengine/_internal/django/template/loaders/cached.py
|
23
|
"""
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
from google.appengine._internal.django.core.exceptions import ImproperlyConfigured
from google.appengine._internal.django.template import TemplateDoesNotExist
from google.appengine._internal.django.template.loader import BaseLoader, get_template_from_string, find_template_loader, make_origin
from google.appengine._internal.django.utils.hashcompat import sha_constructor
from google.appengine._internal.django.utils.importlib import import_module
class Loader(BaseLoader):
    """Loader that delegates to a list of wrapped loaders and caches the
    compiled template objects it obtains from them."""
    is_usable = True

    def __init__(self, loaders):
        self.template_cache = {}
        self._loaders = loaders
        self._cached_loaders = []

    @property
    def loaders(self):
        # Resolve loader dotted paths lazily to avoid circular imports.
        if not self._cached_loaders:
            self._cached_loaders = [find_template_loader(spec)
                                    for spec in self._loaders]
        return self._cached_loaders

    def find_template(self, name, dirs=None):
        """Return (source, origin) from the first wrapped loader that
        knows the template; raise TemplateDoesNotExist otherwise."""
        for loader in self.loaders:
            try:
                source, display_name = loader(name, dirs)
            except TemplateDoesNotExist:
                continue
            return (source, make_origin(display_name, loader, name, dirs))
        raise TemplateDoesNotExist(name)

    def load_template(self, template_name, template_dirs=None):
        # Hash the directory list into the key so the same name searched
        # under different dirs gets distinct cache entries.
        if template_dirs:
            key = '-'.join([template_name, sha_constructor('|'.join(template_dirs)).hexdigest()])
        else:
            key = template_name
        try:
            return self.template_cache[key], None
        except KeyError:
            pass
        template, origin = self.find_template(template_name, template_dirs)
        if not hasattr(template, 'render'):
            try:
                template = get_template_from_string(template, origin, template_name)
            except TemplateDoesNotExist:
                # If compiling the template we found raises
                # TemplateDoesNotExist, back off to returning the source and
                # display name for the template we were asked to load.  This
                # allows for correct identification (later) of the actual
                # template that does not exist.
                return template, origin
        self.template_cache[key] = template
        return template, None

    def reset(self):
        "Empty the template cache."
        self.template_cache.clear()
|
basicthinker/THNVM
|
refs/heads/master
|
tests/configs/realview-simple-atomic.py
|
61
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *

# Uniprocessor ARM Linux full-system configuration: atomic (functional,
# fast-forward style) CPU model against a simple fixed-latency memory.
root = LinuxArmFSSystemUniprocessor(mem_mode='atomic',
                                    mem_class=SimpleMemory,
                                    cpu_class=AtomicSimpleCPU).create_root()
|
40223214/-2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_weakrefset.py
|
766
|
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet:
    """Set of objects held through weak references.

    An element is dropped automatically once no strong references to it
    remain.  Removals triggered while the set is being iterated are
    deferred (via _IterationGuard) and committed afterwards.
    """

    def __init__(self, data=None):
        self.data = set()
        def _remove(item, selfref=ref(self)):
            # Weakref callback: the referent of `item` died.  Defer the
            # removal if an iteration is in progress, otherwise drop now.
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        if data is not None:
            self.update(data)

    def _commit_removals(self):
        # Flush removals deferred while the set was being iterated.
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())

    def __iter__(self):
        # Guard the iteration so concurrent removals are deferred.
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    yield item

    def __len__(self):
        # Exclude entries already scheduled for removal.
        return len(self.data) - len(self._pending_removals)

    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            # Objects that cannot be weak-referenced are never members.
            return False
        return wr in self.data

    def __reduce__(self):
        # Pickle as (class, strong-ref element list, instance dict).
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))

    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        # Register _remove as the death callback for the new reference.
        self.data.add(ref(item, self._remove))

    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                # Skip references whose referents have already died.
                return item

    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))

    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))

    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        for element in other:
            self.add(element)

    def __ior__(self, other):
        self.update(other)
        return self

    def difference(self, other):
        newset = self.copy()
        newset.difference_update(other)
        return newset
    __sub__ = difference

    def difference_update(self, other):
        self.__isub__(other)

    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            # x -= x empties the set without building temporary refs.
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self

    def intersection(self, other):
        return self.__class__(item for item in other if item in self)
    __and__ = intersection

    def intersection_update(self, other):
        self.__iand__(other)

    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self

    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __le__ = issubset

    def __lt__(self, other):
        return self.data < set(ref(item) for item in other)

    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __ge__ = issuperset

    def __gt__(self, other):
        return self.data > set(ref(item) for item in other)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)

    def symmetric_difference(self, other):
        newset = self.copy()
        newset.symmetric_difference_update(other)
        return newset
    __xor__ = symmetric_difference

    def symmetric_difference_update(self, other):
        self.__ixor__(other)

    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            # x ^= x is always empty.
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
        return self

    def union(self, other):
        return self.__class__(e for s in (self, other) for e in s)
    __or__ = union

    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
|
mmauroy/SickRage
|
refs/heads/master
|
lib/rtorrent/lib/xmlrpc/http.py
|
180
|
# Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.compat import xmlrpclib

# Plain HTTP transport needs no customization: xmlrpclib's stock
# ServerProxy already handles it, so re-export it under the local name.
HTTPServerProxy = xmlrpclib.ServerProxy
|
sergiorb/askkit
|
refs/heads/master
|
allauth/socialaccount/providers/persona/tests.py
|
61
|
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from allauth.utils import get_user_model
# Pin the Persona audience so assertions are only valid for this site.
# NOTE(review): port 433 looks like a typo for 443, but the tests only
# require the setting to be present — confirm before reusing elsewhere.
SOCIALACCOUNT_PROVIDERS = {'persona':
                           {'AUDIENCE': 'https://www.example.com:433'}}
class PersonaTests(TestCase):

    @override_settings(SOCIALACCOUNT_PROVIDERS=SOCIALACCOUNT_PROVIDERS)
    def test_login(self):
        """Posting a valid assertion logs the user in and creates the
        matching account."""
        # Stub out the HTTP call the view makes to the Persona verifier.
        with patch('allauth.socialaccount.providers.persona.views'
                   '.requests') as requests_mock:
            requests_mock.post.return_value.json.return_value = {
                'status': 'okay',
                'email': 'persona@mail.com'
            }
            resp = self.client.post(reverse('persona_login'),
                                    dict(assertion='dummy'))
            # Successful login redirects to the default profile page.
            self.assertEqual('http://testserver/accounts/profile/',
                             resp['location'])
            # Raises DoesNotExist if signup did not auto-create the user.
            get_user_model().objects.get(email='persona@mail.com')
|
deepesch/scikit-learn
|
refs/heads/master
|
examples/manifold/plot_compare_methods.py
|
259
|
"""
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>

print(__doc__)

from time import time

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter

from sklearn import manifold, datasets

# Next line to silence pyflakes. This import is needed.
Axes3D

# S-curve dataset: n_points 3-D samples plus a color value reused in
# every plot so embeddings can be compared visually.
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2

fig = plt.figure(figsize=(15, 8))
# Use n_points instead of the hard-coded 1000 so title and data agree.
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_points, n_neighbors), fontsize=14)

try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(251, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
    ax.view_init(4, -72)
except Exception:
    # Older matplotlib without 3d axes: fall back to a 2-D projection.
    ax = fig.add_subplot(251, projection='3d')
    plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)

methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']

# The four LLE variants fill subplots 2-5 of the 2x5 grid.
for i, method in enumerate(methods):
    t0 = time()
    Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
                                        eigen_solver='auto',
                                        method=method).fit_transform(X)
    t1 = time()
    print("%s: %.2g sec" % (methods[i], t1 - t0))

    ax = fig.add_subplot(252 + i)
    plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
    plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')

t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
                                n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
# BUG FIX: the original called fig.add_subplot(250).  In the three-digit
# shorthand the last digit is the 1-based cell index, so index 0 raises
# ValueError in matplotlib.  t-SNE belongs in the last cell of the 2x5
# grid, which needs the explicit form.
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

plt.show()
|
reingart/web2conf
|
refs/heads/master
|
models/app_settings.py
|
4
|
# -*- coding: utf-8 -*-

# set user selected language (default spanish)
if request.vars.lang: session.lang=request.vars.lang
T.force(session.lang or "es")

# Return service unavailable
# for maintenance
SUSPEND_SERVICE = False

# True for accepting user activity votes
ALLOW_VOTE = False

# call for proposals
CFP = True

######################################
### PARAMETERS
######################################

import datetime

# Detect whether we are running under Google App Engine (GQL importable).
try:
    from gluon.contrib.gql import *
except ImportError:
    is_gae=False
else:
    is_gae=True

VERSION=0.5

# Set available languages:
T.current_languages=['es','es-ar','es-es']

# If Developer Test, turn off email verification and recaptcha checks,
# db-pooling, use GCO sandbox, etc.:
# DEV_TEST=True # settings suitable for development work
# NOTE(review): the inline comment says "Deployed settings" but the dev
# value (True) is enabled — confirm before releasing.
DEV_TEST=True # Deployed settings

if DEV_TEST:
    DBURI='sqlite://development.db'
    DBPOOLS=0
    # to test translations; example: http://..../function?force_language=es
    if request.vars.force_language: session.language=request.vars.force_language
    if session.language: T.force(session.language)
else:
    # DBURI set in app_setting_private.py (unversioned file)
    DBURI=None
    DBPOOLS=0
TWITTER_HASH = "pyconar"

# Site-wide response defaults rendered by the layout.
response.title=T('web2conf')
response.subtitle=''
response.footer=T("""Conference description<b>dates</b> city (organized by <a href="#">users group</a>). <br/>
More info: <a href="#">blog</a> Contact: <a href="#">mail address</a>""")
response.keywords='python, free software'
response.description=T('Powered by web2py')

# Enable or disable dynamic menu
NAVBAR = False

# GOOGLEMAP_KEY set in app_settings_private.py - here just to ensure definition
GOOGLEMAP_KEY=''
# The following GOOGLE items set in app_settings_private.py - here to ensure defaults:
GOOGLE_MERCHANT_ID=''
GOOGLE_MERCHANT_KEY=''
GOOGLE_SANDBOX=DEV_TEST

# Event link in social networks
LINKEDIN_EVENT = ""
FACEBOOK_EVENT = ""

# Registration form choices and their translated labels (kept in sync by
# position).
FOOD_PREFERENCES=('normal','vegetarian','vegan','kosher','halal')
FOOD_PREFERENCES_LABELS=(T('normal'),T('vegetarian'),T('vegan'),T('kosher'),T('halal'))
T_SHIRT_SIZES=('', 'S','M','L','XL','XXL','XXXL',)
T_SHIRT_SIZES_LABELS=(T('no, thanks'), T("small"),T("medium"),T("large"),T("xlarge"),T("xxlarge"), T("xxxlarge"),)

# TODAY_DATE is here so that comparizons w/ cutoff dates
# will work properly anywhere in web2conf
# NOTE: we add 6 hours since our server is EST, and this will cover Hawaii
# will want to have these times be session time local in next rev.
TODAY_DATE=datetime.datetime.today()
PROPOSALS_DEADLINE_DATE=datetime.datetime(2013,10,19,23,59,59)
REVIEW_DEADLINE_DATE=datetime.datetime(2013,7,29,23,59,59)
EARLYBIRD_DATE=datetime.datetime(2013,10,12,23,59,0)
PRECONF_DATE=datetime.datetime(2013,11,2,23,59,0)
FACUTOFF_DATE=datetime.datetime(2013,9,30,23,59,0)
REGCLOSE_DATE=datetime.datetime(2013,11,18,23,59,59)
CONFERENCE_DATE=datetime.datetime(2013,10,24,8,00,00)

SIMPLIFIED_REGISTRATION=False # don't ask password on registration

### fix this ...
ATTENDEE_TYPES=(
    ('gratis',T('Gratuito, $0')),
)

# Ticket prices per attendee type and registration period.
ATTENDEE_TYPE_COST=dict(
    professional=dict(general=250, preconf=195, earlybird=175, speaker=125),
    enthusiast=dict(general=150, preconf=130, earlybird=115, speaker=85),
    novice=dict(general=85, preconf=75, earlybird=65, speaker=75),
    gratis=dict(general=0, preconf=0, earlybird=0, speaker=0),
)
# Fallback for attendees with no type selected yet.
ATTENDEE_TYPE_COST[None]=dict(general=0, preconf=0, earlybird=0, speaker=0)
ATTENDEE_TYPE_TEXT=dict(
    professional="t-shirt, catering, closing party, pro listing (micro-sponsor: logo in badge and web site), and other extra goodies",
    enthusiast="t-shirt, catering and other extra goodies",
    novice="t-shirt",
    gratis="badge, certificate, program guide, community magazine and special benefits (subject to availability)",
)

TUTORIALS_LIST=(
)
TUTORIALS=dict(TUTORIALS_LIST) ### do not remove
TUTORIALS_CAPS={
}
COST_FIRST_TUTORIAL=120.0
COST_SECOND_TUTORIAL=80.0
# While the call for proposals is open, only user-proposable types exist.
if CFP:
    ACTIVITY_TYPES = ('talk', 'extreme talk')
else:
    # default activities
    ACTIVITY_TYPES= ('keynote', 'panel', 'plenary',
                     'talk', 'extreme talk', 'poster',
                     'tutorial', 'workshop', 'project',
                     'stand', 'summit', 'open space',
                     'social', 'break', 'lightning talk',
                     'sprint', 'paper',
                     'special')

ACTIVITY_CATEGORIES=sorted(('py3k','gui','web','cli','herramientas',
                            'lenguaje','fomento','core','educación',
                            'ciencia','académico','comunidad','moviles',
                            'caso de estudio','redes','juegos','seguridad',
                            'testing'))

# override other activities
ACTIVITY_COMMON = ["plenary", "lightning talk", "conference break", "break", "social"]
ACTIVITY_VOTEABLE = ['keynote', 'talk', 'extreme talk', 'tutorial', 'workshop']
ACTIVITY_REVIEWABLE = ACTIVITY_VOTEABLE + ['poster']
ACTIVITY_LEVELS=("Beginner","Intermediate","Advanced")
ACTIVITY_TRACKS=("General", "Science", "Student Works", "Extreme")
# Default slot length in minutes per activity type (0 = unscheduled).
ACTIVITY_DURATION={'talk': 40, 'extreme talk': 30, 'tutorial': 120, 'workshop': 0, 'poster': 0, 'project': 0, 'panel': 45, 'plenary': 60, 'keynote': 60}
# TODO: create a room table (id, name, venue)!
ACTIVITY_ROOMS={1: "Auditorium", 2: "Room A", 3: "Room B", 4: "Room C", 7: "Meeting Room", 0: "-"}
ACTIVITY_ROOMS_ADDRESS={1: "", 2: "", 3: "", 4: "", 0: "-"}
# Estimate room sizes (actual size*attendance factor: 0.30 (talks), *1 for workshops, 0.60 for sprints (shared))
ACTIVITY_ROOMS_EST_SIZES={1: 40, 2: 40, 3: 40, 4: 40, 5: 38, 6: 60, 7: 8, 8: 8, 9: 8, 10: 8, 11: 40, 0: "-"}
ACTIVITY_VENUE=SPAN(A("Main Venue \"Downtown\"", _href=URL(c="venue")))
ACTIVITY_SHOW_DESCRIPTION = False # hide desc to public
ACTIVITY_BACKUP_TO = "pyconar2013@gmail.com"

# Per-type overrides of the global proposal deadline.
PROPOSALS_DEADLINE_DATE_PER_ACTIVITY_TYPE={
    'talk': datetime.datetime(2013,6,30,23,59,59),
    'extreme talk': datetime.datetime(2013,6,30,23,59,59),
    'tutorial': datetime.datetime(2013,6,30,23,59,59),
    'keynote': datetime.datetime(2013,9,12,0,0,0),
    'plenary': datetime.datetime(2013,9,12,0,0,0),
    'poster': datetime.datetime(2013,10,19,23,59,59),
    'paper': datetime.datetime(2013,9,12,0,0,0),
    'project': datetime.datetime(2013,10,12,0,0,0),
    'stand': datetime.datetime(2013,10,12,0,0,0),
    'sprint': datetime.datetime(2013,10,12,0,0,0),
}

ON_PROPOSE_EMAIL = "edvm@fedoraproject.org" #email address list, separated by ";"

# Notification message templates (translated once at definition time;
# %(...)s placeholders are filled in when the mail is sent).
PROPOSE_NOTIFY_TEXT = str(T("""Your activity proposal %(activity)s has been recorded.
You can access the current activity information at %(link)s
Thank you"""))
PROPOSE_NOTIFY_SUBJECT = str(T("New activity proposal %(activity)s"))
COMMENT_NOTIFY_TEXT = str(T("""Your activity %(activity)s received a comment by %(user)s:
%(comment)s
"""))
COMMENT_NOTIFY_SUBJECT = str(T("The activity %(activity)s received a comment"))
REVIEW_NOTIFY_TEXT = str(T("A review of your activity %(activity)s has been created or updated by %(user)s."))
REVIEW_NOTIFY_SUBJECT = str(T("Activity %(activity)s review"))
CONFIRM_NOTIFY_TEXT = str(T("""Your activity %(activity)s has been confirmed.
You can access the current activity information at %(link)s"""))
CONFIRM_NOTIFY_SUBJECT = str(T("The activity %(activity)s was confirmed"))

SPONSOR_LEVELS=("Organizer", "Gold", "Silver", "Bronx", "Specials Thanks", "Media", "Adherent")
# verify by email, unless running a developer test:
EMAIL_VERIFICATION= not DEV_TEST
EMAIL_SERVER='localhost:25' #or Configure!
EMAIL_AUTH=None # or 'username:password'
EMAIL_SENDER='pyconar2013@gmail.com'
# on production, mail should be sent by a cron job or similar
# (really, avoid timeout issues and problems like google spam filtering)
MAIL_QUEUE = not DEV_TEST

# for FA applications / communication
FA_EMAIL_UPDATES=True
FA_EMAIL_TO=EMAIL_SENDER

# for testing:
# disable recaptcha by setting DEV_TEST at the top of this file:
DO_RECAPTCHA= not DEV_TEST
# RECAPTCHA public and private keys are set in app_settings_private.py
# - here to ensure defaults:
RECAPTCHA_PUBLIC_KEY=''
RECAPTCHA_PRIVATE_KEY=''

# enable to use social networks single-sign-on
JANRAIN = False

# modules
ENABLE_TALKS=True
ENABLE_EXPENSES = False
ENABLE_FINANCIAL_AID = True
ENABLE_PAYMENTS = True
ENABLE_BADGE = True

if DEV_TEST: # for local development
    HOST='localhost:8000'
    HOST_NEXT='localhost:8000'
else:
    HOST=''
    HOST_NEXT=''

HOTELS=('unknown','Hyatt Regency','Crowne Plaza','other','none')

EMAIL_VERIFY_SUBJECT=str(T("%s Registration Confirmation") % response.title)
# The verification link embeds the current host and a %(key)s placeholder
# filled in per user when the mail is sent.
EMAIL_VERIFY_BODY=str(T("""
Dear Attendee,\n
To proceed with your registration and verify your email, click on the following link:\n
%s\n--\n%s\n""") % (
    "http://%s%s/%%(key)s" % (request.env.http_host, URL(r=request,f='verify')),
    response.title))

IMAP_URI = None

PASSWORD_RETRIEVE_SUBJECT=str(T("%s Registration Password") % response.title)
PASSWORD_RETRIEVE_BODY=str(T("Your new password is %(password)s"))

INVOICE_HEADER = "This is a Conference Invoice!!!"

CONFERENCE_URL=None
CONFERENCE_COORDS=-20.2597103,-61.4510078 #-31.2597103,-61.4510078

from misc_utils import COUNTRIES, FLAGS
def caching(fn):
    "Special cache decorator (do not cache if user is logged in)"
    # Any per-request state (vars, args, flashes, login) makes the page
    # dynamic, so serve it uncached.
    dynamic = (DEV_TEST or request.vars or request.args or response.flash
               or session.flash or auth.is_logged_in())
    if dynamic:
        return fn
    session.forget()  # only if no session.flash (allow to clean it!)
    return cache(request.env.path_info, time_expire=60*5, cache_model=cache.ram)(fn)
|
postlund/home-assistant
|
refs/heads/dev
|
tests/components/somfy/__init__.py
|
29
|
"""Tests for the Somfy component."""
|
bobeirasa/virtualenvs
|
refs/heads/master
|
pygeckozabbix/lib/python2.7/site-packages/requests/certs.py
|
961
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
certs.py
~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
def where():
    """Return the preferred certificate bundle.

    Points at the cacert.pem vendored inside Requests, next to this
    module.
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, 'cacert.pem')

if __name__ == '__main__':
    print(where())
|
KaranToor/MA450
|
refs/heads/master
|
google-cloud-sdk/lib/surface/app/regions/list.py
|
3
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The `app regions list` command."""
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.calliope import base
class List(base.ListCommand):
    """List the availability of flex and standard environments for each region."""

    # Help text surfaced by the calliope CLI framework; {description} and
    # {command} are substituted when the help page is rendered.
    detailed_help = {
        'DESCRIPTION': '{description}',
        'EXAMPLES': """\
To view regional availability of App Engine runtime environments, run:
$ {command}
""",
    }

    def Collection(self):
        # Collection name drives the default output formatting/URI mapping.
        return 'appengine.regions'

    def Run(self, args):
        """Fetch the region list from the App Engine API, sorted."""
        api_client = appengine_api_client.GetApiClient()
        return sorted(api_client.ListRegions())
|
TieWei/nova
|
refs/heads/enhanced/havana
|
nova/cert/rpcapi.py
|
8
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the cert manager RPC API.
"""
from oslo.config import cfg
from nova import rpcclient
# Topic the cert service listens on; configurable per deployment.
rpcapi_opts = [
    cfg.StrOpt('cert_topic',
               default='cert',
               help='the topic cert nodes listen on'),
]

CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)

# Optional version cap under [upgrade_levels], used to limit message
# versions during rolling upgrades.
rpcapi_cap_opt = cfg.StrOpt('cert',
        help='Set a version cap for messages sent to cert services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class CertAPI(rpcclient.RpcProxy):
    '''Client side of the cert rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Added get_backdoor_port()

    ... Grizzly supports message version 1.1.  So, any changes to existing
    methods in 2.x after that point should be done such that they can
    handle the version_cap being set to 1.1.
    '''

    #
    # NOTE(russellb): This is the default minimum version that the server
    # (manager) side must implement unless otherwise specified using a version
    # argument to self.call()/cast()/etc. here. It should be left as X.0 where
    # X is the current major API version (1.0, 2.0, ...). For more information
    # about rpc API versioning, see the docs in
    # openstack/common/rpc/dispatcher.py.
    #
    BASE_RPC_API_VERSION = '1.0'

    # Maps release names to the highest message version that release
    # understands, for the upgrade_levels.cert option.
    VERSION_ALIASES = {
        'grizzly': '1.1',
    }

    def __init__(self):
        # Resolve a release alias (e.g. 'grizzly') to a concrete version
        # cap; fall back to the literal configured value.
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.cert,
                                               CONF.upgrade_levels.cert)
        super(CertAPI, self).__init__(
                topic=CONF.cert_topic,
                default_version=self.BASE_RPC_API_VERSION,
                version_cap=version_cap)
        self.client = self.get_client()

    def revoke_certs_by_user(self, ctxt, user_id):
        """Synchronously invoke 'revoke_certs_by_user' on the cert service."""
        return self.client.call(ctxt, 'revoke_certs_by_user', user_id=user_id)

    def revoke_certs_by_project(self, ctxt, project_id):
        """Synchronously invoke 'revoke_certs_by_project' on the cert service."""
        return self.client.call(ctxt, 'revoke_certs_by_project',
                                project_id=project_id)

    def revoke_certs_by_user_and_project(self, ctxt, user_id, project_id):
        """Synchronously invoke 'revoke_certs_by_user_and_project'."""
        return self.client.call(ctxt, 'revoke_certs_by_user_and_project',
                                user_id=user_id, project_id=project_id)

    def generate_x509_cert(self, ctxt, user_id, project_id):
        """Synchronously invoke 'generate_x509_cert' and return its result."""
        return self.client.call(ctxt, 'generate_x509_cert',
                                user_id=user_id,
                                project_id=project_id)

    def fetch_ca(self, ctxt, project_id):
        """Synchronously invoke 'fetch_ca' for the given project."""
        return self.client.call(ctxt, 'fetch_ca', project_id=project_id)

    def fetch_crl(self, ctxt, project_id):
        """Synchronously invoke 'fetch_crl' for the given project."""
        return self.client.call(ctxt, 'fetch_crl', project_id=project_id)

    def decrypt_text(self, ctxt, project_id, text):
        """Synchronously invoke 'decrypt_text' and return its result."""
        return self.client.call(ctxt, 'decrypt_text',
                                project_id=project_id,
                                text=text)
|
mmazanec22/too-windy
|
refs/heads/master
|
env/lib/python3.5/site-packages/setuptools/command/py36compat.py
|
286
|
import os
from glob import glob
from distutils.util import convert_path
from distutils.command import sdist
from setuptools.extern.six.moves import filter
class sdist_add_defaults:
    """
    Mix-in providing forward-compatibility for functionality as found in
    distutils on Python 3.7.

    Do not edit the code in this class except to update functionality
    as implemented in distutils. Instead, override in the subclass.
    """

    def add_defaults(self):
        """Add all the default files to self.filelist:
          - README or README.txt
          - setup.py
          - test/test*.py
          - all pure Python modules mentioned in setup script
          - all files pointed by package_data (build_py)
          - all files defined in data_files.
          - all files defined as scripts.
          - all C sources listed as part of extensions or C libraries
            in the setup script (doesn't catch C headers!)
        Warns if (README or README.txt) or setup.py are missing; everything
        else is optional.
        """
        self._add_defaults_standards()
        self._add_defaults_optional()
        self._add_defaults_python()
        self._add_defaults_data_files()
        self._add_defaults_ext()
        self._add_defaults_c_libs()
        self._add_defaults_scripts()

    @staticmethod
    def _cs_path_exists(fspath):
        """
        Case-sensitive path existence check

        >>> sdist_add_defaults._cs_path_exists(__file__)
        True

        >>> sdist_add_defaults._cs_path_exists(__file__.upper())
        False
        """
        if not os.path.exists(fspath):
            return False
        # make absolute so we always have a directory
        abspath = os.path.abspath(fspath)
        directory, filename = os.path.split(abspath)
        # os.path.exists can be case-insensitive on some filesystems;
        # comparing against the actual directory listing enforces an
        # exact-case match.
        return filename in os.listdir(directory)

    def _add_defaults_standards(self):
        # self.READMES (supplied by the subclass) lists the accepted README
        # spellings; setup.py comes from the distribution's script name.
        standards = [self.READMES, self.distribution.script_name]
        for fn in standards:
            if isinstance(fn, tuple):
                # A tuple holds alternative names; the first one that
                # exists (case-sensitively) is included.
                alts = fn
                got_it = False
                for fn in alts:
                    if self._cs_path_exists(fn):
                        got_it = True
                        self.filelist.append(fn)
                        break
                if not got_it:
                    self.warn("standard file not found: should have one of " +
                              ', '.join(alts))
            else:
                if self._cs_path_exists(fn):
                    self.filelist.append(fn)
                else:
                    self.warn("standard file '%s' not found" % fn)

    def _add_defaults_optional(self):
        # Optional files: include when present, no warning otherwise.
        optional = ['test/test*.py', 'setup.cfg']
        for pattern in optional:
            files = filter(os.path.isfile, glob(pattern))
            self.filelist.extend(files)

    def _add_defaults_python(self):
        # build_py is used to get:
        #  - python modules
        #  - files defined in package_data
        build_py = self.get_finalized_command('build_py')
        # getting python files
        if self.distribution.has_pure_modules():
            self.filelist.extend(build_py.get_source_files())
        # getting package_data files
        # (computed in build_py.data_files by build_py.finalize_options)
        for pkg, src_dir, build_dir, filenames in build_py.data_files:
            for filename in filenames:
                self.filelist.append(os.path.join(src_dir, filename))

    def _add_defaults_data_files(self):
        # getting distribution.data_files
        if self.distribution.has_data_files():
            for item in self.distribution.data_files:
                if isinstance(item, str):
                    # plain file
                    item = convert_path(item)
                    if os.path.isfile(item):
                        self.filelist.append(item)
                else:
                    # a (dirname, filenames) tuple
                    dirname, filenames = item
                    for f in filenames:
                        f = convert_path(f)
                        if os.path.isfile(f):
                            self.filelist.append(f)

    def _add_defaults_ext(self):
        # C sources of extension modules.
        if self.distribution.has_ext_modules():
            build_ext = self.get_finalized_command('build_ext')
            self.filelist.extend(build_ext.get_source_files())

    def _add_defaults_c_libs(self):
        # C sources of plain C libraries.
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.filelist.extend(build_clib.get_source_files())

    def _add_defaults_scripts(self):
        # Files declared via the 'scripts' setup keyword.
        if self.distribution.has_scripts():
            build_scripts = self.get_finalized_command('build_scripts')
            self.filelist.extend(build_scripts.get_source_files())
# If the running distutils already provides the _add_defaults_* hooks
# (Python 3.7+), replace the mix-in defined above with an empty class so
# that the upstream implementation is used instead.
if hasattr(sdist.sdist, '_add_defaults_standards'):
    # disable the functionality already available upstream
    class sdist_add_defaults:
        pass
|
swiftype/swiftype-enterprise-python
|
refs/heads/master
|
elastic_workplace_search/client.py
|
1
|
from .request_session import RequestSession
from .apis.documents import Documents
from .apis.permissions import Permissions
"""API client for Elastic Workplace Search"""
class Client:
    """Top-level API client for Elastic Workplace Search.

    Wires an authenticated HTTP session to the Documents and Permissions
    API groups.
    """

    # Default endpoint of a locally running Workplace Search instance.
    ELASTIC_WORKPLACE_SEARCH_BASE_URL = "http://localhost:3002/api/ws/v1"

    def __init__(self, authorization_token, base_url=ELASTIC_WORKPLACE_SEARCH_BASE_URL):
        """Create a client.

        :param authorization_token: token sent with every request.
        :param base_url: API root; defaults to the local instance URL.
        """
        self.authorization_token = authorization_token
        self.base_url = base_url
        session = RequestSession(authorization_token, base_url)
        self.session = session
        self.documents = Documents(session)
        self.permissions = Permissions(session)
|
izonder/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyPep8NamingInspection/importConstant.py
|
74
|
from x import TEST as <weak_warning descr="Constant variable imported as non constant">test</weak_warning>
|
Petr-Kovalev/nupic-win32
|
refs/heads/master
|
tests/unit/py2/nupic/math/array_algorithms_test.py
|
1
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for array algorithms."""
import unittest2 as unittest
import numpy
from nupic.bindings.math import nearlyZeroRange
class TestArrayAlgos(unittest.TestCase):
  """Tests for nupic.bindings.math.nearlyZeroRange on an all-zero vector."""

  def setUp(self):
    # A length-10 vector of zeros; nearlyZeroRange should accept it for
    # every tolerance exercised below.
    self.x = numpy.zeros(10)

  def testNearlyZeroRange1(self):
    """Default tolerance."""
    self.assertTrue(nearlyZeroRange(self.x))

  def testNearlyZeroRange2(self):
    """Very tight tolerance."""
    self.assertTrue(nearlyZeroRange(self.x, 1e-8))

  def testNearlyZeroRange3(self):
    """Loose tolerance."""
    self.assertTrue(nearlyZeroRange(self.x, 2))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
trenton42/txbalanced
|
refs/heads/master
|
scenarios/reversal_update/executable.py
|
1
|
import balanced

# Test-marketplace API key (the 'ak-test-' prefix marks it as non-production).
API_KEY = 'ak-test-1o9QKwUCrwstHWO5sGxICtIJdQXFTjnrV'
balanced.configure(API_KEY)

# Fetch an existing reversal, then update its description and metadata and
# persist the changes back to the API.
reversal = balanced.Reversal.fetch('/reversals/RV7DQpcc6sowPOMi29WTjlOU')
reversal.description = 'update this description'
reversal.meta = {
    'user.refund.count': '3',
    'refund.reason': 'user not happy with product',
    'user.notes': 'very polite on the phone',
}
reversal.save()
|
arsfeld/conduit
|
refs/heads/master
|
conduit/modules/GoogleModule/gdata/spreadsheet/text_db.py
|
6
|
#!/usr/bin/python
#
# Copyright Google 2007-2008, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import gdata
import gdata.service
import gdata.spreadsheet
import gdata.spreadsheet.service
import gdata.docs
import gdata.docs.service
"""Make the Google Documents API feel more like using a database.
This module contains a client and other classes which make working with the
Google Documents List Data API and the Google Spreadsheets Data API look a
bit more like working with a hierarchical database. Using the DatabaseClient,
you can create or find spreadsheets and use them like a database, with
worksheets representing tables and rows representing records.
Example Usage:
# Create a new database, a new table, and add records.
client = gdata.spreadsheet.text_db.DatabaseClient(username='jo@example.com',
password='12345')
database = client.CreateDatabase('My Text Database')
table = database.CreateTable('addresses', ['name','email',
'phonenumber', 'mailingaddress'])
record = table.AddRecord({'name':'Bob', 'email':'bob@example.com',
'phonenumber':'555-555-1234', 'mailingaddress':'900 Imaginary St.'})
# Edit a record
record.content['email'] = 'bob2@example.com'
record.Push()
# Delete a table
table.Delete
Warnings:
Care should be exercised when using this module on spreadsheets
which contain formulas. This module treats all rows as containing text and
updating a row will overwrite any formula with the output of the formula.
The intended use case is to allow easy storage of text data in a spreadsheet.
Error: Domain specific extension of Exception.
  BadCredentials: Error raised if the username or password was incorrect.
CaptchaRequired: Raised if a login attempt failed and a CAPTCHA challenge
was issued.
DatabaseClient: Communicates with Google Docs APIs servers.
Database: Represents a spreadsheet and interacts with tables.
Table: Represents a worksheet and interacts with records.
RecordResultSet: A list of records in a table.
Record: Represents a row in a worksheet allows manipulation of text data.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
class Error(Exception):
  """Base class for all exceptions raised by this module."""
class BadCredentials(Error):
  """Raised when the supplied username or password is rejected."""
class CaptchaRequired(Error):
  """Raised when login fails because Google issued a CAPTCHA challenge."""
class DatabaseClient(object):
  """Allows creation and finding of Google Spreadsheets databases.

  The DatabaseClient simplifies the process of creating and finding Google
  Spreadsheets and will talk to both the Google Spreadsheets API and the
  Google Documents List API.
  """

  def __init__(self, username=None, password=None):
    """Constructor for a Database Client.

    If the username and password are present, the constructor will contact
    the Google servers to authenticate.

    Args:
      username: str (optional) Example: jo@example.com
      password: str (optional)
    """
    # Two separate service clients are needed: the Docs client creates and
    # deletes spreadsheet documents, while the Spreadsheets client edits
    # worksheet content.
    self.__docs_client = gdata.docs.service.DocsService()
    self.__spreadsheets_client = (
        gdata.spreadsheet.service.SpreadsheetsService())
    self.SetCredentials(username, password)

  def SetCredentials(self, username, password):
    """Attempts to log in to Google APIs using the provided credentials.

    If the username or password are None, the client will not request auth
    tokens.

    Args:
      username: str (optional) Example: jo@example.com
      password: str (optional)

    Raises:
      CaptchaRequired: if Google issued a CAPTCHA challenge for this login.
      BadCredentials: if the username/password pair was rejected.
    """
    self.__docs_client.email = username
    self.__docs_client.password = password
    self.__spreadsheets_client.email = username
    self.__spreadsheets_client.password = password
    if username and password:
      try:
        # Both services must be authenticated independently.
        self.__docs_client.ProgrammaticLogin()
        self.__spreadsheets_client.ProgrammaticLogin()
      except gdata.service.CaptchaRequired:
        raise CaptchaRequired('Please visit https://www.google.com/accounts/'
                              'DisplayUnlockCaptcha to unlock your account.')
      except gdata.service.BadAuthentication:
        raise BadCredentials('Username or password incorrect.')

  def CreateDatabase(self, name):
    """Creates a new Google Spreadsheet with the desired name.

    Args:
      name: str The title for the spreadsheet.

    Returns:
      A Database instance representing the new spreadsheet.
    """
    # Create a Google Spreadsheet to form the foundation of this database.
    # Spreadsheet is created by uploading a file to the Google Documents
    # List API.
    virtual_csv_file = StringIO.StringIO(',,,')
    virtual_media_source = gdata.MediaSource(file_handle=virtual_csv_file, content_type='text/csv', content_length=3)
    db_entry = self.__docs_client.UploadSpreadsheet(virtual_media_source, name)
    return Database(spreadsheet_entry=db_entry, database_client=self)

  def GetDatabases(self, spreadsheet_key=None, name=None):
    """Finds spreadsheets which have the unique key or title.

    If querying on the spreadsheet_key there will be at most one result, but
    searching by name could yield multiple results.

    Args:
      spreadsheet_key: str The unique key for the spreadsheet, this
          usually in the the form 'pk23...We' or 'o23...423.12,,,3'.
      name: str The title of the spreadsheets.

    Returns:
      A list of Database objects representing the desired spreadsheets.
    """
    if spreadsheet_key:
      # Key lookup: fetch the single document entry directly.
      db_entry = self.__docs_client.GetDocumentListEntry(
          r'/feeds/documents/private/full/spreadsheet%3A' + spreadsheet_key)
      return [Database(spreadsheet_entry=db_entry, database_client=self)]
    else:
      # Title lookup: run a query, which may match several documents.
      title_query = gdata.docs.service.DocumentQuery()
      title_query['title'] = name
      db_feed = self.__docs_client.QueryDocumentListFeed(title_query.ToUri())
      matching_databases = []
      for entry in db_feed.entry:
        matching_databases.append(Database(spreadsheet_entry=entry,
            database_client=self))
      return matching_databases

  def _GetDocsClient(self):
    # Accessor used by Database/Table/Record, which cannot reach the
    # name-mangled private attribute directly.
    return self.__docs_client

  def _GetSpreadsheetsClient(self):
    # Accessor used by Database/Table/Record, which cannot reach the
    # name-mangled private attribute directly.
    return self.__spreadsheets_client
class Database(object):
  """Provides interface to find and create tables.

  The database represents a Google Spreadsheet.
  """

  def __init__(self, spreadsheet_entry=None, database_client=None):
    """Constructor for a database object.

    Args:
      spreadsheet_entry: gdata.docs.DocumentListEntry The
          Atom entry which represents the Google Spreadsheet. The
          spreadsheet's key is extracted from the entry and stored as a
          member.
      database_client: DatabaseClient A client which can talk to the
          Google Spreadsheets servers to perform operations on worksheets
          within this spreadsheet.
    """
    self.entry = spreadsheet_entry
    if self.entry:
      # The entry id ends in '.../spreadsheet%3A<key>'; keep just the key.
      id_parts = spreadsheet_entry.id.text.split('/')
      self.spreadsheet_key = id_parts[-1].replace('spreadsheet%3A', '')
    self.client = database_client

  def CreateTable(self, name, fields=None):
    """Add a new worksheet to this spreadsheet and fill in column names.

    Args:
      name: str The title of the new worksheet.
      fields: list of strings The column names which are placed in the
          first row of this worksheet. These names are converted into XML
          tags by the server. To avoid changes during the translation
          process I recommend using all lowercase alphabetic names. For
          example ['somelongname', 'theothername']

    Returns:
      Table representing the newly created worksheet.
    """
    # The worksheet starts with a single row, which Table.SetFields fills
    # with the column names.
    worksheet = self.client._GetSpreadsheetsClient().AddWorksheet(title=name,
        row_count=1, col_count=len(fields), key=self.spreadsheet_key)
    return Table(name=name, worksheet_entry=worksheet,
        database_client=self.client,
        spreadsheet_key=self.spreadsheet_key, fields=fields)

  def GetTables(self, worksheet_id=None, name=None):
    """Searches for a worksheet with the specified ID or name.

    The list of results should have one table at most, or no results
    if the id or name were not found.

    Args:
      worksheet_id: str The ID of the worksheet, example: 'od6'
      name: str The title of the worksheet.

    Returns:
      A list of length 0 or 1 containing the desired Table. A list is returned
      to make this method feel like GetDatabases and GetRecords.
    """
    if worksheet_id:
      worksheet_entry = self.client._GetSpreadsheetsClient().GetWorksheetsFeed(
          self.spreadsheet_key, wksht_id=worksheet_id)
      return [Table(name=worksheet_entry.title.text,
          worksheet_entry=worksheet_entry, database_client=self.client,
          spreadsheet_key=self.spreadsheet_key)]
    else:
      matching_tables = []
      title_query = gdata.spreadsheet.service.DocumentQuery()
      title_query.title = name
      worksheet_feed = self.client._GetSpreadsheetsClient().GetWorksheetsFeed(
          self.spreadsheet_key, query=title_query)
      for entry in worksheet_feed.entry:
        matching_tables.append(Table(name=entry.title.text,
            worksheet_entry=entry, database_client=self.client,
            spreadsheet_key=self.spreadsheet_key))
      return matching_tables

  def Delete(self):
    """Deletes the entire database spreadsheet from Google Spreadsheets."""
    # Fetch the document entry first so we have its current edit link,
    # which is required for the delete request.
    entry = self.client._GetDocsClient().Get(
        r'http://docs.google.com/feeds/documents/private/full/spreadsheet%3A' +
        self.spreadsheet_key)
    self.client._GetDocsClient().Delete(entry.GetEditLink().href)
class Table(object):
  """A worksheet within a spreadsheet, exposed as a database table.

  Rows are manipulated through Record objects. The first worksheet row
  holds the column names (see SetFields/LookupFields); data rows begin at
  row number 1 in the list-feed numbering.
  """

  def __init__(self, name=None, worksheet_entry=None, database_client=None,
               spreadsheet_key=None, fields=None):
    """Constructor for a table object.

    Args:
      name: str (optional) Title of the worksheet.
      worksheet_entry: (optional) Atom entry for the worksheet as returned
          by the Spreadsheets API; its id supplies the worksheet_id.
      database_client: DatabaseClient (optional) Client used for all
          server operations on this worksheet.
      spreadsheet_key: str (optional) Key of the enclosing spreadsheet.
      fields: list of strings (optional) When given, immediately written
          to the first row via SetFields.
    """
    self.name = name
    self.entry = worksheet_entry
    # The worksheet id is the last path segment of the entry id URL.
    id_parts = worksheet_entry.id.text.split('/')
    self.worksheet_id = id_parts[-1]
    self.spreadsheet_key = spreadsheet_key
    self.client = database_client
    self.fields = fields or []
    if fields:
      self.SetFields(fields)

  def LookupFields(self):
    """Queries to find the column names in the first row of the worksheet.

    Useful when you have retrieved the table from the server and you don't
    know the column names.
    """
    if self.entry:
      first_row_contents = []
      query = gdata.spreadsheet.service.CellQuery()
      query.max_row = '1'
      query.min_row = '1'
      feed = self.client._GetSpreadsheetsClient().GetCellsFeed(
          self.spreadsheet_key, wksht_id=self.worksheet_id, query=query)
      for entry in feed.entry:
        first_row_contents.append(entry.content.text)
      # Get the next set of cells if needed.
      next_link = feed.GetNextLink()
      # Follow feed pagination until all first-row cells are collected.
      while next_link:
        feed = self.client._GetSpreadsheetsClient().Get(next_link.href,
            converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString)
        for entry in feed.entry:
          first_row_contents.append(entry.content.text)
        next_link = feed.GetNextLink()
      # Convert the contents of the cells to valid headers.
      self.fields = ConvertStringsToColumnHeaders(first_row_contents)

  def SetFields(self, fields):
    """Changes the contents of the cells in the first row of this worksheet.

    Args:
      fields: list of strings The names in the list comprise the
          first row of the worksheet. These names are converted into XML
          tags by the server. To avoid changes during the translation
          process I recommend using all lowercase alphabetic names. For
          example ['somelongname', 'theothername']
    """
    # TODO: If the table already had fields, we might want to clear out the,
    # current column headers.
    self.fields = fields
    i = 0
    for column_name in fields:
      # Cell coordinates are 1-based: header cells are (row 1, col i).
      i = i + 1
      # TODO: speed this up by using a batch request to update cells.
      self.client._GetSpreadsheetsClient().UpdateCell(1, i, column_name,
          self.spreadsheet_key, self.worksheet_id)

  def Delete(self):
    """Deletes this worksheet from the spreadsheet."""
    # Re-fetch the worksheet entry so the delete uses a current edit link.
    worksheet = self.client._GetSpreadsheetsClient().GetWorksheetsFeed(
        self.spreadsheet_key, wksht_id=self.worksheet_id)
    self.client._GetSpreadsheetsClient().DeleteWorksheet(
        worksheet_entry=worksheet)

  def AddRecord(self, data):
    """Adds a new row to this worksheet.

    Args:
      data: dict of strings Mapping of string values to column names.

    Returns:
      Record which represents this row of the spreadsheet.
    """
    new_row = self.client._GetSpreadsheetsClient().InsertRow(data,
        self.spreadsheet_key, wksht_id=self.worksheet_id)
    return Record(content=data, row_entry=new_row,
        spreadsheet_key=self.spreadsheet_key, worksheet_id=self.worksheet_id,
        database_client=self.client)

  def GetRecord(self, row_id=None, row_number=None):
    """Gets a single record from the worksheet based on row ID or number.

    Args:
      row_id: The ID for the individual row.
      row_number: str or int The position of the desired row. Numbering
          begins at 1, which refers to the second row in the worksheet since
          the first row is used for column names.

    Returns:
      Record for the desired row.
    """
    if row_id:
      row_entry = self.client._GetSpreadsheetsClient().GetListFeed(
          self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=row_id)
      return Record(content=None, row_entry=row_entry,
          spreadsheet_key=self.spreadsheet_key,
          worksheet_id=self.worksheet_id, database_client=self.client)
    else:
      # No row id: query the list feed for exactly one row at the given
      # position.
      row_query = gdata.spreadsheet.service.ListQuery()
      row_query.start_index = str(row_number)
      row_query.max_results = '1'
      row_feed = self.client._GetSpreadsheetsClient().GetListFeed(
          self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
      if len(row_feed.entry) >= 1:
        return Record(content=None, row_entry=row_feed.entry[0],
            spreadsheet_key=self.spreadsheet_key,
            worksheet_id=self.worksheet_id, database_client=self.client)
      else:
        return None

  def GetRecords(self, start_row, end_row):
    """Gets all rows between the start and end row numbers inclusive.

    Args:
      start_row: str or int
      end_row: str or int

    Returns:
      RecordResultSet for the desired rows.
    """
    start_row = int(start_row)
    end_row = int(end_row)
    # Inclusive range: request (end - start + 1) rows starting at start_row.
    max_rows = end_row - start_row + 1
    row_query = gdata.spreadsheet.service.ListQuery()
    row_query.start_index = str(start_row)
    row_query.max_results = str(max_rows)
    rows_feed = self.client._GetSpreadsheetsClient().GetListFeed(
        self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
    return RecordResultSet(rows_feed, self.client, self.spreadsheet_key,
        self.worksheet_id)

  def FindRecords(self, query_string):
    """Performs a query against the worksheet to find rows which match.

    For details on query string syntax see the section on sq under
    http://code.google.com/apis/spreadsheets/reference.html#list_Parameters

    Args:
      query_string: str Examples: 'name == john' to find all rows with john
          in the name column, '(cost < 19.50 and name != toy) or cost > 500'

    Returns:
      RecordResultSet with the first group of matches.
    """
    row_query = gdata.spreadsheet.service.ListQuery()
    row_query.sq = query_string
    matching_feed = self.client._GetSpreadsheetsClient().GetListFeed(
        self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
    return RecordResultSet(matching_feed, self.client,
        self.spreadsheet_key, self.worksheet_id)
class RecordResultSet(list):
  """A collection of rows which allows fetching of the next set of results.

  The server may not send all rows in the requested range because there are
  too many. Using this result set you can access the first set of results
  as if it is a list, then get the next batch (if there are more results) by
  calling GetNext().
  """

  def __init__(self, feed, client, spreadsheet_key, worksheet_id):
    """Wraps a list feed, converting each feed entry into a Record.

    Args:
      feed: The Atom list feed holding this batch of rows.
      client: DatabaseClient Used to fetch subsequent batches.
      spreadsheet_key: str Key of the spreadsheet these rows belong to.
      worksheet_id: str ID of the worksheet these rows belong to.
    """
    self.client = client
    self.spreadsheet_key = spreadsheet_key
    self.worksheet_id = worksheet_id
    self.feed = feed
    # (A previous revision called list(self) here, which built and threw
    # away a copy of the still-empty list - a no-op, now removed.)
    for entry in self.feed.entry:
      self.append(Record(content=None, row_entry=entry,
          spreadsheet_key=spreadsheet_key, worksheet_id=worksheet_id,
          database_client=client))

  def GetNext(self):
    """Fetches the next batch of rows in the result set.

    Returns:
      A new RecordResultSet, or None when the feed has no next link.
    """
    next_link = self.feed.GetNextLink()
    if next_link and next_link.href:
      new_feed = self.client._GetSpreadsheetsClient().Get(next_link.href,
          converter=gdata.spreadsheet.SpreadsheetsListFeedFromString)
      return RecordResultSet(new_feed, self.client, self.spreadsheet_key,
          self.worksheet_id)
    return None
class Record(object):
  """Represents one row in a worksheet and provides a dictionary of values.

  Attributes:
    content: dict Maps column headers to the cell text for this row.
  """

  def __init__(self, content=None, row_entry=None, spreadsheet_key=None,
               worksheet_id=None, database_client=None):
    """Constructor for a record.

    Args:
      content: dict of strings Mapping of string values to column names.
      row_entry: gdata.spreadsheet.SpreadsheetsList The Atom entry
          representing this row in the worksheet.
      spreadsheet_key: str The ID of the spreadsheet in which this row
          belongs.
      worksheet_id: str The ID of the worksheet in which this row belongs.
      database_client: DatabaseClient The client which can be used to talk
          the Google Spreadsheets server to edit this row.
    """
    self.entry = row_entry
    self.spreadsheet_key = spreadsheet_key
    self.worksheet_id = worksheet_id
    # The row id is the final path segment of the entry id URL.
    self.row_id = row_entry.id.text.split('/')[-1] if row_entry else None
    self.client = database_client
    self.content = content or {}
    if not content:
      self.ExtractContentFromEntry(row_entry)

  def ExtractContentFromEntry(self, entry):
    """Populates the content dict and row_id from an Atom list entry.

    This method is used in the Record's constructor.

    Args:
      entry: gdata.spreadsheet.SpreadsheetsList The Atom entry
          representing this row in the worksheet.
    """
    self.content = {}
    if not entry:
      return
    self.row_id = entry.id.text.split('/')[-1]
    for label, custom in entry.custom.iteritems():
      self.content[label] = custom.text

  def Push(self):
    """Send the content of the record to spreadsheets to edit the row.

    All items in the content dictionary will be sent. Items which have been
    removed from the content may remain in the row. The content member
    of the record will not be modified so additional fields in the row
    might be absent from this local copy.
    """
    spreadsheets = self.client._GetSpreadsheetsClient()
    self.entry = spreadsheets.UpdateRow(self.entry, self.content)

  def Pull(self):
    """Query Google Spreadsheets to get the latest data from the server.

    Fetches the entry for this row and repopulates the content dictionary
    with the data found in the row.
    """
    if not self.row_id:
      return
    spreadsheets = self.client._GetSpreadsheetsClient()
    self.entry = spreadsheets.GetListFeed(
        self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=self.row_id)
    self.ExtractContentFromEntry(self.entry)

  def Delete(self):
    """Deletes this row from the worksheet."""
    self.client._GetSpreadsheetsClient().DeleteRow(self.entry)
def ConvertStringsToColumnHeaders(proposed_headers):
  """Converts a list of strings to column names which spreadsheets accepts.

  When setting values in a record, the keys which represent column names must
  fit certain rules. They are all lower case, contain no spaces or special
  characters. If two columns have the same name after being sanitized, the
  columns further to the right have _2, _3, _4, etc. appended to them.

  If there are column names which consist of all special characters, or if
  the column header is blank, an obfuscated value will be used for a column
  name. This method does not handle blank column names or column names with
  only special characters.

  Args:
    proposed_headers: list of str Raw first-row cell values.

  Returns:
    list of str Sanitized, de-duplicated column names, in order.
  """
  headers = []
  # Times each sanitized base name has been produced so far. Counting base
  # names (rather than matches within `headers`, as a previous revision
  # did) makes a third duplicate become name_3 instead of a second name_2.
  occurrences = {}
  for input_string in proposed_headers:
    # TODO: probably a more efficient way to do this. Perhaps regex.
    sanitized = input_string.lower().replace('_', '').replace(
        ':', '').replace(' ', '')
    seen = occurrences.get(sanitized, 0)
    occurrences[sanitized] = seen + 1
    if seen > 0:
      headers.append('%s_%i' % (sanitized, seen + 1))
    else:
      headers.append(sanitized)
  return headers
|
philipn/sycamore
|
refs/heads/master
|
Sycamore/action/captcha.py
|
2
|
# -*- coding: utf-8 -*-
"""
Sycamore - captcha action
This action sends and generates CAPTCHAS to test if someone is human
@copyright: 2006 Philip Neustrom <philipn@gmail.com>
@license: GNU GPL, see COPYING for details.
"""
# Imports
import random
import time
import mimetypes
import cStringIO
import tempfile
import os
from Sycamore import config
from Sycamore import wikidb
from Sycamore.Page import Page
# NOTE(review): presumably toggles e-mail based verification; not referenced
# in this module's visible code - confirm usage before changing.
do_email_auth = True
# Action name derived from this module's name (e.g. 'captcha').
actname = __name__.split('.')[-1]
# How long, in seconds, a generated captcha remains valid before expiring.
CAPTCHA_VALID_TIME = 60*2
def send_captcha(page, wikiname, action, auth_code, type):
    """Render a CAPTCHA challenge form on the given page.

    Generates a new image ('png') or audio ('wav') captcha and sends the
    page with an HTML form that posts the user's answer back to `action`,
    including a cross-link to the alternate (audio/image) verification mode.

    @param page: the Page on which to render the challenge.
    @param wikiname: wiki name, echoed back through the form.
    @param action: action name the form posts to.
    @param auth_code: opaque code passed through the round trip.
    @param type: 'png' for image or 'wav' for audio verification.
    """
    captcha = Captcha(page)
    # generate() stores the challenge on `captcha` (id, type, secret) and
    # persists it to the database; its return value is unused.
    generated = captcha.generate(type=type)
    if captcha.type == 'png':
        audio_url = '%s?action=%s&wikiname=%s&code=%s&audio=1' % (
            page.url(), action, wikiname, auth_code)
        msg = ('<p>%s</p><p>What numbers are in the above distorted image?: '
               '<form method="POST" action="%s">'
               '<input type="hidden" name="action" value="%s">'
               '<input type="hidden" name="code" value="%s">'
               '<input type="hidden" name="wikiname" value="%s">'
               '<input type="text" size="13" name="captcha_code"/>'
               '<input type="hidden" name="captcha_id" value="%s"/> '
               '<input type="submit" name="save" value="Save"/>'
               '</form>'
               '<p>Can\'t see the image? '
               '<a href="%s">Do audio verification.</a></p>' %
               (captcha.link(), page.url(), action, auth_code, wikiname,
                captcha.id, audio_url))
        page.send_page(msg=msg)
    elif captcha.type == 'wav':
        image_url = '%s?action=%s&wikiname=%s&code=%s' % (
            page.url(), action, wikiname, auth_code)
        msg = ('<p>%s</p><p>What numbers do you hear in the above sound?: '
               '<form method="POST" action="%s">'
               '<input type="hidden" name="action" value="%s">'
               '<input type="hidden" name="code" value="%s">'
               '<input type="hidden" name="wikiname" value="%s">'
               '<input type="text" size="13" name="captcha_code"/>'
               '<input type="hidden" name="captcha_id" value="%s"/> '
               '<input type="submit" name="save" value="Save"/></form>'
               '<p>Can\'t hear the audio? '
               '<a href="%s">Do image verification.</a></p>' %
               (captcha.link(), page.url(), action, auth_code, wikiname,
                captcha.id, image_url))
        page.send_page(msg=msg)
def generate_audio(word):
    """
    Generates the audio for the provided string word.
    Uses sox, so you must have sox installed for this to work.
    (This distortion method learned from LiveJournal's LJ::Captcha.pm)

    @rtype: string
    @return: string representing the wav audio file
    """
    tmp_dir = tempfile.mkdtemp(prefix="audio_verify")
    # use sox to generate the base speech by combining stored number audio
    args = []
    for number in word:
        args.append(os.path.join(config.data_dir, 'audio',
                                 'speech%s.wav' % number))
    args = [config.sox_location] + args + [os.path.join(tmp_dir, 'tmp.wav')]
    os.spawnl(os.P_WAIT, config.sox_location, *args)
    os.rename(os.path.join(tmp_dir, 'tmp.wav'),
              os.path.join(tmp_dir, 'speech.wav'))
    # do sox distortions
    # Smear the speech with reverb plus an echo.
    os.spawnl(os.P_WAIT, config.sox_location, config.sox_location,
              '-r', '44100', os.path.join(tmp_dir, 'speech.wav'),
              os.path.join(tmp_dir, 'tmp.wav'),
              'reverb', '0.5', '210', '100', '60',
              'echo', '1', '0.7', '100', '0.03', '400', '0.11')
    os.rename(os.path.join(tmp_dir, 'tmp.wav'),
              os.path.join(tmp_dir, 'speech.wav'))
    # Synthesize quiet brown noise (same length as the speech), modulated
    # at a random vibro rate so each captcha sounds different.
    vibro_amount = str(random.randint(3,9))
    os.spawnl(os.P_WAIT, config.sox_location, config.sox_location,
              '-r', '44100', os.path.join(tmp_dir, 'speech.wav'),
              os.path.join(tmp_dir, 'noise.wav'),
              'synth', 'brownnoise', '0',
              'vibro', vibro_amount, '0.8',
              'vol', '0.1')
    # Fade the noise in, then reverse it so the fade lands at the end.
    os.spawnl(os.P_WAIT, config.sox_location, config.sox_location,
              '-r', '44100', os.path.join(tmp_dir, 'noise.wav'),
              os.path.join(tmp_dir, 'tmp.wav'),
              'fade', '0.5')
    os.rename(os.path.join(tmp_dir, 'tmp.wav'),
              os.path.join(tmp_dir, 'noise.wav'))
    os.spawnl(os.P_WAIT, config.sox_location, config.sox_location,
              '-r', '44100', os.path.join(tmp_dir, 'noise.wav'),
              os.path.join(tmp_dir, 'tmp.wav'),
              'reverse')
    os.rename(os.path.join(tmp_dir, 'tmp.wav'),
              os.path.join(tmp_dir, 'noise.wav'))
    # NOTE(review): this step reads speech.wav but its output overwrites
    # noise.wav, discarding the faded/reversed noise built above - confirm
    # this is intended and not a filename mix-up.
    os.spawnl(os.P_WAIT, config.sox_location, config.sox_location,
              '-r', '44100', os.path.join(tmp_dir, 'speech.wav'),
              os.path.join(tmp_dir, 'tmp.wav'),
              'synth', 'brownnoise', '0',
              'fade', '0.5')
    os.rename(os.path.join(tmp_dir, 'tmp.wav'),
              os.path.join(tmp_dir, 'noise.wav'))
    # Mix the boosted speech with the noise bed and resample to 30 kHz.
    os.spawnl(os.P_WAIT, config.sox_location + 'mix',
              config.sox_location + 'mix',
              '-v', '4', os.path.join(tmp_dir, 'speech.wav'),
              os.path.join(tmp_dir, 'noise.wav'),
              '-r', '30000', os.path.join(tmp_dir, 'tmp.wav'))
    os.rename(os.path.join(tmp_dir, 'tmp.wav'),
              os.path.join(tmp_dir, 'speech.wav'))
    os.remove(os.path.join(tmp_dir, 'noise.wav'))
    # Final randomized vibro pass over the mixed result.
    vibro_amount = str(random.randint(3,8))
    vibro_intensity = str(random.uniform(0.5, 0.6))
    os.spawnl(os.P_WAIT, config.sox_location, config.sox_location,
              os.path.join(tmp_dir, 'speech.wav'),
              os.path.join(tmp_dir, 'tmp.wav'),
              'vibro', vibro_amount, vibro_intensity)
    os.rename(os.path.join(tmp_dir, 'tmp.wav'),
              os.path.join(tmp_dir, 'speech.wav'))
    # read in the generated .wav and return it
    f = open(os.path.join(tmp_dir, 'speech.wav'), 'rb')
    audio = ''.join(f.readlines())
    f.close()
    os.remove(os.path.join(tmp_dir, 'speech.wav'))
    os.rmdir(tmp_dir)
    return audio
class Captcha(object):
    """A stored image/audio challenge used to verify a visitor is human.

    Challenges are persisted in the `captchas` database table keyed by a
    generated id, and expire after CAPTCHA_VALID_TIME seconds; see
    generate() and check().
    """
    def __init__(self, page, id=None):
        self.page = page
        self.request = page.request
        self.id = id
        # Binary png/wav blob; set by generate().
        self.human_readable_secret = None
        # 'png' or 'wav'; set by generate().
        self.type = None
    def set_id(self, id):
        # Attach this object to an already-generated captcha row.
        self.id = id
    def _generate_human_readable(self, random_numbers, type='png'):
        """
        Returns a human readable blob of binary-ness to be eventually
        seen by a person, we hope.
        """
        from Sycamore.support import Captcha
        from Sycamore.support.Captcha.Visual.Tests import BWGimpy
        if type == 'png':
            save_imagefile = cStringIO.StringIO()
            g = BWGimpy(word=random_numbers)
            i = g.render(size=(200,80))
            # NOTE(review): `type` here is the lowercase format name 'png';
            # PIL format names are conventionally uppercase - confirm the
            # rendered image object accepts the lowercase form.
            i.save(save_imagefile, type, quality=90)
            image_value = save_imagefile.getvalue()
            image_value = wikidb.dbapi.Binary(image_value)
            return image_value
        elif type == 'wav':
            audio_value = generate_audio(word=random_numbers)
            audio_value = wikidb.dbapi.Binary(audio_value)
            return audio_value
    def check(self, given_secret):
        """
        Does given_secret match the secret in the db associated with
        this captcha?
        Cleans up the captcha from the database afterward, so the
        captcha cannot be used again.
        """
        # Only rows younger than CAPTCHA_VALID_TIME qualify; an expired
        # captcha behaves like a failed check.
        self.request.cursor.execute(
            """SELECT secret from captchas
               where id=%(id)s and written_time > %(timevalid)s""",
            {'id':self.id, 'timevalid':(time.time() - CAPTCHA_VALID_TIME)})
        result = self.request.cursor.fetchone()
        if result:
            secret = result[0]
            if (given_secret == secret):
                # A successful answer consumes the captcha.
                self._clean_up()
                return True
        return False
    def _clean_up(self):
        """
        Removes captcha from database.
        """
        d = {'id':self.id, 'timevalid':(time.time() - CAPTCHA_VALID_TIME)}
        if self.id:
            self.request.cursor.execute(
                "DELETE from captchas where id=%(id)s", d, isWrite=True)
        # decent place to clear out old expired captchas
        self.request.cursor.execute(
            "DELETE from captchas where written_time <= %(timevalid)s",
            d, isWrite=True)
    def generate(self, type='png'):
        """
        Generates the captcha/secret and stores it into the database.
        """
        self.type = type
        # The secret is a 5-digit numeric string.
        random_numbers = []
        for i in xrange(0, 5):
            random_numbers.append(str(random.randint(0,9)))
        random_numbers = ''.join(random_numbers)
        self.human_readable_secret = self._generate_human_readable(
            random_numbers, type=type)
        # fairly unique looking id w/length limited due
        # to machine differences w/time
        id = '%s%s.%s' % (str(time.time()), str(random.random())[0:26], type)
        d = {'id': id, 'written_time': time.time(), 'secret': random_numbers,
             'human_readable_secret': self.human_readable_secret}
        self.request.cursor.execute(
            """INSERT into captchas
               (id, secret, human_readable_secret, written_time)
               values
               (%(id)s, %(secret)s, %(human_readable_secret)s,
                %(written_time)s)""", d, isWrite=True)
        self.id = id
    def url(self):
        """
        Sends the full URL to the captcha.
        """
        return self.page.url(querystr='action=%s&id=%s' % (actname, self.id))
def link(self):
"""
Embeds the captcha.
"""
url = self.url()
if self.type == 'png':
return '<img src="%s"/>' % url
elif self.type == 'wav':
return '<a href="%s">Play the sound</a>' % url
def send_human_readable(id, request):
    """Stream the rendered captcha blob (image or audio) for `id`.

    Looks up a still-valid row in the `captchas` table and writes its
    binary payload straight to the client with a Content-Type guessed
    from the id's extension ('.png' / '.wav' appended by generate()).
    """
    mimetype = None
    request.cursor.execute(
        """SELECT human_readable_secret from captchas
           where id=%(id)s and written_time > %(timevalid)s""",
        {'id':id, 'timevalid': (time.time() - CAPTCHA_VALID_TIME)})
    result = request.cursor.fetchone()
    if not result:
        # Expired or unknown id: plain-text explanation instead of a blob.
        request.http_headers()
        request.write("No captcha for this id? "
                      "The captcha could have expired, so you may want to "
                      "try again.")
        return
    human_readable = wikidb.binaryToString(result[0])
    # The id ends in '.png' or '.wav', so guess_type can infer the type.
    mimetype = mimetypes.guess_type(id)[0]
    if not mimetype:
        mimetype = "application/octet-stream"
    # Binary payload with an explicit Content-Length: disable gzip.
    request.do_gzip = False
    request.http_headers([("Content-Type", mimetype),
                          ("Content-Length", len(human_readable))])
    #output file
    request.write(human_readable, raw=True)
def execute(pagename, request):
    """Action entry point: serve the captcha blob when an id is supplied,
    otherwise fall through to rendering the page itself."""
    page = Page(pagename, request)
    form = request.form
    if 'id' in form:
        captcha_id = form['id'][0]
        return send_human_readable(captcha_id, request)
    return page.send_page(msg=None)
|
mattvonrocketstein/smash
|
refs/heads/master
|
smashlib/ipy3x/utils/daemonize.py
|
1
|
"""daemonize function from twisted.scripts._twistd_unix."""
#-----------------------------------------------------------------------------
# Copyright (c) Twisted Matrix Laboratories.
# See Twisted's LICENSE for details.
# http://twistedmatrix.com/
#-----------------------------------------------------------------------------
import os
import errno
def daemonize():
    """Detach the current process into a daemon (classic double fork).

    First fork + parent exit drops the shell's job control; setsid()
    starts a new session with no controlling terminal; the second fork
    guarantees the daemon can never reacquire one.  Finally fds 0-2 are
    pointed at /dev/null.
    """
    # See http://www.erlenstar.demon.co.uk/unix/faq_toc.html#TOC16
    if os.fork():  # launch child and...
        os._exit(0)  # kill off parent
    os.setsid()
    if os.fork():  # launch child and...
        os._exit(0)  # kill off parent again.
    null = os.open('/dev/null', os.O_RDWR)
    for i in range(3):
        try:
            os.dup2(null, i)
        except OSError as e:
            # EBADF just means fd i wasn't open; anything else is real.
            if e.errno != errno.EBADF:
                raise
    os.close(null)
|
snowballdaemons/MMXIV
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Source frame, output animation, and temp-frame naming scheme.
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)

im_src = Image.open(SRC)

# Mirror the source first; together with the negated rotation below this
# makes the spin appear clockwise in the final animation.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Temp file path for one numbered frame.
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
for frame in xrange(NUMFRAMES):
    # Half-frame offset spreads the rotation evenly over the cycle.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble all frames into the .mng with imagemagick (requires 6.7+).
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
frankyrumple/smc
|
refs/heads/master
|
modules/ecdsa/numbertheory.py
|
29
|
#! /usr/bin/env python
#
# Provide some simple capabilities from number theory.
#
# Version of 2008.11.14.
#
# Written in 2005 and 2006 by Peter Pearson and placed in the public domain.
# Revision history:
# 2008.11.14: Use pow( base, exponent, modulus ) for modular_exp.
# Make gcd and lcm accept arbitrarly many arguments.
from __future__ import division
from .six import print_, integer_types
from .six.moves import reduce
import math
class Error(Exception):
    """Root of the exception hierarchy for this number-theory module."""
class SquareRootError(Error):
    """Raised when no modular square root exists for the given input."""
class NegativeExponentError(Error):
    """Raised when a negative exponent is passed to modular_exp."""
def modular_exp(base, exponent, modulus):
    """Return (base ** exponent) % modulus.

    Delegates to the built-in three-argument pow().  Raises
    NegativeExponentError for exponent < 0; modular inverses are
    handled separately by inverse_mod().
    """
    # Defect fixed: dropped eight lines of dead, commented-out legacy
    # square-and-multiply code that pow() superseded in 2008.
    if exponent < 0:
        raise NegativeExponentError("Negative exponents (%d) not allowed"
                                    % exponent)
    return pow(base, exponent, modulus)
def polynomial_reduce_mod(poly, polymod, p):
    """Reduce poly by polymod, with integer coefficients taken mod p.

    Polynomials are lists of coefficients, lowest power first.
    """
    # Only monic moduli (leading coefficient 1) are supported.
    assert polymod[-1] == 1
    assert len(polymod) > 1
    while len(poly) >= len(polymod):
        top = poly[-1]
        if top != 0:
            # Subtract top * polymod, aligned at the high-order end.
            for k in range(2, len(polymod) + 1):
                poly[-k] = (poly[-k] - top * polymod[-k]) % p
        poly = poly[:-1]
    return poly
def polynomial_multiply_mod(m1, m2, polymod, p):
    """Multiply polynomials m1 and m2 over ints mod p, then reduce the
    product modulo polymod.

    Polynomials are lists of coefficients, lowest power first.
    """
    # Schoolbook multiplication: accumulate every cross term.
    prod = [0] * (len(m1) + len(m2) - 1)
    for i, c1 in enumerate(m1):
        for j, c2 in enumerate(m2):
            prod[i + j] = (prod[i + j] + c1 * c2) % p
    return polynomial_reduce_mod(prod, polymod, p)
def polynomial_exp_mod(base, exponent, polymod, p):
    """Raise polynomial `base` to `exponent`, modulo polymod over ints mod p.

    Square-and-multiply, after HAC algorithm 2.227.  Polynomials are
    lists of coefficients, lowest power first.
    """
    assert exponent < p
    if exponent == 0:
        return [1]
    square = base
    remaining = exponent
    result = square if remaining % 2 == 1 else [1]
    while remaining > 1:
        remaining //= 2
        square = polynomial_multiply_mod(square, square, polymod, p)
        if remaining % 2 == 1:
            result = polynomial_multiply_mod(square, result, polymod, p)
    return result
def jacobi(a, n):
    """Jacobi symbol (a/n) for odd n >= 3 (HAC algorithm 2.149)."""
    assert n >= 3
    assert n % 2 == 1
    a = a % n
    if a == 0:
        return 0
    if a == 1:
        return 1
    # Strip factors of two from a, tracking the sign they contribute.
    a1, e = a, 0
    while a1 % 2 == 0:
        a1 //= 2
        e += 1
    sign = 1 if (e % 2 == 0 or n % 8 in (1, 7)) else -1
    if a1 == 1:
        return sign
    # Quadratic reciprocity for the odd remaining part.
    if n % 4 == 3 and a1 % 4 == 3:
        sign = -sign
    return sign * jacobi(n % a1, a1)
def square_root_mod_prime( a, p ):
    """Modular square root of a, mod p, p prime.

    Based on the Handbook of Applied Cryptography, algorithms 3.34 to
    3.39.  Raises SquareRootError when a is a quadratic non-residue.
    This module has been tested for all values in [0,p-1] for
    every prime p from 3 to 1229.
    """
    assert 0 <= a < p
    assert 1 < p

    if a == 0: return 0
    if p == 2: return a

    jac = jacobi( a, p )
    if jac == -1: raise SquareRootError( "%d has no square root modulo %d" \
          % ( a, p ) )

    # Closed forms exist for p = 3 (mod 4) and p = 5 (mod 8):
    if p % 4 == 3: return modular_exp( a, (p+1)//4, p )

    if p % 8 == 5:
        d = modular_exp( a, (p-1)//4, p )
        if d == 1: return modular_exp( a, (p+3)//8, p )
        if d == p-1: return ( 2 * a * modular_exp( 4*a, (p-5)//8, p ) ) % p
        raise RuntimeError("Shouldn't get here.")

    # General case p = 1 (mod 8): find b with b*b-4*a a non-residue and
    # take x^((p+1)/2) in GF(p)[x]/(x^2 - b*x + a); the constant term is
    # the square root.
    for b in range( 2, p ):
        if jacobi( b*b-4*a, p ) == -1:
            f = ( a, -b, 1 )
            ff = polynomial_exp_mod( ( 0, 1 ), (p+1)//2, f, p )
            assert ff[1] == 0
            return ff[0]
    raise RuntimeError("No b found.")
def inverse_mod(a, m):
    """Return the multiplicative inverse of a modulo m."""
    if a < 0 or m <= a:
        a = a % m
    # Extended Euclid (after Ferguson & Schneier): the invariants
    # uc*a + vc*m == c and ud*a + vd*m == d hold throughout.
    c, d = a, m
    uc, vc, ud, vd = 1, 0, 0, 1
    while c != 0:
        quotient, remainder = divmod(d, c)
        c, d = remainder, c
        uc, vc, ud, vd = ud - quotient * uc, vd - quotient * vc, uc, vc
    # d is now gcd(a, m); it must be 1 for an inverse to exist.
    assert d == 1
    return ud if ud > 0 else ud + m
def gcd2(a, b):
    """Greatest common divisor of two integers, by Euclid's algorithm."""
    while a != 0:
        a, b = b % a, a
    return b
def gcd(*a):
    """Greatest common divisor.

    Usage: gcd( [ 2, 4, 6 ] )
       or: gcd( 2, 4, 6 )
    """
    if len(a) > 1:
        return reduce(gcd2, a)
    first = a[0]
    # A single iterable argument is folded element by element.
    if hasattr(first, "__iter__"):
        return reduce(gcd2, first)
    return first
def lcm2(a, b):
    """Least common multiple of two integers."""
    return (a * b) // gcd(a, b)
def lcm(*a):
    """Least common multiple.

    Usage: lcm( [ 3, 4, 5 ] )
       or: lcm( 3, 4, 5 )
    """
    if len(a) > 1:
        return reduce(lcm2, a)
    first = a[0]
    # A single iterable argument is folded element by element.
    if hasattr(first, "__iter__"):
        return reduce(lcm2, first)
    return first
def factorization( n ):
    """Decompose n into a list of (prime,exponent) pairs."""

    assert isinstance( n, integer_types )

    if n < 2: return []

    result = []
    d = 2

    # Trial-divide by every precomputed small prime first.
    # Test the small primes:
    for d in smallprimes:
        if d > n: break
        q, r = divmod( n, d )
        if r == 0:
            # d divides n: strip out every power of d, counting them.
            count = 1
            while d <= n:
                n = q
                q, r = divmod( n, d )
                if r != 0: break
                count = count + 1
            result.append( ( d, count ) )

    # If n is still greater than the last of our small primes,
    # it may require further work:
    if n > smallprimes[-1]:
        if is_prime( n ):   # If what's left is prime, it's easy:
            result.append( ( n, 1 ) )
        else:               # Ugh. Search stupidly for a divisor:
            d = smallprimes[-1]
            while 1:
                d = d + 2            # Try the next divisor.
                q, r = divmod( n, d )
                if q < d: break      # n < d*d means we're done, n = 1 or prime.
                if r == 0:           # d divides n. How many times?
                    count = 1
                    n = q
                    while d <= n:    # As long as d might still divide n,
                        q, r = divmod( n, d ) # see if it does.
                        if r != 0: break
                        n = q        # It does. Reduce n, increase count.
                        count = count + 1
                    result.append( ( d, count ) )
            if n > 1: result.append( ( n, 1 ) )

    return result
def phi(n):
    """Return the Euler totient function of n."""
    assert isinstance(n, integer_types)
    if n < 3:
        return 1
    # Multiply the totient contribution of each prime power p**e:
    # p**(e-1) * (p - 1).
    result = 1
    for prime, exponent in factorization(n):
        if exponent > 1:
            result *= prime ** (exponent - 1) * (prime - 1)
        else:
            result *= prime - 1
    return result
def carmichael(n):
    """Return Carmichael function of n.

    Carmichael(n) is the smallest integer x such that
    m**x = 1 mod n for all m relatively prime to n.
    """
    return carmichael_of_factorized(factorization(n))
def carmichael_of_factorized(f_list):
    """Carmichael function of a number given as (prime, exponent) pairs."""
    if not f_list:
        return 1
    # lcm of the Carmichael values of each prime-power component.
    result = carmichael_of_ppower(f_list[0])
    for pp in f_list[1:]:
        result = lcm(result, carmichael_of_ppower(pp))
    return result
def carmichael_of_ppower(pp):
    """Carmichael function of a prime power, given as the pair (p, a)."""
    p, a = pp
    # 2**a with a > 2 is the one non-cyclic case; otherwise the
    # multiplicative group mod p**a is cyclic of order (p-1)*p**(a-1).
    if p == 2 and a > 2:
        return 2 ** (a - 2)
    return (p - 1) * p ** (a - 1)
def order_mod(x, m):
    """Return the order of x in the multiplicative group mod m.

    Warning: brute-force; slow when m is very large.
    """
    if m <= 1:
        return 0
    assert gcd(x, m) == 1
    # Multiply by x until the running power wraps back to 1.
    power = x
    result = 1
    while power != 1:
        power = (power * x) % m
        result += 1
    return result
def largest_factor_relatively_prime(a, b):
    """Return the largest factor of a relatively prime to b."""
    d = gcd(a, b)
    while d > 1:
        b = d
        # Divide every copy of d out of a.
        q, r = divmod(a, d)
        while r == 0:
            a = q
            q, r = divmod(a, d)
        d = gcd(a, b)
    return a
def kinda_order_mod(x, m):
    """Return the order of x in the multiplicative group mod m',
    where m' is the largest factor of m relatively prime to x.
    """
    return order_mod(x, largest_factor_relatively_prime(m, x))
def is_prime( n ):
    """Return True if x is prime, False otherwise.

    We use the Miller-Rabin test, as given in Menezes et al. p. 138.
    This test is not exact: there are composite values n for which
    it returns True.

    In testing the odd numbers from 10000001 to 19999999,
    about 66 composites got past the first test,
    5 got past the second test, and none got past the third.
    Since factors of 2, 3, 5, 7, and 11 were detected during
    preliminary screening, the number of numbers tested by
    Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
    = 4.57 million.
    """

    # (This is used to study the risk of false positives:)
    global miller_rabin_test_count

    miller_rabin_test_count = 0

    # Exact answer for anything within the precomputed prime table.
    if n <= smallprimes[-1]:
        if n in smallprimes: return True
        else: return False

    # Cheap screen: any factor shared with 2*3*5*7*11 means composite.
    if gcd( n, 2*3*5*7*11 ) != 1: return False

    # Choose a number of iterations sufficient to reduce the
    # probability of accepting a composite below 2**-80
    # (from Menezes et al. Table 4.4):

    t = 40
    n_bits = 1 + int( math.log( n, 2 ) )
    for k, tt in ( ( 100, 27 ),
                   ( 150, 18 ),
                   ( 200, 15 ),
                   ( 250, 12 ),
                   ( 300,  9 ),
                   ( 350,  8 ),
                   ( 400,  7 ),
                   ( 450,  6 ),
                   ( 550,  5 ),
                   ( 650,  4 ),
                   ( 850,  3 ),
                   ( 1300, 2 ),
                 ):
        if n_bits < k: break
        t = tt

    # Run the test t times:

    # Write n-1 as 2**s * r with r odd.
    s = 0
    r = n - 1
    while ( r % 2 ) == 0:
        s = s + 1
        r = r // 2
    for i in range( t ):
        # Witnesses are the first t small primes (deterministic choice).
        a = smallprimes[ i ]
        y = modular_exp( a, r, n )
        if y != 1 and y != n-1:
            j = 1
            while j <= s - 1 and y != n - 1:
                y = modular_exp( y, 2, n )
                if y == 1:
                    miller_rabin_test_count = i + 1
                    return False
                j = j + 1
            if y != n-1:
                miller_rabin_test_count = i + 1
                return False
    return True
def next_prime(starting_value):
    """Return the smallest prime larger than the starting value."""
    if starting_value < 2:
        return 2
    # Begin at the first odd number above starting_value, stepping by 2.
    candidate = (starting_value + 1) | 1
    while not is_prime(candidate):
        candidate += 2
    return candidate
# All primes below 1230, used for trial division in factorization() and
# as the fixed witness set for Miller-Rabin in is_prime().
smallprimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
               43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
               101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
               151, 157, 163, 167, 173, 179, 181, 191, 193, 197,
               199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
               263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
               317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
               383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
               443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
               503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
               577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
               641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
               701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
               769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
               839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
               911, 919, 929, 937, 941, 947, 953, 967, 971, 977,
               983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033,
               1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,
               1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163,
               1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229]

# Diagnostic counter set by is_prime(): which Miller-Rabin round caught
# the last composite (see the "risk of false positives" note there).
miller_rabin_test_count = 0
def __main__():
    """Self-test harness: exercises gcd/lcm, next_prime,
    square_root_mod_prime, jacobi and inverse_mod, tallying failures and
    raising at the end if any were found.
    """

    # Making sure locally defined exceptions work:
    # p = modular_exp( 2, -2, 3 )
    # p = square_root_mod_prime( 2, 3 )

    print_("Testing gcd...")
    assert gcd( 3*5*7, 3*5*11, 3*5*13 )     == 3*5
    assert gcd( [ 3*5*7, 3*5*11, 3*5*13 ] ) == 3*5
    assert gcd( 3 ) == 3

    print_("Testing lcm...")
    assert lcm( 3, 5*3, 7*3 )     == 3*5*7
    assert lcm( [ 3, 5*3, 7*3 ] ) == 3*5*7
    assert lcm( 3 ) == 3

    print_("Testing next_prime...")
    bigprimes = ( 999671,
                  999683,
                  999721,
                  999727,
                  999749,
                  999763,
                  999769,
                  999773,
                  999809,
                  999853,
                  999863,
                  999883,
                  999907,
                  999917,
                  999931,
                  999953,
                  999959,
                  999961,
                  999979,
                  999983 )

    for i in range( len( bigprimes ) - 1 ):
        assert next_prime( bigprimes[i] ) == bigprimes[ i+1 ]

    error_tally = 0

    # Test the square_root_mod_prime function:
    for p in smallprimes:
        print_("Testing square_root_mod_prime for modulus p = %d." % p)
        squares = []

        # Every square must round-trip through square_root_mod_prime...
        for root in range( 0, 1+p//2 ):
            sq = ( root * root ) % p
            squares.append( sq )
            calculated = square_root_mod_prime( sq, p )
            if ( calculated * calculated ) % p != sq:
                error_tally = error_tally + 1
                print_("Failed to find %d as sqrt( %d ) mod %d. Said %d." % \
                      ( root, sq, p, calculated ))

        # ...and every non-square must raise SquareRootError.
        for nonsquare in range( 0, p ):
            if nonsquare not in squares:
                try:
                    calculated = square_root_mod_prime( nonsquare, p )
                except SquareRootError:
                    pass
                else:
                    error_tally = error_tally + 1
                    print_("Failed to report no root for sqrt( %d ) mod %d." % \
                          ( nonsquare, p ))

    # Test the jacobi function:
    for m in range( 3, 400, 2 ):
        print_("Testing jacobi for modulus m = %d." % m)
        if is_prime( m ):
            # Prime modulus: jacobi is the Legendre symbol.
            squares = []
            for root in range( 1, m ):
                if jacobi( root * root, m ) != 1:
                    error_tally = error_tally + 1
                    print_("jacobi( %d * %d, %d ) != 1" % ( root, root, m ))
                squares.append( root * root % m )
            for i in range( 1, m ):
                if not i in squares:
                    if jacobi( i, m ) != -1:
                        error_tally = error_tally + 1
                        print_("jacobi( %d, %d ) != -1" % ( i, m ))
        else:       # m is not prime.
            # Composite modulus: jacobi must equal the product of the
            # symbols over m's prime factors.
            f = factorization( m )
            for a in range( 1, m ):
                c = 1
                for i in f:
                    c = c * jacobi( a, i[0] ) ** i[1]
                if c != jacobi( a, m ):
                    error_tally = error_tally + 1
                    print_("%d != jacobi( %d, %d )" % ( c, a, m ))

    # Test the inverse_mod function:
    print_("Testing inverse_mod . . .")
    import random
    n_tests = 0
    for i in range( 100 ):
        m = random.randint( 20, 10000 )
        for j in range( 100 ):
            a = random.randint( 1, m-1 )
            if gcd( a, m ) == 1:
                n_tests = n_tests + 1
                inv = inverse_mod( a, m )
                if inv <= 0 or inv >= m or ( a * inv ) % m != 1:
                    error_tally = error_tally + 1
                    print_("%d = inverse_mod( %d, %d ) is wrong." % ( inv, a, m ))
    assert n_tests > 1000
    print_(n_tests, " tests of inverse_mod completed.")

    class FailedTest(Exception): pass
    print_(error_tally, "errors detected.")
    if error_tally != 0:
        raise FailedTest("%d errors detected" % error_tally)

if __name__ == '__main__':
    __main__()
|
NCI-Cloud/horizon
|
refs/heads/nci/kilo
|
openstack_dashboard/dashboards/admin/metadata_defs/tests.py
|
29
|
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.metadata_defs \
import constants
from openstack_dashboard.test import helpers as test
class MetadataDefinitionsView(test.BaseAdminViewTests):
    """Admin-panel tests for the glance metadata-definitions index,
    detail, and resource-type management views.

    Glance calls are mox record/replay stubs: each view must issue
    exactly the recorded calls, in order, with the recorded arguments.
    """

    def test_namespace_object(self):
        # The Namespace wrapper should expose description, a `public`
        # flag derived from visibility, and flattened resource-type names.
        mock = self.mox.CreateMockAnything()
        mock.name = 'hello'
        mock.description = 'world'
        mock.visibility = 'public'
        mock.resource_type_associations = [{'name': 'sample'}]
        namespace = api.glance.Namespace(mock)
        self.assertEqual('world', namespace.description)
        self.assertTrue(namespace.public)
        self.assertEqual('sample', namespace.resource_type_names[0])

    @test.create_stubs({api.glance: ('metadefs_namespace_list',)})
    def test_metadata_defs_list(self):
        # Index view renders one table row per returned namespace.
        namespace_list = self.metadata_defs.list()
        api.glance.metadefs_namespace_list(
            IsA(http.HttpRequest),
            sort_dir='asc',
            marker=None,
            paginate=True).AndReturn((namespace_list, False, False))
        self.mox.ReplayAll()

        res = self.client.get(reverse(constants.METADATA_INDEX_URL))
        self.assertTemplateUsed(res, constants.METADATA_INDEX_TEMPLATE)
        self.assertEqual(len(res.context['namespaces_table'].data),
                         len(namespace_list))

    @test.create_stubs({api.glance: ('metadefs_namespace_list',)})
    def test_metadata_defs_no_results(self):
        # An empty namespace list still renders the index template.
        api.glance.metadefs_namespace_list(
            IsA(http.HttpRequest),
            sort_dir='asc',
            marker=None,
            paginate=True).AndReturn(((), False, False))
        self.mox.ReplayAll()

        res = self.client.get(reverse(constants.METADATA_INDEX_URL))
        self.assertTemplateUsed(res, constants.METADATA_INDEX_TEMPLATE)
        self.assertEqual(len(res.context['namespaces_table'].data), 0)

    @test.create_stubs({api.glance: ('metadefs_namespace_list',)})
    def test_metadata_defs_error(self):
        # A glance error is handled gracefully: index still renders.
        api.glance.metadefs_namespace_list(
            IsA(http.HttpRequest),
            sort_dir='asc',
            marker=None,
            paginate=True).AndRaise(self.exceptions.glance)
        self.mox.ReplayAll()

        res = self.client.get(reverse(constants.METADATA_INDEX_URL))
        self.assertTemplateUsed(res, constants.METADATA_INDEX_TEMPLATE)

    @test.create_stubs({api.glance: ('metadefs_namespace_list',)})
    def test_delete_availability(self):
        # Each row should offer 'delete' and 'manage_resource_types'.
        namespace_list = self.metadata_defs.list()
        api.glance.metadefs_namespace_list(
            IsA(http.HttpRequest),
            sort_dir='asc',
            marker=None,
            paginate=True).AndReturn((namespace_list, False, False))
        self.mox.ReplayAll()

        res = self.client.get(reverse(constants.METADATA_INDEX_URL))
        self.assertIn('namespaces_table', res.context)
        ns_table = res.context['namespaces_table']
        namespaces = ns_table.data

        for i in [1, 2]:
            row_actions = ns_table.get_row_actions(namespaces[i])
            # NOTE(review): assertTrue(len(...), 2) does not compare --
            # the second arg is just the failure message; this was
            # probably meant to be assertEqual(len(row_actions), 2).
            self.assertTrue(len(row_actions), 2)
            self.assertTrue('delete' in
                            [a.name for a in row_actions])
            self.assertTrue('manage_resource_types' in
                            [a.name for a in row_actions])

    @test.create_stubs({api.glance: ('metadefs_namespace_get',)})
    def test_metadata_defs_get(self):
        namespace = self.metadata_defs.first()
        api.glance.metadefs_namespace_get(
            IsA(http.HttpRequest),
            '1',
            wrap=True
        ).MultipleTimes().AndReturn(namespace)
        self.mox.ReplayAll()

        res = self.client.get(reverse(constants.METADATA_DETAIL_URL,
                                      kwargs={'namespace_id': '1'}))
        self.assertNoFormErrors(res)
        self.assertTemplateUsed(res, constants.METADATA_DETAIL_TEMPLATE)

    @test.create_stubs({api.glance: ('metadefs_namespace_get',)})
    def test_metadata_defs_get_contents(self):
        # Detail view, 'contents' tab selected via querystring.
        namespace = self.metadata_defs.first()
        api.glance.metadefs_namespace_get(
            IsA(http.HttpRequest),
            '1',
            wrap=True
        ).MultipleTimes().AndReturn(namespace)
        self.mox.ReplayAll()

        res = self.client.get(
            '?'.join([reverse(constants.METADATA_DETAIL_URL,
                              kwargs={'namespace_id': '1'}),
                      '='.join(['tab', 'namespace_details__contents'])]))
        self.assertNoFormErrors(res)
        self.assertTemplateUsed(res, constants.METADATA_DETAIL_TEMPLATE)

    @test.create_stubs({api.glance: ('metadefs_namespace_get',)})
    def test_metadata_defs_get_overview(self):
        # Detail view, 'overview' tab selected via querystring.
        namespace = self.metadata_defs.first()
        api.glance.metadefs_namespace_get(
            IsA(http.HttpRequest),
            '1',
            wrap=True
        ).MultipleTimes().AndReturn(namespace)
        self.mox.ReplayAll()

        res = self.client.get(
            '?'.join([reverse(constants.METADATA_DETAIL_URL,
                              kwargs={'namespace_id': '1'}),
                      '='.join(['tab', 'namespace_details__overview'])]))
        self.assertNoFormErrors(res)
        self.assertTemplateUsed(res, constants.METADATA_DETAIL_TEMPLATE)

    @test.create_stubs({api.glance: ('metadefs_resource_types_list',
                                     'metadefs_namespace_resource_types')})
    def test_metadata_defs_manage_resource_types(self):
        namespace = self.metadata_defs.first()
        api.glance.metadefs_namespace_resource_types(
            IsA(http.HttpRequest),
            '1'
        ).AndReturn(namespace.resource_type_associations)
        api.glance.metadefs_resource_types_list(
            IsA(http.HttpRequest)
        ).AndReturn(namespace.resource_type_associations)
        self.mox.ReplayAll()

        res = self.client.get(
            reverse(constants.METADATA_MANAGE_RESOURCES_URL,
                    kwargs={'id': '1'}))
        self.assertTemplateUsed(res,
                                constants.METADATA_MANAGE_RESOURCES_TEMPLATE)
        self.assertContains(res, 'mock name')

    @test.create_stubs({api.glance: ('metadefs_namespace_resource_types',
                                     'metadefs_namespace_remove_resource_type',
                                     'metadefs_namespace_add_resource_type')})
    def test_metadata_defs_manage_resource_types_change(self):
        # Saving the form removes all current associations, then re-adds
        # only the selected ones ('selected': True), stripped of that key.
        resource_type_associations = [
            {
                'prefix': 'mock1_prefix',
                'name': 'mock1'
            },
            {
                'prefix': 'mock2_prefix',
                'name': 'mock2',
                'selected': True
            }
        ]

        api.glance.metadefs_namespace_resource_types(
            IsA(http.HttpRequest),
            '1'
        ).AndReturn(resource_type_associations)
        api.glance.metadefs_namespace_remove_resource_type(
            IsA(http.HttpRequest),
            '1',
            'mock1'
        ).AndReturn(resource_type_associations)
        api.glance.metadefs_namespace_remove_resource_type(
            IsA(http.HttpRequest),
            '1',
            'mock2'
        ).AndReturn(resource_type_associations)
        api.glance.metadefs_namespace_add_resource_type(
            IsA(http.HttpRequest),
            '1',
            {
                'prefix': 'mock2_prefix',
                'name': 'mock2'
            }
        ).AndReturn(resource_type_associations)
        self.mox.ReplayAll()

        form_data = {'resource_types': json.dumps(resource_type_associations)}
        res = self.client.post(
            reverse(constants.METADATA_MANAGE_RESOURCES_URL,
                    kwargs={'id': '1'}),
            form_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(
            res, reverse(constants.METADATA_INDEX_URL)
        )
class MetadataDefinitionsCreateViewTest(test.BaseAdminViewTests):
    """Tests for the namespace-create form: valid raw JSON, invalid
    JSON, and empty input paths.
    """

    def test_admin_metadata_defs_create_namespace_get(self):
        res = self.client.get(reverse(constants.METADATA_CREATE_URL))
        self.assertTemplateUsed(res, constants.METADATA_CREATE_TEMPLATE)

    @test.create_stubs({api.glance: ('metadefs_namespace_create',)})
    def test_admin_metadata_defs_create_namespace_post(self):
        # Valid JSON pasted as 'raw' input is forwarded to glance as-is.
        metadata = {}
        metadata["namespace"] = "test_namespace"
        metadata["display_name"] = "display_name"
        metadata["description"] = "description"
        metadata["visibility"] = "private"
        metadata["protected"] = False
        api.glance.metadefs_namespace_create(
            IsA(http.HttpRequest),
            metadata
        ).AndReturn(metadata)
        self.mox.ReplayAll()

        form_data = {
            'source_type': 'raw',
            'direct_input': json.dumps(metadata)
        }
        res = self.client.post(reverse(constants.METADATA_CREATE_URL),
                               form_data)
        self.assertNoFormErrors(res)

    def test_admin_metadata_defs_create_namespace_invalid_json_post_raw(self):
        # Malformed JSON surfaces the parser error as a form error.
        form_data = {
            'source_type': 'raw',
            'direct_input': 'invalidjson'
        }
        res = self.client.post(reverse(constants.METADATA_CREATE_URL),
                               form_data)
        self.assertFormError(res, "form", None, ['There was a problem loading '
                                                 'the namespace: No JSON '
                                                 'object could be decoded.'])

    def test_admin_metadata_defs_create_namespace_empty_json_post_raw(self):
        form_data = {
            'source_type': 'raw',
            'direct_input': ''
        }
        res = self.client.post(reverse(constants.METADATA_CREATE_URL),
                               form_data)
        self.assertFormError(res, "form", None, ['No input was provided for '
                                                 'the namespace content.'])

    def test_admin_metadata_defs_create_namespace_empty_json_post_file(self):
        # NOTE(review): despite the name, this posts source_type 'raw'
        # (identical to the test above), not a file upload.
        form_data = {
            'source_type': 'raw',
            'direct_input': ''
        }
        res = self.client.post(reverse(constants.METADATA_CREATE_URL),
                               form_data)
        self.assertFormError(res, "form", None, ['No input was provided for '
                                                 'the namespace content.'])
|
litnimax/astconfman
|
refs/heads/master
|
astconfman/config.py
|
1
|
# *-* encoding: utf-8 *-*
import os
from flask_babelex import lazy_gettext as _
# Default Language. Currently only 'ru' and 'en' are supported.
LANGUAGE = 'en'

# Put here some random string
SECRET_KEY = 'change_me_here_to_random_key'

# BRAND_NAV - this defines the string on the right top navigation bar
BRAND_NAV = u'Asterisk Conference Manager'

# BRAND_FOOTER - put here your company info
BRAND_FOOTER = _(u"""(C) 2015 Asterisk Guru | <a href="http://asteriskguru.ru/">www.asteriskguru.ru</a> | Professional Asterisk support & development services.""")

# BRAND_LOGO - replace logo.png or change here url to your own logo
BRAND_LOGO = 'static/logo.png'

# URL to redirect when clicked on LOGO. Put here '#' if redirect is not required.
BRAND_LOGO_URL = 'http://www.pbxware.ru/'

# ASTERISK_IPADDR - IP Address of Asterisk server. All other requests will be denied.
ASTERISK_IPADDR = '127.0.0.1'

# LISTEN_ADDRESS - Interfaces to bind to. '0.0.0.0' for all interfaces.
LISTEN_ADDRESS = '127.0.0.1'

# LISTEN_PORT - Port to listen on.
LISTEN_PORT = 5000

# Always leave DEBUG=False in production. DEBUG=True is a security hole as it
# allows the execution of arbitrary Python code. Be warned!
DEBUG = False

# SQLALCHEMY_ECHO - prints SQL statements.
SQLALCHEMY_ECHO = False

# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#database-urls
DATABASE_FILE = os.path.join(os.path.dirname(__file__), 'astconfman.db')
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_FILE

# Flask-WTF / Flask-Security settings.
WTF_CSRF_ENABLED = True
SECURITY_REGISTERABLE = False
SECURITY_RECOVERABLE = False
SECURITY_SEND_PASSWORD_CHANGE_EMAIL = False
SECURITY_USER_IDENTITY_ATTRIBUTES = 'username'
SECURITY_PASSWORD_HASH = 'sha512_crypt'
# Change the salt (together with SECRET_KEY) in your instance config.
SECURITY_PASSWORD_SALT = 'bla-bla-bla'

# Asterisk
ASTERISK_SPOOL_DIR = '/var/spool/asterisk/outgoing/'
ASTERISK_MONITOR_DIR = '/var/spool/asterisk/monitor/'
ASTERISK_EXECUTABLE = '/usr/sbin/asterisk'
ASTERISK_SSH_ENABLED = False
ASTERISK_SSH_PORT = '22'
ASTERISK_SSH_HOST = 'localhost'
ASTERISK_SSH_USER = 'asterisk'
ASTERISK_SSH_KEY = 'ssh-rsa AAAAB3NzaC1yc2EA...'  # Put your key in instance config

# You can remove any tab by adding it here.
DISABLED_TABS = []

# Callout template.  Asterisk call-file body with %(number)s, %(name)s
# and %(confnum)s placeholders filled in per participant elsewhere.
CALLOUT_TEMPLATE = """Channel: Local/%(number)s@confman-dialout
Context: confman-bridge
Extension: %(confnum)s
Priority: 1
MaxRetries: 0
RetryTime: 15
WaitTime: 300
Set: participant_name=%(name)s
Set: participant_number=%(number)s
Set: conf_number=%(confnum)s
"""
|
alviano/wasp
|
refs/heads/master
|
tests/asp/cautious/bug1.asp.cautious.asp.test.py
|
1
|
# Ground program fed to the solver under test -- appears to be the
# numeric lparse/smodels format (rules, then symbol table, then B+/B-
# assumption blocks and the requested model count); confirm against the
# test harness that consumes `input`.
input = """
1 2 2 1 3 4
1 3 2 1 2 4
1 4 0 0
1 2 2 1 5 6
1 5 2 1 2 6
1 6 0 0
1 2 2 1 7 8
1 7 2 1 2 8
1 8 0 0
1 2 2 1 9 10
1 9 2 1 2 10
1 10 0 0
1 2 2 1 11 12
1 11 2 1 2 12
1 12 0 0
1 2 2 1 13 14
1 13 2 1 2 14
1 14 0 0
1 2 2 1 15 16
1 15 2 1 2 16
1 16 0 0
1 2 2 1 17 18
1 17 2 1 2 18
1 18 0 0
1 2 2 1 19 20
1 19 2 1 2 20
1 20 0 0
1 21 2 1 22 23
1 22 2 1 21 23
1 23 0 0
1 21 2 1 24 25
1 24 2 1 21 25
1 25 0 0
1 21 2 1 26 27
1 26 2 1 21 27
1 27 0 0
1 21 2 1 28 29
1 28 2 1 21 29
1 29 0 0
1 21 2 1 30 31
1 30 2 1 21 31
1 31 0 0
1 21 2 1 32 33
1 32 2 1 21 33
1 33 0 0
1 34 2 0 2 21
1 35 2 0 2 21
1 1 2 0 34 35
1 1 2 1 35 34
1 1 2 1 34 35
1 1 2 2 34 35
0
34 a
15 c6
24 e1
32 e5
2 c
9 c3
13 c5
3 d
22 f
30 e4
11 c4
5 c1
21 e
28 e3
35 b
19 c8
17 c7
26 e2
7 c2
0
B+
0
B-
1
0
1
"""

# Expected solver output: the program has no answer sets.
output = """
INCOHERENT
"""
|
ashwyn/eden-message_parser
|
refs/heads/master
|
static/scripts/tools/import_gadm.py
|
37
|
# Script to import all countries in Asia Pacific (except Timor Leste, for which we use the UN dataset)
#
# run as python web2py.py -S eden -M -R applications/eden/static/scripts/tools/import_gadm.py
#
import time
# Wall-clock start time, used to report total runtime at the end.
secs = time.mktime(time.localtime())

# Asia Pacific less TL
countries = [ "AF", "AU", "BD", "BN", "CK", "CN", "FJ", "FM", "HK", "ID", "IN", "JP", "KH", "KI", "KP", "KR", "LA", "MH", "MM", "MN", "MV", "MY", "NP", "NZ", "PG", "PH", "PK", "PW", "SB", "SG", "SL", "TH", "TO", "TV", "TW", "VN", "VU", "WS"]

# `gis` and `db` come from the web2py/Eden shell environment this script
# is run under (see the run instructions in the header comment).
gis.import_admin_areas(countries=countries)
db.commit()
print "Total Time: %s" % (time.mktime(time.localtime()) - secs)
|
robcarver17/pysystemtrade
|
refs/heads/master
|
sysbrokers/IB/ib_instruments_data.py
|
1
|
import pandas as pd
from sysbrokers.IB.ib_instruments import NOT_REQUIRED_FOR_IB, ibInstrumentConfigData, futuresInstrumentWithIBConfigData
from sysbrokers.IB.ib_connection import connectionIB
from sysbrokers.broker_instrument_data import brokerFuturesInstrumentData
from syscore.fileutils import get_filename_for_package
from syscore.genutils import value_or_npnan
from syscore.objects import missing_instrument, missing_file
from sysobjects.instruments import futuresInstrument
from syslogdiag.log_to_screen import logtoscreen
# Location of the csv mapping instrument codes to IB contract details,
# resolved relative to the installed sysbrokers package.
IB_FUTURES_CONFIG_FILE = get_filename_for_package(
    "sysbrokers.IB.ib_config_futures.csv")
class IBconfig(pd.DataFrame):
    """Marker subclass of ``pandas.DataFrame`` holding the IB futures
    instrument configuration table (one row per instrument)."""
def read_ib_config_from_file() -> IBconfig:
    """Load the IB futures instrument configuration .csv and wrap it as an IBconfig."""
    return IBconfig(pd.read_csv(IB_FUTURES_CONFIG_FILE))
class ibFuturesInstrumentData(brokerFuturesInstrumentData):
    """
    Extends the baseData object to a data source that reads in and writes prices for specific futures contracts
    This gets HISTORIC data from interactive brokers. It is blocking code
    In a live production system it is suitable for running on a daily basis to get end of day prices
    """
    def __init__(self, ibconnection: connectionIB, log=logtoscreen("ibFuturesContractData")):
        """Store the IB connection; logging defaults to screen output."""
        super().__init__(log=log)
        self._ibconnection = ibconnection
    def __repr__(self):
        return "IB Futures per contract data %s" % str(self.ibconnection)
    @property
    def ibconnection(self) -> connectionIB:
        # Read-only access to the connection supplied at construction.
        return self._ibconnection
    def get_brokers_instrument_code(self, instrument_code:str) -> str:
        """Translate our instrument code into the IB (broker) symbol."""
        futures_instrument_with_ib_data = self.get_futures_instrument_object_with_IB_data(instrument_code)
        return futures_instrument_with_ib_data.broker_symbol
    def get_instrument_code_from_broker_code(self, ib_code: str) -> str:
        """Translate an IB symbol back into our instrument code.

        Raises if the symbol is missing from, or duplicated in, the config file.
        """
        config = self._get_ib_config()
        config_row = config[config.IBSymbol == ib_code]
        if len(config_row) == 0:
            msg = "Broker symbol %s not found in configuration file!" % ib_code
            self.log.critical(msg)
            raise Exception(msg)
        if len(config_row) > 1:
            # The mapping must be one-to-one for a reverse lookup to be safe.
            msg = (
                "Broker symbol %s appears more than once in configuration file!" %
                ib_code)
            self.log.critical(msg)
            raise Exception(msg)
        return config_row.iloc[0].Instrument
    def _get_instrument_data_without_checking(self, instrument_code: str):
        # Hook required by the brokerFuturesInstrumentData interface.
        return self.get_futures_instrument_object_with_IB_data(instrument_code)
    def get_futures_instrument_object_with_IB_data(self, instrument_code:str) ->futuresInstrumentWithIBConfigData:
        """Return the instrument with its IB config attached, or
        ``missing_instrument`` if it is unknown or the config file is absent."""
        new_log = self.log.setup(instrument_code=instrument_code)
        try:
            assert instrument_code in self.get_list_of_instruments()
        except:
            new_log.warn(
                "Instrument %s is not in IB configuration file" %
                instrument_code)
            return missing_instrument
        config = self._get_ib_config()
        if config is missing_file:
            new_log.warn(
                "Can't get config for instrument %s as IB configuration file missing" %
                instrument_code)
            return missing_instrument
        instrument_object = get_instrument_object_from_config(
            instrument_code, config=config
        )
        return instrument_object
    def get_list_of_instruments(self) -> list:
        """
        Get instruments that have price data
        Pulls these in from a config file
        :return: list of str (empty if the config file is missing)
        """
        config = self._get_ib_config()
        if config is missing_file:
            self.log.warn(
                "Can't get list of instruments because IB config file missing"
            )
            return []
        instrument_list = list(config.Instrument)
        return instrument_list
    # Configuration read in and cache
    def _get_ib_config(self) -> IBconfig:
        # Lazily loaded on first use and cached as self._config
        # (may also hold the missing_file sentinel -- see below).
        config = getattr(self, "_config", None)
        if config is None:
            config = self._get_and_set_ib_config_from_file()
        return config
    def _get_and_set_ib_config_from_file(self) -> IBconfig:
        """Read the config .csv; on any failure cache and return ``missing_file``."""
        try:
            config_data = read_ib_config_from_file()
        except BaseException:
            self.log.warn("Can't read file %s" % IB_FUTURES_CONFIG_FILE)
            config_data = missing_file
        self._config = config_data
        return config_data
    def _delete_instrument_data_without_any_warning_be_careful(self,
                                            instrument_code: str):
        # Config is sourced from a .csv file: programmatic deletion is unsupported.
        raise NotImplementedError("IB instrument config is read only - manually edit .csv file %s" % IB_FUTURES_CONFIG_FILE)
    def _add_instrument_data_without_checking_for_existing_entry(
        self, instrument_object
    ):
        # Config is sourced from a .csv file: programmatic addition is unsupported.
        raise NotImplementedError(
            "IB instrument config is read only - manually edit .csv file %s" % IB_FUTURES_CONFIG_FILE)
def get_instrument_object_from_config(instrument_code: str,
                      config: IBconfig=None) ->futuresInstrumentWithIBConfigData:
    """Build a futuresInstrumentWithIBConfigData for *instrument_code*
    from the given config table (loaded from file when not supplied)."""
    if config is None:
        config = read_ib_config_from_file()

    row = config[config.Instrument == instrument_code]

    def cell(column):
        # First (and only expected) value in this instrument's row.
        return row[column].values[0]

    ib_symbol = cell("IBSymbol")
    ib_exchange = cell("IBExchange")
    ib_currency = value_or_npnan(cell("IBCurrency"), NOT_REQUIRED_FOR_IB)
    ib_multiplier = value_or_npnan(cell("IBMultiplier"), NOT_REQUIRED_FOR_IB)
    my_multiplier = value_or_npnan(cell("MyMultiplier"), 1.0)
    ignore_weekly = cell("IgnoreWeekly")

    # We use the flexibility of futuresInstrument to add additional arguments
    ib_data = ibInstrumentConfigData(ib_symbol, ib_exchange, currency=ib_currency,
                                     ibMultiplier=ib_multiplier,
                                     myMultiplier=my_multiplier,
                                     ignoreWeekly=ignore_weekly
                                     )
    return futuresInstrumentWithIBConfigData(futuresInstrument(instrument_code), ib_data)
|
xbianonpi/Sick-Beard-TPB
|
refs/heads/ThePirateBay
|
lib/unidecode/x01e.py
|
246
|
data = (
'A', # 0x00
'a', # 0x01
'B', # 0x02
'b', # 0x03
'B', # 0x04
'b', # 0x05
'B', # 0x06
'b', # 0x07
'C', # 0x08
'c', # 0x09
'D', # 0x0a
'd', # 0x0b
'D', # 0x0c
'd', # 0x0d
'D', # 0x0e
'd', # 0x0f
'D', # 0x10
'd', # 0x11
'D', # 0x12
'd', # 0x13
'E', # 0x14
'e', # 0x15
'E', # 0x16
'e', # 0x17
'E', # 0x18
'e', # 0x19
'E', # 0x1a
'e', # 0x1b
'E', # 0x1c
'e', # 0x1d
'F', # 0x1e
'f', # 0x1f
'G', # 0x20
'g', # 0x21
'H', # 0x22
'h', # 0x23
'H', # 0x24
'h', # 0x25
'H', # 0x26
'h', # 0x27
'H', # 0x28
'h', # 0x29
'H', # 0x2a
'h', # 0x2b
'I', # 0x2c
'i', # 0x2d
'I', # 0x2e
'i', # 0x2f
'K', # 0x30
'k', # 0x31
'K', # 0x32
'k', # 0x33
'K', # 0x34
'k', # 0x35
'L', # 0x36
'l', # 0x37
'L', # 0x38
'l', # 0x39
'L', # 0x3a
'l', # 0x3b
'L', # 0x3c
'l', # 0x3d
'M', # 0x3e
'm', # 0x3f
'M', # 0x40
'm', # 0x41
'M', # 0x42
'm', # 0x43
'N', # 0x44
'n', # 0x45
'N', # 0x46
'n', # 0x47
'N', # 0x48
'n', # 0x49
'N', # 0x4a
'n', # 0x4b
'O', # 0x4c
'o', # 0x4d
'O', # 0x4e
'o', # 0x4f
'O', # 0x50
'o', # 0x51
'O', # 0x52
'o', # 0x53
'P', # 0x54
'p', # 0x55
'P', # 0x56
'p', # 0x57
'R', # 0x58
'r', # 0x59
'R', # 0x5a
'r', # 0x5b
'R', # 0x5c
'r', # 0x5d
'R', # 0x5e
'r', # 0x5f
'S', # 0x60
's', # 0x61
'S', # 0x62
's', # 0x63
'S', # 0x64
's', # 0x65
'S', # 0x66
's', # 0x67
'S', # 0x68
's', # 0x69
'T', # 0x6a
't', # 0x6b
'T', # 0x6c
't', # 0x6d
'T', # 0x6e
't', # 0x6f
'T', # 0x70
't', # 0x71
'U', # 0x72
'u', # 0x73
'U', # 0x74
'u', # 0x75
'U', # 0x76
'u', # 0x77
'U', # 0x78
'u', # 0x79
'U', # 0x7a
'u', # 0x7b
'V', # 0x7c
'v', # 0x7d
'V', # 0x7e
'v', # 0x7f
'W', # 0x80
'w', # 0x81
'W', # 0x82
'w', # 0x83
'W', # 0x84
'w', # 0x85
'W', # 0x86
'w', # 0x87
'W', # 0x88
'w', # 0x89
'X', # 0x8a
'x', # 0x8b
'X', # 0x8c
'x', # 0x8d
'Y', # 0x8e
'y', # 0x8f
'Z', # 0x90
'z', # 0x91
'Z', # 0x92
'z', # 0x93
'Z', # 0x94
'z', # 0x95
'h', # 0x96
't', # 0x97
'w', # 0x98
'y', # 0x99
'a', # 0x9a
'S', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'Ss', # 0x9e
'[?]', # 0x9f
'A', # 0xa0
'a', # 0xa1
'A', # 0xa2
'a', # 0xa3
'A', # 0xa4
'a', # 0xa5
'A', # 0xa6
'a', # 0xa7
'A', # 0xa8
'a', # 0xa9
'A', # 0xaa
'a', # 0xab
'A', # 0xac
'a', # 0xad
'A', # 0xae
'a', # 0xaf
'A', # 0xb0
'a', # 0xb1
'A', # 0xb2
'a', # 0xb3
'A', # 0xb4
'a', # 0xb5
'A', # 0xb6
'a', # 0xb7
'E', # 0xb8
'e', # 0xb9
'E', # 0xba
'e', # 0xbb
'E', # 0xbc
'e', # 0xbd
'E', # 0xbe
'e', # 0xbf
'E', # 0xc0
'e', # 0xc1
'E', # 0xc2
'e', # 0xc3
'E', # 0xc4
'e', # 0xc5
'E', # 0xc6
'e', # 0xc7
'I', # 0xc8
'i', # 0xc9
'I', # 0xca
'i', # 0xcb
'O', # 0xcc
'o', # 0xcd
'O', # 0xce
'o', # 0xcf
'O', # 0xd0
'o', # 0xd1
'O', # 0xd2
'o', # 0xd3
'O', # 0xd4
'o', # 0xd5
'O', # 0xd6
'o', # 0xd7
'O', # 0xd8
'o', # 0xd9
'O', # 0xda
'o', # 0xdb
'O', # 0xdc
'o', # 0xdd
'O', # 0xde
'o', # 0xdf
'O', # 0xe0
'o', # 0xe1
'O', # 0xe2
'o', # 0xe3
'U', # 0xe4
'u', # 0xe5
'U', # 0xe6
'u', # 0xe7
'U', # 0xe8
'u', # 0xe9
'U', # 0xea
'u', # 0xeb
'U', # 0xec
'u', # 0xed
'U', # 0xee
'u', # 0xef
'U', # 0xf0
'u', # 0xf1
'Y', # 0xf2
'y', # 0xf3
'Y', # 0xf4
'y', # 0xf5
'Y', # 0xf6
'y', # 0xf7
'Y', # 0xf8
'y', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
dipapaspyros/bdo_platform
|
refs/heads/master
|
bdo_platform/settings_management/keys_example.py
|
2
|
# AWS id/access key for s3
# Example placeholders only -- copy this file and fill in real values from
# your deployment's secret store; never commit actual credentials.
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
|
acshi/osf.io
|
refs/heads/develop
|
addons/mendeley/routes.py
|
10
|
# -*- coding: utf-8 -*-
from framework.routing import Rule, json_renderer
from addons.mendeley.views import mendeley_views
# Route table consumed by the routing framework; every endpoint renders JSON
# and is mounted under the '/api/v1' prefix.
api_routes = {
    'rules': [
        # List the Mendeley accounts linked to the current user.
        Rule(
            [
                '/settings/mendeley/accounts/',
            ],
            'get',
            mendeley_views.account_list(),
            json_renderer,
        ),
        # Read the addon configuration for a project (or component) node.
        Rule(
            [
                '/project/<pid>/mendeley/settings/',
                '/project/<pid>/node/<nid>/mendeley/settings/',
            ],
            'get',
            mendeley_views.get_config(),
            json_renderer,
        ),
        # Update the addon configuration for a node.
        Rule(
            [
                '/project/<pid>/mendeley/settings/',
                '/project/<pid>/node/<nid>/mendeley/settings/',
            ],
            'put',
            mendeley_views.set_config(),
            json_renderer,
        ),
        # Import the current user's Mendeley credentials into the node.
        Rule(
            [
                '/project/<pid>/mendeley/user_auth/',
                '/project/<pid>/node/<nid>/mendeley/user_auth/',
            ],
            'put',
            mendeley_views.import_auth(),
            json_renderer,
        ),
        # Remove the node's Mendeley authorization.
        Rule(
            [
                '/project/<pid>/mendeley/user_auth/',
                '/project/<pid>/node/<nid>/mendeley/user_auth/',
            ],
            'delete',
            mendeley_views.deauthorize_node(),
            json_renderer,
        ),
        # Serve the widget configuration shown on the project dashboard.
        Rule(
            [
                '/project/<pid>/mendeley/widget/',
                '/project/<pid>/node/<nid>/mendeley/widget/',
            ],
            'get',
            mendeley_views.widget(),
            json_renderer,
        ),
        # List citations, optionally scoped to a specific Mendeley list.
        Rule(
            [
                '/project/<pid>/mendeley/citations/',
                '/project/<pid>/node/<nid>/mendeley/citations/',
                '/project/<pid>/mendeley/citations/<list_id>/',
                '/project/<pid>/node/<nid>/mendeley/citations/<list_id>/',
            ],
            'get',
            mendeley_views.citation_list(),
            json_renderer,
        ),
    ],
    'prefix': '/api/v1',
}
|
ff94315/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/distutils/command/build_clib.py
|
176
|
"""distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
__revision__ = "$Id$"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os
from distutils.core import Command
from distutils.errors import DistutilsSetupError
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers():
    """Print the list of compiler types distutils knows about.

    Used as the callback for the '--help-compiler' option.
    """
    from distutils.ccompiler import show_compilers as _show_compilers
    _show_compilers()
class build_clib(Command):
    """Build C/C++ libraries used by Python extensions.

    Libraries are described by the distribution's ``libraries`` option:
    a list of ``(library_name, build_info_dict)`` 2-tuples.
    """

    description = "build C/C++ libraries used by Python extensions"

    user_options = [
        ('build-clib=', 'b',
         "directory to build C/C++ libraries to"),
        ('build-temp=', 't',
         "directory to put temporary build by-products"),
        ('debug', 'g',
         "compile with debugging information"),
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ('compiler=', 'c',
         "specify the compiler type"),
        ]

    boolean_options = ['debug', 'force']

    help_options = [
        ('help-compiler', None,
         "list available compilers", show_compilers),
        ]

    def initialize_options(self):
        """Set every option to its pre-finalization default."""
        self.build_clib = None
        self.build_temp = None

        # List of libraries to build
        self.libraries = None

        # Compilation options for all libraries
        self.include_dirs = None
        self.define = None
        self.undef = None
        self.debug = None
        self.force = 0
        self.compiler = None

    def finalize_options(self):
        """Derive unset options from the parent 'build' command."""
        # This might be confusing: both build-clib and build-temp default
        # to build-temp as defined by the "build" command. This is because
        # I think that C libraries are really just temporary build
        # by-products, at least from the point of view of building Python
        # extensions -- but I want to keep my options open.
        self.set_undefined_options('build',
                                   ('build_temp', 'build_clib'),
                                   ('build_temp', 'build_temp'),
                                   ('compiler', 'compiler'),
                                   ('debug', 'debug'),
                                   ('force', 'force'))

        self.libraries = self.distribution.libraries
        if self.libraries:
            self.check_library_list(self.libraries)

        if self.include_dirs is None:
            self.include_dirs = self.distribution.include_dirs or []
        if isinstance(self.include_dirs, str):
            self.include_dirs = self.include_dirs.split(os.pathsep)

        # XXX same as for build_ext -- what about 'self.define' and
        # 'self.undef' ?

    def run(self):
        """Set up a compiler and build every configured library."""
        if not self.libraries:
            return

        # Yech -- this is cut 'n pasted from build_ext.py!
        from distutils.ccompiler import new_compiler
        self.compiler = new_compiler(compiler=self.compiler,
                                     dry_run=self.dry_run,
                                     force=self.force)
        customize_compiler(self.compiler)

        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name, value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)

        self.build_libraries(self.libraries)

    def check_library_list(self, libraries):
        """Ensure that the list of libraries is valid.

        `library` is presumably provided as a command option 'libraries'.
        This method checks that it is a list of 2-tuples, where the tuples
        are (library_name, build_info_dict).

        Raise DistutilsSetupError if the structure is invalid anywhere;
        just returns otherwise.
        """
        # 'raise Exc(msg)' (used throughout) is valid on both Python 2 and
        # Python 3, unlike the old 'raise Exc, msg' statement form.
        if not isinstance(libraries, list):
            raise DistutilsSetupError(
                "'libraries' option must be a list of tuples")

        for lib in libraries:
            # Was 'and', which only rejected values that were BOTH non-tuple
            # and wrongly sized; 'or' enforces the documented 2-tuple contract.
            if not isinstance(lib, tuple) or len(lib) != 2:
                raise DistutilsSetupError(
                    "each element of 'libraries' must be a 2-tuple")

            name, build_info = lib

            if not isinstance(name, str):
                raise DistutilsSetupError(
                    "first element of each tuple in 'libraries' "
                    "must be a string (the library name)")
            if '/' in name or (os.sep != '/' and os.sep in name):
                raise DistutilsSetupError(
                    ("bad library name '%s': "
                     "may not contain directory separators") % name)

            if not isinstance(build_info, dict):
                raise DistutilsSetupError(
                    "second element of each tuple in 'libraries' "
                    "must be a dictionary (build info)")

    def get_library_names(self):
        """Return the configured library names, or None if there are none."""
        # Assume the library list is valid -- 'check_library_list()' is
        # called from 'finalize_options()', so it should be!
        if not self.libraries:
            return None
        return [lib_name for (lib_name, build_info) in self.libraries]

    def get_source_files(self):
        """Return every source filename across all configured libraries."""
        self.check_library_list(self.libraries)
        filenames = []
        for (lib_name, build_info) in self.libraries:
            sources = build_info.get('sources')
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    ("in 'libraries' option (library '%s'), "
                     "'sources' must be present and must be "
                     "a list of source filenames") % lib_name)
            filenames.extend(sources)
        return filenames

    def build_libraries(self, libraries):
        """Compile each library's sources and archive them into a static lib."""
        for (lib_name, build_info) in libraries:
            sources = build_info.get('sources')
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    ("in 'libraries' option (library '%s'), "
                     "'sources' must be present and must be "
                     "a list of source filenames") % lib_name)
            sources = list(sources)

            log.info("building '%s' library", lib_name)

            # First, compile the source code to object files in the library
            # directory. (This should probably change to putting object
            # files in a temporary build directory.)
            macros = build_info.get('macros')
            include_dirs = build_info.get('include_dirs')
            objects = self.compiler.compile(sources,
                                            output_dir=self.build_temp,
                                            macros=macros,
                                            include_dirs=include_dirs,
                                            debug=self.debug)

            # Now "link" the object files together into a static library.
            # (On Unix at least, this isn't really linking -- it just
            # builds an archive. Whatever.)
            self.compiler.create_static_lib(objects, lib_name,
                                            output_dir=self.build_clib,
                                            debug=self.debug)
|
Y-oHr-N/kenchi
|
refs/heads/master
|
kenchi/outlier_detection/ensemble.py
|
1
|
from sklearn.ensemble import IsolationForest
from sklearn.utils.validation import check_is_fitted
from .base import BaseOutlierDetector
__all__ = ['IForest']
class IForest(BaseOutlierDetector):
    """Isolation forest (iForest).
    Parameters
    ----------
    bootstrap : bool, False
        If True, individual trees are fit on random subsets of the training
        data sampled with replacement. If False, sampling without replacement
        is performed.
    contamination : float, default 'auto'
        Proportion of outliers in the data set. Used to define the threshold.
    max_features : int or float, default 1.0
        Number of features to draw from X to train each base estimator.
    max_samples : int ,float or str, default 'auto'
        Number of samples to draw from X to train each base estimator.
    n_estimators : int, default 100
        Number of base estimators in the ensemble.
    n_jobs : int
        Number of jobs to run in parallel. If -1, then the number of jobs is
        set to the number of CPU cores.
    random_state : int or RandomState instance, default None
        Seed of the pseudo random number generator.
    Attributes
    ----------
    anomaly_score_ : array-like of shape (n_samples,)
        Anomaly score for each training data.
    contamination_ : float
        Actual proportion of outliers in the data set.
    threshold_ : float
        Threshold.
    References
    ----------
    .. [#liu08] Liu, F. T., Ting, K. M., and Zhou, Z.-H.,
        "Isolation forest,"
        In Proceedings of ICDM, pp. 413-422, 2008.
    Examples
    --------
    >>> import numpy as np
    >>> from kenchi.outlier_detection import IForest
    >>> X = np.array([
    ...     [0., 0.], [1., 1.], [2., 0.], [3., -1.], [4., 0.],
    ...     [5., 1.], [6., 0.], [7., -1.], [8., 0.], [1000., 1.]
    ... ])
    >>> det = IForest(random_state=0)
    >>> det.fit_predict(X)
    array([ 1,  1,  1,  1,  1,  1,  1,  1,  1, -1])
    """
    @property
    def estimators_(self):
        """list: Collection of fitted sub-estimators.
        """
        return self.estimator_.estimators_
    @property
    def estimators_samples_(self):
        """int: Subset of drawn samples for each base estimator.
        """
        return self.estimator_.estimators_samples_
    @property
    def max_samples_(self):
        """int: Actual number of samples.
        """
        return self.estimator_.max_samples_
    def __init__(
        self, bootstrap=False, contamination='auto', max_features=1.0,
        max_samples='auto', n_estimators=100, n_jobs=1, random_state=None
    ):
        # Parameters are stored verbatim (sklearn convention); the wrapped
        # IsolationForest is built lazily in _fit.
        self.bootstrap = bootstrap
        self.contamination = contamination
        self.max_features = max_features
        self.max_samples = max_samples
        self.n_estimators = n_estimators
        self.n_jobs = n_jobs
        self.random_state = random_state
    def _check_is_fitted(self):
        """Raise unless the wrapped IsolationForest has been fitted."""
        super()._check_is_fitted()
        check_is_fitted(
            self, ['estimators_', 'estimators_samples_', 'max_samples_']
        )
    def _get_threshold(self):
        # Negated so it lives on the same scale as _anomaly_score below,
        # where larger values mean more anomalous.
        return -self.estimator_.offset_
    def _fit(self, X):
        # NOTE(review): behaviour='new' is only accepted by scikit-learn
        # 0.20-0.23 (deprecated in 0.22, removed in 0.24) -- confirm the
        # pinned scikit-learn version before upgrading.
        self.estimator_ = IsolationForest(
            behaviour = 'new',
            bootstrap = self.bootstrap,
            contamination = self.contamination,
            max_features = self.max_features,
            max_samples = self.max_samples,
            n_estimators = self.n_estimators,
            n_jobs = self.n_jobs,
            random_state = self.random_state
        ).fit(X)
        return self
    def _anomaly_score(self, X):
        # Flip sklearn's score_samples (higher = more normal) so that
        # higher scores mean more anomalous.
        return -self.estimator_.score_samples(X)
|
turbokongen/home-assistant
|
refs/heads/dev
|
homeassistant/components/nest/camera_sdm.py
|
2
|
"""Support for Google Nest SDM Cameras."""
import datetime
import logging
from typing import Optional
from google_nest_sdm.camera_traits import (
CameraEventImageTrait,
CameraImageTrait,
CameraLiveStreamTrait,
)
from google_nest_sdm.device import Device
from google_nest_sdm.exceptions import GoogleNestException
from haffmpeg.tools import IMAGE_JPEG
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.components.ffmpeg import async_get_image
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import utcnow
from .const import DATA_SUBSCRIBER, DOMAIN
from .device_info import DeviceInfo
_LOGGER = logging.getLogger(__name__)
# Used to schedule an alarm to refresh the stream before expiration
STREAM_EXPIRATION_BUFFER = datetime.timedelta(seconds=30)
async def async_setup_sdm_entry(
    hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the cameras.

    Creates one NestCamera per SDM device that exposes either a still-image
    or a live-stream trait.
    """
    subscriber = hass.data[DOMAIN][DATA_SUBSCRIBER]
    try:
        device_manager = await subscriber.async_get_device_manager()
    except GoogleNestException as err:
        # Let Home Assistant retry the platform setup later.
        raise PlatformNotReady from err
    # Fetch initial data so we have data when entities subscribe.
    entities = []
    for device in device_manager.devices.values():
        if (
            CameraImageTrait.NAME in device.traits
            or CameraLiveStreamTrait.NAME in device.traits
        ):
            entities.append(NestCamera(device))
    async_add_entities(entities)
class NestCamera(Camera):
    """Devices that support cameras."""
    def __init__(self, device: Device):
        """Initialize the camera."""
        super().__init__()
        self._device = device
        self._device_info = DeviceInfo(device)
        # RTSP stream object plus the unsubscribe callback for its refresh alarm.
        self._stream = None
        self._stream_refresh_unsub = None
        # Cache of most recent event image
        self._event_id = None
        self._event_image_bytes = None
        self._event_image_cleanup_unsub = None
    @property
    def should_poll(self) -> bool:
        """Disable polling since entities have state pushed via pubsub."""
        return False
    @property
    def unique_id(self) -> Optional[str]:
        """Return a unique ID."""
        # The API "name" field is a unique device identifier.
        return f"{self._device.name}-camera"
    @property
    def name(self):
        """Return the name of the camera."""
        return self._device_info.device_name
    @property
    def device_info(self):
        """Return device specific attributes."""
        return self._device_info.device_info
    @property
    def brand(self):
        """Return the camera brand."""
        return self._device_info.device_brand
    @property
    def model(self):
        """Return the camera model."""
        return self._device_info.device_model
    @property
    def supported_features(self):
        """Flag supported features."""
        supported_features = 0
        if CameraLiveStreamTrait.NAME in self._device.traits:
            supported_features |= SUPPORT_STREAM
        return supported_features
    async def stream_source(self):
        """Return the source of the stream."""
        if CameraLiveStreamTrait.NAME not in self._device.traits:
            return None
        trait = self._device.traits[CameraLiveStreamTrait.NAME]
        if not self._stream:
            _LOGGER.debug("Fetching stream url")
            self._stream = await trait.generate_rtsp_stream()
            # Stream tokens are short-lived; arrange to extend before expiry.
            self._schedule_stream_refresh()
        if self._stream.expires_at < utcnow():
            _LOGGER.warning("Stream already expired")
        return self._stream.rtsp_stream_url
    def _schedule_stream_refresh(self):
        """Schedules an alarm to refresh the stream url before expiration."""
        _LOGGER.debug("New stream url expires at %s", self._stream.expires_at)
        refresh_time = self._stream.expires_at - STREAM_EXPIRATION_BUFFER
        # Schedule an alarm to extend the stream
        if self._stream_refresh_unsub is not None:
            self._stream_refresh_unsub()
        self._stream_refresh_unsub = async_track_point_in_utc_time(
            self.hass,
            self._handle_stream_refresh,
            refresh_time,
        )
    async def _handle_stream_refresh(self, now):
        """Alarm that fires to check if the stream should be refreshed."""
        if not self._stream:
            return
        _LOGGER.debug("Extending stream url")
        try:
            self._stream = await self._stream.extend_rtsp_stream()
        except GoogleNestException as err:
            _LOGGER.debug("Failed to extend stream: %s", err)
            # Next attempt to catch a url will get a new one
            self._stream = None
            return
        # Update the stream worker with the latest valid url
        if self.stream:
            self.stream.update_source(self._stream.rtsp_stream_url)
        self._schedule_stream_refresh()
    async def async_will_remove_from_hass(self):
        """Invalidates the RTSP token when unloaded."""
        if self._stream:
            _LOGGER.debug("Invalidating stream")
            await self._stream.stop_rtsp_stream()
        if self._stream_refresh_unsub:
            self._stream_refresh_unsub()
        # Drop the cached event image and cancel its cleanup alarm.
        self._event_id = None
        self._event_image_bytes = None
        if self._event_image_cleanup_unsub is not None:
            self._event_image_cleanup_unsub()
    async def async_added_to_hass(self):
        """Run when entity is added to register update signal handler."""
        self.async_on_remove(
            self._device.add_update_listener(self.async_write_ha_state)
        )
    async def async_camera_image(self):
        """Return bytes of camera image."""
        # Returns the snapshot of the last event for ~30 seconds after the event
        active_event_image = await self._async_active_event_image()
        if active_event_image:
            return active_event_image
        # Fetch still image from the live stream
        stream_url = await self.stream_source()
        if not stream_url:
            return None
        return await async_get_image(self.hass, stream_url, output_format=IMAGE_JPEG)
    async def _async_active_event_image(self):
        """Return image from any active events happening."""
        if CameraEventImageTrait.NAME not in self._device.traits:
            return None
        trait = self._device.active_event_trait
        if not trait:
            return None
        # Reuse image bytes if they have already been fetched
        event = trait.last_event
        if self._event_id is not None and self._event_id == event.event_id:
            return self._event_image_bytes
        _LOGGER.debug("Generating event image URL for event_id %s", event.event_id)
        image_bytes = await self._async_fetch_active_event_image(trait)
        if image_bytes is None:
            return None
        # Cache the bytes until the event's own expiry time.
        self._event_id = event.event_id
        self._event_image_bytes = image_bytes
        self._schedule_event_image_cleanup(event.expires_at)
        return image_bytes
    async def _async_fetch_active_event_image(self, trait):
        """Return image bytes for an active event, or None on any API error."""
        try:
            event_image = await trait.generate_active_event_image()
        except GoogleNestException as err:
            _LOGGER.debug("Unable to generate event image URL: %s", err)
            return None
        if not event_image:
            return None
        try:
            return await event_image.contents()
        except GoogleNestException as err:
            _LOGGER.debug("Unable to fetch event image: %s", err)
            return None
    def _schedule_event_image_cleanup(self, point_in_time):
        """Schedules an alarm to remove the image bytes from memory, honoring expiration."""
        if self._event_image_cleanup_unsub is not None:
            self._event_image_cleanup_unsub()
        self._event_image_cleanup_unsub = async_track_point_in_utc_time(
            self.hass,
            self._handle_event_image_cleanup,
            point_in_time,
        )
    def _handle_event_image_cleanup(self, now):
        """Clear images cached from events and scheduled callback."""
        self._event_id = None
        self._event_image_bytes = None
        self._event_image_cleanup_unsub = None
|
TedaLIEz/sentry
|
refs/heads/master
|
src/sentry/plugins/base/structs.py
|
30
|
"""
sentry.plugins.base.structs
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ('Annotation', 'Notification')
import warnings
class Annotation(object):
    """A plugin-supplied annotation: a label, optionally with a link and a
    longer description."""

    __slots__ = ['label', 'url', 'description']

    def __init__(self, label, url=None, description=None):
        """Capture the annotation fields; only *label* is mandatory."""
        for slot, value in zip(self.__slots__, (label, url, description)):
            setattr(self, slot, value)
class Notification(object):
    """An event bundled with the alert rule(s) that triggered it.

    Note: 'rule' must NOT appear in __slots__ -- a slot named 'rule' conflicts
    with the ``rule`` property defined below, and CPython rejects the class
    with ``ValueError: 'rule' in __slots__ conflicts with class variable``.
    """
    __slots__ = ['event', 'rules']

    def __init__(self, event, rule=None, rules=None):
        """Store the event and normalize the legacy ``rule`` kwarg into ``rules``."""
        if rule and not rules:
            rules = [rule]
        self.event = event
        self.rules = rules or []

    @property
    def rule(self):
        """Deprecated single-rule accessor; use ``rules`` instead."""
        warnings.warn('Notification.rule is deprecated. Switch to Notification.rules.',
                      DeprecationWarning)
        return self.rules[0]
|
CyanogenMod/android_external_chromium-trace
|
refs/heads/cm-10.2
|
trace-viewer/third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py
|
135
|
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for ClosurizedNamespacesInfo."""
import unittest as googletest
from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable-msg=C6409
TokenType = javascripttokens.JavaScriptTokenType
class ClosurizedNamespacesInfoTest(googletest.TestCase):
"""Tests for ClosurizedNamespacesInfo."""
_test_cases = {
'goog.global.anything': None,
'package.CONSTANT': 'package',
'package.methodName': 'package',
'package.subpackage.methodName': 'package.subpackage',
'package.subpackage.methodName.apply': 'package.subpackage',
'package.ClassName.something': 'package.ClassName',
'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
'package.ClassName.CONSTANT': 'package.ClassName',
'package.namespace.CONSTANT.methodName': 'package.namespace',
'package.ClassName.inherits': 'package.ClassName',
'package.ClassName.apply': 'package.ClassName',
'package.ClassName.methodName.apply': 'package.ClassName',
'package.ClassName.methodName.call': 'package.ClassName',
'package.ClassName.prototype.methodName': 'package.ClassName',
'package.ClassName.privateMethod_': 'package.ClassName',
'package.className.privateProperty_': 'package.className',
'package.className.privateProperty_.methodName': 'package.className',
'package.ClassName.PrivateEnum_': 'package.ClassName',
'package.ClassName.prototype.methodName.apply': 'package.ClassName',
'package.ClassName.property.subProperty': 'package.ClassName',
'package.className.prototype.something.somethingElse': 'package.className'
}
_tokenizer = javascripttokenizer.JavaScriptTokenizer()
def testGetClosurizedNamespace(self):
"""Tests that the correct namespace is returned for various identifiers."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'], ignored_extra_namespaces=[])
for identifier, expected_namespace in self._test_cases.items():
actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
self.assertEqual(
expected_namespace,
actual_namespace,
'expected namespace "' + str(expected_namespace) +
'" for identifier "' + str(identifier) + '" but was "' +
str(actual_namespace) + '"')
def testIgnoredExtraNamespaces(self):
"""Tests that ignored_extra_namespaces are ignored."""
token = self._GetRequireTokens('package.Something')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'],
ignored_extra_namespaces=['package.Something'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should be valid since it is in ignored namespaces.')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be invalid since it is not in ignored namespaces.')
def testIsExtraProvide_created(self):
"""Tests that provides for created namespaces are not extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_createdIdentifier(self):
"""Tests that provides for created identifiers are not extra."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_notCreated(self):
"""Tests that provides for non-created namespaces are extra."""
input_lines = ['goog.provide(\'package.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is not created.')
def testIsExtraProvide_duplicate(self):
"""Tests that providing a namespace twice makes the second one extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
# Advance to the second goog.provide token.
token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is already provided.')
def testIsExtraProvide_notClosurized(self):
"""Tests that provides of non-closurized namespaces are not extra."""
input_lines = ['goog.provide(\'notclosurized.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_used(self):
"""Tests that requires for used namespaces are not extra."""
input_lines = [
'goog.require(\'package.Foo\');',
'var x = package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is used.')
def testIsExtraRequire_usedIdentifier(self):
"""Tests that requires for used methods on classes are extra."""
input_lines = [
'goog.require(\'package.Foo.methodName\');',
'var x = package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should require the package, not the method specifically.')
def testIsExtraRequire_notUsed(self):
"""Tests that requires for unused namespaces are extra."""
input_lines = ['goog.require(\'package.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be extra since it is not used.')
def testIsExtraRequire_notClosurized(self):
"""Tests that requires of non-closurized namespaces are not extra."""
input_lines = ['goog.require(\'notclosurized.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_objectOnClass(self):
"""Tests that requiring an object on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'The whole class, not the object, should be required.');
def testIsExtraRequire_constantOnClass(self):
"""Tests that requiring a constant on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.CONSTANT\');',
'var x = package.Foo.CONSTANT',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'The class, not the constant, should be required.');
def testIsExtraRequire_constantNotOnClass(self):
"""Tests that requiring a constant not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.CONSTANT\');',
'var x = package.subpackage.CONSTANT',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Constants can be required except on classes.');
def testIsExtraRequire_methodNotOnClass(self):
"""Tests that requiring a method not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.method\');',
'var x = package.subpackage.method()',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Methods can be required except on classes.');
def testIsExtraRequire_defaults(self):
"""Tests that there are no warnings about extra requires for test utils"""
input_lines = ['goog.require(\'goog.testing.jsunit\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['goog'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is for testing.')
def testGetMissingProvides_provided(self):
"""Tests that provided functions don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedIdentifier(self):
"""Tests that provided identifiers don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedParentIdentifier(self):
"""Tests that provided identifiers on a class don't cause a missing provide
on objects attached to that class."""
input_lines = [
'goog.provide(\'package.foo.ClassName\');',
'package.foo.ClassName.methodName = function() {};',
'package.foo.ClassName.ObjectName = 1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_unprovided(self):
"""Tests that unprovided functions cause a missing provide."""
input_lines = ['package.Foo = function() {};']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingProvides()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingProvides())
def testGetMissingProvides_privatefunction(self):
"""Tests that unprovided private functions don't cause a missing provide."""
input_lines = ['package.Foo_ = function() {};']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_required(self):
"""Tests that required namespaces don't cause a missing provide."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_required(self):
"""Tests that required namespaces don't cause a missing require."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_requiredIdentifier(self):
"""Tests that required namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_requiredParentClass(self):
"""Tests that requiring a parent class of an object is sufficient to prevent
a missing require on that object."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();',
'package.Foo.methodName(package.Foo.ObjectName);'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_unrequired(self):
"""Tests that unrequired namespaces cause a missing require."""
input_lines = ['package.Foo();']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
def testGetMissingRequires_provided(self):
"""Tests that provided namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_created(self):
"""Tests that created namespaces do not satisfy usage of an identifier."""
input_lines = [
'package.Foo = function();',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
def testGetMissingRequires_createdIdentifier(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.methodName = function();',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_objectOnClass(self):
"""Tests that we should require a class, not the object on the class."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()),
'The whole class, not the object, should be required.');
def testIsFirstProvide(self):
"""Tests operation of the isFirstProvide method."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsFirstProvide(token))
def testGetWholeIdentifierString(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.',
' veryLong.',
' identifier;'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo([], [])
self.assertEquals('package.Foo.veryLong.identifier',
namespaces_info._GetWholeIdentifierString(token))
self.assertEquals(None,
namespaces_info._GetWholeIdentifierString(token.next))
def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
ignored_extra_namespaces):
"""Returns a namespaces info initialized with the given token stream."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=closurized_namespaces,
ignored_extra_namespaces=ignored_extra_namespaces)
state_tracker = javascriptstatetracker.JavaScriptStateTracker()
while token:
namespaces_info.ProcessToken(token, state_tracker)
token = token.next
return namespaces_info
def _GetProvideTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
def _GetRequireTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
# Run all test cases in this module when executed directly.
if __name__ == '__main__':
  googletest.main()
|
dalou/django-templatetags-bundle
|
refs/heads/master
|
setup.py
|
1
|
import ast
import os
import sys
import codecs
import subprocess
from fnmatch import fnmatchcase
from distutils.util import convert_path
from setuptools import setup, find_packages
def find_version(*parts):
    """Return the package version string.

    Regenerates ``templatetags_bundle/version.py`` from the latest git tag
    when running inside a git checkout; otherwise (e.g. installing from an
    sdist without git) falls back to the previously generated version.py.
    """
    try:
        version_py = os.path.join(os.path.dirname(__file__), 'templatetags_bundle/version.py')
        version_git = subprocess.check_output(["git", "tag"]).rstrip().splitlines()[-1]
        version_msg = "# Do not edit this file, pipeline versioning is governed by git tags" + os.linesep + "# following PEP 386"
        # ``with`` closes the handle deterministically (the original leaked
        # the file object returned by open()).
        with open(version_py, 'wb') as version_file:
            version_file.write(version_msg + os.linesep + '__version__ = "%s"' % version_git)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate.  Any failure here simply means "not a git
        # checkout"; keep the existing version.py.
        pass
    from templatetags_bundle.version import __version__
    return "{ver}".format(ver=__version__)
def read(*parts):
    """Read and return the UTF-8 text of a file relative to this script."""
    path = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(path, encoding='utf-8') as handle:
        return handle.read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
# NOTE(review): these exclusion tuples are not referenced anywhere else in
# this file; presumably kept for external consumers -- confirm before removing.
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
                                './dist', 'EGG-INFO', '*.egg-info')
# Distribution metadata and install configuration (what ends up on PyPI).
setup(
    name='django-templatetags-bundle',
    version=find_version(),
    description='Django templatetags extra bundles',
    long_description=read('README.rst'),
    author='Autrusseau Damien',
    author_email='autrusseau.damien@gmail.com',
    url='http://github.com/dalou/django-templatetags-bundle',
    packages=find_packages(),
    zip_safe=False,
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        # NOTE(review): this classifier says BSD while license='MIT' above;
        # one of the two is wrong -- confirm the intended license.
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
    test_suite='runtests.runtests',
    # Supported Django range is pinned; django-classy-tags is pinned exactly.
    install_requires=[
        'django >= 1.8.4, <= 1.9',
        'babel >= 2.0',
        'bleach >= 1.4.2',
        'django-classy-tags == 0.5.1'
    ],
)
|
geminy/aidear
|
refs/heads/master
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/tools/python/google/platform_utils_win.py
|
193
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Platform-specific utility methods shared by several scripts."""
import os
import re
import subprocess
import sys
import google.path_utils
# Cache a single cygpath process for use throughout, even across instances of
# the PlatformUtility class.
_cygpath_proc = None
class PlatformUtility(object):
  """Windows implementation of platform-specific helpers.

  Wraps cygpath-based path conversion and construction of the httpd
  start/stop command lines used by the test harness.
  """

  def __init__(self, base_dir):
    """Args:
      base_dir: a directory above which third_party/cygwin can be found,
          used to locate the cygpath executable for path conversions.
    """
    # Resolved lazily in _CygwinRoot().
    self._cygwin_root = None
    self._base_dir = base_dir

  def _CygwinRoot(self):
    """Returns the full path to third_party/cygwin/."""
    if not self._cygwin_root:
      self._cygwin_root = google.path_utils.FindUpward(self._base_dir,
                                                       'third_party', 'cygwin')
    return self._cygwin_root

  def _PathToExecutable(self, executable):
    """Returns the full path to an executable in Cygwin's bin dir."""
    return os.path.join(self._CygwinRoot(), 'bin', executable)

  def GetAbsolutePath(self, path, force=False):
    """Returns an absolute windows path. If platform is cygwin, converts it to
    windows style using cygpath.

    For performance reasons, we use a single cygpath process, shared among all
    instances of this class. Otherwise Python can run out of file handles.
    """
    if not force and sys.platform != "cygwin":
      return os.path.abspath(path)
    global _cygpath_proc
    if not _cygpath_proc:
      # -a: absolute form, -m: mixed (forward-slash) form,
      # -f -: read paths to convert from stdin, one per line.
      cygpath_command = [self._PathToExecutable("cygpath.exe"),
                         "-a", "-m", "-f", "-"]
      _cygpath_proc = subprocess.Popen(cygpath_command,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE)
    _cygpath_proc.stdin.write(path + "\n")
    return _cygpath_proc.stdout.readline().rstrip()

  def GetFilesystemRoot(self):
    """Returns the root directory of the file system."""
    return os.environ['SYSTEMDRIVE'] + '\\'

  def GetTempDirectory(self):
    """Returns the file system's base temp directory, or the filesystem root
    if the standard temp directory can't be determined.

    Note that this does not use a random subdirectory, so it's not
    intrinsically secure.  If you need a secure subdir, use the tempfile
    package.
    """
    return os.environ.get('TEMP', self.GetFilesystemRoot())

  def FilenameToUri(self, path, use_http=False, use_ssl=False, port=8000):
    """Convert a Windows style path to a URI.

    Args:
      path: For an http URI, the path relative to the httpd server's
          DocumentRoot; for a file URI, the full path to the file.
      use_http: if True, returns a URI of the form http://127.0.0.1:8000/.
          If False, returns a file:/// URI.
      use_ssl: if True, returns HTTPS URL (https://127.0.0.1:8000/).
          This parameter is ignored if use_http=False.
      port: The port number to append when returning an HTTP URI
    """
    if use_http:
      protocol = 'http'
      if use_ssl:
        protocol = 'https'
      # URIs use forward slashes regardless of the Windows path separator.
      path = path.replace("\\", "/")
      return "%s://127.0.0.1:%s/%s" % (protocol, str(port), path)
    return "file:///" + self.GetAbsolutePath(path)

  def GetStartHttpdCommand(self, output_dir,
                           httpd_conf_path, mime_types_path,
                           document_root=None, apache2=False):
    """Prepares the config file and output directory to start an httpd server.

    Returns a list of strings containing the server's command line+args.

    Args:
      output_dir: the path to the server's output directory, for log files.
          It will be created if necessary.
      httpd_conf_path: full path to the httpd.conf file to be used.
      mime_types_path: full path to the mime.types file to be used.
      document_root: full path to the DocumentRoot. If None, the DocumentRoot
          from the httpd.conf file will be used.  Note that the httpd.conf
          file alongside this script does not specify any DocumentRoot, so if
          you're using that one, be sure to specify a document_root here.
      apache2: boolean if true will cause this function to return start
          command for Apache 2.x as opposed to Apache 1.3.x
    """
    if document_root:
      document_root = GetCygwinPath(document_root)
    exe_name = "httpd"
    cert_file = ""
    if apache2:
      exe_name = "httpd2"
      cert_file = google.path_utils.FindUpward(self._base_dir, 'tools',
                                               'python', 'google',
                                               'httpd_config', 'httpd2.pem')
    # All paths below are converted to Cygwin form because the command is
    # executed inside bash (see httpd_cmd at the bottom).
    httpd_vars = {
      "httpd_executable_path": GetCygwinPath(
          os.path.join(self._CygwinRoot(), "usr", "sbin", exe_name)),
      "httpd_conf_path": GetCygwinPath(httpd_conf_path),
      "ssl_certificate_file": GetCygwinPath(cert_file),
      "document_root" : document_root,
      "server_root": GetCygwinPath(os.path.join(self._CygwinRoot(), "usr")),
      "mime_types_path": GetCygwinPath(mime_types_path),
      "output_dir": GetCygwinPath(output_dir),
      "bindir": GetCygwinPath(os.path.join(self._CygwinRoot(), "bin")),
      "user": os.environ.get("USERNAME", os.environ.get("USER", "")),
    }
    if not httpd_vars["user"]:
      # Failed to get the username from the environment; use whoami.exe
      # instead.
      proc = subprocess.Popen(self._PathToExecutable("whoami.exe"),
                              stdout=subprocess.PIPE)
      httpd_vars["user"] = proc.stdout.read().strip()

    if not httpd_vars["user"]:
      raise Exception("Failed to get username.")

    google.path_utils.MaybeMakeDirectory(output_dir)

    # We have to wrap the command in bash because the cygwin environment
    # is required for httpd to run.
    # -C: process directive before reading config files
    # -c: process directive after reading config files
    # Apache wouldn't run CGIs with permissions==700 unless we add
    # -c User "<username>"
    bash = self._PathToExecutable("bash.exe")
    httpd_cmd_string = (
      ' PATH=%(bindir)s %(httpd_executable_path)s'
      ' -f %(httpd_conf_path)s'
      ' -c \'TypesConfig "%(mime_types_path)s"\''
      ' -c \'CustomLog "%(output_dir)s/access_log.txt" common\''
      ' -c \'ErrorLog "%(output_dir)s/error_log.txt"\''
      ' -c \'PidFile "%(output_dir)s/httpd.pid"\''
      ' -C \'User "%(user)s"\''
      ' -C \'ServerRoot "%(server_root)s"\''
    )
    if apache2:
      httpd_cmd_string = ('export CYGWIN=server;' + httpd_cmd_string +
          ' -c \'SSLCertificateFile "%(ssl_certificate_file)s"\'')
    if document_root:
      httpd_cmd_string += ' -C \'DocumentRoot "%(document_root)s"\''

    httpd_cmd = [bash, "-c", httpd_cmd_string % httpd_vars]
    return httpd_cmd

  def GetStopHttpdCommand(self):
    """Returns a list of strings that contains the command line+args needed to
    stop the http server used in the http tests.
    """
    # Force kill (/f) *all* httpd processes. This has the side effect of
    # killing httpd processes that we didn't start.
    return ["taskkill.exe", "/f", "/im", "httpd*"]
###########################################################################
# This method is specific to windows, expected to be used only by *_win.py
# files.
def GetCygwinPath(path):
  """Convert an absolute Windows path to its /cygdrive/... Cygwin form.

  The conversion is done manually rather than with the cygpath utility,
  because cygpath rewrites paths it believes are Cygwin roots (for example
  mapping a checkout's third_party/cygwin/bin to plain /usr/bin).
  """
  def _LowerDrive(match):
    return '/cygdrive/%s/' % match.group(1).lower()

  converted = re.compile(r'([a-z]):[/\\]', re.IGNORECASE).sub(_LowerDrive, path)
  return converted.replace('\\', '/')
|
andrewleech/SickRage
|
refs/heads/master
|
lib/unidecode/x04f.py
|
252
|
data = (
'Zhong ', # 0x00
'Qi ', # 0x01
'Pei ', # 0x02
'Yu ', # 0x03
'Diao ', # 0x04
'Dun ', # 0x05
'Wen ', # 0x06
'Yi ', # 0x07
'Xin ', # 0x08
'Kang ', # 0x09
'Yi ', # 0x0a
'Ji ', # 0x0b
'Ai ', # 0x0c
'Wu ', # 0x0d
'Ji ', # 0x0e
'Fu ', # 0x0f
'Fa ', # 0x10
'Xiu ', # 0x11
'Jin ', # 0x12
'Bei ', # 0x13
'Dan ', # 0x14
'Fu ', # 0x15
'Tang ', # 0x16
'Zhong ', # 0x17
'You ', # 0x18
'Huo ', # 0x19
'Hui ', # 0x1a
'Yu ', # 0x1b
'Cui ', # 0x1c
'Chuan ', # 0x1d
'San ', # 0x1e
'Wei ', # 0x1f
'Chuan ', # 0x20
'Che ', # 0x21
'Ya ', # 0x22
'Xian ', # 0x23
'Shang ', # 0x24
'Chang ', # 0x25
'Lun ', # 0x26
'Cang ', # 0x27
'Xun ', # 0x28
'Xin ', # 0x29
'Wei ', # 0x2a
'Zhu ', # 0x2b
'[?] ', # 0x2c
'Xuan ', # 0x2d
'Nu ', # 0x2e
'Bo ', # 0x2f
'Gu ', # 0x30
'Ni ', # 0x31
'Ni ', # 0x32
'Xie ', # 0x33
'Ban ', # 0x34
'Xu ', # 0x35
'Ling ', # 0x36
'Zhou ', # 0x37
'Shen ', # 0x38
'Qu ', # 0x39
'Si ', # 0x3a
'Beng ', # 0x3b
'Si ', # 0x3c
'Jia ', # 0x3d
'Pi ', # 0x3e
'Yi ', # 0x3f
'Si ', # 0x40
'Ai ', # 0x41
'Zheng ', # 0x42
'Dian ', # 0x43
'Han ', # 0x44
'Mai ', # 0x45
'Dan ', # 0x46
'Zhu ', # 0x47
'Bu ', # 0x48
'Qu ', # 0x49
'Bi ', # 0x4a
'Shao ', # 0x4b
'Ci ', # 0x4c
'Wei ', # 0x4d
'Di ', # 0x4e
'Zhu ', # 0x4f
'Zuo ', # 0x50
'You ', # 0x51
'Yang ', # 0x52
'Ti ', # 0x53
'Zhan ', # 0x54
'He ', # 0x55
'Bi ', # 0x56
'Tuo ', # 0x57
'She ', # 0x58
'Yu ', # 0x59
'Yi ', # 0x5a
'Fo ', # 0x5b
'Zuo ', # 0x5c
'Kou ', # 0x5d
'Ning ', # 0x5e
'Tong ', # 0x5f
'Ni ', # 0x60
'Xuan ', # 0x61
'Qu ', # 0x62
'Yong ', # 0x63
'Wa ', # 0x64
'Qian ', # 0x65
'[?] ', # 0x66
'Ka ', # 0x67
'[?] ', # 0x68
'Pei ', # 0x69
'Huai ', # 0x6a
'He ', # 0x6b
'Lao ', # 0x6c
'Xiang ', # 0x6d
'Ge ', # 0x6e
'Yang ', # 0x6f
'Bai ', # 0x70
'Fa ', # 0x71
'Ming ', # 0x72
'Jia ', # 0x73
'Er ', # 0x74
'Bing ', # 0x75
'Ji ', # 0x76
'Hen ', # 0x77
'Huo ', # 0x78
'Gui ', # 0x79
'Quan ', # 0x7a
'Tiao ', # 0x7b
'Jiao ', # 0x7c
'Ci ', # 0x7d
'Yi ', # 0x7e
'Shi ', # 0x7f
'Xing ', # 0x80
'Shen ', # 0x81
'Tuo ', # 0x82
'Kan ', # 0x83
'Zhi ', # 0x84
'Gai ', # 0x85
'Lai ', # 0x86
'Yi ', # 0x87
'Chi ', # 0x88
'Kua ', # 0x89
'Guang ', # 0x8a
'Li ', # 0x8b
'Yin ', # 0x8c
'Shi ', # 0x8d
'Mi ', # 0x8e
'Zhu ', # 0x8f
'Xu ', # 0x90
'You ', # 0x91
'An ', # 0x92
'Lu ', # 0x93
'Mou ', # 0x94
'Er ', # 0x95
'Lun ', # 0x96
'Tong ', # 0x97
'Cha ', # 0x98
'Chi ', # 0x99
'Xun ', # 0x9a
'Gong ', # 0x9b
'Zhou ', # 0x9c
'Yi ', # 0x9d
'Ru ', # 0x9e
'Jian ', # 0x9f
'Xia ', # 0xa0
'Jia ', # 0xa1
'Zai ', # 0xa2
'Lu ', # 0xa3
'Ko ', # 0xa4
'Jiao ', # 0xa5
'Zhen ', # 0xa6
'Ce ', # 0xa7
'Qiao ', # 0xa8
'Kuai ', # 0xa9
'Chai ', # 0xaa
'Ning ', # 0xab
'Nong ', # 0xac
'Jin ', # 0xad
'Wu ', # 0xae
'Hou ', # 0xaf
'Jiong ', # 0xb0
'Cheng ', # 0xb1
'Zhen ', # 0xb2
'Zuo ', # 0xb3
'Chou ', # 0xb4
'Qin ', # 0xb5
'Lu ', # 0xb6
'Ju ', # 0xb7
'Shu ', # 0xb8
'Ting ', # 0xb9
'Shen ', # 0xba
'Tuo ', # 0xbb
'Bo ', # 0xbc
'Nan ', # 0xbd
'Hao ', # 0xbe
'Bian ', # 0xbf
'Tui ', # 0xc0
'Yu ', # 0xc1
'Xi ', # 0xc2
'Cu ', # 0xc3
'E ', # 0xc4
'Qiu ', # 0xc5
'Xu ', # 0xc6
'Kuang ', # 0xc7
'Ku ', # 0xc8
'Wu ', # 0xc9
'Jun ', # 0xca
'Yi ', # 0xcb
'Fu ', # 0xcc
'Lang ', # 0xcd
'Zu ', # 0xce
'Qiao ', # 0xcf
'Li ', # 0xd0
'Yong ', # 0xd1
'Hun ', # 0xd2
'Jing ', # 0xd3
'Xian ', # 0xd4
'San ', # 0xd5
'Pai ', # 0xd6
'Su ', # 0xd7
'Fu ', # 0xd8
'Xi ', # 0xd9
'Li ', # 0xda
'Fu ', # 0xdb
'Ping ', # 0xdc
'Bao ', # 0xdd
'Yu ', # 0xde
'Si ', # 0xdf
'Xia ', # 0xe0
'Xin ', # 0xe1
'Xiu ', # 0xe2
'Yu ', # 0xe3
'Ti ', # 0xe4
'Che ', # 0xe5
'Chou ', # 0xe6
'[?] ', # 0xe7
'Yan ', # 0xe8
'Lia ', # 0xe9
'Li ', # 0xea
'Lai ', # 0xeb
'[?] ', # 0xec
'Jian ', # 0xed
'Xiu ', # 0xee
'Fu ', # 0xef
'He ', # 0xf0
'Ju ', # 0xf1
'Xiao ', # 0xf2
'Pai ', # 0xf3
'Jian ', # 0xf4
'Biao ', # 0xf5
'Chu ', # 0xf6
'Fei ', # 0xf7
'Feng ', # 0xf8
'Ya ', # 0xf9
'An ', # 0xfa
'Bei ', # 0xfb
'Yu ', # 0xfc
'Xin ', # 0xfd
'Bi ', # 0xfe
'Jian ', # 0xff
)
|
abelkhan/websearch
|
refs/heads/master
|
websearch/chardet/mbcharsetprober.py
|
2923
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base prober for multi-byte encodings.

    Subclasses install a coding state machine (self._mCodingSM) and a
    character-distribution analyzer (self._mDistributionAnalyzer); this class
    drives both over the input bytes and combines their verdicts.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # Both are supplied by concrete subclasses.
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Two-byte window [previous chunk's last byte, current byte] so a
        # character split across feed() calls can still be analyzed.
        self._mLastChar = [0, 0]

    def reset(self):
        """Reset prober, state machine and analyzer to their initial state."""
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Abstract: concrete probers return their encoding's canonical name.
        pass

    def feed(self, aBuf):
        """Feed a chunk of bytes; returns the prober's state afterwards."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # Step the coding state machine one byte at a time.
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; hand its bytes to the
                # distribution analyzer (for i == 0 the first byte came from
                # the previous chunk, carried in _mLastChar).
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success once enough data gives high enough
            # confidence, without waiting for an eItsMe state.
            if (self._mDistributionAnalyzer.got_enough_data() and
                (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        """Confidence comes solely from the distribution analyzer."""
        return self._mDistributionAnalyzer.get_confidence()
|
rohlandm/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/manifest/log.py
|
317
|
import logging
# Configure the root logger with a default handler and create the shared
# "manifest" logger used throughout the package.
logging.basicConfig()
logger = logging.getLogger("manifest")
logger.setLevel(logging.DEBUG)

def get_logger():
    """Return the module-wide "manifest" logger."""
    return logger
|
yangzilong1986/python
|
refs/heads/master
|
JiYouMCC/0011/0011.py
|
24
|
# -*- coding: utf-8 -*-
def word_check(input_word, filtered_words):
    """Return 'Freedom' if any filtered word occurs in input_word,
    otherwise 'Human Rights'."""
    matched = any(word in input_word for word in filtered_words)
    return 'Freedom' if matched else 'Human Rights'
file = open('filtered_words.txt')
filtered_words=[line.replace('\n','') for line in file]
print word_check('程序员在上班。', filtered_words)
print word_check('我妈妈是农民。', filtered_words)
|
ffu/DSA-3.2.2
|
refs/heads/master
|
gnuradio-core/src/python/gnuradio/gr/qa_kludged_imports.py
|
6
|
#!/usr/bin/env python
#
# Copyright 2005,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_head (gr_unittest.TestCase):
    """Smoke tests that GNU Radio's kludged/magic imports resolve."""

    def setUp(self):
        # No fixtures needed; the tests only exercise imports.
        pass

    def tearDown(self):
        pass

    def test_blks_import(self):
        # make sure that this somewhat magic import works
        from gnuradio import blks2

    def test_gru_import(self):
        # make sure that this somewhat magic import works
        from gnuradio import gru
if __name__ == '__main__':
    gr_unittest.main()
|
rjeczalik/koding
|
refs/heads/master
|
scripts/assassin.py
|
13
|
#!/usr/bin/env python
import os
import sys
import time
import json
import socket
import commands
try:
import psutil
import pycurl
except ImportError:
print "Install required packages first: pip install psutil pycurl"
sys.exit(1)
# Tunable thresholds for the CPU-usage watchdog.
THRESHOLD = 1.0 # percentage
KILL_THRESHOLD = 3.0 # average percentage before deciding to kill
REPEAT_EVERY = 1 # seconds
MAX_OCCURRENCE = 5 # times
MAX_REPEAT = 10 # times ~ 0 to infinite
KILL_ENABLED = False
SLACK_ENABLED = False
# Despite the name, this is the list of processes that MAY be killed:
# kill() only terminates a process whose name appears here.
WHITE_LIST = [
    "kloud",
    "koding-webserver",
    "koding-authworker",
    "koding-socialworker",
]

# Build a friendly host label; on EC2, link it to the public hostname.
(status, hostaddr) = commands.getstatusoutput(
    "/opt/aws/bin/ec2-metadata --public-hostname"
)
hostname = socket.gethostname()
if status == 0:
    hostaddr = "ssh://ec2-user@%s" % hostaddr.split(':')[-1].strip()
    hostname = "<%s|%s>" % (hostaddr, hostname)
else:
    hostname = "*%s*" % hostname

my_pid = os.getpid()
# pid -> {proc, counter, cpu}: processes recently observed above THRESHOLD.
bad_guys = {}

PAYLOAD = {
    "channel" : "#_devops",
    "username" : "py_assassin",
    "icon_emoji" : ":snake:"
}

# NOTE(review): the Slack URL is empty, so slack_it() would post nowhere if
# SLACK_ENABLED were turned on -- confirm the webhook URL before enabling.
slack = pycurl.Curl()
slack.setopt(pycurl.URL, "")
slack.setopt(pycurl.POST, 1)
def slack_it(message):
    """Print *message* and, when SLACK_ENABLED, post it to Slack."""
    print message
    if not SLACK_ENABLED:
        return
    # Prefix with the host label so multi-host channels stay readable.
    PAYLOAD['text'] = "[%s] %s" % (hostname, message)
    slack.setopt(pycurl.POSTFIELDS, "payload=%s" % json.dumps(PAYLOAD))
    slack.perform()
def get_top_processes():
    """Return processes above THRESHOLD CPU, sorted by usage (descending).

    Each returned psutil.Process gets a ``dict`` attribute holding the
    sampled 'cpu_percent', 'name' and 'status' fields.
    """
    procs = []
    for p in psutil.process_iter():
        # Never consider this watchdog itself.
        if p.pid == my_pid:
            continue
        try:
            p.dict = p.as_dict(['cpu_percent', 'name', 'status'])
        except Exception:
            # The process may have exited between iteration and sampling.
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass
        else:
            if p.dict['cpu_percent'] > THRESHOLD:
                procs.append(p)
    # return processes sorted by CPU percent usage
    return sorted(procs, key=lambda p: p.dict['cpu_percent'], reverse=True)
def kill(proc, usage):
    """Judge *proc* given the CPU total *usage* accumulated by checks().

    *usage* is the sum over MAX_OCCURRENCE observations; it is averaged here
    and compared against KILL_THRESHOLD.  The tracking entry in ``bad_guys``
    is cleared in every outcome so a fresh observation window starts
    afterwards (the original only removed the entry in the below-threshold
    branch, so an above-threshold -- possibly already killed -- process was
    re-judged every cycle forever).
    """
    usage = usage / MAX_OCCURRENCE # get average CPU usage
    if usage > KILL_THRESHOLD:
        if KILL_ENABLED and proc.name() in WHITE_LIST:
            slack_it("Killing: *%s* (*PID %s*) usage was: %s" %
                     (proc.name(), proc.pid, usage))
            proc.kill()
        else:
            print("If I was able to, I would like to kill: *%s* (*PID %s*) "
                  "since it's using %s cpu on average..." %
                  (proc.name(), proc.pid, usage))
    else:
        print("Giving another chance to *%s* since "
              "its usage average (*%s*) below kill "
              "threshold: *%s* " % (proc.name(), usage, KILL_THRESHOLD))
    # Reset tracking for this pid regardless of the outcome above.
    if proc.pid in bad_guys:
        del bad_guys[proc.pid]
def checks():
    """Sample the top CPU consumers and update the `bad_guys` tracker.

    A process seen over THRESHOLD accumulates a sighting counter and a
    cpu_percent sum; after MAX_OCCURRENCE sightings it is handed to
    kill() for judgement.  When nothing is over threshold the tracker is
    cleared entirely.
    """
    global bad_guys
    top_process = get_top_processes()
    if len(top_process) == 0:
        # Nobody is misbehaving right now; forget all prior sightings.
        bad_guys = {}
        return
    # Only the five hungriest processes are tracked each cycle.
    for proc in top_process[0:5]:
        if proc.pid in bad_guys:
            bad_guys[proc.pid]['counter'] += 1
            bad_guys[proc.pid]['cpu'] += proc.dict['cpu_percent']
            if bad_guys[proc.pid]['counter'] >= MAX_OCCURRENCE:
                kill(proc, bad_guys[proc.pid]['cpu'])
        else:
            # First sighting: start with zeroed counter and cpu sum.
            bad_guys[proc.pid] = dict( proc = proc, counter = 0, cpu = 0 )
    PROCESS_OUT = "Process '%s' (PID %s) is using more than %s " \
                  "CPU (%s) in last %d seconds for the %d times."
    # Report every currently-tracked offender (Python 2 iteritems).
    for pid, p in bad_guys.iteritems():
        [p, counter] = [p['proc'].dict, p['counter']]
        print(PROCESS_OUT % (
            p['name'], pid, THRESHOLD, p['cpu_percent'], REPEAT_EVERY, counter
        ))
def main():
    """Run checks() every REPEAT_EVERY seconds.

    After MAX_REPEAT rounds, print "Done." and exit; a MAX_REPEAT of 0
    never matches the round count, so the loop runs forever.
    """
    rounds = 0
    while True:
        checks()
        rounds += 1
        if rounds == MAX_REPEAT:
            print("Done.")
            sys.exit()
        time.sleep(REPEAT_EVERY)
if __name__ == '__main__':
print "Assassin started..."
try:
main()
except (KeyboardInterrupt, SystemExit):
pass
|
percona/debian-percona-xtradb-cluster-5.6
|
refs/heads/master
|
plugin/percona-pam-for-mysql/doc/source/conf.py
|
33
|
# -*- coding: utf-8 -*-
#
# Percona PAM authentication plugin documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 27 22:27:15 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.ifconfig',
'sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Percona PAM authentication plugin for MySQL'
copyright = u'2012, Percona Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
rst_prolog = '''
.. |check| replace:: ``|[[---CHECK---]]|``
.. |xtrabackup| replace:: :program:`xtrabackup`
.. |innobackupex| replace:: :program:`innobackupex`
.. |XtraBackup| replace:: *XtraBackup*
.. |Percona Server| replace:: *Percona Server*
.. |Percona| replace:: *Percona*
.. |MySQL| replace:: *MySQL*
.. |Drizzle| replace:: *Drizzle*
.. |MariaDB| replace:: *MariaDB*
'''
extlinks = {'bug': ('https://bugs.launchpad.net/percona-pam-for-mysql/+bug/%s',
'#')}
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'percona-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.', './percona-theme']
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# Fixed typo: "authenticatino" -> "authentication".
html_title = 'Percona PAM authentication plugin for MySQL Documentation'
# A shorter title for the navigation bar.  Default is the same as html_title.
html_short_title = 'PAM Plugin Docs'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'percona-pam-plugin-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'percona_favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PerconaPAMForMySQL'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PerconaPAMForMySQL.tex', u'Percona PAM Authentication Plugin for MySQL Documentation',
u'Percona Inc', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
# ('index', 'perconapamplugin', u'Percona PAM Authentication Plugin for MySQL Documentation',
# [u'Percona Inc'], 1)
]
|
jpiccino/isonline
|
refs/heads/master
|
ping_enUS.py
|
1
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
ledtest = 2 # Turns on while pinging
ledNOK = 3 # Turns on if the ping response is an error
ledOK = 4 # Turns on if there is a ping response
GPIO.setup(ledtest, GPIO.OUT)
GPIO.setup(ledNOK, GPIO.OUT)
GPIO.setup(ledOK, GPIO.OUT)
print "Testing the leds"
GPIO.output(ledtest, 1)
time.sleep(0.1)
GPIO.output(ledtest, 0)
GPIO.output(ledNOK, 1)
time.sleep(0.1)
GPIO.output(ledNOK, 0)
GPIO.output(ledOK, 1)
time.sleep(0.1)
GPIO.output(ledOK, 0)
print "Tested"
url=raw_input("Site:")
intervalo=input("Interval (seconds):")
tenta=input("Attempts (how many times the script pings):")
cont=0
while cont < tenta:
time.sleep(intervalo)
# this ping part below is based on the answer of user 10flow as posted on http://stackoverflow.com/questions/2953462/pinging-servers-in-python
print ""
print "Attempt " + str(cont+1)
print ""
GPIO.output(ledtest, 1)
import os
hostname = url #example
response = os.system("ping -c 1 " + hostname)
GPIO.output(ledtest, 0)
#and then check the response...
if response == 0:
GPIO.output(ledNOK, 0)
GPIO.output(ledOK, 1)
time.sleep(0.1)
GPIO.output(ledOK, 0)
time.sleep(0.1)
GPIO.output(ledOK, 1)
time.sleep(0.1)
GPIO.output(ledOK, 0)
time.sleep(0.1)
GPIO.output(ledOK, 1)
time.sleep(0.1)
GPIO.output(ledOK, 0)
time.sleep(0.1)
GPIO.output(ledOK, 1)
time.sleep(0.1)
GPIO.output(ledOK, 0)
time.sleep(0.1)
GPIO.output(ledOK, 1)
time.sleep(0.1)
GPIO.output(ledOK, 0)
time.sleep(0.1)
GPIO.output(ledOK, 1)
else:
GPIO.output(ledOK, 0)
GPIO.output(ledNOK, 1)
time.sleep(0.1)
GPIO.output(ledNOK, 0)
time.sleep(0.1)
GPIO.output(ledNOK, 1)
time.sleep(0.1)
GPIO.output(ledNOK, 0)
time.sleep(0.1)
GPIO.output(ledNOK, 1)
time.sleep(0.1)
GPIO.output(ledNOK, 0)
time.sleep(0.1)
GPIO.output(ledNOK, 1)
time.sleep(0.1)
GPIO.output(ledNOK, 0)
time.sleep(0.1)
GPIO.output(ledNOK, 1)
time.sleep(0.1)
GPIO.output(ledNOK, 0)
time.sleep(0.1)
GPIO.output(ledNOK, 1)
cont=cont+1
else:
GPIO.output(ledOK, 0)
GPIO.output(ledNOK, 0)
print ""
print "Ended!"
|
buqing2009/MissionPlanner
|
refs/heads/master
|
Lib/lib2to3/fixes/fix_paren.py
|
61
|
"""Fixer that addes parentheses where they are required
This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
# By Taek Joo Kim and Benjamin Peterson
# Local imports
from .. import fixer_base
from ..fixer_util import LParen, RParen
# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
class FixParen(fixer_base.BaseFix):
    """Add parentheses around an un-parenthesized testlist in a
    comprehension ``for`` clause, e.g. ``[x for x in 1, 2]`` becomes
    ``[x for x in (1, 2)]``.
    """
    BM_compatible = True  # participates in the bottom-up pattern matcher

    # Match a list comprehension or generator expression whose ``for``
    # iterates over a bare comma-separated testlist; the testlist is
    # captured as `target`.
    PATTERN = """
        atom< ('[' | '(')
            (listmaker< any
                comp_for<
                    'for' NAME 'in'
                    target=testlist_safe< any (',' any)+ [',']
                    >
                    [any]
                >
            >
            |
            testlist_gexp< any
                comp_for<
                    'for' NAME 'in'
                    target=testlist_safe< any (',' any)+ [',']
                    >
                    [any]
                >
            >)
        (']' | ')') >
    """

    def transform(self, node, results):
        """Wrap the captured testlist in explicit parentheses, moving the
        testlist's leading whitespace onto the new '(' token."""
        target = results["target"]

        lparen = LParen()
        lparen.prefix = target.prefix
        target.prefix = u"" # Make it hug the parentheses
        target.insert_child(0, lparen)
        target.append_child(RParen())
|
ArcherCraftStore/ArcherVMPeridot
|
refs/heads/master
|
Python/Lib/idlelib/AutoCompleteWindow.py
|
88
|
"""
An auto-completion window for IDLE, used by the AutoComplete extension
"""
from tkinter import *
from idlelib.MultiCall import MC_SHIFT
from idlelib.AutoComplete import COMPLETE_FILES, COMPLETE_ATTRIBUTES
HIDE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-hide>>"
HIDE_SEQUENCES = ("<FocusOut>", "<ButtonPress>")
KEYPRESS_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keypress>>"
# We need to bind event beyond <Key> so that the function will be called
# before the default specific IDLE function
KEYPRESS_SEQUENCES = ("<Key>", "<Key-BackSpace>", "<Key-Return>", "<Key-Tab>",
"<Key-Up>", "<Key-Down>", "<Key-Home>", "<Key-End>",
"<Key-Prior>", "<Key-Next>")
KEYRELEASE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keyrelease>>"
KEYRELEASE_SEQUENCE = "<KeyRelease>"
LISTUPDATE_SEQUENCE = "<B1-ButtonRelease>"
WINCONFIG_SEQUENCE = "<Configure>"
DOUBLECLICK_SEQUENCE = "<B1-Double-ButtonRelease>"
class AutoCompleteWindow:
def __init__(self, widget):
# The widget (Text) on which we place the AutoCompleteWindow
self.widget = widget
# The widgets we create
self.autocompletewindow = self.listbox = self.scrollbar = None
# The default foreground and background of a selection. Saved because
# they are changed to the regular colors of list items when the
# completion start is not a prefix of the selected completion
self.origselforeground = self.origselbackground = None
# The list of completions
self.completions = None
# A list with more completions, or None
self.morecompletions = None
# The completion mode. Either AutoComplete.COMPLETE_ATTRIBUTES or
# AutoComplete.COMPLETE_FILES
self.mode = None
# The current completion start, on the text box (a string)
self.start = None
# The index of the start of the completion
self.startindex = None
# The last typed start, used so that when the selection changes,
# the new start will be as close as possible to the last typed one.
self.lasttypedstart = None
# Do we have an indication that the user wants the completion window
# (for example, he clicked the list)
self.userwantswindow = None
# event ids
self.hideid = self.keypressid = self.listupdateid = self.winconfigid \
= self.keyreleaseid = self.doubleclickid = None
# Flag set if last keypress was a tab
self.lastkey_was_tab = False
    def _change_start(self, newstart):
        """Replace the completion start currently in the Text widget with
        *newstart*, editing only the suffix that actually differs.

        Finding the common prefix first keeps the widget edits minimal, so
        the insertion cursor is disturbed as little as possible.
        """
        min_len = min(len(self.start), len(newstart))
        i = 0
        while i < min_len and self.start[i] == newstart[i]:
            i += 1
        # Delete the trailing part of the old start that differs...
        if i < len(self.start):
            self.widget.delete("%s+%dc" % (self.startindex, i),
                               "%s+%dc" % (self.startindex, len(self.start)))
        # ...and insert the remainder of the new start in its place.
        if i < len(newstart):
            self.widget.insert("%s+%dc" % (self.startindex, i),
                               newstart[i:])
        self.start = newstart
def _binary_search(self, s):
"""Find the first index in self.completions where completions[i] is
greater or equal to s, or the last index if there is no such
one."""
i = 0; j = len(self.completions)
while j > i:
m = (i + j) // 2
if self.completions[m] >= s:
j = m
else:
i = m + 1
return min(i, len(self.completions)-1)
def _complete_string(self, s):
"""Assuming that s is the prefix of a string in self.completions,
return the longest string which is a prefix of all the strings which
s is a prefix of them. If s is not a prefix of a string, return s."""
first = self._binary_search(s)
if self.completions[first][:len(s)] != s:
# There is not even one completion which s is a prefix of.
return s
# Find the end of the range of completions where s is a prefix of.
i = first + 1
j = len(self.completions)
while j > i:
m = (i + j) // 2
if self.completions[m][:len(s)] != s:
j = m
else:
i = m + 1
last = i-1
if first == last: # only one possible completion
return self.completions[first]
# We should return the maximum prefix of first and last
first_comp = self.completions[first]
last_comp = self.completions[last]
min_len = min(len(first_comp), len(last_comp))
i = len(s)
while i < min_len and first_comp[i] == last_comp[i]:
i += 1
return first_comp[:i]
def _selection_changed(self):
"""Should be called when the selection of the Listbox has changed.
Updates the Listbox display and calls _change_start."""
cursel = int(self.listbox.curselection()[0])
self.listbox.see(cursel)
lts = self.lasttypedstart
selstart = self.completions[cursel]
if self._binary_search(lts) == cursel:
newstart = lts
else:
min_len = min(len(lts), len(selstart))
i = 0
while i < min_len and lts[i] == selstart[i]:
i += 1
newstart = selstart[:i]
self._change_start(newstart)
if self.completions[cursel][:len(self.start)] == self.start:
# start is a prefix of the selected completion
self.listbox.configure(selectbackground=self.origselbackground,
selectforeground=self.origselforeground)
else:
self.listbox.configure(selectbackground=self.listbox.cget("bg"),
selectforeground=self.listbox.cget("fg"))
# If there are more completions, show them, and call me again.
if self.morecompletions:
self.completions = self.morecompletions
self.morecompletions = None
self.listbox.delete(0, END)
for item in self.completions:
self.listbox.insert(END, item)
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
def show_window(self, comp_lists, index, complete, mode, userWantsWin):
"""Show the autocomplete list, bind events.
If complete is True, complete the text, and if there is exactly one
matching completion, don't open a list."""
# Handle the start we already have
self.completions, self.morecompletions = comp_lists
self.mode = mode
self.startindex = self.widget.index(index)
self.start = self.widget.get(self.startindex, "insert")
if complete:
completed = self._complete_string(self.start)
start = self.start
self._change_start(completed)
i = self._binary_search(completed)
if self.completions[i] == completed and \
(i == len(self.completions)-1 or
self.completions[i+1][:len(completed)] != completed):
# There is exactly one matching completion
return completed == start
self.userwantswindow = userWantsWin
self.lasttypedstart = self.start
# Put widgets in place
self.autocompletewindow = acw = Toplevel(self.widget)
# Put it in a position so that it is not seen.
acw.wm_geometry("+10000+10000")
# Make it float
acw.wm_overrideredirect(1)
try:
# This command is only needed and available on Tk >= 8.4.0 for OSX
# Without it, call tips intrude on the typing process by grabbing
# the focus.
acw.tk.call("::tk::unsupported::MacWindowStyle", "style", acw._w,
"help", "noActivates")
except TclError:
pass
self.scrollbar = scrollbar = Scrollbar(acw, orient=VERTICAL)
self.listbox = listbox = Listbox(acw, yscrollcommand=scrollbar.set,
exportselection=False, bg="white")
for item in self.completions:
listbox.insert(END, item)
self.origselforeground = listbox.cget("selectforeground")
self.origselbackground = listbox.cget("selectbackground")
scrollbar.config(command=listbox.yview)
scrollbar.pack(side=RIGHT, fill=Y)
listbox.pack(side=LEFT, fill=BOTH, expand=True)
# Initialize the listbox selection
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
# bind events
self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
self.hide_event)
for seq in HIDE_SEQUENCES:
self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
self.keypressid = self.widget.bind(KEYPRESS_VIRTUAL_EVENT_NAME,
self.keypress_event)
for seq in KEYPRESS_SEQUENCES:
self.widget.event_add(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
self.keyreleaseid = self.widget.bind(KEYRELEASE_VIRTUAL_EVENT_NAME,
self.keyrelease_event)
self.widget.event_add(KEYRELEASE_VIRTUAL_EVENT_NAME,KEYRELEASE_SEQUENCE)
self.listupdateid = listbox.bind(LISTUPDATE_SEQUENCE,
self.listselect_event)
self.winconfigid = acw.bind(WINCONFIG_SEQUENCE, self.winconfig_event)
self.doubleclickid = listbox.bind(DOUBLECLICK_SEQUENCE,
self.doubleclick_event)
def winconfig_event(self, event):
if not self.is_active():
return
# Position the completion list window
text = self.widget
text.see(self.startindex)
x, y, cx, cy = text.bbox(self.startindex)
acw = self.autocompletewindow
acw_width, acw_height = acw.winfo_width(), acw.winfo_height()
text_width, text_height = text.winfo_width(), text.winfo_height()
new_x = text.winfo_rootx() + min(x, max(0, text_width - acw_width))
new_y = text.winfo_rooty() + y
if (text_height - (y + cy) >= acw_height # enough height below
or y < acw_height): # not enough height above
# place acw below current line
new_y += cy
else:
# place acw above current line
new_y -= acw_height
acw.wm_geometry("+%d+%d" % (new_x, new_y))
def hide_event(self, event):
if not self.is_active():
return
self.hide_window()
def listselect_event(self, event):
if not self.is_active():
return
self.userwantswindow = True
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
def doubleclick_event(self, event):
# Put the selected completion in the text, and close the list
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
self.hide_window()
def keypress_event(self, event):
if not self.is_active():
return
keysym = event.keysym
if hasattr(event, "mc_state"):
state = event.mc_state
else:
state = 0
if keysym != "Tab":
self.lastkey_was_tab = False
if (len(keysym) == 1 or keysym in ("underscore", "BackSpace")
or (self.mode == COMPLETE_FILES and keysym in
("period", "minus"))) \
and not (state & ~MC_SHIFT):
# Normal editing of text
if len(keysym) == 1:
self._change_start(self.start + keysym)
elif keysym == "underscore":
self._change_start(self.start + '_')
elif keysym == "period":
self._change_start(self.start + '.')
elif keysym == "minus":
self._change_start(self.start + '-')
else:
# keysym == "BackSpace"
if len(self.start) == 0:
self.hide_window()
return
self._change_start(self.start[:-1])
self.lasttypedstart = self.start
self.listbox.select_clear(0, int(self.listbox.curselection()[0]))
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
return "break"
elif keysym == "Return":
self.hide_window()
return
elif (self.mode == COMPLETE_ATTRIBUTES and keysym in
("period", "space", "parenleft", "parenright", "bracketleft",
"bracketright")) or \
(self.mode == COMPLETE_FILES and keysym in
("slash", "backslash", "quotedbl", "apostrophe")) \
and not (state & ~MC_SHIFT):
# If start is a prefix of the selection, but is not '' when
# completing file names, put the whole
# selected completion. Anyway, close the list.
cursel = int(self.listbox.curselection()[0])
if self.completions[cursel][:len(self.start)] == self.start \
and (self.mode == COMPLETE_ATTRIBUTES or self.start):
self._change_start(self.completions[cursel])
self.hide_window()
return
elif keysym in ("Home", "End", "Prior", "Next", "Up", "Down") and \
not state:
# Move the selection in the listbox
self.userwantswindow = True
cursel = int(self.listbox.curselection()[0])
if keysym == "Home":
newsel = 0
elif keysym == "End":
newsel = len(self.completions)-1
elif keysym in ("Prior", "Next"):
jump = self.listbox.nearest(self.listbox.winfo_height()) - \
self.listbox.nearest(0)
if keysym == "Prior":
newsel = max(0, cursel-jump)
else:
assert keysym == "Next"
newsel = min(len(self.completions)-1, cursel+jump)
elif keysym == "Up":
newsel = max(0, cursel-1)
else:
assert keysym == "Down"
newsel = min(len(self.completions)-1, cursel+1)
self.listbox.select_clear(cursel)
self.listbox.select_set(newsel)
self._selection_changed()
self._change_start(self.completions[newsel])
return "break"
elif (keysym == "Tab" and not state):
if self.lastkey_was_tab:
# two tabs in a row; insert current selection and close acw
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
self.hide_window()
return "break"
else:
# first tab; let AutoComplete handle the completion
self.userwantswindow = True
self.lastkey_was_tab = True
return
elif any(s in keysym for s in ("Shift", "Control", "Alt",
"Meta", "Command", "Option")):
# A modifier key, so ignore
return
elif event.char and event.char >= ' ':
# Regular character with a non-length-1 keycode
self._change_start(self.start + event.char)
self.lasttypedstart = self.start
self.listbox.select_clear(0, int(self.listbox.curselection()[0]))
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
return "break"
else:
# Unknown event, close the window and let it through.
self.hide_window()
return
def keyrelease_event(self, event):
if not self.is_active():
return
if self.widget.index("insert") != \
self.widget.index("%s+%dc" % (self.startindex, len(self.start))):
# If we didn't catch an event which moved the insert, close window
self.hide_window()
def is_active(self):
return self.autocompletewindow is not None
def complete(self):
self._change_start(self._complete_string(self.start))
# The selection doesn't change.
def hide_window(self):
if not self.is_active():
return
# unbind events
for seq in HIDE_SEQUENCES:
self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
self.hideid = None
for seq in KEYPRESS_SEQUENCES:
self.widget.event_delete(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
self.widget.unbind(KEYPRESS_VIRTUAL_EVENT_NAME, self.keypressid)
self.keypressid = None
self.widget.event_delete(KEYRELEASE_VIRTUAL_EVENT_NAME,
KEYRELEASE_SEQUENCE)
self.widget.unbind(KEYRELEASE_VIRTUAL_EVENT_NAME, self.keyreleaseid)
self.keyreleaseid = None
self.listbox.unbind(LISTUPDATE_SEQUENCE, self.listupdateid)
self.listupdateid = None
self.autocompletewindow.unbind(WINCONFIG_SEQUENCE, self.winconfigid)
self.winconfigid = None
# destroy widgets
self.scrollbar.destroy()
self.scrollbar = None
self.listbox.destroy()
self.listbox = None
self.autocompletewindow.destroy()
self.autocompletewindow = None
|
ESOedX/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/tests/test_core_caching.py
|
1
|
"""
Tests core caching facilities.
"""
from __future__ import absolute_import
from django.test import TestCase
from opaque_keys.edx.locator import AssetLocator, CourseLocator
from openedx.core.djangoapps.contentserver.caching import del_cached_content, get_cached_content, set_cached_content
class Content(object):
    """
    Mock cached content

    Minimal stand-in carrying just the location and payload the caching
    helpers under test need.
    """
    def __init__(self, location, content):
        # location: asset locator used to derive the cache id
        # content: arbitrary payload stored/retrieved by the cache
        self.location = location
        self.content = content

    def get_id(self):
        # Use the deprecated-SON form of the locator as the id
        # (presumably matching what the real content class returns --
        # verify against the contentserver caching module).
        return self.location.to_deprecated_son()
class CachingTestCase(TestCase):
"""
Tests for https://edx.lighthouseapp.com/projects/102637/tickets/112-updating-asset-does-not-refresh-the-cached-copy
"""
unicodeLocation = AssetLocator(CourseLocator(u'c4x', u'mitX', u'800'), u'thumbnail', u'monsters.jpg')
# Note that some of the parts are strings instead of unicode strings
nonUnicodeLocation = AssetLocator(CourseLocator('c4x', u'mitX', u'800'), 'thumbnail', 'monsters.jpg')
mockAsset = Content(unicodeLocation, 'my content')
def test_put_and_get(self):
set_cached_content(self.mockAsset)
self.assertEqual(self.mockAsset.content, get_cached_content(self.unicodeLocation).content,
'should be stored in cache with unicodeLocation')
self.assertEqual(self.mockAsset.content, get_cached_content(self.nonUnicodeLocation).content,
'should be stored in cache with nonUnicodeLocation')
def test_delete(self):
set_cached_content(self.mockAsset)
del_cached_content(self.nonUnicodeLocation)
self.assertEqual(None, get_cached_content(self.unicodeLocation),
'should not be stored in cache with unicodeLocation')
self.assertEqual(None, get_cached_content(self.nonUnicodeLocation),
'should not be stored in cache with nonUnicodeLocation')
|
Tetpay/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/mac/gyptest-xcode-gcc-clang.py
|
254
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xcode-style GCC_... settings that require clang are handled
properly.
"""
import TestGyp
import os
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'xcode-gcc'
test.run_gyp('test-clang.gyp', chdir=CHDIR)
test.build('test-clang.gyp', 'aliasing_yes', chdir=CHDIR)
test.run_built_executable('aliasing_yes', chdir=CHDIR, stdout="1\n")
test.build('test-clang.gyp', 'aliasing_no', chdir=CHDIR)
test.run_built_executable('aliasing_no', chdir=CHDIR, stdout="0\n")
# The default behavior changed: strict aliasing used to be off, now it's on
# by default. The important part is that this is identical for all generators
# (which it is). TODO(thakis): Enable this once the bots have a newer Xcode.
#test.build('test-clang.gyp', 'aliasing_default', chdir=CHDIR)
#test.run_built_executable('aliasing_default', chdir=CHDIR, stdout="1\n")
# For now, just check the generated ninja file:
if test.format == 'ninja':
contents = open(test.built_file_path('obj/aliasing_default.ninja',
chdir=CHDIR)).read()
if 'strict-aliasing' in contents:
test.fail_test()
test.pass_test()
|
jeffbean/kubernetes
|
refs/heads/master
|
examples/celery-rabbitmq/celery-app-add/run_tasks.py
|
471
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import syslog
import time

from celery_conf import add

# Demo task producer: forever enqueue add(x, y) with random operands,
# wait five seconds, then fetch the result only if the worker has already
# finished it -- results that are not ready yet are simply left behind.
while True:
    x = random.randint(1, 10)
    y = random.randint(1, 10)
    res = add.delay(x, y)
    time.sleep(5)
    if res.ready():
        res.get()
|
kuipertan/vitess
|
refs/heads/master
|
test/environment.py
|
10
|
#!/usr/bin/env python
import json
import logging
import os
import socket
import subprocess
import sys
# Import the topo implementations that you want registered as options for the
# --topo-server-flavor flag.
import topo_flavor.zookeeper
import topo_flavor.etcd
from topo_flavor.server import topo_server
# import the protocol flavors we want to use
import gorpc_protocols_flavor
import grpc_protocols_flavor
# sanity check the environment
if os.environ['USER'] == 'root':
sys.stderr.write(
'ERROR: Vitess and its dependencies (mysqld and memcached) '
'should not be run as root.\n')
sys.exit(1)
if 'VTTOP' not in os.environ:
sys.stderr.write(
'ERROR: Vitess environment not set up. '
'Please run "source dev.env" first.\n')
sys.exit(1)
# vttop is the toplevel of the vitess source tree
vttop = os.environ['VTTOP']
# vtroot is where everything gets installed
vtroot = os.environ['VTROOT']
# vtdataroot is where to put all the data files
vtdataroot = os.environ.get('VTDATAROOT', '/vt')
# vt_mysql_root is where MySQL is installed
vt_mysql_root = os.environ.get(
'VT_MYSQL_ROOT', os.path.join(vtroot, 'dist', 'mysql'))
# tmproot is the temporary place to put all test files
tmproot = os.path.join(vtdataroot, 'tmp')
# vtlogroot is where to put all the log files
vtlogroot = tmproot
# where to start allocating ports from
vtportstart = int(os.environ.get('VTPORTSTART', '6700'))
# url in which binaries export their status.
status_url = '/debug/status'
# location of the curl binary, used for some tests.
curl_bin = '/usr/bin/curl'
# if set, we will not build the binaries
skip_build = False
# location of the run_local_database.py file
run_local_database = os.path.join(vtroot, 'py-vtdb', 'vttest',
'run_local_database.py')
def memcached_bin():
    """Return the memcached binary to use: the copy bundled under
    $VTROOT/bin when present, otherwise whatever `memcached` resolves
    to on the PATH."""
    bundled = os.path.join(vtroot, 'bin', 'memcached')
    return bundled if os.path.exists(bundled) else 'memcached'
# url to hit to force the logs to flush.
flush_logs_url = '/debug/flushlogs'
def setup():
global tmproot
try:
os.makedirs(tmproot)
except OSError:
# directory already exists
pass
# port management: reserve count consecutive ports, returns the first one
def reserve_ports(count):
    """Reserve `count` consecutive port numbers and return the first.

    Simply advances the module-level `vtportstart` cursor; nothing checks
    that the ports are actually free on the host.
    """
    global vtportstart
    result = vtportstart
    vtportstart += count
    return result
# simple run command, cannot use utils.run to avoid circular dependencies
def run(args, raise_on_error=True, **kargs):
  """Run a subprocess and return its (stdout, stderr).

  Simple run command; cannot use utils.run here to avoid circular
  dependencies.  If the command exits non-zero: raises when
  raise_on_error is True, otherwise only logs the failure.
  """
  try:
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # on Python 2, and keeps this helper importable under Python 3.
    logging.debug(
        'run: %s %s', str(args),
        ', '.join('%s=%s' % x for x in kargs.items()))
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            **kargs)
    stdout, stderr = proc.communicate()
  except Exception as e:
    raise Exception('Command failed', e, args)
  if proc.returncode:
    if raise_on_error:
      raise Exception('Command failed: ' + ' '.join(args) + ':\n' + stdout +
                      stderr)
    else:
      logging.error('Command failed: %s:\n%s%s', ' '.join(args), stdout, stderr)
  return stdout, stderr
# compile command line programs, only once
compiled_progs = []

def prog_compile(name):
  """Compile the Go command `name`, at most once per test run."""
  if skip_build:
    return
  if name in compiled_progs:
    return
  compiled_progs.append(name)
  logging.debug('Compiling %s', name)
  run(['godep', 'go', 'install'], cwd=os.path.join(vttop, 'go', 'cmd', name))
# binary management: returns the full path for a binary this should
# typically not be used outside this file, unless you want to bypass
# global flag injection (see binary_args)
def binary_path(name):
  """Return the full path for binary `name`, compiling it first if needed.

  This should typically not be used outside this file, unless you want to
  bypass global flag injection (see binary_args).
  """
  prog_compile(name)
  return os.path.join(vtroot, 'bin', name)
# returns flags specific to a given binary
# use this to globally inject flags any time a given command runs
# e.g. - if name == 'vtctl': return ['-extra_arg', 'value']
def binary_flags(name):
  """Return flags to inject globally whenever binary `name` runs.

  e.g. - if name == 'vtctl': return ['-extra_arg', 'value']
  """
  return []
# returns binary_path + binary_flags as a list
# this should be used instead of binary_path whenever possible
def binary_args(name):
  """Return binary_path + binary_flags as a list; prefer this to binary_path."""
  return [binary_path(name)] + binary_flags(name)
# returns binary_path + binary_flags as a string
# this should be used instead of binary_path whenever possible
def binary_argstr(name):
  """Return binary_path + binary_flags as a single space-joined string."""
  return ' '.join(binary_args(name))
# binary management for the MySQL distribution.
def mysql_binary_path(name):
  """Full path of binary `name` inside the bundled MySQL distribution."""
  return os.path.join(vt_mysql_root, 'bin', name)
# add environment-specific command-line options
def add_options(parser):
  """Add environment-specific command-line options (none in this environment)."""
  pass
|
whitehorse-io/encarnia
|
refs/heads/master
|
pyenv/lib/python2.7/site-packages/django/contrib/gis/gdal/raster/band.py
|
308
|
from ctypes import byref, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils.encoding import force_text
from django.utils.six.moves import range
from .const import GDAL_INTEGER_TYPES, GDAL_PIXEL_TYPES, GDAL_TO_CTYPES
class GDALBand(GDALBase):
    """
    Wraps a GDAL raster band, needs to be obtained from a GDALRaster object.
    """
    def __init__(self, source, index):
        # Keep a reference to the owning GDALRaster so the underlying GDAL
        # dataset (and this band's C-level pointer) stays alive with us.
        self.source = source
        self._ptr = capi.get_ds_raster_band(source._ptr, index)

    @property
    def description(self):
        """
        Returns the description string of the band.
        """
        return force_text(capi.get_band_description(self._ptr))

    @property
    def width(self):
        """
        Width (X axis) in pixels of the band.
        """
        return capi.get_band_xsize(self._ptr)

    @property
    def height(self):
        """
        Height (Y axis) in pixels of the band.
        """
        return capi.get_band_ysize(self._ptr)

    @property
    def pixel_count(self):
        """
        Returns the total number of pixels in this band.
        """
        return self.width * self.height

    @property
    def min(self):
        """
        Returns the minimum pixel value for this band.
        """
        # The c_int out-parameter is a flag GDAL fills in alongside the value;
        # it is intentionally ignored here.
        return capi.get_band_minimum(self._ptr, byref(c_int()))

    @property
    def max(self):
        """
        Returns the maximum pixel value for this band.
        """
        return capi.get_band_maximum(self._ptr, byref(c_int()))

    @property
    def nodata_value(self):
        """
        Returns the nodata value for this band, or None if it isn't set.
        """
        # Get value and nodata exists flag
        nodata_exists = c_int()
        value = capi.get_band_nodata_value(self._ptr, nodata_exists)
        if not nodata_exists:
            value = None
        # If the pixeltype is an integer, convert to int
        elif self.datatype() in GDAL_INTEGER_TYPES:
            value = int(value)
        return value

    @nodata_value.setter
    def nodata_value(self, value):
        """
        Sets the nodata value for this band.
        """
        if not isinstance(value, (int, float)):
            raise ValueError('Nodata value must be numeric.')
        capi.set_band_nodata_value(self._ptr, value)
        # Persist the change to the underlying dataset immediately.
        self.source._flush()

    def datatype(self, as_string=False):
        """
        Returns the GDAL Pixel Datatype for this band.
        """
        dtype = capi.get_band_datatype(self._ptr)
        if as_string:
            dtype = GDAL_PIXEL_TYPES[dtype]
        return dtype

    def data(self, data=None, offset=None, size=None, as_memoryview=False):
        """
        Reads or writes pixel values for this band. Blocks of data can
        be accessed by specifying the width, height and offset of the
        desired block. The same specification can be used to update
        parts of a raster by providing an array of values.

        Allowed input data types are bytes, memoryview, list, tuple, and array.
        """
        if not offset:
            offset = (0, 0)
        if not size:
            # Default: everything from the offset to the band's far edge.
            size = (self.width - offset[0], self.height - offset[1])
        if any(x <= 0 for x in size):
            raise ValueError('Offset too big for this raster.')
        if size[0] > self.width or size[1] > self.height:
            raise ValueError('Size is larger than raster.')
        # Create ctypes type array generator
        ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (size[0] * size[1])
        if data is None:
            # Set read mode
            access_flag = 0
            # Prepare empty ctypes array
            data_array = ctypes_array()
        else:
            # Set write mode
            access_flag = 1
            # Instantiate ctypes array holding the input data
            if isinstance(data, (bytes, six.memoryview)) or (numpy and isinstance(data, numpy.ndarray)):
                data_array = ctypes_array.from_buffer_copy(data)
            else:
                data_array = ctypes_array(*data)
        # Access band
        capi.band_io(self._ptr, access_flag, offset[0], offset[1],
                     size[0], size[1], byref(data_array), size[0],
                     size[1], self.datatype(), 0, 0)
        # Return data as numpy array if possible, otherwise as list
        if data is None:
            if as_memoryview:
                return memoryview(data_array)
            elif numpy:
                return numpy.frombuffer(
                    data_array, dtype=numpy.dtype(data_array)).reshape(size)
            else:
                return list(data_array)
        else:
            # Write path: flush the changes through to the source raster.
            self.source._flush()
class BandList(list):
    """Sequence view over the bands of a raster; bands are built lazily."""

    def __init__(self, source):
        self.source = source
        super(BandList, self).__init__()

    def __iter__(self):
        # GDAL band indices are 1-based.
        for position in range(1, len(self) + 1):
            yield GDALBand(self.source, position)

    def __len__(self):
        return capi.get_ds_raster_count(self.source._ptr)

    def __getitem__(self, index):
        # Translate the 0-based Python index into GDAL's 1-based one.
        try:
            return GDALBand(self.source, index + 1)
        except GDALException:
            raise GDALException('Unable to get band index %d' % index)
|
alxgu/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_file_copy.py
|
16
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_file_copy import ApiParameters
from library.modules.bigip_file_copy import IFileManager
from library.modules.bigip_file_copy import ModuleParameters
from library.modules.bigip_file_copy import ModuleManager
from library.modules.bigip_file_copy import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_file_copy import ApiParameters
from ansible.modules.network.f5.bigip_file_copy import IFileManager
from ansible.modules.network.f5.bigip_file_copy import ModuleParameters
from ansible.modules.network.f5.bigip_file_copy import ModuleManager
from ansible.modules.network.f5.bigip_file_copy import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return fixture file `name`, JSON-decoded when possible; cached per path."""
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        contents = f.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON -- keep the raw text.
        pass
    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Checks that the parameter-adapter classes expose values correctly."""

    def test_module_parameters(self):
        # User-supplied args should pass through ModuleParameters unchanged.
        args = dict(
            name='foo',
            source='file.txt',
            force=True
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.source == 'file.txt'
        assert p.force is True

    def test_api_parameters(self):
        # ApiParameters derives its checksum from a recorded device response.
        args = load_fixture('load_sys_file_external-monitor_1.json')
        p = ApiParameters(params=args)
        assert p.checksum == '0c78e6641632e47d11802b29cfd119d2233cb80a'
class TestManager(unittest.TestCase):
    """Behavioral test of ModuleManager.create with all device I/O stubbed."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Configure the arguments that would be sent to the Ansible module
        set_module_args(dict(
            name='foo',
            source='file.txt',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )
        # Stub every IFileManager call that would otherwise hit a BIG-IP device.
        tm = IFileManager(module=module)
        tm.exists = Mock(return_value=False)
        tm.create_on_device = Mock(return_value=True)
        tm.upload_to_device = Mock(return_value=True)
        tm.remove_uploaded_file_from_device = Mock(return_value=True)
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.get_manager = Mock(return_value=tm)
        results = mm.exec_module()
        assert results['changed'] is True
|
willusher/ansible-modules-core
|
refs/heads/devel
|
files/xattr.py
|
39
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: xattr
version_added: "1.3"
short_description: set/retrieve extended attributes
description:
- Manages filesystem user defined extended attributes, requires that they are enabled
on the target filesystem and that the setfattr/getfattr utilities are present.
options:
name:
required: true
default: None
aliases: ['path']
description:
- The full path of the file/object to get the facts of
key:
required: false
default: None
description:
- The name of a specific Extended attribute key to set/retrieve
value:
required: false
default: None
description:
- The value to set the named name/key to, it automatically sets the C(state) to 'set'
state:
required: false
default: read
choices: [ 'read', 'present', 'all', 'keys', 'absent' ]
description:
- defines which state you want to do.
C(read) retrieves the current value for a C(key) (default)
C(present) sets C(name) to C(value), default if value is set
C(all) dumps all data
C(keys) retrieves all keys
C(absent) deletes the key
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- if yes, dereferences symlinks and sets/gets attributes on symlink target,
otherwise acts on symlink itself.
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Obtain the extended attributes of /etc/foo.conf
- xattr: name=/etc/foo.conf
# Sets the key 'foo' to value 'bar'
- xattr: path=/etc/foo.conf key=user.foo value=bar
# Removes the key 'foo'
- xattr: name=/etc/foo.conf key=user.foo state=absent
'''
import operator
def get_xattr_keys(module, path, follow):
    """List every extended-attribute key present on `path`."""
    # --absolute-names prevents a getfattr warning; unclear why it isn't default.
    cmd = [module.get_bin_path('getfattr', True), '--absolute-names']
    if not follow:
        cmd.append('-h')
    cmd.append(path)
    return _run_xattr(module, cmd)
def get_xattr(module, path, key, follow):
    """Read one extended attribute (`key`) or dump all of them (key=None)."""
    # --absolute-names prevents a getfattr warning; unclear why it isn't default.
    cmd = [module.get_bin_path('getfattr', True), '--absolute-names']
    if not follow:
        cmd.append('-h')
    cmd.append('-d' if key is None else '-n %s' % key)
    cmd.append(path)
    return _run_xattr(module, cmd, False)
def set_xattr(module, path, key, value, follow):
    """Assign `value` to extended attribute `key` on `path`."""
    cmd = [module.get_bin_path('setfattr', True)]
    if not follow:
        cmd.append('-h')
    cmd.extend(['-n %s' % key, '-v %s' % value, path])
    return _run_xattr(module, cmd)
def rm_xattr(module, path, key, follow):
    """Remove extended attribute `key` from `path`."""
    cmd = [module.get_bin_path('setfattr', True)]
    if not follow:
        cmd.append('-h')
    cmd.extend(['-x %s' % key, path])
    return _run_xattr(module, cmd, False)
def _run_xattr(module, cmd, check_rc=True):
    """Run a getfattr/setfattr command and parse its output.

    Returns a dict mapping attribute names to values ('' for bare keys);
    comment lines (starting with '#') and blank lines are skipped.
    Fails the module if the command itself cannot be executed.
    """
    try:
        (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
    except Exception:
        e = get_exception()
        module.fail_json(msg="%s!" % e.strerror)
    #result = {'raw': out}
    result = {}
    for line in out.splitlines():
        if re.match("^#", line) or line == "":
            pass
        elif re.search('=', line):
            # Split only on the FIRST '=': attribute values may themselves
            # contain '=' (the original unbounded split raised ValueError).
            (key, val) = line.split("=", 1)
            result[key] = val.strip('"')
        else:
            result[line] = ''
    return result
def main():
    """Module entry point: dispatch on `state` to read/set/remove xattrs."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, aliases=['path'], type='path'),
            key = dict(required=False, default=None, type='str'),
            value = dict(required=False, default=None, type='str'),
            state = dict(required=False, default='read', choices=[ 'read', 'present', 'all', 'keys', 'absent' ], type='str'),
            follow = dict(required=False, type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    path = module.params.get('name')
    key = module.params.get('key')
    value = module.params.get('value')
    state = module.params.get('state')
    follow = module.params.get('follow')

    if not os.path.exists(path):
        module.fail_json(msg="path not found or not accessible!")

    changed=False
    msg = ""
    res = {}

    if key is None and state in ['present','absent']:
        module.fail_json(msg="%s needs a key parameter" % state)

    # All xattr must begin in user namespace
    if key is not None and not re.match('^user\.',key):
        key = 'user.%s' % key

    # A provided value implies state=present even when state kept its default.
    if (state == 'present' or value is not None):
        current=get_xattr(module,path,key,follow)
        # Only write (and report a change) when the stored value differs.
        if current is None or not key in current or value != current[key]:
            if not module.check_mode:
                res = set_xattr(module,path,key,value,follow)
            changed=True
        res=current
        msg="%s set to %s" % (key, value)
    elif state == 'absent':
        current=get_xattr(module,path,key,follow)
        if current is not None and key in current:
            if not module.check_mode:
                res = rm_xattr(module,path,key,follow)
            changed=True
        res=current
        msg="%s removed" % (key)
    elif state == 'keys':
        res=get_xattr_keys(module,path,follow)
        msg="returning all keys"
    elif state == 'all':
        res=get_xattr(module,path,None,follow)
        msg="dumping all"
    else:
        # Default 'read' state: return the single requested key.
        res=get_xattr(module,path,key,follow)
        msg="returning %s" % key

    module.exit_json(changed=changed, msg=msg, xattr=res)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
k3nnyfr/s2a_fr-nsis
|
refs/heads/master
|
s2a/Python/Lib/test/test_format.py
|
9
|
import sys
from test.test_support import verbose, have_unicode, TestFailed
import test.test_support as test_support
import unittest
maxsize = test_support.MAX_Py_ssize_t
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
def testformat(formatstr, args, output=None, limit=None, overflowok=False):
    """Apply `formatstr % args` and check the result.

    `output` is the expected string; with `limit` set only the first `limit`
    characters (plus the total length) must match -- used for float results
    where trailing digits are representation-dependent.  OverflowError is
    tolerated when `overflowok` is true.
    """
    if verbose:
        if output:
            print "%s %% %s =? %s ..." %\
                (repr(formatstr), repr(args), repr(output)),
        else:
            print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
    try:
        result = formatstr % args
    except OverflowError:
        if not overflowok:
            raise
        if verbose:
            print 'overflow (this is fine)'
    else:
        if output and limit is None and result != output:
            if verbose:
                print 'no'
            raise AssertionError("%r %% %r == %r != %r" %
                                 (formatstr, args, result, output))
        # when 'limit' is specified, it determines how many characters
        # must match exactly; lengths must always match.
        # ex: limit=5, '12345678' matches '12345___'
        # (mainly for floating point format tests for which an exact match
        # can't be guaranteed due to rounding and representation errors)
        elif output and limit is not None and (
                len(result)!=len(output) or result[:limit]!=output[:limit]):
            if verbose:
                print 'no'
            print "%s %% %s == %s != %s" % \
                (repr(formatstr), repr(args), repr(result), repr(output))
        else:
            if verbose:
                print 'yes'
def testboth(formatstr, *args, **kwargs):
    # Run the same check on both the str form and, when this build has
    # unicode support, the unicode form of the format string.
    testformat(formatstr, *args, **kwargs)
    if have_unicode:
        testformat(unicode(formatstr), *args, **kwargs)
class FormatTest(unittest.TestCase):
    """Exhaustive %-formatting checks for int, long and float values."""

    def test_format(self):
        testboth("%.1d", (1,), "1")
        testboth("%.*d", (sys.maxint,1), overflowok=True)  # expect overflow
        testboth("%.100d", (1,), '00000000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '00000001', overflowok=True)
        testboth("%#.117x", (1,), '0x00000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '0000000000000000000000000001',
                 overflowok=True)
        testboth("%#.118x", (1,), '0x00000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '00000000000000000000000000001',
                 overflowok=True)

        testboth("%f", (1.0,), "1.000000")

        # these are trying to test the limits of the internal magic-number-length
        # formatting buffer, if that number changes then these tests are less
        # effective
        testboth("%#.*g", (109, -1.e+49/3.))
        testboth("%#.*g", (110, -1.e+49/3.))
        testboth("%#.*g", (110, -1.e+100/3.))

        # test some ridiculously large precision, expect overflow
        testboth('%12.*f', (123456, 1.0))

        # check for internal overflow validation on length of precision
        # these tests should no longer cause overflow in Python
        # 2.7/3.1 and later.
        testboth("%#.*g", (110, -1.e+100/3.))
        testboth("%#.*G", (110, -1.e+100/3.))
        testboth("%#.*f", (110, -1.e+100/3.))
        testboth("%#.*F", (110, -1.e+100/3.))

        # Formatting of long integers. Overflow is not ok
        testboth("%x", 10L, "a")
        testboth("%x", 100000000000L, "174876e800")
        testboth("%o", 10L, "12")
        testboth("%o", 100000000000L, "1351035564000")
        testboth("%d", 10L, "10")
        testboth("%d", 100000000000L, "100000000000")

        big = 123456789012345678901234567890L
        testboth("%d", big, "123456789012345678901234567890")
        testboth("%d", -big, "-123456789012345678901234567890")
        testboth("%5d", -big, "-123456789012345678901234567890")
        testboth("%31d", -big, "-123456789012345678901234567890")
        testboth("%32d", -big, " -123456789012345678901234567890")
        testboth("%-32d", -big, "-123456789012345678901234567890 ")
        testboth("%032d", -big, "-0123456789012345678901234567890")
        testboth("%-032d", -big, "-123456789012345678901234567890 ")
        testboth("%034d", -big, "-000123456789012345678901234567890")
        testboth("%034d", big, "0000123456789012345678901234567890")
        testboth("%0+34d", big, "+000123456789012345678901234567890")
        testboth("%+34d", big, "   +123456789012345678901234567890")
        testboth("%34d", big, "    123456789012345678901234567890")
        testboth("%.2d", big, "123456789012345678901234567890")
        testboth("%.30d", big, "123456789012345678901234567890")
        testboth("%.31d", big, "0123456789012345678901234567890")
        testboth("%32.31d", big, " 0123456789012345678901234567890")
        testboth("%d", float(big), "123456________________________", 6)

        big = 0x1234567890abcdef12345L  # 21 hex digits
        testboth("%x", big, "1234567890abcdef12345")
        testboth("%x", -big, "-1234567890abcdef12345")
        testboth("%5x", -big, "-1234567890abcdef12345")
        testboth("%22x", -big, "-1234567890abcdef12345")
        testboth("%23x", -big, " -1234567890abcdef12345")
        testboth("%-23x", -big, "-1234567890abcdef12345 ")
        testboth("%023x", -big, "-01234567890abcdef12345")
        testboth("%-023x", -big, "-1234567890abcdef12345 ")
        testboth("%025x", -big, "-0001234567890abcdef12345")
        testboth("%025x", big, "00001234567890abcdef12345")
        testboth("%0+25x", big, "+0001234567890abcdef12345")
        testboth("%+25x", big, "   +1234567890abcdef12345")
        testboth("%25x", big, "    1234567890abcdef12345")
        testboth("%.2x", big, "1234567890abcdef12345")
        testboth("%.21x", big, "1234567890abcdef12345")
        testboth("%.22x", big, "01234567890abcdef12345")
        testboth("%23.22x", big, " 01234567890abcdef12345")
        testboth("%-23.22x", big, "01234567890abcdef12345 ")
        testboth("%X", big, "1234567890ABCDEF12345")
        testboth("%#X", big, "0X1234567890ABCDEF12345")
        testboth("%#x", big, "0x1234567890abcdef12345")
        testboth("%#x", -big, "-0x1234567890abcdef12345")
        testboth("%#.23x", -big, "-0x001234567890abcdef12345")
        testboth("%#+.23x", big, "+0x001234567890abcdef12345")
        testboth("%# .23x", big, " 0x001234567890abcdef12345")
        testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
        testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
        testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
        testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
        testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
        # next one gets two leading zeroes from precision, and another from the
        # 0 flag and the width
        testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
        # same, except no 0 flag
        testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
        testboth("%x", float(big), "123456_______________", 6)

        big = 012345670123456701234567012345670L  # 32 octal digits
        testboth("%o", big, "12345670123456701234567012345670")
        testboth("%o", -big, "-12345670123456701234567012345670")
        testboth("%5o", -big, "-12345670123456701234567012345670")
        testboth("%33o", -big, "-12345670123456701234567012345670")
        testboth("%34o", -big, " -12345670123456701234567012345670")
        testboth("%-34o", -big, "-12345670123456701234567012345670 ")
        testboth("%034o", -big, "-012345670123456701234567012345670")
        testboth("%-034o", -big, "-12345670123456701234567012345670 ")
        testboth("%036o", -big, "-00012345670123456701234567012345670")
        testboth("%036o", big, "000012345670123456701234567012345670")
        testboth("%0+36o", big, "+00012345670123456701234567012345670")
        testboth("%+36o", big, "   +12345670123456701234567012345670")
        testboth("%36o", big, "    12345670123456701234567012345670")
        testboth("%.2o", big, "12345670123456701234567012345670")
        testboth("%.32o", big, "12345670123456701234567012345670")
        testboth("%.33o", big, "012345670123456701234567012345670")
        testboth("%34.33o", big, " 012345670123456701234567012345670")
        testboth("%-34.33o", big, "012345670123456701234567012345670 ")
        testboth("%o", big, "12345670123456701234567012345670")
        testboth("%#o", big, "012345670123456701234567012345670")
        testboth("%#o", -big, "-012345670123456701234567012345670")
        testboth("%#.34o", -big, "-0012345670123456701234567012345670")
        testboth("%#+.34o", big, "+0012345670123456701234567012345670")
        testboth("%# .34o", big, " 0012345670123456701234567012345670")
        testboth("%#+.34o", big, "+0012345670123456701234567012345670")
        testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
        testboth("%#-+37.34o", big, "+0012345670123456701234567012345670  ")
        testboth("%#+37.34o", big, "  +0012345670123456701234567012345670")
        # next one gets one leading zero from precision
        testboth("%.33o", big, "012345670123456701234567012345670")
        # base marker shouldn't change that, since "0" is redundant
        testboth("%#.33o", big, "012345670123456701234567012345670")
        # but reduce precision, and base marker should add a zero
        testboth("%#.32o", big, "012345670123456701234567012345670")
        # one leading zero from precision, and another from "0" flag & width
        testboth("%034.33o", big, "0012345670123456701234567012345670")
        # base marker shouldn't change that
        testboth("%0#34.33o", big, "0012345670123456701234567012345670")
        testboth("%o", float(big), "123456__________________________", 6)

        # Some small ints, in both Python int and long flavors).
        testboth("%d", 42, "42")
        testboth("%d", -42, "-42")
        testboth("%d", 42L, "42")
        testboth("%d", -42L, "-42")
        testboth("%d", 42.0, "42")
        testboth("%#x", 1, "0x1")
        testboth("%#x", 1L, "0x1")
        testboth("%#X", 1, "0X1")
        testboth("%#X", 1L, "0X1")
        testboth("%#x", 1.0, "0x1")
        testboth("%#o", 1, "01")
        testboth("%#o", 1L, "01")
        testboth("%#o", 0, "0")
        testboth("%#o", 0L, "0")
        testboth("%o", 0, "0")
        testboth("%o", 0L, "0")
        testboth("%d", 0, "0")
        testboth("%d", 0L, "0")
        testboth("%#x", 0, "0x0")
        testboth("%#x", 0L, "0x0")
        testboth("%#X", 0, "0X0")
        testboth("%#X", 0L, "0X0")

        testboth("%x", 0x42, "42")
        testboth("%x", -0x42, "-42")
        testboth("%x", 0x42L, "42")
        testboth("%x", -0x42L, "-42")
        testboth("%x", float(0x42), "42")

        testboth("%o", 042, "42")
        testboth("%o", -042, "-42")
        testboth("%o", 042L, "42")
        testboth("%o", -042L, "-42")
        testboth("%o", float(042), "42")

        # alternate float formatting
        testformat('%g', 1.1, '1.1')
        testformat('%#g', 1.1, '1.10000')

        # Regression test for http://bugs.python.org/issue15516.
        class IntFails(object):
            def __int__(self):
                raise TestFailed
            def __long__(self):
                return 0

        fst = IntFails()
        testformat("%x", fst, '0')

        # Test exception for unknown format characters
        if verbose:
            print 'Testing exceptions'

        def test_exc(formatstr, args, exception, excmsg):
            try:
                testformat(formatstr, args)
            except exception, exc:
                if str(exc) == excmsg:
                    if verbose:
                        print "yes"
                else:
                    if verbose: print 'no'
                    print 'Unexpected ', exception, ':', repr(str(exc))
            except:
                if verbose: print 'no'
                print 'Unexpected exception'
                raise
            else:
                raise TestFailed, 'did not get expected exception: %s' % excmsg

        test_exc('abc %a', 1, ValueError,
                 "unsupported format character 'a' (0x61) at index 5")
        if have_unicode:
            test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
                     "unsupported format character '?' (0x3000) at index 5")

        test_exc('%d', '1', TypeError, "%d format: a number is required, not str")
        test_exc('%g', '1', TypeError, "float argument required, not str")
        test_exc('no format', '1', TypeError,
                 "not all arguments converted during string formatting")
        test_exc('no format', u'1', TypeError,
                 "not all arguments converted during string formatting")
        test_exc(u'no format', '1', TypeError,
                 "not all arguments converted during string formatting")
        test_exc(u'no format', u'1', TypeError,
                 "not all arguments converted during string formatting")

        class Foobar(long):
            def __oct__(self):
                # Returning a non-string should not blow up.
                return self + 1

        test_exc('%o', Foobar(), TypeError,
                 "expected string or Unicode object, long found")

        if maxsize == 2**31-1:
            # crashes 2.2.1 and earlier:
            try:
                "%*d"%(maxsize, -127)
            except MemoryError:
                pass
            else:
                raise TestFailed, '"%*d"%(maxsize, -127) should fail'
def test_main():
    # regrtest entry point for this module's test suite.
    test_support.run_unittest(FormatTest)
def test_precision(self):
    """Precision handling for float and complex (http://bugs.python.org/issue15516)."""
    # NOTE(review): this function sits at module level yet takes `self` and
    # uses unittest assertions -- it looks like it belongs on FormatTest;
    # left in place here to avoid changing its location.
    INT_MAX = 2147483647
    f = 1.2
    self.assertEqual(format(f, ".0f"), "1")
    self.assertEqual(format(f, ".3f"), "1.200")
    with self.assertRaises(ValueError) as cm:
        format(f, ".%sf" % (INT_MAX + 1))
    self.assertEqual(str(cm.exception), "precision too big")

    c = complex(f)
    # Exercise the *complex* path: the original re-tested `f` here (a
    # copy-paste slip), leaving complex.__format__ completely uncovered.
    self.assertEqual(format(c, ".0f"), "1+0j")
    self.assertEqual(format(c, ".3f"), "1.200+0.000j")
    with self.assertRaises(ValueError) as cm:
        format(c, ".%sf" % (INT_MAX + 1))
    self.assertEqual(str(cm.exception), "precision too big")
if __name__ == "__main__":
unittest.main()
|
Yrthgze/prueba-sourcetree2
|
refs/heads/master
|
illuz/0001/key_generator.py
|
40
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: key_generator.py
# Create Date: 2015-02-09 10:43:36
# Description: Use the uuid module to generate 200 activation keys.
# Usage: key_generator.py
"""
第 0001 题:
做为 Apple Store App 独立开发者,你要搞限时促销,为你的应用生成激活码(或者优惠券),使用 Python 如何生成 200 个激活码(或者优惠券)?
"""
from uuid import uuid4


def generate_key(num):
    """Return a list of `num` activation codes (random UUID4 strings)."""
    return [str(uuid4()) for _ in range(num)]


def main():
    """Generate and print 200 activation codes."""
    # Parenthesized print behaves identically on Python 2 and Python 3,
    # unlike the original bare `print` statement (Python-2-only syntax).
    print(generate_key(200))


if __name__ == '__main__':
    main()
|
mglukhikh/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractsuperclass/importNotBroken.before.py
|
80
|
from shared import SharedClass
class Source(SharedClass):
    # NOTE(review): fixture for an IDE extract-superclass refactoring test
    # ("importNotBroken"); intentionally minimal -- presumably the test checks
    # that the `shared` import survives the refactoring. Confirm before editing.
    pass
|
becm/meson
|
refs/heads/master
|
test cases/common/130 generated llvm ir/copyfile.py
|
100
|
#!/usr/bin/env python3
import sys
import shutil

# Guard the copy so importing this helper has no side effects; behavior when
# run as a script (the way Meson invokes it: copyfile.py SRC DEST) is unchanged.
if __name__ == '__main__':
    shutil.copyfile(sys.argv[1], sys.argv[2])
|
hiteshwadekar/ns-3-dev-ndnSIM
|
refs/heads/ndnSIM
|
src/topology-read/bindings/callbacks_list.py
|
664
|
# Appears auto-generated by the ns-3 Python-bindings scanner: each entry is the
# template-argument list of one ns3::Callback instantiation the bindings must
# wrap ('ns3::empty' pads the unused trailing slots).
callback_classes = [
    ['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
ubiar/odoo
|
refs/heads/8.0
|
addons/l10n_fr_hr_payroll/l10n_fr_hr_payroll.py
|
340
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class res_company(osv.osv):
    """French payroll additions to res.company (social-security ceiling,
    headcount, employer contributions, registering bodies)."""
    _inherit = 'res.company'

    _columns = {
        'plafond_secu': fields.float('Plafond de la Securite Sociale', digits_compute=dp.get_precision('Payroll')),
        'nombre_employes': fields.integer('Nombre d\'employes'),
        'cotisation_prevoyance': fields.float('Cotisation Patronale Prevoyance', digits_compute=dp.get_precision('Payroll')),
        'org_ss': fields.char('Organisme de securite sociale'),
        'conv_coll': fields.char('Convention collective'),
    }
class hr_contract(osv.osv):
    """French payroll additions to hr.contract (qualification/grade fields)."""
    _inherit = 'hr.contract'

    _columns = {
        'qualif': fields.char('Qualification'),
        'niveau': fields.char('Niveau'),
        'coef': fields.char('Coefficient'),
    }
class hr_payslip(osv.osv):
    """French payroll additions to hr.payslip (payment mode field)."""
    _inherit = 'hr.payslip'

    _columns = {
        'payment_mode': fields.char('Mode de paiement'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Gab0/gekkoJaponicus
|
refs/heads/master
|
evaluation/gekko/API.py
|
1
|
#!/bin/python
import os
import requests
import json
from subprocess import Popen, PIPE
def initializeGekko(gekko_dir=None):  # not used yet.
    """Spawn a local Gekko instance with its web UI enabled.

    gekko_dir: path to the gekko checkout. When None this falls back to the
    module-level ``gekkoDIR`` name the original code referenced; NOTE(review):
    that global is defined nowhere in this file, so callers should pass
    gekko_dir explicitly.

    Returns the subprocess.Popen handle (previously discarded) so the caller
    can monitor or terminate the child process.
    """
    if gekko_dir is None:
        gekko_dir = gekkoDIR  # preserves original lookup; NameError if unset
    cmd = ['node', os.path.join(gekko_dir, 'gekko'), '--ui']
    return Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
def checkInstance(instanceUrl):
    """Return True when a Gekko instance answers with a body at instanceUrl.

    Always returns a bool: the original fell through and returned None when
    the response body was empty; an explicit False (still falsy) is returned
    instead, and any request failure also yields False.
    """
    try:
        response = requests.get(instanceUrl)
    except Exception:
        return False
    return bool(response.text)
def httpPost(URL, data=None, Verbose=True):
    """POST `data` as JSON to URL and return the decoded response.

    Returns False on failure (optionally printing diagnostics when Verbose);
    exits the process outright when the local Gekko refuses the connection.
    The mutable default `data={}` was replaced by the None sentinel so the
    dict is no longer shared across calls.
    """
    if data is None:
        data = {}
    try:
        request = requests.post(URL, json=data)
        response = json.loads(request.text)
    except ConnectionRefusedError:
        print("Error: Gekko comm error! Check your local Gekko instance.")
        exit()
    except Exception as e:
        if Verbose:
            print("Error: config failure")
            print(e)
            print(URL)
            print(data)
        return False
    return response
def loadHostsFile(HostsFilePath):
    """Parse an ansible-style hosts file into Gekko URLs on port 3000.

    Section headers ('[...]'), variable assignments ('key=value') and blank
    lines are skipped. Returns [] when the file does not exist.
    """
    remoteGekkos = []
    if os.path.isfile(HostsFilePath):
        # `with` closes the handle; the original open(...).read() leaked it.
        with open(HostsFilePath) as hosts_file:
            entries = hosts_file.read().split('\n')
        for entry in entries:
            if entry and '=' not in entry and '[' not in entry:
                remoteGekkos.append("http://%s:3000" % entry)
    return remoteGekkos
|
yawnosnorous/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/persisted/journal/picklelog.py
|
64
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# -*- test-case-name: twisted.test.test_journal -*-
"""Logging that uses pickles.
TODO: add log that logs to a file.
"""
# twisted imports
from twisted.persisted import dirdbm
from twisted.internet import defer
from zope.interface import implements
# sibling imports
import base
class DirDBMLog:
    """Log pickles to DirDBM directory."""

    implements(base.ICommandLog)

    def __init__(self, logPath):
        """Open (or create) the shelf at logPath and resume index numbering."""
        self.db = dirdbm.Shelf(logPath)
        # Materialize the keys: under Python 3 a bare map() object is always
        # truthy, which would send the empty-log case into max() and raise.
        # On Python 2 list(map(...)) is byte-for-byte the same result.
        indexs = list(map(int, self.db.keys()))
        if indexs:
            self.currentIndex = max(indexs)
        else:
            self.currentIndex = 0

    def logCommand(self, command, time):
        """Log a command."""
        self.currentIndex += 1
        self.db[str(self.currentIndex)] = (time, command)
        return defer.succeed(1)

    def getCurrentIndex(self):
        """Return index of last command logged."""
        return self.currentIndex

    def getCommandsSince(self, index):
        """Return the [(time, command)] entries from `index` up to the newest."""
        result = []
        for i in range(index, self.currentIndex + 1):
            result.append(self.db[str(i)])
        return result
|
tashoecraft/node-workshop
|
refs/heads/master
|
challenge6/start/node_modules/browserify/node_modules/insert-module-globals/node_modules/lexical-scope/node_modules/astw/node_modules/esprima-six/tools/generate-unicode-regex.py
|
341
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# By Yusuke Suzuki <utatane.tea@gmail.com>
# Modified by Mathias Bynens <http://mathiasbynens.be/>
# http://code.google.com/p/esprima/issues/detail?id=110
import sys
import string
import re
class RegExpGenerator(object):
    """Build JavaScript-regex character classes from a Unicode Detector.

    Each generate_* method scans code points (BMP, U+0000..U+FFFF) with the
    supplied detector and folds the matches into a compact character-class
    string of the form '[\\uXXXX-\\uYYYY...]'.
    """
    def __init__(self, detector):
        self.detector = detector
    def generate_identifier_start(self):
        r = [ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_start(ch)]
        return self._generate_range(r)
    def generate_identifier_part(self):
        r = [ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_part(ch)]
        return self._generate_range(r)
    def generate_non_ascii_identifier_start(self):
        # CONSISTENCY FIX: this was the class's only use of xrange(); range()
        # matches the sibling methods and keeps the code Python 3 compatible.
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_start(ch)]
        return self._generate_range(r)
    def generate_non_ascii_identifier_part(self):
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_part(ch)]
        return self._generate_range(r)
    def generate_non_ascii_separator_space(self):
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_separator_space(ch)]
        return self._generate_range(r)
    def _generate_range(self, r):
        """Collapse a sorted list of code points into a character class.

        Contiguous runs become '\\uA-\\uB'; an adjacent pair flushed inside
        the loop becomes '\\uA\\uB' (the final run always uses the range
        form — a cosmetic inconsistency preserved from the original output).
        """
        if len(r) == 0:
            return '[]'
        buf = []
        start = r[0]
        end = r[0]
        predict = start + 1
        r = r[1:]
        for code in r:
            if predict == code:
                # Still inside a contiguous run; extend it.
                end = code
                predict = code + 1
                continue
            else:
                # Run ended: flush as single char, adjacent pair, or range.
                if start == end:
                    buf.append("\\u%04X" % start)
                elif end == start + 1:
                    buf.append("\\u%04X\\u%04X" % (start, end))
                else:
                    buf.append("\\u%04X-\\u%04X" % (start, end))
                start = code
                end = code
                predict = code + 1
        # Flush the final run.
        if start == end:
            buf.append("\\u%04X" % start)
        else:
            buf.append("\\u%04X-\\u%04X" % (start, end))
        return '[' + ''.join(buf) + ']'
class Detector(object):
    """Classify BMP code points using a Unicode general-category table.

    `data` is a list indexed by code point whose entries are two-letter
    Unicode general-category names (e.g. 'Lu', 'Nd'), as built by analyze().
    """
    def __init__(self, data):
        self.data = data
    def is_ascii(self, ch):
        return ch < 0x80
    def is_ascii_alpha(self, ch):
        # OR-ing with 0x20 lower-cases ASCII letters, so one comparison
        # covers both cases.
        v = ch | 0x20
        return v >= ord('a') and v <= ord('z')
    def is_decimal_digit(self, ch):
        return ch >= ord('0') and ch <= ord('9')
    def is_octal_digit(self, ch):
        return ch >= ord('0') and ch <= ord('7')
    def is_hex_digit(self, ch):
        v = ch | 0x20
        # BUG FIX: the original referenced an undefined name `c` here and
        # raised NameError on every call; the parameter is `ch`.
        return self.is_decimal_digit(ch) or (v >= ord('a') and v <= ord('f'))
    def is_digit(self, ch):
        return self.is_decimal_digit(ch) or self.data[ch] == 'Nd'
    def is_ascii_alphanumeric(self, ch):
        return self.is_decimal_digit(ch) or self.is_ascii_alpha(ch)
    def _is_non_ascii_identifier_start(self, ch):
        c = self.data[ch]
        return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl'
    def _is_non_ascii_identifier_part(self, ch):
        # ZWNJ (U+200C) and ZWJ (U+200D) are allowed in identifiers.
        c = self.data[ch]
        return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl' or c == 'Mn' or c == 'Mc' or c == 'Nd' or c == 'Pc' or ch == 0x200C or ch == 0x200D
    def is_separator_space(self, ch):
        return self.data[ch] == 'Zs'
    def is_white_space(self, ch):
        return ch == ord(' ') or ch == ord("\t") or ch == 0xB or ch == 0xC or ch == 0x00A0 or ch == 0xFEFF or self.is_separator_space(ch)
    def is_line_terminator(self, ch):
        return ch == 0x000D or ch == 0x000A or self.is_line_or_paragraph_terminator(ch)
    def is_line_or_paragraph_terminator(self, ch):
        return ch == 0x2028 or ch == 0x2029
    def is_identifier_start(self, ch):
        if self.is_ascii(ch):
            return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alpha(ch)
        return self._is_non_ascii_identifier_start(ch)
    def is_identifier_part(self, ch):
        if self.is_ascii(ch):
            return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alphanumeric(ch)
        return self._is_non_ascii_identifier_part(ch)
def analyze(source):
    """Parse a UnicodeData.txt file and return a ready RegExpGenerator.

    Builds a list mapping every BMP code point to its Unicode general
    category ('Un' for unassigned), expanding the <..., First>/<..., Last>
    marker pairs UnicodeData uses for large contiguous blocks.

    BUG FIXES:
      * `raise "Database Exception"` raised a string, which is not a legal
        exception; a real Exception is raised instead.
      * `string.split(s, sep)` (Python 2-only) replaced with `s.split(sep)`.
    """
    data = []
    dictionary = {}
    with open(source) as uni:
        flag = False
        first = 0
        for line in uni:
            # Fields: code;name;general_category;...
            d = line.strip().split(";")
            val = int(d[0], 16)
            if flag:
                if re.compile("<.+, Last>").match(d[1]):
                    # Close an open First/Last pair: every code point in
                    # [first, val] gets the same category.
                    flag = False
                    for t in range(first, val + 1):
                        dictionary[t] = str(d[2])
                else:
                    # A <..., First> marker must be followed by its Last.
                    raise Exception("Database Exception")
            else:
                if re.compile("<.+, First>").match(d[1]):
                    flag = True
                    first = val
                else:
                    dictionary[val] = str(d[2])
    for i in range(0xFFFF + 1):
        # 'Un' marks unassigned code points.
        if dictionary.get(i) is None:
            data.append("Un")
        else:
            data.append(dictionary[i])
    return RegExpGenerator(Detector(data))
def main(source):
    """Print the three non-ASCII character classes for the given
    UnicodeData.txt path (Python 2 script)."""
    generator = analyze(source)
    print generator.generate_non_ascii_identifier_start()
    print generator.generate_non_ascii_identifier_part()
    print generator.generate_non_ascii_separator_space()
if __name__ == '__main__':
    # argv[1]: path to a UnicodeData.txt database file.
    main(sys.argv[1])
|
InakiZabala/odoomrp-wip
|
refs/heads/8.0
|
product_pricelist_rules/models/__init__.py
|
25
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import product_pricelist
from . import product
|
Infinity-MaNGOS-Project-FallenAngelX/infinity_mangos
|
refs/heads/master
|
contrib/mmap/mmap_extract.py
|
1
|
#!/usr/bin/python
import os, sys, threading, time, subprocess
from multiprocessing import cpu_count
from collections import deque
# Queue of map ids to extract; deque + popleft() gives FIFO dispatch.
# The first entries (0, 1, 530, 571) are the continents — presumably
# listed first because they take longest to generate; confirm.
mapList = deque([0,1,530,571,13,25,30,33,34,35,36,37,42,43,44,47,48,70,90,109,129,169,189,209,229,230,249,269,289,309,329,349,369,
389,409,429,449,450,451,469,489,509,529,531,532,533,534,540,542,543,544,545,546,547,548,550,552,553,554,555,556,557,558,559,
560,562,564,565,566,568,572,573,574,575,576,578,580,582,584,585,586,587,588,589,590,591,592,593,594,595,596,597,598,599,600,
601,602,603,604,605,606,607,608,609,610,612,613,614,615,616,617,618,619,620,621,622,623,624,628,631,632,641,642,647,649,650,
658,668,672,673,712,713,718,723,724])
class workerThread(threading.Thread):
    """Runs one MoveMapGen child process (one map id) to completion."""
    def __init__(self, mapID):
        threading.Thread.__init__(self)
        self.mapID = mapID
    def run(self):
        name = "Worker for map %u" % (self.mapID)
        print "++ %s" % (name)
        if sys.platform == 'win32':
            # Start the generator in a new, hidden console window.
            stInfo = subprocess.STARTUPINFO()
            stInfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # this will not work on python 2.7
            stInfo.wShowWindow = 7
            cFlags = subprocess.CREATE_NEW_CONSOLE
            binName = "MoveMapGen.exe"
        else:
            stInfo = None
            cFlags = 0
            binName = "./MoveMapGen"
        # Blocks this thread until the generator finishes for self.mapID.
        retcode = subprocess.call([binName, "%u" % (self.mapID),"--silent"], startupinfo=stInfo, creationflags=cFlags)
        print "-- %s" % (name)
if __name__ == "__main__":
    # Keep one MoveMapGen worker per core busy until the map queue drains.
    cpu = cpu_count() - 0 # You can reduce the load by putting 1 instead of 0 if you need to free 1 core/cpu
    if cpu < 1:
        cpu = 1
    print "I will always maintain %u MoveMapGen tasks running in //\n" % (cpu)
    while (len(mapList) > 0):
        if (threading.active_count() <= cpu):
            workerThread(mapList.popleft()).start()
        # Poll instead of joining so a new worker starts as soon as a slot frees.
        time.sleep(0.1)
|
nashve/mythbox
|
refs/heads/master
|
resources/lib/twisted/twisted/spread/__init__.py
|
61
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted Spread: Spreadable (Distributed) Computing.
Future Plans: PB, Jelly and Banana need to be optimized.
@author: Glyph Lefkowitz
"""
|
teltek/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_user_state_client.py
|
11
|
"""
Black-box tests of the DjangoUserStateClient against the semantics
defined in edx_user_state_client.
"""
from collections import defaultdict
from unittest import skip
from django.test import TestCase
from edx_user_state_client.tests import UserStateClientTestBase
from courseware.tests.factories import UserFactory
from courseware.user_state_client import DjangoXBlockUserStateClient
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class TestDjangoUserStateClient(UserStateClientTestBase, ModuleStoreTestCase):
    """Black-box tests of the Django-backed XBlock user-state client.

    Every test case is inherited from :class:`~UserStateClientTestBase`;
    this class only wires up the Django client and lazy user fixtures.
    """
    shard = 4
    __test__ = True
    # Tell Django to clean out all databases, not just default
    multi_db = True
    def setUp(self):
        super(TestDjangoUserStateClient, self).setUp()
        self.client = DjangoXBlockUserStateClient()
        self.users = defaultdict(UserFactory.create)
    def _user(self, user_idx):
        """Return the username of the (lazily created) user at `user_idx`."""
        user = self.users[user_idx]
        return user.username
    def _block_type(self, block):
        """Block state history is only recorded for 'problem' blocks."""
        return 'problem'
|
yanchen036/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/lib/debug_service_pb2_grpc.py
|
78
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
#
# Do not use pylint on generated code.
# pylint: disable=missing-docstring,g-short-docstring-punctuation,g-no-space-after-docstring-summary,invalid-name,line-too-long,unused-argument,g-doc-args
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import grpc
from tensorflow.core.debug import debug_service_pb2 as tensorflow_dot_core_dot_debug_dot_debug__service__pb2
from tensorflow.core.protobuf import debug_pb2 as tensorflow_dot_core_dot_protobuf_dot_debug__pb2
from tensorflow.core.util import event_pb2 as tensorflow_dot_core_dot_util_dot_event__pb2
class EventListenerStub(object):
  """EventListener: Receives Event protos, e.g., from debugged TensorFlow
  runtime(s).
  """
  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    # Bidirectional streaming RPC: a stream of Events in, EventReplies out.
    self.SendEvents = channel.stream_stream(
        '/tensorflow.EventListener/SendEvents',
        request_serializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.SerializeToString,
        response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
        )
    # Unary RPC carrying the tracebacks of ops in a Python graph.
    self.SendTracebacks = channel.unary_unary(
        '/tensorflow.EventListener/SendTracebacks',
        request_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.SerializeToString,
        response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
        )
    # Unary RPC carrying a collection of debugged source files.
    self.SendSourceFiles = channel.unary_unary(
        '/tensorflow.EventListener/SendSourceFiles',
        request_serializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.SerializeToString,
        response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
        )
class EventListenerServicer(object):
  """EventListener: Receives Event protos, e.g., from debugged TensorFlow
  runtime(s).
  """
  # Base servicer: every method reports UNIMPLEMENTED until overridden.
  def SendEvents(self, request_iterator, context):
    """Client(s) can use this RPC method to send the EventListener Event protos.
    The Event protos can hold information such as:
      1) intermediate tensors from a debugged graph being executed, which can
         be sent from DebugIdentity ops configured with grpc URLs.
      2) GraphDefs of partition graphs, which can be sent from special debug
         ops that get executed immediately after the beginning of the graph
         execution.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def SendTracebacks(self, request, context):
    """Send the tracebacks of ops in a Python graph definition.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def SendSourceFiles(self, request, context):
    """Send a collection of source code files being debugged.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_EventListenerServicer_to_server(servicer, server):
  """Registers `servicer`'s three RPC handlers on `server` under the
  'tensorflow.EventListener' service name."""
  rpc_method_handlers = {
      'SendEvents': grpc.stream_stream_rpc_method_handler(
          servicer.SendEvents,
          request_deserializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.FromString,
          response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString,
      ),
      'SendTracebacks': grpc.unary_unary_rpc_method_handler(
          servicer.SendTracebacks,
          request_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.FromString,
          response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString,
      ),
      'SendSourceFiles': grpc.unary_unary_rpc_method_handler(
          servicer.SendSourceFiles,
          request_deserializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.FromString,
          response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'tensorflow.EventListener', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
|
Nu3001/external_chromium_org
|
refs/heads/master
|
chrome/common/extensions/docs/server2/chained_compiled_file_system_test.py
|
24
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from chained_compiled_file_system import ChainedCompiledFileSystem
from compiled_file_system import CompiledFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
# Fixture data: contents of the base (old) file system...
_TEST_DATA_BASE = {
  'a.txt': 'base a.txt',
  'dir': {
    'b.txt': 'base b.txt'
  },
}
# ...and the patched (new) file system layered on top of it.
_TEST_DATA_NEW = {
  'a.txt': 'new a.txt',
  'new.txt': 'a new file',
  'dir': {
    'b.txt': 'new b.txt',
    'new.txt': 'new file in dir',
  },
}
# Pass-through compile function; CompiledFileSystem calls it as f(path, data).
identity = lambda _, x: x
class ChainedCompiledFileSystemTest(unittest.TestCase):
  """Checks that a ChainedCompiledFileSystem prefers the patched (new)
  file system and falls back to the base one."""
  def setUp(self):
    object_store_creator = ObjectStoreCreator(start_empty=False)
    base_file_system = TestFileSystem(_TEST_DATA_BASE)
    self._base_factory = CompiledFileSystem.Factory(base_file_system,
                                                    object_store_creator)
    self._file_system = TestFileSystem(_TEST_DATA_NEW)
    self._patched_factory = CompiledFileSystem.Factory(self._file_system,
                                                       object_store_creator)
    # Order matters: the patched system is consulted before the base one.
    self._chained_factory = ChainedCompiledFileSystem.Factory(
        [(self._patched_factory, self._file_system),
         (self._base_factory, base_file_system)])
    self._base_compiled_fs = self._base_factory.Create(identity, TestFileSystem)
    self._chained_compiled_fs = self._chained_factory.Create(
        identity, TestFileSystem)
  def testGetFromFile(self):
    # Unchanged files resolve identically through both systems.
    self.assertEqual(self._chained_compiled_fs.GetFromFile('a.txt'),
                     self._base_compiled_fs.GetFromFile('a.txt'))
    self.assertEqual(self._chained_compiled_fs.GetFromFile('new.txt'),
                     'a new file')
    self.assertEqual(self._chained_compiled_fs.GetFromFile('dir/new.txt'),
                     'new file in dir')
    # Bumping the stat must invalidate the chained result for that file.
    self._file_system.IncrementStat('a.txt')
    self.assertNotEqual(self._chained_compiled_fs.GetFromFile('a.txt'),
                        self._base_compiled_fs.GetFromFile('a.txt'))
    self.assertEqual(self._chained_compiled_fs.GetFromFile('a.txt'),
                     self._file_system.ReadSingle('a.txt'))
  def testGetFromFileListing(self):
    # NOTE(review): this first assertion uses GetFromFile on a directory
    # path; GetFromFileListing was probably intended — confirm upstream.
    self.assertEqual(self._chained_compiled_fs.GetFromFile('dir/'),
                     self._base_compiled_fs.GetFromFile('dir/'))
    self._file_system.IncrementStat('dir/')
    self.assertNotEqual(self._chained_compiled_fs.GetFromFileListing('dir/'),
                        self._base_compiled_fs.GetFromFileListing('dir/'))
    self.assertEqual(self._chained_compiled_fs.GetFromFileListing('dir/'),
                     self._file_system.ReadSingle('dir/'))
if __name__ == '__main__':
  unittest.main()
|
foreni-packages/golismero
|
refs/heads/master
|
thirdparty_libs/django/core/servers/fastcgi.py
|
241
|
"""
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is a adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
import os
import sys
from django.utils import importlib
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_OPTIONS = {
'protocol': 'fcgi',
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
'debug': None,
'outlog': None,
'errlog': None,
'umask': None,
}
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default %(protocol)s)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default %(method)s).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads (default %(maxspare)s).
minspare=NUMBER min number of spare processes / threads (default %(minspare)s).
maxchildren=NUMBER hard limit number of processes / threads (default %(maxchildren)s).
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing (default %(workdir)s).
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
""" % FASTCGI_OPTIONS
def fastcgi_help(message=None):
    """Print the flup/FastCGI usage text plus an optional extra message.

    Always returns False so callers can write `return fastcgi_help(...)`
    to signal a configuration failure.
    """
    output = [FASTCGI_HELP]
    if message:
        output.append(message)
    for text in output:
        print(text)
    return False
def runfastcgi(argset=None, **kwargs):
    """Parse flup/FastCGI options and run the WSGI application under flup.

    Option precedence: FASTCGI_OPTIONS defaults, then **kwargs, then the
    "key=value" strings in `argset` (bare words become True flags).
    Returns False on any configuration error; otherwise blocks in the
    flup WSGIServer main loop.

    BUG FIX: `argset` defaulted to a mutable list ([]), shared across
    calls; it now uses the None-sentinel idiom.
    """
    if argset is None:
        argset = []
    options = FASTCGI_OPTIONS.copy()
    options.update(kwargs)
    for x in argset:
        if "=" in x:
            k, v = x.split('=', 1)
        else:
            k, v = x, True
        options[k.lower()] = v
    if "help" in options:
        return fastcgi_help()
    try:
        import flup
    except ImportError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        sys.stderr.write(" Unable to load the flup package. In order to run django\n")
        sys.stderr.write(" as a FastCGI application, you will need to get flup from\n")
        sys.stderr.write(" http://www.saddi.com/software/flup/ If you've already\n")
        sys.stderr.write(" installed flup, then make sure you have it in your PYTHONPATH.\n")
        return False
    # flup server modules are named e.g. flup.server.fcgi_fork.
    flup_module = 'server.' + options['protocol']
    if options['method'] in ('prefork', 'fork'):
        wsgi_opts = {
            'maxSpare': int(options["maxspare"]),
            'minSpare': int(options["minspare"]),
            'maxChildren': int(options["maxchildren"]),
            'maxRequests': int(options["maxrequests"]),
        }
        flup_module += '_fork'
    elif options['method'] in ('thread', 'threaded'):
        wsgi_opts = {
            'maxSpare': int(options["maxspare"]),
            'minSpare': int(options["minspare"]),
            'maxThreads': int(options["maxchildren"]),
        }
    else:
        return fastcgi_help("ERROR: Implementation must be one of prefork or "
                            "thread.")
    wsgi_opts['debug'] = options['debug'] is not None
    try:
        module = importlib.import_module('.%s' % flup_module, 'flup')
        WSGIServer = module.WSGIServer
    except Exception:
        print("Can't import flup." + flup_module)
        return False
    # Prep up and go
    from django.core.servers.basehttp import get_internal_wsgi_application
    # host+port and socket are mutually exclusive bind targets.
    if options["host"] and options["port"] and not options["socket"]:
        wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
    elif options["socket"] and not options["host"] and not options["port"]:
        wsgi_opts['bindAddress'] = options["socket"]
    elif not options["socket"] and not options["host"] and not options["port"]:
        wsgi_opts['bindAddress'] = None
    else:
        return fastcgi_help("Invalid combination of host, port, socket.")
    if options["daemonize"] is None:
        # Default to daemonizing if we're running on a socket/named pipe.
        daemonize = (wsgi_opts['bindAddress'] is not None)
    else:
        if options["daemonize"].lower() in ('true', 'yes', 't'):
            daemonize = True
        elif options["daemonize"].lower() in ('false', 'no', 'f'):
            daemonize = False
        else:
            return fastcgi_help("ERROR: Invalid option for daemonize "
                                "parameter.")
    daemon_kwargs = {}
    if options['outlog']:
        daemon_kwargs['out_log'] = options['outlog']
    if options['errlog']:
        daemon_kwargs['err_log'] = options['errlog']
    if options['umask']:
        # umask is given in octal notation, e.g. "022".
        daemon_kwargs['umask'] = int(options['umask'], 8)
    if daemonize:
        from django.utils.daemonize import become_daemon
        become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
    if options["pidfile"]:
        with open(options["pidfile"], "w") as fp:
            fp.write("%d\n" % os.getpid())
    WSGIServer(get_internal_wsgi_application(), **wsgi_opts).run()
if __name__ == '__main__':
    runfastcgi(sys.argv[1:])
|
kristi/Lyx
|
refs/heads/master
|
lib/scripts/TeXFiles.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# file TeXFiles.py
# This file is part of LyX, the document processor.
# Licence details can be found in the file COPYING.
# \author Herbert Voß
# \author Jean-Marc Lasgouttes
# \author Jürgen Spitzmüller
# \author Bo Peng
# Full author contact details are available in file CREDITS.
# all files -> without option
# TeX class files -> option cls
# TeX style files -> option sty
# bibtex style files -> option bst
# bibtex database files -> option bib
#
# with the help
# of kpsewhich and creates a
# bstFiles.lst, clsFiles.lst, styFiles.lst, bibFiles.lst
# without any parameter all files are created.
#
# Herbert Voss <voss@perce.org>
#
# Updates from Jean-Marc Lasgouttes.
#
# bib support added by Juergen Spitzmueller (v0.3)
#
# translated to python by Bo Peng, so that the script only
# relies on python and kpsewhich (no shell command is used).
#
import os, sys, re
# Output list files, one per searched TeX file type (see the type loop below).
cls_stylefile = 'clsFiles.lst'
sty_stylefile = 'styFiles.lst'
bst_stylefile = 'bstFiles.lst'
bib_files = 'bibFiles.lst'
def cmdOutput(cmd):
    '''utility function: run a command through a shell and return
    everything it wrote to stdout as a single string
    cmd: command to run
    '''
    pipe = os.popen(cmd)
    try:
        return pipe.read()
    finally:
        pipe.close()
# processing command line options
# (Python 2 script: uses print statements and `print >> file`.)
if len(sys.argv) > 1:
    if sys.argv[1] in ['--help', '-help']:
        print '''Usage: TeXFiles.py [-version | cls | sty | bst | bib ]
    Default is without any Parameters,
    so that all files will be created'''
        sys.exit(0)
    else:
        types = sys.argv[1:]
        for type in types:
            if type not in ['cls', 'sty', 'bst', 'bib']:
                print 'ERROR: unknown type', type
                sys.exit(1)
else:
    # if no parameter is specified, assume all
    types = ['cls', 'sty', 'bst', 'bib']
#
# MS-DOS and MS-Windows define $COMSPEC or $ComSpec and use `;' to separate
# directories in path lists whereas Unix uses `:'. Make an exception for
# Cygwin, where we could have either teTeX (using `:') or MikTeX (using `;').
# Create a variable that holds the right character to be used by the scripts.
path_sep = os.pathsep
if sys.platform == 'cygwin':
    # MikTeX's kpsewhich says "kpathsea emulation version x.x.x", whereas
    # teTeX's simply "kpathsea version x.x.x".
    if 'emulation' in cmdOutput('kpsewhich --version'):
        path_sep = ';'
    else:
        path_sep = ':'
# process each file type
for type in types:
    print "Indexing files of type", type
    # Map each type to its output list file and the kpathsea search path to use.
    if type == 'cls':
        outfile = cls_stylefile
        kpsetype = '.tex'
    elif type == 'sty':
        outfile = sty_stylefile
        kpsetype = '.tex'
    elif type == 'bst':
        outfile = bst_stylefile
        kpsetype = '.bst'
    elif type == 'bib':
        outfile = bib_files
        kpsetype = '.bib'
    # '!!' marks ls-R-only paths in kpathsea output; strip those markers.
    dirs = cmdOutput('kpsewhich --show-path=' + kpsetype).replace('!!', '').strip()
    # remove excessive //
    dirs = re.sub('//+', '/', dirs)
    file_ext = '.' + type
    out = open(outfile, 'w')
    for dir in dirs.split(path_sep):
        # for each valid directory
        if not os.path.isdir(dir):
            continue
        # walk down the file hierarchy
        for root,path,files in os.walk(dir):
            # check file type
            for file in files:
                if len(file) > 4 and file[-4:] == file_ext:
                    # force the use of / since miktex uses / even under windows
                    print >> out, root.replace('\\', '/') + '/' + file
    out.close()
|
guoxiao/hostsupdater
|
refs/heads/master
|
hostsupdater.py
|
1
|
#!/usr/bin/env python
import argparse
import dns.resolver
from multiprocessing.dummy import Pool as ThreadPool
class HostsUpdater():
    """
    HostsUpdater: read a hosts-style file, re-resolve every hostname via
    DNS (Google public DNS by default) and write a fresh hosts file.
    """
    # Shared, read-only configuration.
    nameservers = ['8.8.8.8']
    THREADS = 10
    def __init__(self, infilename, outfilename):
        self.infilename = infilename
        self.outfilename = outfilename
        # BUG FIX: `names` and `name_ip` used to be mutable *class*
        # attributes, silently shared between every instance; they are
        # per-instance state and belong here.
        self.names = []
        self.name_ip = []
        self.my_resolver = dns.resolver.Resolver()
        self.my_resolver.nameservers = self.nameservers
    def read(self):
        """Collect hostnames (second tab-separated column) from the input
        file, skipping blank lines and '#' comments."""
        with open(self.infilename, 'r') as infile:
            for line in infile:
                line = line.strip()
                if line == "" or line[0] == '#':
                    pass
                else:
                    self.names.append(line.split("\t")[1])
    def query(self):
        """Resolve all collected names concurrently; failed lookups map to None."""
        pool = ThreadPool(self.THREADS)
        self.name_ip = pool.map(self.queryone, self.names)
        pool.close()
        pool.join()
        print(self.name_ip)
    def queryone(self, name):
        """Return (ip, name) from the first A record, or None on any DNS error."""
        try:
            print(name)
            answer = self.my_resolver.query(name, 'A', tcp=True)
            ipaddr = answer[0].address
            print(ipaddr)
            return (ipaddr, name)
        except dns.exception.DNSException:
            return None
    def output(self):
        """Write the resolved (ip, name) pairs out in hosts-file format,
        skipping names that failed to resolve."""
        with open(self.outfilename, 'w') as outfile:
            for item in self.name_ip:
                if item is None:
                    pass
                else:
                    line = "%s\t%s\n" % item
                    print(line)
                    outfile.write(line)
    def run(self):
        """read -> query -> output pipeline."""
        self.read()
        self.query()
        self.output()
if __name__ == '__main__':
    # CLI: hostsupdater.py <infile> <outfile>
    parser = argparse.ArgumentParser(description='process hosts file')
    parser.add_argument('infile', type=str, help="hosts file, for example /etc/hosts")
    parser.add_argument('outfile', type=str, help="output file")
    args = parser.parse_args()
    hostupdater = HostsUpdater(args.infile, args.outfile)
    hostupdater.run()
|
h4ck3rm1k3/BeautifulSoup4
|
refs/heads/master
|
bs4/tests/test_tree.py
|
38
|
# -*- coding: utf-8 -*-
"""Tests for Beautiful Soup's tree traversal methods.
The tree traversal methods are the main advantage of using Beautiful
Soup over just using a parser.
Different parsers will build different Beautiful Soup trees given the
same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""
from pdb import set_trace
import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
PY3K,
CData,
Comment,
Declaration,
Doctype,
NavigableString,
SoupStrainer,
Tag,
)
from bs4.testing import (
SoupTest,
skipIf,
)
# Feature flags: some tests are skipped when the optional xml/lxml tree
# builders are not installed.
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)
class TreeTest(SoupTest):
    """Shared assertion helpers for the tree-traversal test cases."""
    def assertSelects(self, tags, should_match):
        """Assert that the tags' string contents equal `should_match`.

        Used by tests that build several single-string tags and then
        select a subset of those strings by some mechanism.
        """
        strings = [t.string for t in tags]
        self.assertEqual(strings, should_match)
    def assertSelectsIDs(self, tags, should_match):
        """Assert that the tags' 'id' attribute values equal `should_match`.

        Used by tests that build several tags, each with a single id, and
        then select a subset of those ids by some mechanism.
        """
        ids = [t['id'] for t in tags]
        self.assertEqual(ids, should_match)
class TestFind(TreeTest):
    """Basic tests of the find() method.
    find() just calls find_all() with limit=1, so it's not tested all
    that thoroughly here.
    """
    def test_find_tag(self):
        soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>")
        self.assertEqual(soup.find("b").string, "2")
    def test_unicode_text_find(self):
        soup = self.soup(u'<h1>Räksmörgås</h1>')
        self.assertEqual(soup.find(string=u'Räksmörgås'), u'Räksmörgås')
    def test_unicode_attribute_find(self):
        soup = self.soup(u'<h1 id="Räksmörgås">here it is</h1>')
        # NOTE(review): str(soup) appears to force the tree to be rendered
        # before searching — presumably a regression guard; confirm.
        str(soup)
        self.assertEqual("here it is", soup.find(id=u'Räksmörgås').text)
    def test_find_everything(self):
        """Test an optimization that finds all tags."""
        soup = self.soup("<a>foo</a><b>bar</b>")
        self.assertEqual(2, len(soup.find_all()))
    def test_find_everything_with_name(self):
        """Test an optimization that finds all tags with a given name."""
        soup = self.soup("<a>foo</a><b>bar</b><a>baz</a>")
        self.assertEqual(2, len(soup.find_all('a')))
class TestFindAll(TreeTest):
    """Basic tests of the find_all() method."""
    def test_find_all_text_nodes(self):
        """You can search the tree for text nodes."""
        soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
        # Exact match.
        self.assertEqual(soup.find_all(string="bar"), [u"bar"])
        # 'text' is the legacy alias for the 'string' argument.
        self.assertEqual(soup.find_all(text="bar"), [u"bar"])
        # Match any of a number of strings.
        self.assertEqual(
            soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"])
        # Match a regular expression.
        self.assertEqual(soup.find_all(text=re.compile('.*')),
                         [u"Foo", u"bar", u'\xbb'])
        # Match anything.
        self.assertEqual(soup.find_all(text=True),
                         [u"Foo", u"bar", u'\xbb'])
    def test_find_all_limit(self):
        """You can limit the number of items returned by find_all."""
        soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>")
        self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"])
        self.assertSelects(soup.find_all('a', limit=1), ["1"])
        self.assertSelects(
            soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"])
        # A limit of 0 means no limit.
        self.assertSelects(
            soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"])
    def test_calling_a_tag_is_calling_findall(self):
        # Calling a tag/soup object like a function is shorthand for find_all.
        soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>")
        self.assertSelects(soup('a', limit=1), ["1"])
        self.assertSelects(soup.b(id="foo"), ["3"])
    def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self):
        soup = self.soup("<a></a>")
        # Create a self-referential list.
        l = []
        l.append(l)
        # Without special code in _normalize_search_value, this would cause infinite
        # recursion.
        self.assertEqual([], soup.find_all(l))
    def test_find_all_resultset(self):
        """All find_all calls return a ResultSet"""
        soup = self.soup("<a></a>")
        result = soup.find_all("a")
        # Every result set exposes a 'source' attribute, whatever the query.
        self.assertTrue(hasattr(result, "source"))
        result = soup.find_all(True)
        self.assertTrue(hasattr(result, "source"))
        result = soup.find_all(text="foo")
        self.assertTrue(hasattr(result, "source"))
class TestFindAllBasicNamespaces(TreeTest):
    """Namespaced tag and attribute names are searchable by full name."""
    def test_find_by_namespaced_name(self):
        soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">')
        msqrt = soup.find("mathml:msqrt")
        self.assertEqual("4", msqrt.string)
        filled = soup.find(attrs={"svg:fill": "red"})
        self.assertEqual("a", filled.name)
class TestFindAllByName(TreeTest):
    """Test ways of finding tags by tag name."""
    def setUp(self):
        # BUG FIX: this read super(TreeTest, self).setUp(), which skips
        # TreeTest in the MRO and would silently miss any setUp logic
        # added to TreeTest; super() must name the class being defined.
        super(TestFindAllByName, self).setUp()
        self.tree =  self.soup("""<a>First tag.</a>
        <b>Second tag.</b>
        <c>Third <a>Nested tag.</a> tag.</c>""")
    def test_find_all_by_tag_name(self):
        # Find all the <a> tags.
        self.assertSelects(
            self.tree.find_all('a'), ['First tag.', 'Nested tag.'])
    def test_find_all_by_name_and_text(self):
        self.assertSelects(
            self.tree.find_all('a', text='First tag.'), ['First tag.'])
        self.assertSelects(
            self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.'])
        self.assertSelects(
            self.tree.find_all('a', text=re.compile("tag")),
            ['First tag.', 'Nested tag.'])
    def test_find_all_on_non_root_element(self):
        # You can call find_all on any node, not just the root.
        self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.'])
    def test_calling_element_invokes_find_all(self):
        self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.'])
    def test_find_all_by_tag_strainer(self):
        self.assertSelects(
            self.tree.find_all(SoupStrainer('a')),
            ['First tag.', 'Nested tag.'])
    def test_find_all_by_tag_names(self):
        self.assertSelects(
            self.tree.find_all(['a', 'b']),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_by_tag_dict(self):
        self.assertSelects(
            self.tree.find_all({'a' : True, 'b' : True}),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_by_tag_re(self):
        self.assertSelects(
            self.tree.find_all(re.compile('^[ab]$')),
            ['First tag.', 'Second tag.', 'Nested tag.'])
    def test_find_all_with_tags_matching_method(self):
        # You can define an oracle method that determines whether
        # a tag matches the search.
        def id_matches_name(tag):
            return tag.name == tag.get('id')
        # NOTE(review): the "</a>" closing the <b id="b"> line below looks
        # like a typo for "</b>"; parsers tolerate it, so the markup is left
        # byte-identical to preserve exactly what the test exercises.
        tree = self.soup("""<a id="a">Match 1.</a>
        <a id="1">Does not match.</a>
        <b id="b">Match 2.</a>""")
        self.assertSelects(
            tree.find_all(id_matches_name), ["Match 1.", "Match 2."])
class TestFindAllByAttribute(TreeTest):
    """Test ways of finding tags by attribute value."""

    def test_find_all_by_attribute_name(self):
        # You can pass in keyword arguments to find_all to search by
        # attribute.
        tree = self.soup("""
                         <a id="first">Matching a.</a>
                         <a id="second">
                          Non-matching <b id="first">Matching b.</b>a.
                         </a>""")
        self.assertSelects(tree.find_all(id='first'),
                           ["Matching a.", "Matching b."])

    def test_find_all_by_utf8_attribute_value(self):
        # Attribute values may be given as UTF-8 bytes, Unicode, or a
        # list mixing the two.
        peace = u"םולש".encode("utf8")
        data = u'<a title="םולש"></a>'.encode("utf8")
        soup = self.soup(data)
        self.assertEqual([soup.a], soup.find_all(title=peace))
        self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8")))
        self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"]))

    def test_find_all_by_attribute_dict(self):
        # You can pass in a dictionary as the argument 'attrs'. This
        # lets you search for attributes like 'name' (a fixed argument
        # to find_all) and 'class' (a reserved word in Python.)
        tree = self.soup("""
                         <a name="name1" class="class1">Name match.</a>
                         <a name="name2" class="class2">Class match.</a>
                         <a name="name3" class="class3">Non-match.</a>
                         <name1>A tag called 'name1'.</name1>
                         """)
        # This doesn't do what you want: 'name' is find_all's tag-name
        # parameter, so it matches the <name1> tag.
        self.assertSelects(tree.find_all(name='name1'),
                           ["A tag called 'name1'."])
        # This does what you want.
        self.assertSelects(tree.find_all(attrs={'name' : 'name1'}),
                           ["Name match."])
        self.assertSelects(tree.find_all(attrs={'class' : 'class2'}),
                           ["Class match."])

    def test_find_all_by_class(self):
        tree = self.soup("""
                         <a class="1">Class 1.</a>
                         <a class="2">Class 2.</a>
                         <b class="1">Class 1.</b>
                         <c class="3 4">Class 3 and 4.</c>
                         """)
        # Passing in the class_ keyword argument will search against
        # the 'class' attribute.
        self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.'])
        self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.'])
        self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.'])
        # Passing in a string to 'attrs' will also search the CSS class.
        self.assertSelects(tree.find_all('a', '1'), ['Class 1.'])
        self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.'])
        self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.'])
        self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.'])

    def test_find_by_class_when_multiple_classes_present(self):
        tree = self.soup("<gar class='foo bar'>Found it</gar>")
        # A regex is matched against each class value individually.
        f = tree.find_all("gar", class_=re.compile("o"))
        self.assertSelects(f, ["Found it"])
        f = tree.find_all("gar", class_=re.compile("a"))
        self.assertSelects(f, ["Found it"])
        # Since the class is not the string "foo bar", but the two
        # strings "foo" and "bar", this will not find anything.
        f = tree.find_all("gar", class_=re.compile("o b"))
        self.assertSelects(f, [])

    def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
        soup = self.soup("<a class='bar'>Found it</a>")
        # A bare regex or callable passed as 'attrs' is applied to the
        # CSS class.
        self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"])
        def big_attribute_value(value):
            return len(value) > 3
        self.assertSelects(soup.find_all("a", big_attribute_value), [])
        def small_attribute_value(value):
            return len(value) <= 3
        self.assertSelects(
            soup.find_all("a", small_attribute_value), ["Found it"])

    def test_find_all_with_string_for_attrs_finds_multiple_classes(self):
        soup = self.soup('<a class="foo bar"></a><a class="foo"></a>')
        a, a2 = soup.find_all("a")
        self.assertEqual([a, a2], soup.find_all("a", "foo"))
        self.assertEqual([a], soup.find_all("a", "bar"))
        # If you specify the class as a string that contains a
        # space, only that specific value will be found.
        self.assertEqual([a], soup.find_all("a", class_="foo bar"))
        self.assertEqual([a], soup.find_all("a", "foo bar"))
        self.assertEqual([], soup.find_all("a", "bar foo"))

    def test_find_all_by_attribute_soupstrainer(self):
        tree = self.soup("""
                         <a id="first">Match.</a>
                         <a id="second">Non-match.</a>""")
        strainer = SoupStrainer(attrs={'id' : 'first'})
        self.assertSelects(tree.find_all(strainer), ['Match.'])

    def test_find_all_with_missing_atribute(self):
        # You can pass in None as the value of an attribute to find_all.
        # This will match tags that do not have that attribute set.
        # (NOTE: "atribute" typo in the method name kept for stability.)
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(tree.find_all('a', id=None), ["No ID present."])

    def test_find_all_with_defined_attribute(self):
        # You can pass in True as the value of an attribute to find_all.
        # This will match tags that have that attribute set to any value.
        tree = self.soup("""<a id="1">ID present.</a>
                            <a>No ID present.</a>
                            <a id="">ID is empty.</a>""")
        self.assertSelects(
            tree.find_all(id=True), ["ID present.", "ID is empty."])

    def test_find_all_with_numeric_attribute(self):
        # If you search for a number, it's treated as a string.
        tree = self.soup("""<a id=1>Unquoted attribute.</a>
                            <a id="1">Quoted attribute.</a>""")
        expected = ["Unquoted attribute.", "Quoted attribute."]
        self.assertSelects(tree.find_all(id=1), expected)
        self.assertSelects(tree.find_all(id="1"), expected)

    def test_find_all_with_list_attribute_values(self):
        # You can pass a list of attribute values instead of just one,
        # and you'll get tags that match any of the values.
        tree = self.soup("""<a id="1">1</a>
                            <a id="2">2</a>
                            <a id="3">3</a>
                            <a>No ID.</a>""")
        self.assertSelects(tree.find_all(id=["1", "3", "4"]),
                           ["1", "3"])

    def test_find_all_with_regular_expression_attribute_value(self):
        # You can pass a regular expression as an attribute value, and
        # you'll get tags whose values for that attribute match the
        # regular expression.
        tree = self.soup("""<a id="a">One a.</a>
                            <a id="aa">Two as.</a>
                            <a id="ab">Mixed as and bs.</a>
                            <a id="b">One b.</a>
                            <a>No ID.</a>""")
        self.assertSelects(tree.find_all(id=re.compile("^a+$")),
                           ["One a.", "Two as."])

    def test_find_by_name_and_containing_string(self):
        soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
        a = soup.a
        self.assertEqual([a], soup.find_all("a", text="foo"))
        # BUG FIX: the original repeated the assertion below twice
        # verbatim; the duplicate has been removed.
        self.assertEqual([], soup.find_all("a", text="bar"))

    def test_find_by_name_and_containing_string_when_string_is_buried(self):
        soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
        self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo"))

    def test_find_by_attribute_and_containing_string(self):
        soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
        a = soup.a
        self.assertEqual([a], soup.find_all(id=2, text="foo"))
        self.assertEqual([], soup.find_all(id=1, text="bar"))
class TestIndex(TreeTest):
    """Test Tag.index"""

    def test_index(self):
        tree = self.soup("""<div>
                            <a>Identical</a>
                            <b>Not identical</b>
                            <a>Identical</a>
                            <c><d>Identical with child</d></c>
                            <b>Also not identical</b>
                            <c><d>Identical with child</d></c>
                            </div>""")
        div = tree.div
        # index() must locate each child by identity, not equality --
        # several of these children compare equal to one another.
        for position, child in enumerate(div.contents):
            self.assertEqual(position, div.index(child))
        # Asking for something that is not a child raises ValueError.
        self.assertRaises(ValueError, tree.index, 1)
class TestParentOperations(TreeTest):
    """Test navigation and searching through an element's parents."""

    def setUp(self):
        super(TestParentOperations, self).setUp()
        # Deliberately nested <ul> tags; the innermost <b> is the
        # starting point for the parent-navigation tests below.
        self.tree = self.soup('''<ul id="empty"></ul>
                                 <ul id="top">
                                  <ul id="middle">
                                   <ul id="bottom">
                                    <b>Start here</b>
                                   </ul>
                                  </ul>''')
        self.start = self.tree.b

    def test_parent(self):
        # .parent walks one level up at a time.
        self.assertEqual(self.start.parent['id'], 'bottom')
        self.assertEqual(self.start.parent.parent['id'], 'middle')
        self.assertEqual(self.start.parent.parent.parent['id'], 'top')

    def test_parent_of_top_tag_is_soup_object(self):
        top_tag = self.tree.contents[0]
        self.assertEqual(top_tag.parent, self.tree)

    def test_soup_object_has_no_parent(self):
        # The BeautifulSoup object is the root of the parent chain.
        self.assertEqual(None, self.tree.parent)

    def test_find_parents(self):
        # find_parents yields matches from innermost to outermost.
        self.assertSelectsIDs(
            self.start.find_parents('ul'), ['bottom', 'middle', 'top'])
        self.assertSelectsIDs(
            self.start.find_parents('ul', id="middle"), ['middle'])

    def test_find_parent(self):
        # find_parent returns the nearest matching ancestor.
        self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')
        self.assertEqual(self.start.find_parent('ul', id='top')['id'], 'top')

    def test_parent_of_text_element(self):
        # NavigableStrings have parents too.
        text = self.tree.find(text="Start here")
        self.assertEqual(text.parent.name, 'b')

    def test_text_element_find_parent(self):
        text = self.tree.find(text="Start here")
        self.assertEqual(text.find_parent('ul')['id'], 'bottom')

    def test_parent_generator(self):
        # The .parents generator ends with the soup object (whose
        # parent is None), hence the guard in the comprehension.
        parents = [parent['id'] for parent in self.start.parents
                   if parent is not None and 'id' in parent.attrs]
        self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
    """Base fixture: a small flat document for next/previous-element tests."""

    def setUp(self):
        # BUG FIX: this originally read super(TreeTest, self).setUp(),
        # which begins the MRO walk *above* TreeTest and skips
        # TreeTest's own setUp.  Start the chain from this class.
        super(ProximityTest, self).setUp()
        self.tree = self.soup(
            '<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>')
class TestNextOperations(ProximityTest):
    """Forward navigation through the document via next_element."""

    def setUp(self):
        super(TestNextOperations, self).setUp()
        self.start = self.tree.b

    def test_next(self):
        # next_element visits a tag's contents before its siblings.
        self.assertEqual(self.start.next_element, "One")
        self.assertEqual(self.start.next_element.next_element['id'], "2")

    def test_next_of_last_item_is_none(self):
        last = self.tree.find(text="Three")
        self.assertEqual(last.next_element, None)

    def test_next_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        self.assertEqual(self.tree.next_element, None)

    def test_find_all_next(self):
        self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
        # BUG FIX: the original also called find_all_next(id=3) here and
        # discarded the result; the dead call has been removed.
        self.assertSelects(self.start.find_all_next(id=3), ["Three"])

    def test_find_next(self):
        self.assertEqual(self.start.find_next('b')['id'], '2')
        self.assertEqual(self.start.find_next(text="Three"), "Three")

    def test_find_next_for_text_element(self):
        # find_next works from a NavigableString, not just a tag.
        text = self.tree.find(text="One")
        self.assertEqual(text.find_next("b").string, "Two")
        self.assertSelects(text.find_all_next("b"), ["Two", "Three"])

    def test_next_generator(self):
        start = self.tree.find(text="Two")
        successors = [node for node in start.next_elements]
        # There are two successors: the final <b> tag and its text contents.
        tag, contents = successors
        self.assertEqual(tag['id'], '3')
        self.assertEqual(contents, "Three")
class TestPreviousOperations(ProximityTest):
    """Backward navigation through the document via previous_element."""

    def setUp(self):
        super(TestPreviousOperations, self).setUp()
        self.end = self.tree.find(text="Three")

    def test_previous(self):
        # Walking backward visits the enclosing <b> tag, then the
        # preceding text node.
        self.assertEqual(self.end.previous_element['id'], "3")
        self.assertEqual(self.end.previous_element.previous_element, "Two")

    def test_previous_of_first_item_is_none(self):
        first = self.tree.find('html')
        self.assertEqual(first.previous_element, None)

    def test_previous_of_root_is_none(self):
        # The document root is outside the next/previous chain.
        # XXX This is broken!
        #self.assertEqual(self.tree.previous_element, None)
        pass

    def test_find_all_previous(self):
        # The <b> tag containing the "Three" node is the predecessor
        # of the "Three" node itself, which is why "Three" shows up
        # here.
        self.assertSelects(
            self.end.find_all_previous('b'), ["Three", "Two", "One"])
        self.assertSelects(self.end.find_all_previous(id=1), ["One"])

    def test_find_previous(self):
        self.assertEqual(self.end.find_previous('b')['id'], '3')
        self.assertEqual(self.end.find_previous(text="One"), "One")

    def test_find_previous_for_text_element(self):
        # find_previous works from a NavigableString as well.
        text = self.tree.find(text="Three")
        self.assertEqual(text.find_previous("b").string, "Three")
        self.assertSelects(
            text.find_all_previous("b"), ["Three", "Two", "One"])

    def test_previous_generator(self):
        start = self.tree.find(text="One")
        predecessors = list(start.previous_elements)
        # There are four predecessors: the <b> tag containing "One"
        # the <body> tag, the <head> tag, and the <html> tag.
        b, body, head, html = predecessors
        self.assertEqual(b['id'], '1')
        self.assertEqual(body.name, "body")
        self.assertEqual(head.name, "head")
        self.assertEqual(html.name, "html")
class SiblingTest(TreeTest):
    """Base fixture: a flat run of <span> siblings for sibling navigation."""

    def setUp(self):
        super(SiblingTest, self).setUp()
        markup = '''<html>
                    <span id="1">
                     <span id="1.1"></span>
                    </span>
                    <span id="2">
                     <span id="2.1"></span>
                    </span>
                    <span id="3">
                     <span id="3.1"></span>
                    </span>
                    <span id="4"></span>
                    </html>'''
        # All that whitespace looks good but makes the tests more
        # difficult. Get rid of it.
        # BUG FIX: the pattern is now a raw string.  The original
        # "\n\s*" relied on the invalid escape \s, which modern
        # CPython flags as a SyntaxWarning (and will eventually reject).
        markup = re.compile(r"\n\s*").sub("", markup)
        self.tree = self.soup(markup)
class TestNextSibling(SiblingTest):
    """Navigation to later siblings via next_sibling."""

    def setUp(self):
        super(TestNextSibling, self).setUp()
        self.start = self.tree.find(id="1")

    def test_next_sibling_of_root_is_none(self):
        # The soup object itself has no siblings.
        self.assertEqual(self.tree.next_sibling, None)

    def test_next_sibling(self):
        self.assertEqual(self.start.next_sibling['id'], '2')
        self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')
        # Note the difference between next_sibling and next_element:
        # next_element descends into the tag's children first.
        self.assertEqual(self.start.next_element['id'], '1.1')

    def test_next_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.next_sibling, None)
        inner_span = self.tree.find(id="1.1")
        self.assertEqual(inner_span.next_sibling, None)
        final_span = self.tree.find(id="4")
        self.assertEqual(final_span.next_sibling, None)

    def test_find_next_sibling(self):
        self.assertEqual(self.start.find_next_sibling('span')['id'], '2')

    def test_next_siblings(self):
        self.assertSelectsIDs(self.start.find_next_siblings("span"),
                              ['2', '3', '4'])
        self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3'])

    def test_next_sibling_for_text_element(self):
        # Sibling navigation also works from a NavigableString.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="Foo")
        self.assertEqual(start.next_sibling.name, 'b')
        self.assertEqual(start.next_sibling.next_sibling, 'baz')
        self.assertSelects(start.find_next_siblings('b'), ['bar'])
        self.assertEqual(start.find_next_sibling(text="baz"), "baz")
        self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
    """Navigation to earlier siblings via previous_sibling."""

    def setUp(self):
        super(TestPreviousSibling, self).setUp()
        self.end = self.tree.find(id="4")

    def test_previous_sibling_of_root_is_none(self):
        # The soup object itself has no siblings.
        self.assertEqual(self.tree.previous_sibling, None)

    def test_previous_sibling(self):
        self.assertEqual(self.end.previous_sibling['id'], '3')
        self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')
        # Note the difference between previous_sibling and
        # previous_element: previous_element visits the preceding tag's
        # children before the tag itself.
        self.assertEqual(self.end.previous_element['id'], '3.1')

    def test_previous_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.previous_sibling, None)
        inner_span = self.tree.find(id="1.1")
        self.assertEqual(inner_span.previous_sibling, None)
        first_span = self.tree.find(id="1")
        self.assertEqual(first_span.previous_sibling, None)

    def test_find_previous_sibling(self):
        self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')

    def test_previous_siblings(self):
        self.assertSelectsIDs(self.end.find_previous_siblings("span"),
                              ['3', '2', '1'])
        self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])

    def test_previous_sibling_for_text_element(self):
        # Sibling navigation also works from a NavigableString.
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="baz")
        self.assertEqual(start.previous_sibling.name, 'b')
        self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')
        self.assertSelects(start.find_previous_siblings('b'), ['bar'])
        self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
        self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTagCreation(SoupTest):
    """Test the ability to create new tags."""

    def test_new_tag(self):
        # new_tag builds a detached Tag with the given name and attrs.
        soup = self.soup("")
        new_tag = soup.new_tag("foo", bar="baz")
        self.assertTrue(isinstance(new_tag, Tag))
        self.assertEqual("foo", new_tag.name)
        self.assertEqual(dict(bar="baz"), new_tag.attrs)
        # The new tag is not attached to any tree yet.
        self.assertEqual(None, new_tag.parent)

    def test_tag_inherits_self_closing_rules_from_builder(self):
        # Guarded: lxml may not be installed in every test environment.
        if XML_BUILDER_PRESENT:
            xml_soup = BeautifulSoup("", "lxml-xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")

            # Both the <br> and <p> tag are empty-element, just because
            # they have no contents.
            self.assertEqual(b"<br/>", xml_br.encode())
            self.assertEqual(b"<p/>", xml_p.encode())

        html_soup = BeautifulSoup("", "html.parser")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")

        # The HTML builder users HTML's rules about which tags are
        # empty-element tags, and the new tags reflect these rules.
        self.assertEqual(b"<br/>", html_br.encode())
        self.assertEqual(b"<p></p>", html_p.encode())

    def test_new_string_creates_navigablestring(self):
        soup = self.soup("")
        s = soup.new_string("foo")
        self.assertEqual("foo", s)
        self.assertTrue(isinstance(s, NavigableString))

    def test_new_string_can_create_navigablestring_subclass(self):
        # An optional second argument selects the NavigableString
        # subclass to instantiate.
        soup = self.soup("")
        s = soup.new_string("foo", Comment)
        self.assertEqual("foo", s)
        self.assertTrue(isinstance(s, Comment))
class TestTreeModification(SoupTest):
    """Tests for mutating a parse tree in place: attribute changes,
    insertion, replacement, extraction, wrapping and unwrapping.

    Many assertions below check that the next_element/previous_element
    and sibling links are correctly re-stitched after a mutation.
    """

    def test_attribute_modification(self):
        # Attributes can be set, deleted and added with dict syntax.
        soup = self.soup('<a id="1"></a>')
        soup.a['id'] = 2
        self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>'))
        del(soup.a['id'])
        self.assertEqual(soup.decode(), self.document_for('<a></a>'))
        soup.a['id2'] = 'foo'
        self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>'))

    def test_new_tag_creation(self):
        # Tags built directly via the Tag constructor can be inserted
        # into an existing tree.
        builder = builder_registry.lookup('html')()
        soup = self.soup("<body></body>", builder=builder)
        a = Tag(soup, builder, 'a')
        ol = Tag(soup, builder, 'ol')
        a['href'] = 'http://foo.com/'
        soup.body.insert(0, a)
        soup.body.insert(1, ol)
        self.assertEqual(
            soup.body.encode(),
            b'<body><a href="http://foo.com/"></a><ol></ol></body>')

    def test_append_to_contents_moves_tag(self):
        # Appending a tag that already lives in the tree moves it
        # rather than copying it.
        doc = """<p id="1">Don't leave me <b>here</b>.</p>
                <p id="2">Don\'t leave!</p>"""
        soup = self.soup(doc)
        second_para = soup.find(id='2')
        bold = soup.b

        # Move the <b> tag to the end of the second paragraph.
        soup.find(id='2').append(soup.b)

        # The <b> tag is now a child of the second paragraph.
        self.assertEqual(bold.parent, second_para)

        self.assertEqual(
            soup.decode(), self.document_for(
                '<p id="1">Don\'t leave me .</p>\n'
                '<p id="2">Don\'t leave!<b>here</b></p>'))

    def test_replace_with_returns_thing_that_was_replaced(self):
        text = "<a></a><b><c></c></b>"
        soup = self.soup(text)
        a = soup.a
        new_a = a.replace_with(soup.c)
        self.assertEqual(a, new_a)

    def test_unwrap_returns_thing_that_was_replaced(self):
        text = "<a><b></b><c></c></a>"
        soup = self.soup(text)
        a = soup.a
        new_a = a.unwrap()
        self.assertEqual(a, new_a)

    def test_replace_with_and_unwrap_give_useful_exception_when_tag_has_no_parent(self):
        # An extracted (parentless) tag can be neither unwrapped nor
        # replaced -- both operations need a parent to splice into.
        soup = self.soup("<a><b>Foo</b></a><c>Bar</c>")
        a = soup.a
        a.extract()
        self.assertEqual(None, a.parent)
        self.assertRaises(ValueError, a.unwrap)
        self.assertRaises(ValueError, a.replace_with, soup.c)

    def test_replace_tag_with_itself(self):
        # Replacing a tag with itself is a no-op.
        text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
        soup = self.soup(text)
        c = soup.c
        soup.c.replace_with(c)
        self.assertEqual(soup.decode(), self.document_for(text))

    def test_replace_tag_with_its_parent_raises_exception(self):
        text = "<a><b></b></a>"
        soup = self.soup(text)
        self.assertRaises(ValueError, soup.b.replace_with, soup.a)

    def test_insert_tag_into_itself_raises_exception(self):
        text = "<a><b></b></a>"
        soup = self.soup(text)
        self.assertRaises(ValueError, soup.a.insert, 0, soup.a)

    def test_replace_with_maintains_next_element_throughout(self):
        soup = self.soup('<p><a>one</a><b>three</b></p>')
        a = soup.a
        b = a.contents[0]
        # Make it so the <a> tag has two text children.
        a.insert(1, "two")

        # Now replace each one with the empty string.
        left, right = a.contents
        left.replaceWith('')
        right.replaceWith('')

        # The <b> tag is still connected to the tree.
        self.assertEqual("three", soup.b.string)

    def test_replace_final_node(self):
        # Replacing the very last node keeps the element chain intact.
        soup = self.soup("<b>Argh!</b>")
        soup.find(text="Argh!").replace_with("Hooray!")
        new_text = soup.find(text="Hooray!")
        b = soup.b
        self.assertEqual(new_text.previous_element, b)
        self.assertEqual(new_text.parent, b)
        self.assertEqual(new_text.previous_element.next_element, new_text)
        self.assertEqual(new_text.next_element, None)

    def test_consecutive_text_nodes(self):
        # A builder should never create two consecutive text nodes,
        # but if you insert one next to another, Beautiful Soup will
        # handle it correctly.
        soup = self.soup("<a><b>Argh!</b><c></c></a>")
        soup.b.insert(1, "Hooray!")

        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Argh!Hooray!</b><c></c></a>"))

        new_text = soup.find(text="Hooray!")
        self.assertEqual(new_text.previous_element, "Argh!")
        self.assertEqual(new_text.previous_element.next_element, new_text)

        self.assertEqual(new_text.previous_sibling, "Argh!")
        self.assertEqual(new_text.previous_sibling.next_sibling, new_text)

        self.assertEqual(new_text.next_sibling, None)
        self.assertEqual(new_text.next_element, soup.c)

    def test_insert_string(self):
        soup = self.soup("<a></a>")
        soup.a.insert(0, "bar")
        soup.a.insert(0, "foo")
        # The string were added to the tag.
        self.assertEqual(["foo", "bar"], soup.a.contents)
        # And they were converted to NavigableStrings.
        self.assertEqual(soup.a.contents[0].next_element, "bar")

    def test_insert_tag(self):
        builder = self.default_builder
        soup = self.soup(
            "<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
        magic_tag = Tag(soup, builder, 'magictag')
        magic_tag.insert(0, "the")
        soup.a.insert(1, magic_tag)

        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))

        # Make sure all the relationships are hooked up correctly.
        b_tag = soup.b
        self.assertEqual(b_tag.next_sibling, magic_tag)
        self.assertEqual(magic_tag.previous_sibling, b_tag)

        find = b_tag.find(text="Find")
        self.assertEqual(find.next_element, magic_tag)
        self.assertEqual(magic_tag.previous_element, find)

        c_tag = soup.c
        self.assertEqual(magic_tag.next_sibling, c_tag)
        self.assertEqual(c_tag.previous_sibling, magic_tag)

        the = magic_tag.find(text="the")
        self.assertEqual(the.parent, magic_tag)
        self.assertEqual(the.next_element, c_tag)
        self.assertEqual(c_tag.previous_element, the)

    def test_append_child_thats_already_at_the_end(self):
        # Re-appending the final child is a no-op.
        data = "<a><b></b></a>"
        soup = self.soup(data)
        soup.a.append(soup.b)
        self.assertEqual(data, soup.decode())

    def test_move_tag_to_beginning_of_parent(self):
        data = "<a><b></b><c></c><d></d></a>"
        soup = self.soup(data)
        soup.a.insert(0, soup.d)
        self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode())

    def test_insert_works_on_empty_element_tag(self):
        # This is a little strange, since most HTML parsers don't allow
        # markup like this to come through. But in general, we don't
        # know what the parser would or wouldn't have allowed, so
        # I'm letting this succeed for now.
        soup = self.soup("<br/>")
        soup.br.insert(1, "Contents")
        self.assertEqual(str(soup.br), "<br>Contents</br>")

    def test_insert_before(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        soup.b.insert_before("BAZ")
        soup.a.insert_before("QUUX")
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>"))

        # insert_before on a tag that is already in the tree moves it.
        soup.a.insert_before(soup.b)
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))

    def test_insert_after(self):
        soup = self.soup("<a>foo</a><b>bar</b>")
        soup.b.insert_after("BAZ")
        soup.a.insert_after("QUUX")
        self.assertEqual(
            soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ"))

        # insert_after on a tag that is already in the tree moves it.
        soup.b.insert_after(soup.a)
        self.assertEqual(
            soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))

    def test_insert_after_raises_exception_if_after_has_no_meaning(self):
        soup = self.soup("")
        tag = soup.new_tag("a")
        string = soup.new_string("")
        # A parentless string, the soup object, and the tag itself are
        # all invalid insertion anchors.
        self.assertRaises(ValueError, string.insert_after, tag)
        self.assertRaises(NotImplementedError, soup.insert_after, tag)
        self.assertRaises(ValueError, tag.insert_after, tag)

    def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self):
        soup = self.soup("")
        tag = soup.new_tag("a")
        string = soup.new_string("")
        # Mirror image of the insert_after case above.
        self.assertRaises(ValueError, string.insert_before, tag)
        self.assertRaises(NotImplementedError, soup.insert_before, tag)
        self.assertRaises(ValueError, tag.insert_before, tag)

    def test_replace_with(self):
        soup = self.soup(
            "<p>There's <b>no</b> business like <b>show</b> business</p>")
        no, show = soup.find_all('b')
        show.replace_with(no)
        self.assertEqual(
            soup.decode(),
            self.document_for(
                "<p>There's  business like <b>no</b> business</p>"))

        # The replaced tag is orphaned; the replacement is re-linked.
        self.assertEqual(show.parent, None)
        self.assertEqual(no.parent, soup.p)
        self.assertEqual(no.next_element, "no")
        self.assertEqual(no.next_sibling, " business")

    def test_replace_first_child(self):
        data = "<a><b></b><c></c></a>"
        soup = self.soup(data)
        soup.b.replace_with(soup.c)
        self.assertEqual("<a><c></c></a>", soup.decode())

    def test_replace_last_child(self):
        data = "<a><b></b><c></c></a>"
        soup = self.soup(data)
        soup.c.replace_with(soup.b)
        self.assertEqual("<a><b></b></a>", soup.decode())

    def test_nested_tag_replace_with(self):
        soup = self.soup(
            """<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")

        # Replace the entire <b> tag and its contents ("reserve the
        # right") with the <f> tag ("refuse").
        remove_tag = soup.b
        move_tag = soup.f
        remove_tag.replace_with(move_tag)

        self.assertEqual(
            soup.decode(), self.document_for(
                "<a>We<f>refuse</f></a><e>to<g>service</g></e>"))

        # The <b> tag is now an orphan.
        self.assertEqual(remove_tag.parent, None)
        self.assertEqual(remove_tag.find(text="right").next_element, None)
        self.assertEqual(remove_tag.previous_element, None)
        self.assertEqual(remove_tag.next_sibling, None)
        self.assertEqual(remove_tag.previous_sibling, None)

        # The <f> tag is now connected to the <a> tag.
        self.assertEqual(move_tag.parent, soup.a)
        self.assertEqual(move_tag.previous_element, "We")
        self.assertEqual(move_tag.next_element.next_element, soup.e)
        self.assertEqual(move_tag.next_sibling, None)

        # The gap where the <f> tag used to be has been mended, and
        # the word "to" is now connected to the <g> tag.
        to_text = soup.find(text="to")
        g_tag = soup.g
        self.assertEqual(to_text.next_element, g_tag)
        self.assertEqual(to_text.next_sibling, g_tag)
        self.assertEqual(g_tag.previous_element, to_text)
        self.assertEqual(g_tag.previous_sibling, to_text)

    def test_unwrap(self):
        # unwrap() replaces a tag with its own contents.
        tree = self.soup("""
            <p>Unneeded <em>formatting</em> is unneeded</p>
            """)
        tree.em.unwrap()
        self.assertEqual(tree.em, None)
        self.assertEqual(tree.p.text, "Unneeded formatting is unneeded")

    def test_wrap(self):
        # wrap() encloses an element in a new tag and returns the
        # wrapper.
        soup = self.soup("I wish I was bold.")
        value = soup.string.wrap(soup.new_tag("b"))
        self.assertEqual(value.decode(), "<b>I wish I was bold.</b>")
        self.assertEqual(
            soup.decode(), self.document_for("<b>I wish I was bold.</b>"))

    def test_wrap_extracts_tag_from_elsewhere(self):
        # Wrapping with a tag that already lives in the tree moves that
        # tag to the wrap site.
        soup = self.soup("<b></b>I wish I was bold.")
        soup.b.next_sibling.wrap(soup.b)
        self.assertEqual(
            soup.decode(), self.document_for("<b>I wish I was bold.</b>"))

    def test_wrap_puts_new_contents_at_the_end(self):
        soup = self.soup("<b>I like being bold.</b>I wish I was bold.")
        soup.b.next_sibling.wrap(soup.b)
        self.assertEqual(2, len(soup.b.contents))
        self.assertEqual(
            soup.decode(), self.document_for(
                "<b>I like being bold.I wish I was bold.</b>"))

    def test_extract(self):
        soup = self.soup(
            '<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>')

        self.assertEqual(len(soup.body.contents), 3)
        extracted = soup.find(id="nav").extract()

        self.assertEqual(
            soup.decode(), "<html><body>Some content.  More content.</body></html>")
        self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>')

        # The extracted tag is now an orphan.
        self.assertEqual(len(soup.body.contents), 2)
        self.assertEqual(extracted.parent, None)
        self.assertEqual(extracted.previous_element, None)
        self.assertEqual(extracted.next_element.next_element, None)

        # The gap where the extracted tag used to be has been mended.
        content_1 = soup.find(text="Some content. ")
        content_2 = soup.find(text=" More content.")
        self.assertEqual(content_1.next_element, content_2)
        self.assertEqual(content_1.next_sibling, content_2)
        self.assertEqual(content_2.previous_element, content_1)
        self.assertEqual(content_2.previous_sibling, content_1)

    def test_extract_distinguishes_between_identical_strings(self):
        # extract() must remove the exact NavigableString object, by
        # identity, even when equal strings are present.
        soup = self.soup("<a>foo</a><b>bar</b>")
        foo_1 = soup.a.string
        bar_1 = soup.b.string
        foo_2 = soup.new_string("foo")
        bar_2 = soup.new_string("bar")
        soup.a.append(foo_2)
        soup.b.append(bar_2)

        # Now there are two identical strings in the <a> tag, and two
        # in the <b> tag. Let's remove the first "foo" and the second
        # "bar".
        foo_1.extract()
        bar_2.extract()
        self.assertEqual(foo_2, soup.a.string)
        self.assertEqual(bar_2, soup.b.string)

    def test_extract_multiples_of_same_tag(self):
        soup = self.soup("""
<html>
<head>
<script>foo</script>
</head>
<body>
 <script>bar</script>
 <a></a>
</body>
<script>baz</script>
</html>""")
        # Each call to soup.script re-finds the (current) first <script>.
        [soup.script.extract() for i in soup.find_all("script")]
        self.assertEqual("<body>\n\n<a></a>\n</body>", unicode(soup.body))

    def test_extract_works_when_element_is_surrounded_by_identical_strings(self):
        soup = self.soup(
 '<html>\n'
 '<body>hi</body>\n'
 '</html>')
        soup.find('body').extract()
        self.assertEqual(None, soup.find('body'))

    def test_clear(self):
        """Tag.clear()"""
        soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
        # clear using extract()
        a = soup.a
        soup.p.clear()
        self.assertEqual(len(soup.p.contents), 0)
        self.assertTrue(hasattr(a, "contents"))

        # clear using decompose()
        em = a.em
        a.clear(decompose=True)
        self.assertEqual(0, len(em.contents))

    def test_string_set(self):
        """Tag.string = 'string'"""
        soup = self.soup("<a></a> <b><c></c></b>")
        soup.a.string = "foo"
        self.assertEqual(soup.a.contents, ["foo"])
        # Setting .string discards any existing children.
        soup.b.string = "bar"
        self.assertEqual(soup.b.contents, ["bar"])

    def test_string_set_does_not_affect_original_string(self):
        soup = self.soup("<a><b>foo</b><c>bar</c>")
        soup.b.string = soup.c.string
        self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>")

    def test_set_string_preserves_class_of_string(self):
        # Assigning a NavigableString subclass keeps its class.
        soup = self.soup("<a></a>")
        cdata = CData("foo")
        soup.a.string = cdata
        self.assertTrue(isinstance(soup.a.string, CData))
class TestElementObjects(SoupTest):
    """Test various features of element objects."""

    def test_len(self):
        """The length of an element is its number of children."""
        soup = self.soup("<top>1<b>2</b>3</top>")

        # The BeautifulSoup object itself contains one element: the
        # <top> tag.
        self.assertEqual(len(soup.contents), 1)
        self.assertEqual(len(soup), 1)

        # The <top> tag contains three elements: the text node "1", the
        # <b> tag, and the text node "3".
        self.assertEqual(len(soup.top), 3)
        self.assertEqual(len(soup.top.contents), 3)

    def test_member_access_invokes_find(self):
        """Accessing a Python member .foo invokes find('foo')"""
        soup = self.soup('<b><i></i></b>')
        self.assertEqual(soup.b, soup.find('b'))
        self.assertEqual(soup.b.i, soup.find('b').find('i'))
        # A tag that doesn't exist yields None, not an AttributeError.
        self.assertEqual(soup.a, None)

    def test_deprecated_member_access(self):
        soup = self.soup('<b><i></i></b>')
        # The old .fooTag syntax still works but emits a deprecation
        # warning pointing at .find("foo").
        with warnings.catch_warnings(record=True) as w:
            tag = soup.bTag
        self.assertEqual(soup.b, tag)
        self.assertEqual(
            '.bTag is deprecated, use .find("b") instead.',
            str(w[0].message))

    def test_has_attr(self):
        """has_attr() checks for the presence of an attribute.

        Please note: has_attr() is different from __in__. has_attr()
        checks the tag's attributes and __in__ checks the tag's
        children.
        """
        soup = self.soup("<foo attr='bar'>")
        self.assertTrue(soup.foo.has_attr('attr'))
        self.assertFalse(soup.foo.has_attr('attr2'))

    def test_attributes_come_out_in_alphabetical_order(self):
        markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
        self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')

    def test_string(self):
        # A tag that contains only a text node makes that node
        # available as .string.
        soup = self.soup("<b>foo</b>")
        self.assertEqual(soup.b.string, 'foo')

    def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
        soup = self.soup("<b></b>")
        self.assertEqual(soup.b.string, None)

    def test_tag_with_multiple_children_has_no_string(self):
        # A tag with more than one child has no .string.
        soup = self.soup("<a>foo<b></b><b></b></b>")
        self.assertEqual(soup.b.string, None)

        soup = self.soup("<a>foo<b></b>bar</b>")
        self.assertEqual(soup.b.string, None)

        # Even if all the children are strings, due to trickery,
        # it won't work--but this would be a good optimization.
        soup = self.soup("<a>foo</b>")
        soup.a.insert(1, "bar")
        self.assertEqual(soup.a.string, None)

    def test_tag_with_recursive_string_has_string(self):
        # A tag with a single child which has a .string inherits that
        # .string.
        soup = self.soup("<a><b>foo</b></a>")
        self.assertEqual(soup.a.string, "foo")
        self.assertEqual(soup.string, "foo")

    def test_lack_of_string(self):
        """Only a tag containing a single text node has a .string."""
        soup = self.soup("<b>f<i>e</i>o</b>")
        self.assertFalse(soup.b.string)

        soup = self.soup("<b></b>")
        self.assertFalse(soup.b.string)

    def test_all_text(self):
        """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
        soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
        self.assertEqual(soup.a.text, "ar t ")
        self.assertEqual(soup.a.get_text(strip=True), "art")
        self.assertEqual(soup.a.get_text(","), "a,r, , t ")
        self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")

    def test_get_text_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(soup.get_text(), "foobar")

        # Passing Comment in types= makes comment text visible again.
        self.assertEqual(
            soup.get_text(types=(NavigableString, Comment)), "fooIGNOREbar")
        self.assertEqual(
            soup.get_text(types=None), "fooIGNOREbar")

    def test_all_strings_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(['foo', 'bar'], list(soup.strings))
class TestCDAtaListAttributes(SoupTest):
    """Testing cdata-list attributes like 'class'.

    Such attributes are parsed into lists of whitespace-separated
    values, and joined back into a single string on output.
    """

    def test_single_value_becomes_list(self):
        soup = self.soup("<a class='foo'>")
        self.assertEqual(["foo"], soup.a['class'])

    def test_multiple_values_becomes_list(self):
        soup = self.soup("<a class='foo bar'>")
        self.assertEqual(["foo", "bar"], soup.a['class'])

    def test_multiple_values_separated_by_weird_whitespace(self):
        # Tabs and newlines count as value separators too.
        soup = self.soup("<a class='foo\tbar\nbaz'>")
        self.assertEqual(["foo", "bar", "baz"], soup.a['class'])

    def test_attributes_joined_into_string_on_output(self):
        soup = self.soup("<a class='foo\tbar'>")
        self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())

    def test_accept_charset(self):
        # accept-charset is a cdata-list attribute on <form>.
        soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
        self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])

    def test_cdata_attribute_applying_only_to_one_tag(self):
        data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
        soup = self.soup(data)
        # We saw in another test that accept-charset is a cdata-list
        # attribute for the <form> tag. But it's not a cdata-list
        # attribute for any other tag.
        self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset'])

    def test_string_has_immutable_name_property(self):
        # NavigableString.name is always None and cannot be assigned.
        string = self.soup("s").string
        self.assertEqual(None, string.name)
        def t():
            string.name = 'foo'
        self.assertRaises(AttributeError, t)
class TestPersistence(SoupTest):
    "Testing features like pickle and deepcopy."

    def setUp(self):
        super(TestPersistence, self).setUp()
        # A realistic document used as the fixture for all tests below.
        self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
        self.tree = self.soup(self.page)

    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
        dumped = pickle.dumps(self.tree, 2)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.__class__, BeautifulSoup)
        self.assertEqual(loaded.decode(), self.tree.decode())

    def test_deepcopy_identity(self):
        # Making a deepcopy of a tree yields an identical tree.
        copied = copy.deepcopy(self.tree)
        self.assertEqual(copied.decode(), self.tree.decode())

    def test_unicode_pickle(self):
        # A tree containing Unicode characters can be pickled.
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.decode(), soup.decode())

    def test_copy_navigablestring_is_not_attached_to_tree(self):
        # A copied NavigableString keeps its text but loses all of its
        # tree-navigation pointers.
        html = u"<b>Foo<a></a></b><b>Bar</b>"
        soup = self.soup(html)
        s1 = soup.find(string="Foo")
        s2 = copy.copy(s1)
        self.assertEqual(s1, s2)
        self.assertEqual(None, s2.parent)
        self.assertEqual(None, s2.next_element)
        self.assertNotEqual(None, s1.next_sibling)
        self.assertEqual(None, s2.next_sibling)
        self.assertEqual(None, s2.previous_element)

    def test_copy_navigablestring_subclass_has_same_type(self):
        html = u"<b><!--Foo--></b>"
        soup = self.soup(html)
        s1 = soup.string
        s2 = copy.copy(s1)
        self.assertEqual(s1, s2)
        # Copying keeps the subclass (Comment), not bare NavigableString.
        self.assertTrue(isinstance(s2, Comment))

    def test_copy_entire_soup(self):
        html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end"
        soup = self.soup(html)
        soup_copy = copy.copy(soup)
        self.assertEqual(soup, soup_copy)

    def test_copy_tag_copies_contents(self):
        html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end"
        soup = self.soup(html)
        div = soup.div
        div_copy = copy.copy(div)

        # The two tags look the same, and evaluate to equal.
        self.assertEqual(unicode(div), unicode(div_copy))
        self.assertEqual(div, div_copy)

        # But they're not the same object.
        self.assertFalse(div is div_copy)

        # And they don't have the same relation to the parse tree. The
        # copy is not associated with a parse tree at all.
        self.assertEqual(None, div_copy.parent)
        self.assertEqual(None, div_copy.previous_element)
        self.assertEqual(None, div_copy.find(string='Bar').next_element)
        self.assertNotEqual(None, div.find(string='Bar').next_element)
class TestSubstitutions(SoupTest):
    """Tests of the output formatters (entity substitution on output).

    NOTE(review): several expected-value literals below contain raw
    characters ('<', '>', 'é', '&') where the upstream bs4 test suite
    has HTML entities ('&lt;', '&eacute;', '&amp;'); this chunk looks
    entity-unescaped by extraction -- verify against upstream before
    trusting the exact expected strings. Literals are left untouched.
    """

    def test_default_formatter_is_minimal(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="minimal")
        # The < is converted back into < but the e-with-acute is left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))

    def test_formatter_html(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="html")
        self.assertEqual(
            decoded,
            self.document_for("<b><<Sacré bleu!>></b>"))

    def test_formatter_minimal(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="minimal")
        # The < is converted back into < but the e-with-acute is left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))

    def test_formatter_null(self):
        markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter=None)
        # Neither the angle brackets nor the e-with-acute are converted.
        # This is not valid HTML, but it's what the user wanted.
        self.assertEqual(decoded,
                         self.document_for(u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))

    def test_formatter_custom(self):
        markup = u"<b><foo></b><b>bar</b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter = lambda x: x.upper())
        # Instead of normal entity conversion code, the custom
        # callable is called on every string.
        self.assertEqual(
            decoded,
            self.document_for(u"<b><FOO></b><b>BAR</b>"))

    def test_formatter_is_run_on_attribute_values(self):
        markup = u'<a href="http://a.com?a=b&c=é">e</a>'
        soup = self.soup(markup)
        a = soup.a

        expect_minimal = u'<a href="http://a.com?a=b&c=é">e</a>'

        self.assertEqual(expect_minimal, a.decode())
        self.assertEqual(expect_minimal, a.decode(formatter="minimal"))

        expect_html = u'<a href="http://a.com?a=b&c=é">e</a>'
        self.assertEqual(expect_html, a.decode(formatter="html"))

        self.assertEqual(markup, a.decode(formatter=None))
        expect_upper = u'<a href="HTTP://A.COM?A=B&C=É">E</a>'
        self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper()))

    def test_formatter_skips_script_tag_for_html_documents(self):
        # Script contents must never be entity-escaped on output.
        doc = """
  <script type="text/javascript">
   console.log("< < hey > > ");
  </script>
"""
        encoded = BeautifulSoup(doc, 'html.parser').encode()
        self.assertTrue(b"< < hey > >" in encoded)

    def test_formatter_skips_style_tag_for_html_documents(self):
        # Style contents must never be entity-escaped on output.
        doc = """
  <style type="text/css">
   console.log("< < hey > > ");
  </style>
"""
        encoded = BeautifulSoup(doc, 'html.parser').encode()
        self.assertTrue(b"< < hey > >" in encoded)

    def test_prettify_leaves_preformatted_text_alone(self):
        soup = self.soup("<div>  foo  <pre>  \tbar\n  \n  </pre>  baz ")
        # Everything outside the <pre> tag is reformatted, but everything
        # inside is left alone.
        self.assertEqual(
            u'<div>\n foo\n <pre>  \tbar\n  \n  </pre>\n baz\n</div>',
            soup.div.prettify())

    def test_prettify_accepts_formatter(self):
        soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser')
        pretty = soup.prettify(formatter = lambda x: x.upper())
        self.assertTrue("FOO" in pretty)

    def test_prettify_outputs_unicode_by_default(self):
        soup = self.soup("<a></a>")
        self.assertEqual(unicode, type(soup.prettify()))

    def test_prettify_can_encode_data(self):
        soup = self.soup("<a></a>")
        self.assertEqual(bytes, type(soup.prettify("utf-8")))

    def test_html_entity_substitution_off_by_default(self):
        markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
        soup = self.soup(markup)
        encoded = soup.b.encode("utf-8")
        self.assertEqual(encoded, markup.encode('utf-8'))

    def test_encoding_substitution(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type"/>')
        soup = self.soup(meta_tag)

        # Parse the document, and the charset appears unchanged.
        self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis')

        # Encode the document into some encoding, and the encoding is
        # substituted into the meta tag.
        utf_8 = soup.encode("utf-8")
        self.assertTrue(b"charset=utf-8" in utf_8)

        euc_jp = soup.encode("euc_jp")
        self.assertTrue(b"charset=euc_jp" in euc_jp)

        shift_jis = soup.encode("shift-jis")
        self.assertTrue(b"charset=shift-jis" in shift_jis)

        utf_16_u = soup.encode("utf-16").decode("utf-16")
        self.assertTrue("charset=utf-16" in utf_16_u)

    def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
        markup = ('<head><meta content="text/html; charset=x-sjis" '
                  'http-equiv="Content-type"/></head><pre>foo</pre>')

        # Beautiful Soup used to try to rewrite the meta tag even if the
        # meta tag got filtered out by the strainer. This test makes
        # sure that doesn't happen.
        strainer = SoupStrainer('pre')
        soup = self.soup(markup, parse_only=strainer)
        self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
    """Test the ability to encode objects into strings."""

    def test_unicode_string_can_be_encoded(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(soup.b.string.encode("utf-8"),
                         u"\N{SNOWMAN}".encode("utf-8"))

    def test_tag_containing_unicode_string_can_be_encoded(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            soup.b.encode("utf-8"), html.encode("utf-8"))

    def test_encoding_substitutes_unrecognized_characters_by_default(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        # NOTE(review): upstream bs4 expects the XML entity b"<b>&#9731;</b>"
        # here; this literal looks entity-unescaped by extraction -- verify
        # before trusting. Left byte-identical.
        self.assertEqual(soup.b.encode("ascii"), b"<b>☃</b>")

    def test_encoding_can_be_made_strict(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertRaises(
            UnicodeEncodeError, soup.encode, "ascii", errors="strict")

    def test_decode_contents(self):
        # decode_contents() renders the children only, not the tag itself.
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents())

    def test_encode_contents(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents(
                encoding="utf8"))

    def test_deprecated_renderContents(self):
        # renderContents() is the BS3 spelling of encode_contents().
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        self.assertEqual(
            u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents())

    def test_repr(self):
        html = u"<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        # Under Python 3, repr() is the Unicode document; under Python 2
        # it is a bytestring with the snowman escaped.
        if PY3K:
            self.assertEqual(html, repr(soup))
        else:
            self.assertEqual(b'<b>\\u2603</b>', repr(soup))
class TestNavigableStringSubclasses(SoupTest):
    """Tests of the NavigableString subclasses: CData, Doctype, Declaration."""

    def test_cdata(self):
        # None of the current builders turn CDATA sections into CData
        # objects, but you can create them manually.
        soup = self.soup("")
        cdata = CData("foo")
        soup.insert(1, cdata)
        self.assertEqual(str(soup), "<![CDATA[foo]]>")
        self.assertEqual(soup.find(text="foo"), "foo")
        self.assertEqual(soup.contents[0], "foo")

    def test_cdata_is_never_formatted(self):
        """Text inside a CData object is passed into the formatter.

        But the return value is ignored.
        """

        self.count = 0
        def increment(*args):
            # Side effect proves the formatter was invoked; the return
            # value must not appear in the output.
            self.count += 1
            return "BITTER FAILURE"

        soup = self.soup("")
        cdata = CData("<><><>")
        soup.insert(1, cdata)
        self.assertEqual(
            b"<![CDATA[<><><>]]>", soup.encode(formatter=increment))
        self.assertEqual(1, self.count)

    def test_doctype_ends_in_newline(self):
        # Unlike other NavigableString subclasses, a DOCTYPE always ends
        # in a newline.
        doctype = Doctype("foo")
        soup = self.soup("")
        soup.insert(1, doctype)
        self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")

    def test_declaration(self):
        d = Declaration("foo")
        self.assertEqual("<?foo?>", d.output_ready())
class TestSoupSelector(TreeTest):
    """Tests of the CSS selector engine exposed via Tag.select()."""

    # Shared fixture document; every test selects against this markup.
    HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
<div id="main" class="fancy">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
<custom-dashed-tag class="dashed" id="dash2"/>
<div data-tag="dashedvalue" id="data1"/>
</span>
</div>
<x id="xid">
<z id="zida"/>
<z id="zidab"/>
<z id="zidac"/>
</x>
<y id="yid">
<z id="zidb"/>
</y>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""

    def setUp(self):
        self.soup = BeautifulSoup(self.HTML, 'html.parser')

    def assertSelects(self, selector, expected_ids):
        # Run the selector and compare the sorted 'id' attributes of the
        # matches against the expected ids (order-insensitive).
        el_ids = [el['id'] for el in self.soup.select(selector)]
        el_ids.sort()
        expected_ids.sort()
        self.assertEqual(expected_ids, el_ids,
                         "Selector %s, expected [%s], got [%s]" % (
                             selector, ', '.join(expected_ids), ', '.join(el_ids)
                         )
                         )

    assertSelect = assertSelects  # legacy alias

    def assertSelectMultiple(self, *tests):
        for selector, expected_ids in tests:
            self.assertSelect(selector, expected_ids)

    def test_one_tag_one(self):
        els = self.soup.select('title')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].name, 'title')
        self.assertEqual(els[0].contents, [u'The title'])

    def test_one_tag_many(self):
        els = self.soup.select('div')
        self.assertEqual(len(els), 4)
        for div in els:
            self.assertEqual(div.name, 'div')

        # select_one() returns only the first match.
        el = self.soup.select_one('div')
        self.assertEqual('main', el['id'])

    def test_select_one_returns_none_if_no_match(self):
        match = self.soup.select_one('nonexistenttag')
        self.assertEqual(None, match)

    def test_tag_in_tag_one(self):
        els = self.soup.select('div div')
        self.assertSelects('div div', ['inner', 'data1'])

    def test_tag_in_tag_many(self):
        for selector in ('html div', 'html body div', 'body div'):
            self.assertSelects(selector, ['data1', 'main', 'inner', 'footer'])

    def test_tag_no_match(self):
        self.assertEqual(len(self.soup.select('del')), 0)

    def test_invalid_tag(self):
        self.assertRaises(ValueError, self.soup.select, 'tag%t')

    def test_select_dashed_tag_ids(self):
        self.assertSelects('custom-dashed-tag', ['dash1', 'dash2'])

    def test_select_dashed_by_id(self):
        dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
        self.assertEqual(dashed[0].name, 'custom-dashed-tag')
        self.assertEqual(dashed[0]['id'], 'dash2')

    def test_dashed_tag_text(self):
        self.assertEqual(self.soup.select('body > custom-dashed-tag')[0].text, u'Hello there.')

    def test_select_dashed_matches_find_all(self):
        self.assertEqual(self.soup.select('custom-dashed-tag'), self.soup.find_all('custom-dashed-tag'))

    def test_header_tags(self):
        self.assertSelectMultiple(
            ('h1', ['header1']),
            ('h2', ['header2', 'header3']),
        )

    def test_class_one(self):
        for selector in ('.onep', 'p.onep', 'html p.onep'):
            els = self.soup.select(selector)
            self.assertEqual(len(els), 1)
            self.assertEqual(els[0].name, 'p')
            self.assertEqual(els[0]['class'], ['onep'])

    def test_class_mismatched_tag(self):
        els = self.soup.select('div.onep')
        self.assertEqual(len(els), 0)

    def test_one_id(self):
        for selector in ('div#inner', '#inner', 'div div#inner'):
            self.assertSelects(selector, ['inner'])

    def test_bad_id(self):
        els = self.soup.select('#doesnotexist')
        self.assertEqual(len(els), 0)

    def test_items_in_id(self):
        els = self.soup.select('div#inner p')
        self.assertEqual(len(els), 3)
        for el in els:
            self.assertEqual(el.name, 'p')
        self.assertEqual(els[1]['class'], ['onep'])
        self.assertFalse(els[0].has_attr('class'))

    def test_a_bunch_of_emptys(self):
        for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
            self.assertEqual(len(self.soup.select(selector)), 0)

    def test_multi_class_support(self):
        for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
                         '.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
            self.assertSelects(selector, ['pmulti'])

    def test_multi_class_selection(self):
        for selector in ('.class1.class3', '.class3.class2',
                         '.class1.class2.class3'):
            self.assertSelects(selector, ['pmulti'])

    def test_child_selector(self):
        self.assertSelects('.s1 > a', ['s1a1', 's1a2'])
        self.assertSelects('.s1 > a span', ['s1a2s1'])

    def test_child_selector_id(self):
        self.assertSelects('.s1 > a#s1a2 span', ['s1a2s1'])

    def test_attribute_equals(self):
        self.assertSelectMultiple(
            ('p[class="onep"]', ['p1']),
            ('p[id="p1"]', ['p1']),
            ('[class="onep"]', ['p1']),
            ('[id="p1"]', ['p1']),
            ('link[rel="stylesheet"]', ['l1']),
            ('link[type="text/css"]', ['l1']),
            ('link[href="blah.css"]', ['l1']),
            ('link[href="no-blah.css"]', []),
            ('[rel="stylesheet"]', ['l1']),
            ('[type="text/css"]', ['l1']),
            ('[href="blah.css"]', ['l1']),
            ('[href="no-blah.css"]', []),
            ('p[href="no-blah.css"]', []),
            ('[href="no-blah.css"]', []),
        )

    def test_attribute_tilde(self):
        # [attr~=v]: attr is a whitespace-separated list containing v.
        self.assertSelectMultiple(
            ('p[class~="class1"]', ['pmulti']),
            ('p[class~="class2"]', ['pmulti']),
            ('p[class~="class3"]', ['pmulti']),
            ('[class~="class1"]', ['pmulti']),
            ('[class~="class2"]', ['pmulti']),
            ('[class~="class3"]', ['pmulti']),
            ('a[rel~="friend"]', ['bob']),
            ('a[rel~="met"]', ['bob']),
            ('[rel~="friend"]', ['bob']),
            ('[rel~="met"]', ['bob']),
        )

    def test_attribute_startswith(self):
        self.assertSelectMultiple(
            ('[rel^="style"]', ['l1']),
            ('link[rel^="style"]', ['l1']),
            ('notlink[rel^="notstyle"]', []),
            ('[rel^="notstyle"]', []),
            ('link[rel^="notstyle"]', []),
            ('link[href^="bla"]', ['l1']),
            ('a[href^="http://"]', ['bob', 'me']),
            ('[href^="http://"]', ['bob', 'me']),
            ('[id^="p"]', ['pmulti', 'p1']),
            ('[id^="m"]', ['me', 'main']),
            ('div[id^="m"]', ['main']),
            ('a[id^="m"]', ['me']),
            ('div[data-tag^="dashed"]', ['data1'])
        )

    def test_attribute_endswith(self):
        self.assertSelectMultiple(
            ('[href$=".css"]', ['l1']),
            ('link[href$=".css"]', ['l1']),
            ('link[id$="1"]', ['l1']),
            ('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
            ('div[id$="1"]', ['data1']),
            ('[id$="noending"]', []),
        )

    def test_attribute_contains(self):
        self.assertSelectMultiple(
            # From test_attribute_startswith
            ('[rel*="style"]', ['l1']),
            ('link[rel*="style"]', ['l1']),
            ('notlink[rel*="notstyle"]', []),
            ('[rel*="notstyle"]', []),
            ('link[rel*="notstyle"]', []),
            ('link[href*="bla"]', ['l1']),
            ('[href*="http://"]', ['bob', 'me']),
            ('[id*="p"]', ['pmulti', 'p1']),
            ('div[id*="m"]', ['main']),
            ('a[id*="m"]', ['me']),
            # From test_attribute_endswith
            ('[href*=".css"]', ['l1']),
            ('link[href*=".css"]', ['l1']),
            ('link[id*="1"]', ['l1']),
            ('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
            ('div[id*="1"]', ['data1']),
            ('[id*="noending"]', []),
            # New for this test
            ('[href*="."]', ['bob', 'me', 'l1']),
            ('a[href*="."]', ['bob', 'me']),
            ('link[href*="."]', ['l1']),
            ('div[id*="n"]', ['main', 'inner']),
            ('div[id*="nn"]', ['inner']),
            ('div[data-tag*="edval"]', ['data1'])
        )

    def test_attribute_exact_or_hypen(self):
        # [attr|=v]: attr equals v or starts with "v-" (language subcodes).
        self.assertSelectMultiple(
            ('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
            ('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
            ('p[lang|="fr"]', ['lang-fr']),
            ('p[lang|="gb"]', []),
        )

    def test_attribute_exists(self):
        self.assertSelectMultiple(
            ('[rel]', ['l1', 'bob', 'me']),
            ('link[rel]', ['l1']),
            ('a[rel]', ['bob', 'me']),
            ('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
            ('p[class]', ['p1', 'pmulti']),
            ('[blah]', []),
            ('p[blah]', []),
            ('div[data-tag]', ['data1'])
        )

    def test_unsupported_pseudoclass(self):
        self.assertRaises(
            NotImplementedError, self.soup.select, "a:no-such-pseudoclass")

        self.assertRaises(
            NotImplementedError, self.soup.select, "a:nth-of-type(a)")

    def test_nth_of_type(self):
        # Try to select first paragraph
        els = self.soup.select('div#inner p:nth-of-type(1)')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].string, u'Some text')

        # Try to select third paragraph
        els = self.soup.select('div#inner p:nth-of-type(3)')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].string, u'Another')

        # Try to select (non-existent!) fourth paragraph
        els = self.soup.select('div#inner p:nth-of-type(4)')
        self.assertEqual(len(els), 0)

        # Pass in an invalid value.
        self.assertRaises(
            ValueError, self.soup.select, 'div p:nth-of-type(0)')

    def test_nth_of_type_direct_descendant(self):
        els = self.soup.select('div#inner > p:nth-of-type(1)')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].string, u'Some text')

    def test_id_child_selector_nth_of_type(self):
        self.assertSelects('#inner > p:nth-of-type(2)', ['p1'])

    def test_select_on_element(self):
        # Other tests operate on the tree; this operates on an element
        # within the tree.
        inner = self.soup.find("div", id="main")
        selected = inner.select("div")
        # The <div id="inner"> tag was selected. The <div id="footer">
        # tag was not.
        self.assertSelectsIDs(selected, ['inner', 'data1'])

    def test_overspecified_child_id(self):
        self.assertSelects(".fancy #inner", ['inner'])
        self.assertSelects(".normal #inner", [])

    def test_adjacent_sibling_selector(self):
        self.assertSelects('#p1 + h2', ['header2'])
        self.assertSelects('#p1 + h2 + p', ['pmulti'])
        self.assertSelects('#p1 + #header2 + .class1', ['pmulti'])
        self.assertEqual([], self.soup.select('#p1 + p'))

    def test_general_sibling_selector(self):
        self.assertSelects('#p1 ~ h2', ['header2', 'header3'])
        self.assertSelects('#p1 ~ #header2', ['header2'])
        self.assertSelects('#p1 ~ h2 + a', ['me'])
        self.assertSelects('#p1 ~ h2 + [rel="me"]', ['me'])
        self.assertEqual([], self.soup.select('#inner ~ h2'))

    def test_dangling_combinator(self):
        self.assertRaises(ValueError, self.soup.select, 'h1 >')

    def test_sibling_combinator_wont_select_same_tag_twice(self):
        self.assertSelects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])

    # Test the selector grouping operator (the comma)
    def test_multiple_select(self):
        self.assertSelects('x, y', ['xid', 'yid'])

    def test_multiple_select_with_no_space(self):
        self.assertSelects('x,y', ['xid', 'yid'])

    def test_multiple_select_with_more_space(self):
        self.assertSelects('x,    y', ['xid', 'yid'])

    def test_multiple_select_duplicated(self):
        self.assertSelects('x, x', ['xid'])

    def test_multiple_select_sibling(self):
        self.assertSelects('x, y ~ p[lang=fr]', ['xid', 'lang-fr'])

    def test_multiple_select_tag_and_direct_descendant(self):
        self.assertSelects('x, y > z', ['xid', 'zidb'])

    def test_multiple_select_direct_descendant_and_tags(self):
        self.assertSelects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])

    def test_multiple_select_indirect_descendant(self):
        self.assertSelects('div x,y,  z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])

    def test_invalid_multiple_select(self):
        self.assertRaises(ValueError, self.soup.select, ',x, y')
        self.assertRaises(ValueError, self.soup.select, 'x,,y')

    def test_multiple_select_attrs(self):
        self.assertSelects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb'])

    def test_multiple_select_ids(self):
        self.assertSelects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab'])

    def test_multiple_select_nested(self):
        self.assertSelects('body > div > x, y > z', ['xid', 'zidb'])
|
jordivilaseca/ardupi-weather
|
refs/heads/master
|
RaspberryPi/ardupi_weather/database/mongodb.py
|
1
|
import pymongo
import json
import os
import logging
from logging.handlers import TimedRotatingFileHandler
logger = logging.getLogger('mongodb')
def jsonAdd(jsonFile, collection, func, newEntry, params=None):
    """
    Append a pending database operation to a JSON queue file.

    Args:
        jsonFile: Absolute path to the JSON file.
        collection: Name of the collection to which the data has to be stored.
        func: Name of the operation to do ('insert' or 'upsert').
        newEntry: Dictionary containing the data to store.
        params: Extra parameters (e.g. the query key for an upsert operation).
    """
    # Read-modify-write of the whole queue file; json.load/json.dump
    # replace the loads(f.read())/write(dumps()) round-trips.
    with open(jsonFile, 'r') as f:
        data = json.load(f)

    data.append({'coll': collection, 'data': newEntry, 'func': func, 'params': params})

    with open(jsonFile, 'w') as f:
        json.dump(data, f)

    # Lazy %-style arguments: formatting happens only if the record is emitted.
    logger.warning('-ADDED TO QUEUE- (%s on %s) %s',
                   func, collection, json.dumps(newEntry))
class mongodb:
    """
    Class dealing with the storing of the data to a mongo database.

    This class stores the data to a mongo database; in case an exception
    occurs (for example, due to timeout or connection problems), it stores
    the data to an intermediate file and the next time a query to the
    database is correctly done it dumps the file to the database. The
    system stores operations as inserting or upserting an entry to the
    database.
    """

    def __init__(self, dbName, uri, dataPath, logPath):
        """
        Initialization of a database.

        Args:
            dbName: Name of the database.
            uri: Database connection uri.
            dataPath: Path to where the auxiliary queue file will be created.
            logPath: Path to where log files will be created.
        """
        self.uri = uri
        self.dbName = dbName

        # One log file per day, keeping the last 6 rotated files.
        logFile = logPath + 'mongodb.log'
        handler = TimedRotatingFileHandler(logFile, when="midnight", backupCount=6)
        handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s : %(message)s'))
        logger.setLevel(logging.INFO)
        logger.addHandler(handler)

        # NOTE(review): if this raises, self.client stays unset and later
        # calls will fail with AttributeError; the error is only printed.
        try:
            self.client = pymongo.MongoClient(uri)
        except Exception as e:
            print ("Could not connect to MongoDB: %s" % e)

        # Create the pending-operations queue file as an empty JSON list
        # if it does not exist yet.
        self.jsonFile = dataPath + 'dataToUpdate.json'
        if not os.path.exists(self.jsonFile):
            with open(self.jsonFile, 'w+') as f:
                f.write('[]')

        # Try to flush any operations queued by a previous run.
        self.existsDataToDump = self.dumpJson()

    def dumpJson(self):
        """
        Function in charge of dumping the auxiliary json file to the database.

        In case a mongo database error occurs during the dumping operation,
        the operation is aborted and the remaining operations to do to the
        database are kept in the file.

        Returns:
            True if a database error occurred during the dumping (so data
            is still pending), False otherwise.
        """
        data = []
        with open(self.jsonFile, 'r') as f:
            data = json.loads(f.read())

        timeoutError = False
        iMax = len(data)
        i = 0
        # Always work on data[0]: successful entries are deleted from the
        # front, so the next pending entry shifts into position 0.
        while not timeoutError and i < iMax:
            entry = data[0]
            coll = self.getCollection(entry['coll'])
            try:
                if entry['func'] == 'insert':
                    # NOTE(review): Collection.insert/update are the legacy
                    # pymongo API -- confirm the installed pymongo supports them.
                    coll.insert(entry['data'])
                elif entry['func'] == 'upsert':
                    queryKey = entry['params']['queryKey']
                    coll.update({queryKey:entry['data'][queryKey]}, entry['data'], upsert=True)
                del data[0]
                logger.info('-EXECUTED- ' + entry['func'] + ' on ' + entry['coll'])
            except Exception:
                # Abort the dump; the failed entry (and the rest) stay queued.
                timeoutError = True
                logger.warning('-ERROR DUMPING-' + entry['func'] + ' on ' + entry['coll'])
            i += 1

        # Write back whatever is still pending.
        with open(self.jsonFile, 'w') as f:
            f.write(json.dumps(data))

        return timeoutError

    def getCollection(self, dbCollection):
        """
        It gets a collection object from the database.

        Args:
            dbCollection: Name of the collection.

        Returns:
            The collection object.
        """
        return self.client[self.dbName][dbCollection]

    def insert(self, dbCollection, dic):
        """
        It inserts a new document to a collection.

        In case a database error occurs during the operation, the data is
        queued and the operation is retried the next time a database
        operation is successfully done.

        Args:
            dbCollection: Name of the collection to which the data will be inserted.
            dic: Dictionary containing the variables and its values.
        """
        coll = self.getCollection(dbCollection)
        try:
            coll.insert(dic)
        except Exception:
            # Queue the operation for a later retry.
            jsonAdd(self.jsonFile, dbCollection, 'insert', dic)
            self.existsDataToDump = True
        else:
            # The database is reachable again: flush the pending queue.
            if self.existsDataToDump:
                self.existsDataToDump = self.dumpJson()

    def upsert(self, dbCollection, queryKey, dic):
        """
        It inserts an entry if it does not exist, or updates it otherwise.

        In case a database error occurs during the operation, the data is
        queued and the operation is retried the next time a database
        operation is successfully done.

        Args:
            dbCollection: Collection to which the data will be updated or inserted.
            queryKey: Name of the variable that will be used to know if the data has to be inserted
                or updated.
            dic: Dictionary containing the variables names and its values.
        """
        coll = self.getCollection(dbCollection)
        try:
            coll.update({queryKey:dic[queryKey]}, dic, upsert=True)
        except Exception:
            # Queue the operation (with its query key) for a later retry.
            jsonAdd(self.jsonFile, dbCollection, 'upsert', dic, params={'queryKey': queryKey})
            self.existsDataToDump = True
        else:
            if self.existsDataToDump:
                self.existsDataToDump = self.dumpJson()

    def update(self, dbCollection, dic, conditionKey, conditionValue):
        """
        It updates an entry of the database if its conditionKey is equal to the conditionValue.

        NOTE(review): unlike insert()/upsert(), a failed update is silently
        dropped -- it is not queued for retry.

        Args:
            dbCollection: Collection name.
            dic: Dictionary containing the variables and their new values.
            conditionKey: Variable to use as condition.
            conditionValue: Value of the conditionKey variable.
        """
        coll = self.getCollection(dbCollection)
        try:
            coll.update({conditionKey: conditionValue}, {'$set': dic}, False)
        except Exception:
            pass
        else:
            if self.existsDataToDump:
                self.existsDataToDump = self.dumpJson()

    def queryOne(self, dbCollection, conditionKey, conditionValue):
        """
        It searches for the first instance that achieves the condition.

        Args:
            dbCollection: Container name of the database.
            conditionKey: Variable to use as condition.
            conditionValue: Value of the conditionKey variable.

        Returns:
            Returns a dictionary with the name of the variable as a key and its value
            (the '_id' field is excluded), or None if nothing matches.
        """
        coll = self.getCollection(dbCollection)
        return coll.find_one({conditionKey:conditionValue}, {'_id': False})

    def queryAll(self, dbCollection):
        """
        It gets all the entries of a collection.

        Args:
            dbCollection: Collection name.

        Returns:
            Returns a list, where each position is an entry and contains a dictionary with the name of
            the variable as a key and its value ('_id' excluded).
        """
        coll = self.getCollection(dbCollection)
        return list(coll.find(None, {'_id': False}))

    def querySortLimit(self, dbCollection, attribute, sortOrder, limit):
        """
        It gets the first or last 'limit' entries of a collection.

        Args:
            dbCollection: Collection name.
            attribute: Variable name that will be sorted.
            sortOrder: 1 is ascending order, -1 is descending order.
            limit: Number of entries to fetch. If it is 0 it will fetch all the entries.

        Returns:
            Returns an ordered cursor/list, where each position is an entry and contains a
            dictionary with the name of the variable as a key and its value ('_id' excluded).
        """
        coll = self.getCollection(dbCollection)
        return coll.find(None, {'_id': False}).sort([(attribute, sortOrder)]).limit(limit)

    def queryBetweenValues(self, dbCollection, attribute, minValue, maxValue):
        """
        It searches for all the entries between the minValue and maxValue for the variable attribute.

        Args:
            dbCollection: Collection name.
            attribute: Name of the variable to make the query.
            minValue: Minimum value for the attribute (inclusive, $gte).
            maxValue: Maximum value for the attribute (inclusive, $lte).

        Returns:
            Returns a list, where each position is an entry and contains a dictionary with the name of
            the variable as a key and its value ('_id' excluded).
        """
        coll = self.getCollection(dbCollection)
        return list(coll.find({attribute: {"$gte": minValue, "$lte": maxValue}}, {'_id': False}))
    def deleteALL(self, dbCollection):
        """
        It deletes all the entries of a collection.
        Args:
            dbCollection: Collection name.
        """
        coll = self.getCollection(dbCollection)
        try:
            # remove() with no filter wipes every document in the collection.
            coll.remove()
        except Exception:
            # NOTE(review): a failed wipe is silently ignored and is not
            # journaled to the JSON file -- confirm this is intentional.
            pass
        else:
            if self.existsDataToDump:
                self.existsDataToDump = self.dumpJson()
|
TathagataChakraborti/resource-conflicts
|
refs/heads/master
|
PLANROB-2015/seq-sat-lama/Python-2.5.2/Demo/tkinter/matt/canvas-mult-item-sel.py
|
47
|
from Tkinter import *
# allows moving dots with multiple selection.
# Canvas fill colors that visually distinguish selected from unselected dots.
SELECTED_COLOR = "red"
UNSELECTED_COLOR = "blue"
class Test(Frame):
    """Canvas demo: click-to-select multiple dots and drag them together."""
    ###################################################################
    ###### Event callbacks for THE CANVAS (not the stuff drawn on it)
    ###################################################################
    def mouseDown(self, event):
        """Button-1 press: add the dot under the cursor to the selection,
        or clear the selection when the click lands on empty canvas."""
        # see if we're inside a dot. If we are, it
        # gets tagged as CURRENT for free by tk.
        if not event.widget.find_withtag(CURRENT):
            # we clicked outside of all dots on the canvas. unselect all.
            # re-color everything back to an unselected color
            self.draw.itemconfig("selected", fill=UNSELECTED_COLOR)
            # unselect everything
            self.draw.dtag("selected")
        else:
            # mark as "selected" the thing the cursor is under
            self.draw.addtag("selected", "withtag", CURRENT)
            # color it as selected
            self.draw.itemconfig("selected", fill=SELECTED_COLOR)
        # Remember the press position so mouseMove can compute drag deltas.
        self.lastx = event.x
        self.lasty = event.y
    def mouseMove(self, event):
        """Button-1 drag: move every selected dot by the cursor delta."""
        self.draw.move("selected", event.x - self.lastx, event.y - self.lasty)
        self.lastx = event.x
        self.lasty = event.y
    def makeNewDot(self):
        """Create a dot at the canvas origin and add it to the selection."""
        # create a dot, and mark it as current
        fred = self.draw.create_oval(0, 0, 20, 20,
                                     fill=SELECTED_COLOR, tags=CURRENT)
        # and make it selected
        self.draw.addtag("selected", "withtag", CURRENT)
    def createWidgets(self):
        """Build the quit button, canvas, 'new dot' button and help label."""
        self.QUIT = Button(self, text='QUIT', foreground='red',
                           command=self.quit)
        ################
        # make the canvas and bind some behavior to it
        ################
        self.draw = Canvas(self, width="5i", height="5i")
        Widget.bind(self.draw, "<1>", self.mouseDown)
        Widget.bind(self.draw, "<B1-Motion>", self.mouseMove)
        # and other things.....
        self.button = Button(self, text="make a new dot", foreground="blue",
                             command=self.makeNewDot)
        message = ("%s dots are selected and can be dragged.\n"
                   "%s are not selected.\n"
                   "Click in a dot to select it.\n"
                   "Click on empty space to deselect all dots."
                   ) % (SELECTED_COLOR, UNSELECTED_COLOR)
        self.label = Message(self, width="5i", text=message)
        self.QUIT.pack(side=BOTTOM, fill=BOTH)
        self.label.pack(side=BOTTOM, fill=X, expand=1)
        self.button.pack(side=BOTTOM, fill=X)
        self.draw.pack(side=LEFT)
    def __init__(self, master=None):
        Frame.__init__(self, master)
        Pack.config(self)
        self.createWidgets()
# Instantiate the demo frame and enter the Tk event loop.
test = Test()
test.mainloop()
|
capooti/geonode
|
refs/heads/2.0-wfp
|
geonode/proxy/tests.py
|
3
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from django.test.utils import override_settings, str_prefix
from geonode.proxy.views import validate_host
from geonode.utils import ogc_server_settings
class ProxyTest(TestCase):
    """Exercise how the /proxy view gates requests on PROXY_ALLOWED_HOSTS and DEBUG."""

    def setUp(self):
        self.admin, _created = User.objects.get_or_create(username='admin', password='admin', is_superuser=True)

    @override_settings(DEBUG=True, PROXY_ALLOWED_HOSTS=())
    def test_validate_host_disabled_in_debug(self):
        """With no PROXY_ALLOWED_HOSTS and DEBUG on, every host passes the proxy."""
        client = Client()
        resp = client.get('/proxy?url=http://www.google.com', follow=True)
        self.assertEqual(resp.status_code, 200)

    @override_settings(DEBUG=False, PROXY_ALLOWED_HOSTS=())
    def test_validate_host_disabled_not_in_debug(self):
        """With no PROXY_ALLOWED_HOSTS and DEBUG off, requests are rejected with 403."""
        client = Client()
        resp = client.get('/proxy?url=http://www.google.com', follow=True)
        self.assertEqual(resp.status_code, 403)

    @override_settings(DEBUG=False, PROXY_ALLOWED_HOSTS=('.google.com',))
    def test_proxy_allowed_host(self):
        """A host matching a PROXY_ALLOWED_HOSTS entry is proxied even with DEBUG off."""
        client = Client()
        resp = client.get('/proxy?url=http://www.google.com', follow=True)
        self.assertEqual(resp.status_code, 200)
|
partofthething/home-assistant
|
refs/heads/dev
|
tests/components/awair/test_sensor.py
|
3
|
"""Tests for the Awair sensor platform."""
from unittest.mock import patch
from homeassistant.components.awair.const import (
API_CO2,
API_HUMID,
API_LUX,
API_PM10,
API_PM25,
API_SCORE,
API_SPL_A,
API_TEMP,
API_VOC,
ATTR_UNIQUE_ID,
DOMAIN,
SENSOR_TYPES,
)
from homeassistant.const import (
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
LIGHT_LUX,
PERCENTAGE,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
)
from .const import (
AWAIR_UUID,
CONFIG,
DEVICES_FIXTURE,
GEN1_DATA_FIXTURE,
GEN2_DATA_FIXTURE,
GLOW_DATA_FIXTURE,
MINT_DATA_FIXTURE,
OFFLINE_FIXTURE,
OMNI_DATA_FIXTURE,
UNIQUE_ID,
USER_FIXTURE,
)
from tests.common import MockConfigEntry
async def setup_awair(hass, fixtures):
    """Add Awair devices to hass, using specified fixtures for data."""
    config_entry = MockConfigEntry(domain=DOMAIN, unique_id=UNIQUE_ID, data=CONFIG)

    # Serve the canned API responses while the entry is being set up.
    with patch("python_awair.AwairClient.query", side_effect=fixtures):
        config_entry.add_to_hass(hass)
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
def assert_expected_properties(
    hass, registry, name, unique_id, state_value, attributes
):
    """Assert that an entity has the given unique id, state and attributes."""
    registry_entry = registry.async_get(name)
    assert registry_entry.unique_id == unique_id

    state = hass.states.get(name)
    assert state
    assert state.state == state_value

    for attr_name in attributes:
        assert state.attributes.get(attr_name) == attributes[attr_name]
async def test_awair_gen1_sensors(hass):
    """Test expected sensors on a 1st gen Awair."""
    # Expected values below come from GEN1_DATA_FIXTURE.
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, GEN1_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "88",
        {ATTR_ICON: "mdi:blur"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_temperature",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_TEMP][ATTR_UNIQUE_ID]}",
        "21.8",
        {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS, "awair_index": 1.0},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_humidity",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_HUMID][ATTR_UNIQUE_ID]}",
        "41.59",
        {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, "awair_index": 0.0},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_carbon_dioxide",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_CO2][ATTR_UNIQUE_ID]}",
        "654.0",
        {
            ATTR_ICON: "mdi:cloud",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_PARTS_PER_MILLION,
            "awair_index": 0.0,
        },
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_volatile_organic_compounds",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_VOC][ATTR_UNIQUE_ID]}",
        "366",
        {
            ATTR_ICON: "mdi:cloud",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_PARTS_PER_BILLION,
            "awair_index": 1.0,
        },
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_pm2_5",
        # gen1 unique_id should be awair_12345-DUST, which matches old integration behavior
        f"{AWAIR_UUID}_DUST",
        "14.3",
        {
            ATTR_ICON: "mdi:blur",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
            "awair_index": 1.0,
        },
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_pm10",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_PM10][ATTR_UNIQUE_ID]}",
        "14.3",
        {
            ATTR_ICON: "mdi:blur",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
            "awair_index": 1.0,
        },
    )
    # We should not have a dust sensor; it's aliased as pm2.5
    # and pm10 sensors.
    assert hass.states.get("sensor.living_room_dust") is None
    # We should not have sound or lux sensors.
    assert hass.states.get("sensor.living_room_sound_level") is None
    assert hass.states.get("sensor.living_room_illuminance") is None
async def test_awair_gen2_sensors(hass):
    """Test expected sensors on a 2nd gen Awair."""
    # Expected values below come from GEN2_DATA_FIXTURE.
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, GEN2_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "97",
        {ATTR_ICON: "mdi:blur"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_pm2_5",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_PM25][ATTR_UNIQUE_ID]}",
        "2.0",
        {
            ATTR_ICON: "mdi:blur",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
            "awair_index": 0.0,
        },
    )
    # The Awair 2nd gen reports specifically a pm2.5 sensor,
    # and so we don't alias anything. Make sure we didn't do that.
    assert hass.states.get("sensor.living_room_pm10") is None
async def test_awair_mint_sensors(hass):
    """Test expected sensors on an Awair mint."""
    # Expected values below come from MINT_DATA_FIXTURE.
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, MINT_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "98",
        {ATTR_ICON: "mdi:blur"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_pm2_5",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_PM25][ATTR_UNIQUE_ID]}",
        "1.0",
        {
            ATTR_ICON: "mdi:blur",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
            "awair_index": 0.0,
        },
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_illuminance",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_LUX][ATTR_UNIQUE_ID]}",
        "441.7",
        {ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX},
    )
    # The Mint does not have a CO2 sensor.
    assert hass.states.get("sensor.living_room_carbon_dioxide") is None
async def test_awair_glow_sensors(hass):
    """Test expected sensors on an Awair glow."""
    # Expected values below come from GLOW_DATA_FIXTURE.
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, GLOW_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "93",
        {ATTR_ICON: "mdi:blur"},
    )
    # The glow does not have a particle sensor
    assert hass.states.get("sensor.living_room_pm2_5") is None
async def test_awair_omni_sensors(hass):
    """Test expected sensors on an Awair omni."""
    # Expected values below come from OMNI_DATA_FIXTURE.
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, OMNI_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "99",
        {ATTR_ICON: "mdi:blur"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_sound_level",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SPL_A][ATTR_UNIQUE_ID]}",
        "47.0",
        {ATTR_ICON: "mdi:ear-hearing", ATTR_UNIT_OF_MEASUREMENT: "dBa"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_illuminance",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_LUX][ATTR_UNIQUE_ID]}",
        "804.9",
        {ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX},
    )
async def test_awair_offline(hass):
    """Test expected behavior when an Awair is offline."""
    # OFFLINE_FIXTURE stands in for the device's (empty) data response.
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, OFFLINE_FIXTURE]
    await setup_awair(hass, fixtures)
    # The expected behavior is that we won't have any sensors
    # if the device is not online when we set it up. python_awair
    # does not make any assumptions about what sensors a device
    # might have - they are created dynamically.
    # We check for the absence of the "awair score", which every
    # device *should* have if it's online. If we don't see it,
    # then we probably didn't set anything up. Which is correct,
    # in this case.
    assert hass.states.get("sensor.living_room_awair_score") is None
async def test_awair_unavailable(hass):
    """Test expected behavior when an Awair becomes offline later."""
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, GEN1_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()
    # Initially online: the score entity exists with real data.
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "88",
        {ATTR_ICON: "mdi:blur"},
    )
    # Simulate the device dropping offline on the next poll; the entity
    # should remain but flip to STATE_UNAVAILABLE.
    with patch("python_awair.AwairClient.query", side_effect=OFFLINE_FIXTURE):
        await hass.helpers.entity_component.async_update_entity(
            "sensor.living_room_awair_score"
        )
        assert_expected_properties(
            hass,
            registry,
            "sensor.living_room_awair_score",
            f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
            STATE_UNAVAILABLE,
            {ATTR_ICON: "mdi:blur"},
        )
|
aptivate/econsensus
|
refs/heads/master
|
django/econsensus/publicweb/tests/resending_message_test.py
|
3
|
from django.test.testcases import SimpleTestCase
from mock import patch, Mock
from publicweb.extra_models import FEEDBACK_ADDED_NOTIFICATIONS,\
NotificationSettings, FEEDBACK_MAJOR_CHANGES
from django_dynamic_fixture import N
from django.contrib.auth.models import User
from publicweb.models import Decision, additional_message_required
def get_or_create(**kwargs):
    """Stand-in for NotificationSettings.objects.get_or_create.

    Ignores all lookup kwargs and reports a freshly faked settings object at
    the FEEDBACK_ADDED_NOTIFICATIONS level, with created=True.
    """
    fake_settings = N(
        NotificationSettings,
        notification_level=FEEDBACK_ADDED_NOTIFICATIONS,
    )
    return fake_settings, True
class ResendingMessageTest(SimpleTestCase):
    """Check when additional_message_required() asks for an extra message.

    Each test patches NotificationSettings.objects.get_or_create (to fix the
    user's notification level) and notification.is_observing (to fix whether
    the user already watches the decision).
    """
    # Level = FEEDBACK_ADDED_NOTIFICATIONS, not observing yet -> message needed.
    @patch('publicweb.models.NotificationSettings.objects',
           new=Mock(get_or_create=get_or_create)
    )
    @patch('publicweb.models.notification',
           new=Mock(is_observing=lambda a,b: False))
    def test_message_required_if_major_changes_only_user_watches_adds_comment(self):
        user_1 = N(User, id=1)
        decision = N(Decision, author=user_1, editor=user_1, id=1)
        decision.watchers = []
        self.assertTrue(
            additional_message_required(
                user_1, decision, FEEDBACK_MAJOR_CHANGES
            )
        )
    # Level already FEEDBACK_MAJOR_CHANGES -> the notification system covers
    # this feedback type, so no extra message is needed.
    @patch('publicweb.models.NotificationSettings.objects',
           new=Mock(get_or_create=lambda organization, user: (
                N(
                    NotificationSettings,
                    notification_level=FEEDBACK_MAJOR_CHANGES
                ),
                True
            )
        )
    )
    @patch('publicweb.models.notification',
           new=Mock(is_observing=lambda a,b: False))
    def test_message_not_required_if_feedback_major_notification_user_watches_adds_comment(self):
        user_1 = N(User, id=1)
        decision = N(Decision, author=user_1, editor=user_1, id=1)
        self.assertFalse(
            additional_message_required(
                user_1, decision, FEEDBACK_MAJOR_CHANGES
            )
        )
    # Already observing the decision -> no extra message.
    @patch('publicweb.models.NotificationSettings.objects',
           new=Mock(get_or_create=get_or_create)
    )
    @patch('publicweb.models.notification',
           new=Mock(is_observing=lambda a,b: True))
    def test_message_not_required_if_user_already_observing(self):
        user_1 = N(User, id=1)
        decision = N(Decision, author=user_1, editor=user_1, id=1)
        self.assertFalse(
            additional_message_required(
                user_1, decision, FEEDBACK_MAJOR_CHANGES
            )
        )
    # Not observing -> extra message required.
    @patch('publicweb.models.NotificationSettings.objects',
           new=Mock(get_or_create=get_or_create)
    )
    @patch('publicweb.models.notification',
           new=Mock(is_observing=lambda a,b: False))
    def test_message_required_if_user_not_already_observing(self):
        user_1 = N(User, id=1)
        decision = N(Decision, author=user_1, editor=user_1, id=1)
        self.assertTrue(
            additional_message_required(
                user_1, decision, FEEDBACK_MAJOR_CHANGES
            )
        )
|
farhaanbukhsh/networkx
|
refs/heads/master
|
networkx/linalg/tests/test_graphmatrix.py
|
40
|
from nose import SkipTest
import networkx as nx
from networkx.generators.degree_seq import havel_hakimi_graph
class TestGraphMatrix(object):
    """Tests for incidence- and adjacency-matrix conversions of graphs."""
    numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
    @classmethod
    def setupClass(cls):
        # Import numpy/scipy lazily so the whole class is skipped when SciPy
        # is unavailable (the matrix helpers return scipy sparse matrices).
        global numpy
        global assert_equal
        global assert_almost_equal
        try:
            import numpy
            import scipy
            from numpy.testing import assert_equal,assert_almost_equal
        except ImportError:
            raise SkipTest('SciPy not available.')
    def setUp(self):
        # Small fixed graph built from the degree sequence [3,2,2,1,0];
        # node 4 is isolated (all-zero rows below).
        deg=[3,2,2,1,0]
        self.G=havel_hakimi_graph(deg)
        # Expected oriented incidence matrix of G (rows=nodes, cols=edges).
        self.OI=numpy.array([[-1, -1, -1, 0],
                             [1, 0, 0, -1],
                             [0, 1, 0, 1],
                             [0, 0, 1, 0],
                             [0, 0, 0, 0]])
        # Expected (unweighted) adjacency matrix of G.
        self.A=numpy.array([[0, 1, 1, 1, 0],
                            [1, 0, 1, 0, 0],
                            [1, 1, 0, 0, 0],
                            [1, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0]])
        # Weighted copy of G: every edge carries weight=0.5 and other=0.3.
        self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
                for (u,v) in self.G.edges_iter() )
        self.WG.add_node(4)
        self.WA=numpy.array([[0 , 0.5, 0.5, 0.5, 0],
                             [0.5, 0 , 0.5, 0 , 0],
                             [0.5, 0.5, 0 , 0 , 0],
                             [0.5, 0 , 0 , 0 , 0],
                             [0 , 0 , 0 , 0 , 0]])
        # Multigraph variants; MG2 duplicates edge (0,1).
        self.MG=nx.MultiGraph(self.G)
        self.MG2=self.MG.copy()
        self.MG2.add_edge(0,1)
        self.MG2A=numpy.array([[0, 2, 1, 1, 0],
                               [2, 0, 1, 0, 0],
                               [1, 1, 0, 0, 0],
                               [1, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0]])
        self.MGOI=numpy.array([[-1, -1, -1, -1, 0],
                               [1, 1, 0, 0, -1],
                               [0, 0, 1, 0, 1],
                               [0, 0, 0, 1, 0],
                               [0, 0, 0, 0, 0]])
    def test_incidence_matrix(self):
        "Conversion to incidence matrix"
        assert_equal(nx.incidence_matrix(self.G,oriented=True).todense(),self.OI)
        assert_equal(nx.incidence_matrix(self.G).todense(),numpy.abs(self.OI))
        assert_equal(nx.incidence_matrix(self.MG,oriented=True).todense(),self.OI)
        assert_equal(nx.incidence_matrix(self.MG).todense(),numpy.abs(self.OI))
        assert_equal(nx.incidence_matrix(self.MG2,oriented=True).todense(),self.MGOI)
        assert_equal(nx.incidence_matrix(self.MG2).todense(),numpy.abs(self.MGOI))
        assert_equal(nx.incidence_matrix(self.WG,oriented=True).todense(),self.OI)
        assert_equal(nx.incidence_matrix(self.WG).todense(),numpy.abs(self.OI))
        # With a weight key, entries are scaled by the edge attribute.
        assert_equal(nx.incidence_matrix(self.WG,oriented=True,
                                         weight='weight').todense(),0.5*self.OI)
        assert_equal(nx.incidence_matrix(self.WG,weight='weight').todense(),
                     numpy.abs(0.5*self.OI))
        assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other').todense(),
                     0.3*self.OI)
        WMG=nx.MultiGraph(self.WG)
        WMG.add_edge(0,1,attr_dict={'weight':0.5,'other':0.3})
        assert_equal(nx.incidence_matrix(WMG,weight='weight').todense(),
                     numpy.abs(0.5*self.MGOI))
        assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True).todense(),
                     0.5*self.MGOI)
        assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True).todense(),
                     0.3*self.MGOI)
    def test_adjacency_matrix(self):
        "Conversion to adjacency matrix"
        assert_equal(nx.adj_matrix(self.G).todense(),self.A)
        assert_equal(nx.adj_matrix(self.MG).todense(),self.A)
        assert_equal(nx.adj_matrix(self.MG2).todense(),self.MG2A)
        assert_equal(nx.adj_matrix(self.G,nodelist=[0,1]).todense(),self.A[:2,:2])
        assert_equal(nx.adj_matrix(self.WG).todense(),self.WA)
        # weight=None ignores edge weights; weight='other' uses that key.
        assert_equal(nx.adj_matrix(self.WG,weight=None).todense(),self.A)
        assert_equal(nx.adj_matrix(self.MG2,weight=None).todense(),self.MG2A)
        assert_equal(nx.adj_matrix(self.WG,weight='other').todense(),0.6*self.WA)
|
koobonil/Boss2D
|
refs/heads/master
|
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
|
3
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import rnn as rnn_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
class Plus1RNNCell(rnn_lib.RNNCell):
  """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""

  @property
  def output_size(self):
    # Fixed 5-unit output.
    return 5

  @property
  def state_size(self):
    # State is the same fixed width as the output.
    return 5

  def __call__(self, input_, state, scope=None):
    # Both the emitted output and the carried state advance by one.
    return input_ + 1, state + 1
class DummyMultiDimensionalLSTM(rnn_lib.RNNCell):
  """LSTM Cell generating (output, new_state) = (input + 1, state + 1).

  The input to this cell may have an arbitrary number of dimensions that follow
  the preceding 'Time' and 'Batch' dimensions.
  """

  def __init__(self, dims):
    """Initialize the Multi-dimensional LSTM cell.

    Args:
      dims: tuple that contains the dimensions of the output of the cell,
        without including 'Time' or 'Batch' dimensions.

    Raises:
      TypeError: if `dims` is not a tuple.
    """
    if not isinstance(dims, tuple):
      # Bug fix: the two implicitly-concatenated string literals were missing
      # a separating space, producing "...LSTMshould be a tuple of ints.".
      raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM "
                      "should be a tuple of ints.")
    self._dims = dims
    self._output_size = tensor_shape.TensorShape(self._dims)
    # LSTM state is an (h, c) pair, each shaped like the output.
    self._state_size = (tensor_shape.TensorShape(self._dims),
                        tensor_shape.TensorShape(self._dims))

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, input_, state, scope=None):
    # Increment the output and both state components by one.
    h, c = state
    return (input_ + 1, (h + 1, c + 1))
class NestedRNNCell(rnn_lib.RNNCell):
  """RNN Cell generating (output, new_state) = (input + 1, state + 1).

  The input, output and state of this cell is a tuple of two tensors.
  """

  @property
  def output_size(self):
    # Two parallel 5-unit outputs.
    return (5, 5)

  @property
  def state_size(self):
    # Two parallel 6-unit state components.
    return (6, 6)

  def __call__(self, input_, state, scope=None):
    x, y = input_
    h, c = state
    # Every component of both tuples advances by one.
    return ((x + 1, y + 1), (h + 1, c + 1))
class TestStateSaver(object):
  """Minimal state saver stub: hands out zero states, records saved ones."""

  def __init__(self, batch_size, state_size):
    self._batch_size = batch_size
    self._state_size = state_size
    self.saved_state = {}

  def state(self, name):
    """Return a zero tensor of shape (batch_size,) + state_size for `name`."""
    if isinstance(self._state_size, dict):
      size = self._state_size[name]
    else:
      size = self._state_size
    if isinstance(size, int):
      size = (size,)
    elif not isinstance(size, tuple):
      raise TypeError("state_size should either be an int or a tuple")
    return array_ops.zeros((self._batch_size,) + size)

  def save_state(self, name, state):
    """Record `state` under `name` and pass it through unchanged."""
    self.saved_state[name] = state
    return array_ops.identity(state)
class RNNTest(test.TestCase):
  """Unit tests for rnn.static_rnn using simple deterministic cells."""

  def setUp(self):
    # Fixed seed keeps the random test inputs reproducible.
    self._seed = 23489
    np.random.seed(self._seed)

  def testInvalidSequenceLengthShape(self):
    """A scalar sequence_length must be rejected (vector required)."""
    cell = Plus1RNNCell()
    inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
    with self.assertRaisesRegexp(ValueError, "must be a vector"):
      rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)

  def testRNN(self):
    """Each output equals input+1; the final state counts the steps."""
    cell = Plus1RNNCell()
    batch_size = 2
    input_size = 5
    max_length = 8  # unrolled up to this length
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32, shape=(batch_size, input_size))
    ]
    outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape(), inp.get_shape())
      self.assertEqual(out.dtype, inp.dtype)
    with self.test_session(use_gpu=False) as sess:
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
      # Outputs
      for v in values[:-1]:
        self.assertAllClose(v, input_value + 1.0)
      # Final state
      self.assertAllClose(
          values[-1],
          max_length * np.ones(
              (batch_size, input_size), dtype=np.float32))

  def testDropout(self):
    """input_keep_prob ~ 0 zeros the inputs, so outputs collapse to 1.0."""
    cell = Plus1RNNCell()
    full_dropout_cell = rnn_cell.DropoutWrapper(
        cell, input_keep_prob=1e-12, seed=0)
    batch_size = 2
    input_size = 5
    max_length = 8
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32, shape=(batch_size, input_size))
    ]
    with variable_scope.variable_scope("share_scope"):
      outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    with variable_scope.variable_scope("drop_scope"):
      dropped_outputs, _ = rnn.static_rnn(
          full_dropout_cell, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      self.assertEqual(out.dtype, inp.dtype)
    with self.test_session(use_gpu=False) as sess:
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
      full_dropout_values = sess.run(dropped_outputs,
                                     feed_dict={inputs[0]: input_value})
      for v in values[:-1]:
        self.assertAllClose(v, input_value + 1.0)
      for d_v in full_dropout_values[:-1]:  # Add 1.0 to dropped_out (all zeros)
        self.assertAllClose(d_v, np.ones_like(input_value))

  def _testDynamicCalculation(self, use_gpu):
    """Per-example sequence_length truncates outputs and freezes state."""
    cell = Plus1RNNCell()
    sequence_length = array_ops.placeholder(dtypes.int64)
    batch_size = 2
    input_size = 5
    max_length = 8
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32, shape=(batch_size, input_size))
    ]
    with variable_scope.variable_scope("drop_scope"):
      dynamic_outputs, dynamic_state = rnn.static_rnn(
          cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
    self.assertEqual(len(dynamic_outputs), len(inputs))
    with self.test_session(use_gpu=use_gpu) as sess:
      input_value = np.random.randn(batch_size, input_size)
      dynamic_values = sess.run(
          dynamic_outputs,
          feed_dict={inputs[0]: input_value,
                     sequence_length: [2, 3]})
      dynamic_state_value = sess.run(
          [dynamic_state],
          feed_dict={inputs[0]: input_value,
                     sequence_length: [2, 3]})
      # outputs are fully calculated for t = 0, 1
      for v in dynamic_values[:2]:
        self.assertAllClose(v, input_value + 1.0)
      # outputs at t = 2 are zero for entry 0, calculated for entry 1
      self.assertAllClose(dynamic_values[2],
                          np.vstack((np.zeros((input_size)),
                                     1.0 + input_value[1, :])))
      # outputs at t = 3+ are zero
      for v in dynamic_values[3:]:
        self.assertAllEqual(v, np.zeros_like(input_value))
      # the final states are:
      #  entry 0: the values from the calculation at t=1
      #  entry 1: the values from the calculation at t=2
      self.assertAllEqual(dynamic_state_value[0],
                          np.vstack((1.0 * (1 + 1) * np.ones((input_size)),
                                     1.0 * (2 + 1) * np.ones((input_size)))))

  def testDynamicCalculation(self):
    # Run the same check on both GPU-preferred and CPU-only sessions.
    self._testDynamicCalculation(True)
    self._testDynamicCalculation(False)

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Verify all variables created by `factory` live under `prefix`."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
      # check that all the variables names starts
      # with the proper scope.
      variables_lib.global_variables_initializer()
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testScope(self):
    """static_rnn respects scope objects, scope strings, and the default."""

    def factory(scope):
      cell = Plus1RNNCell()
      batch_size = 2
      input_size = 5
      max_length = 8  # unrolled up to this length
      inputs = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size, input_size))
      ]
      return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMTest(test.TestCase):
  def setUp(self):
    # Fixed seed keeps the random initializers/inputs reproducible per test.
    self._seed = 23489
    np.random.seed(self._seed)
  def _testNoProjNoSharding(self, use_gpu):
    """Smoke test: a plain LSTM (no projection, no sharding) builds and runs."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      cell = rnn_cell.LSTMCell(
          num_units, initializer=initializer, state_is_tuple=False)
      inputs = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size, input_size))
      ]
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      # Only the first placeholder is fed; static_rnn still runs -- presumably
      # the remaining placeholders share the feed -- TODO confirm.
      sess.run(outputs, feed_dict={inputs[0]: input_value})
  def _testCellClipping(self, use_gpu):
    """With cell_clip=0.0 the cell state is clamped to zero, so outputs are 0."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          cell_clip=0.0,
          initializer=initializer,
          state_is_tuple=False)
      inputs = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size, input_size))
      ]
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs, feed_dict={inputs[0]: input_value})
      for value in values:
        # if cell c is clipped to 0, tanh(c) = 0 => m==0
        self.assertAllEqual(value, np.zeros((batch_size, num_units)))
def _testNoProjNoShardingSimpleStateSaver(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, 2 * num_units)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=False,
initializer=initializer,
state_is_tuple=False)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name="save_lstm")
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
(last_state_value, saved_state_value) = sess.run(
[state, state_saver.saved_state["save_lstm"]],
feed_dict={inputs[0]: input_value})
self.assertAllEqual(last_state_value, saved_state_value)
def testNoProjNoShardingTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, num_units)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=False,
initializer=initializer,
state_is_tuple=True)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
]
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=("c", "m"))
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
last_and_saved_states = sess.run(
state + (state_saver.saved_state["c"], state_saver.saved_state["m"]),
feed_dict={inputs[0]: input_value})
self.assertEqual(4, len(last_and_saved_states))
self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])
def testNoProjNoShardingNestedTupleStateSaver(self):
num_units = 3
input_size = 5
batch_size = 2
max_length = 8
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, {
"c0": num_units,
"m0": num_units,
"c1": num_units + 1,
"m1": num_units + 1,
"c2": num_units + 2,
"m2": num_units + 2,
"c3": num_units + 3,
"m3": num_units + 3
})
def _cell(i):
return rnn_cell.LSTMCell(
num_units + i,
use_peepholes=False,
initializer=initializer,
state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, input_size))
]
state_names = (("c0", "m0"), ("c1", "m1"), ("c2", "m2"), ("c3", "m3"))
with variable_scope.variable_scope("share_scope"):
outputs, state = rnn.static_state_saving_rnn(
cell, inputs, state_saver=state_saver, state_name=state_names)
self.assertEqual(len(outputs), len(inputs))
# Final output comes from _cell(3) which has state size num_units + 3
for out in outputs:
self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
last_states = sess.run(list(nest.flatten(state)),
feed_dict={inputs[0]: input_value})
saved_states = sess.run(list(state_saver.saved_state.values()),
feed_dict={inputs[0]: input_value})
self.assertEqual(8, len(last_states))
self.assertEqual(8, len(saved_states))
flat_state_names = nest.flatten(state_names)
named_saved_states = dict(
zip(state_saver.saved_state.keys(), saved_states))
for i in range(8):
self.assertAllEqual(last_states[i],
named_saved_states[flat_state_names[i]])
def _testProjNoSharding(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testStateTupleWithProjAndSequenceLength(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(None, input_size))
]
cell_notuple = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
cell_tuple = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=True)
with variable_scope.variable_scope("root") as scope:
outputs_notuple, state_notuple = rnn.static_rnn(
cell_notuple,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
# TODO(ebrevdo): For this test, we ensure values are identical and
# therefore the weights here are tied. In the future, we may consider
# making the state_is_tuple property mutable so we can avoid
# having to do this - especially if users ever need to reuse
# the parameters from different RNNCell instances. Right now,
# this seems an unrealistic use case except for testing.
cell_tuple._scope = cell_notuple._scope # pylint: disable=protected-access
outputs_tuple, state_tuple = rnn.static_rnn(
cell_tuple,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs_notuple), len(inputs))
self.assertEqual(len(outputs_tuple), len(inputs))
self.assertTrue(isinstance(state_tuple, tuple))
self.assertTrue(isinstance(state_notuple, ops_lib.Tensor))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
outputs_notuple_v = sess.run(outputs_notuple,
feed_dict={inputs[0]: input_value})
outputs_tuple_v = sess.run(outputs_tuple,
feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)
(state_notuple_v,) = sess.run((state_notuple,),
feed_dict={inputs[0]: input_value})
state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value})
self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))
def _testProjSharding(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testDoubleInput(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(
dtypes.float64, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
outputs, _ = rnn.static_rnn(
cell,
inputs,
initial_state=cell.zero_state(batch_size, dtypes.float64))
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run()
input_value = np.asarray(
np.random.randn(batch_size, input_size), dtype=np.float64)
values = sess.run(outputs, feed_dict={inputs[0]: input_value})
self.assertEqual(values[0].dtype, input_value.dtype)
def _testShardNoShardEquivalentOutput(self, use_gpu):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(None, input_size))
]
initializer = init_ops.constant_initializer(0.001)
cell_noshard = rnn_cell.LSTMCell(
num_units,
num_proj=num_proj,
use_peepholes=True,
initializer=initializer,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
state_is_tuple=False)
cell_shard = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("noshard_scope"):
outputs_noshard, state_noshard = rnn.static_rnn(
cell_noshard, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("shard_scope"):
outputs_shard, state_shard = rnn.static_rnn(
cell_shard, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs_noshard), len(inputs))
self.assertEqual(len(outputs_noshard), len(outputs_shard))
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
feeds = dict((x, input_value) for x in inputs)
values_noshard = sess.run(outputs_noshard, feed_dict=feeds)
values_shard = sess.run(outputs_shard, feed_dict=feeds)
state_values_noshard = sess.run([state_noshard], feed_dict=feeds)
state_values_shard = sess.run([state_shard], feed_dict=feeds)
self.assertEqual(len(values_noshard), len(values_shard))
self.assertEqual(len(state_values_noshard), len(state_values_shard))
for (v_noshard, v_shard) in zip(values_noshard, values_shard):
self.assertAllClose(v_noshard, v_shard, atol=1e-3)
for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):
self.assertAllClose(s_noshard, s_shard, atol=1e-3)
def _testDoubleInputWithDropoutAndDynamicCalculation(self, use_gpu):
"""Smoke test for using LSTM with doubles, dropout, dynamic calculation."""
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
sequence_length = array_ops.placeholder(dtypes.int64)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(
dtypes.float64, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
initializer=initializer,
state_is_tuple=False)
dropout_cell = rnn_cell.DropoutWrapper(cell, 0.5, seed=0)
outputs, state = rnn.static_rnn(
dropout_cell,
inputs,
sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, dtypes.float64))
self.assertEqual(len(outputs), len(inputs))
variables_lib.global_variables_initializer().run(
feed_dict={sequence_length: [2, 3]})
input_value = np.asarray(
np.random.randn(batch_size, input_size), dtype=np.float64)
values = sess.run(
outputs, feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
state_value = sess.run(
[state], feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
self.assertEqual(values[0].dtype, input_value.dtype)
self.assertEqual(state_value[0].dtype, input_value.dtype)
def testSharingWeightsWithReuse(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
initializer_d = init_ops.random_uniform_initializer(
-1, 1, seed=self._seed + 1)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
cell_d = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer_d,
state_is_tuple=False)
with variable_scope.variable_scope("share_scope"):
outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("share_scope", reuse=True):
outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with variable_scope.variable_scope("diff_scope"):
outputs2, _ = rnn.static_rnn(cell_d, inputs, dtype=dtypes.float32)
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(outputs0 + outputs1 + outputs2,
feed_dict={inputs[0]: input_value})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:2 * max_length]
outputs2_values = output_values[2 * max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
self.assertEqual(len(outputs0_values), len(outputs2_values))
for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
# Same weights used by both RNNs so outputs should be the same.
self.assertAllEqual(o1, o2)
# Different weights used so outputs should be different.
self.assertTrue(np.linalg.norm(o1 - o3) > 1e-6)
def testSharingWeightsWithDifferentNamescope(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(None, input_size))
]
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=False)
with ops_lib.name_scope("scope0"):
with variable_scope.variable_scope("share_scope"):
outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
with ops_lib.name_scope("scope1"):
with variable_scope.variable_scope("share_scope", reuse=True):
outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
output_values = sess.run(outputs0 + outputs1,
feed_dict={inputs[0]: input_value})
outputs0_values = output_values[:max_length]
outputs1_values = output_values[max_length:]
self.assertEqual(len(outputs0_values), len(outputs1_values))
for out0, out1 in zip(outputs0_values, outputs1_values):
self.assertAllEqual(out0, out1)
def testNoProjNoShardingSimpleStateSaver(self):
self._testNoProjNoShardingSimpleStateSaver(use_gpu=False)
self._testNoProjNoShardingSimpleStateSaver(use_gpu=True)
def testNoProjNoSharding(self):
self._testNoProjNoSharding(use_gpu=False)
self._testNoProjNoSharding(use_gpu=True)
def testCellClipping(self):
self._testCellClipping(use_gpu=False)
self._testCellClipping(use_gpu=True)
def testProjNoSharding(self):
self._testProjNoSharding(use_gpu=False)
self._testProjNoSharding(use_gpu=True)
def testProjSharding(self):
self._testProjSharding(use_gpu=False)
self._testProjSharding(use_gpu=True)
def testShardNoShardEquivalentOutput(self):
self._testShardNoShardEquivalentOutput(use_gpu=False)
self._testShardNoShardEquivalentOutput(use_gpu=True)
def testDoubleInput(self):
self._testDoubleInput(use_gpu=False)
self._testDoubleInput(use_gpu=True)
def testDoubleInputWithDropoutAndDynamicCalculation(self):
self._testDoubleInputWithDropoutAndDynamicCalculation(use_gpu=False)
self._testDoubleInputWithDropoutAndDynamicCalculation(use_gpu=True)
def testDynamicRNNAllowsUnknownTimeDimension(self):
inputs = array_ops.placeholder(dtypes.float32, shape=[1, None, 20])
cell = rnn_cell.GRUCell(30)
# Smoke test, this should not raise an error
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
def testDynamicRNNWithTupleStates(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(None, input_size))
]
inputs_c = array_ops.stack(inputs)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
num_proj=num_proj,
initializer=initializer,
state_is_tuple=True)
with variable_scope.variable_scope("root") as scope:
outputs_static, state_static = rnn.static_rnn(
cell,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length,
scope=scope)
self.assertTrue(isinstance(state_static, rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(state_dynamic, rnn_cell.LSTMStateTuple))
self.assertEqual(state_static[0], state_static.c)
self.assertEqual(state_static[1], state_static.h)
self.assertEqual(state_dynamic[0], state_dynamic.c)
self.assertEqual(state_dynamic[1], state_dynamic.h)
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
outputs_static_v = sess.run(outputs_static,
feed_dict={inputs[0]: input_value})
outputs_dynamic_v = sess.run(outputs_dynamic,
feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
state_static_v = sess.run(state_static,
feed_dict={inputs[0]: input_value})
state_dynamic_v = sess.run(state_dynamic,
feed_dict={inputs[0]: input_value})
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
def testDynamicRNNWithNestedTupleStates(self):
num_units = 3
input_size = 5
batch_size = 2
num_proj = 4
max_length = 8
sequence_length = [4, 6]
with self.test_session(graph=ops_lib.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(
dtypes.float32, shape=(None, input_size))
]
inputs_c = array_ops.stack(inputs)
def _cell(i):
return rnn_cell.LSTMCell(
num_units + i,
use_peepholes=True,
num_proj=num_proj + i,
initializer=initializer,
state_is_tuple=True)
# This creates a state tuple which has 4 sub-tuples of length 2 each.
cell = rnn_cell.MultiRNNCell(
[_cell(i) for i in range(4)], state_is_tuple=True)
self.assertEqual(len(cell.state_size), 4)
for i in range(4):
self.assertEqual(len(cell.state_size[i]), 2)
test_zero = cell.zero_state(1, dtypes.float32)
self.assertEqual(len(test_zero), 4)
for i in range(4):
self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])
with variable_scope.variable_scope("root") as scope:
outputs_static, state_static = rnn.static_rnn(
cell,
inputs,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
scope.reuse_variables()
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs_c,
dtype=dtypes.float32,
time_major=True,
sequence_length=sequence_length,
scope=scope)
variables_lib.global_variables_initializer().run()
input_value = np.random.randn(batch_size, input_size)
outputs_static_v = sess.run(outputs_static,
feed_dict={inputs[0]: input_value})
outputs_dynamic_v = sess.run(outputs_dynamic,
feed_dict={inputs[0]: input_value})
self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
state_static_v = sess.run(nest.flatten(state_static),
feed_dict={inputs[0]: input_value})
state_dynamic_v = sess.run(nest.flatten(state_dynamic),
feed_dict={inputs[0]: input_value})
self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
def _testDynamicEquivalentToStaticRNN(self, use_gpu, use_sequence_length):
time_steps = 8
num_units = 3
num_proj = 4
input_size = 5
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
if use_sequence_length:
sequence_length = np.random.randint(0, time_steps, size=batch_size)
else:
sequence_length = None
########### Step 1: Run static graph and generate readouts
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
inputs = array_ops.unstack(concat_inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("dynamic_scope"):
outputs_static, state_static = rnn.static_rnn(
cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
feeds = {concat_inputs: input_values}
# Initialize
variables_lib.global_variables_initializer().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
static_gradients = gradients_impl.gradients(
outputs_static + [state_static], [concat_inputs])
# Generate gradients of individual outputs w.r.t. inputs
static_individual_gradients = nest.flatten([
gradients_impl.gradients(y, [concat_inputs])
for y in [outputs_static[0], outputs_static[-1], state_static]
])
# Generate gradients of individual variables w.r.t. inputs
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
assert len(trainable_variables) > 1, ("Count of trainable variables: %d" %
len(trainable_variables))
# pylint: disable=bad-builtin
static_individual_variable_gradients = nest.flatten([
gradients_impl.gradients(y, trainable_variables)
for y in [outputs_static[0], outputs_static[-1], state_static]
])
# Test forward pass
values_static = sess.run(outputs_static, feed_dict=feeds)
(state_value_static,) = sess.run((state_static,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
static_grad_values = sess.run(static_gradients, feed_dict=feeds)
static_individual_grad_values = sess.run(static_individual_gradients,
feed_dict=feeds)
static_individual_var_grad_values = sess.run(
static_individual_variable_gradients, feed_dict=feeds)
########## Step 2: Run dynamic graph and generate readouts
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
inputs = array_ops.unstack(concat_inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
num_units,
use_peepholes=True,
initializer=initializer,
num_proj=num_proj,
state_is_tuple=False)
with variable_scope.variable_scope("dynamic_scope"):
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps)
feeds = {concat_inputs: input_values}
# Initialize
variables_lib.global_variables_initializer().run(feed_dict=feeds)
# Generate gradients of sum of outputs w.r.t. inputs
dynamic_gradients = gradients_impl.gradients(
split_outputs_dynamic + [state_dynamic], [concat_inputs])
# Generate gradients of several individual outputs w.r.t. inputs
dynamic_individual_gradients = nest.flatten([
gradients_impl.gradients(y, [concat_inputs])
for y in
[split_outputs_dynamic[0], split_outputs_dynamic[-1], state_dynamic]
])
# Generate gradients of individual variables w.r.t. inputs
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
assert len(trainable_variables) > 1, ("Count of trainable variables: %d" %
len(trainable_variables))
dynamic_individual_variable_gradients = nest.flatten([
gradients_impl.gradients(y, trainable_variables)
for y in
[split_outputs_dynamic[0], split_outputs_dynamic[-1], state_dynamic]
])
# Test forward pass
values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
(state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds)
# Test gradients to inputs and variables w.r.t. outputs & final state
dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
dynamic_individual_grad_values = sess.run(dynamic_individual_gradients,
feed_dict=feeds)
dynamic_individual_var_grad_values = sess.run(
dynamic_individual_variable_gradients, feed_dict=feeds)
######### Step 3: Comparisons
self.assertEqual(len(values_static), len(values_dynamic))
for (value_static, value_dynamic) in zip(values_static, values_dynamic):
self.assertAllEqual(value_static, value_dynamic)
self.assertAllEqual(state_value_static, state_value_dynamic)
self.assertAllEqual(static_grad_values, dynamic_grad_values)
self.assertEqual(
len(static_individual_grad_values), len(dynamic_individual_grad_values))
self.assertEqual(
len(static_individual_var_grad_values),
len(dynamic_individual_var_grad_values))
for i, (a, b) in enumerate(
zip(static_individual_grad_values, dynamic_individual_grad_values)):
tf_logging.info("Comparing individual gradients iteration %d" % i)
self.assertAllEqual(a, b)
for i, (a, b) in enumerate(
zip(static_individual_var_grad_values,
dynamic_individual_var_grad_values)):
tf_logging.info("Comparing individual variable gradients iteration %d" %
i)
self.assertAllEqual(a, b)
def testDynamicEquivalentToStaticRNN(self):
self._testDynamicEquivalentToStaticRNN(
use_gpu=False, use_sequence_length=False)
self._testDynamicEquivalentToStaticRNN(
use_gpu=True, use_sequence_length=False)
self._testDynamicEquivalentToStaticRNN(
use_gpu=False, use_sequence_length=True)
self._testDynamicEquivalentToStaticRNN(
use_gpu=True, use_sequence_length=True)
class BidirectionalRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
  def _createBidirectionalRNN(self,
                              use_gpu,
                              use_shape,
                              use_sequence_length,
                              scope=None):
    """Builds a static bidirectional LSTM graph for the tests below.

    Args:
      use_gpu: unused here; placement is decided by the caller's session.
      use_shape: if True, placeholders carry a static batch dimension.
      use_sequence_length: if True, a sequence_length placeholder is created.
      scope: optional VariableScope passed to static_bidirectional_rnn.

    Returns:
      Tuple (input_value, inputs, outputs, state_fw, state_bw,
      sequence_length) where `outputs` is the stacked time-major output and
      `sequence_length` is None unless use_sequence_length is True.
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    sequence_length = array_ops.placeholder(
        dtypes.int64) if use_sequence_length else None
    # Same initializer (and seed) for both directions, so forward and
    # backward cells start from identical weights.
    cell_fw = rnn_cell.LSTMCell(
        num_units, input_size, initializer=initializer, state_is_tuple=False)
    cell_bw = rnn_cell.LSTMCell(
        num_units, input_size, initializer=initializer, state_is_tuple=False)
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32,
            shape=(batch_size, input_size) if use_shape else (None, input_size))
    ]
    outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
        cell_fw,
        cell_bw,
        inputs,
        dtype=dtypes.float32,
        sequence_length=sequence_length,
        scope=scope)
    self.assertEqual(len(outputs), len(inputs))
    for out in outputs:
      # Depth is doubled: forward and backward outputs are concatenated.
      self.assertEqual(out.get_shape().as_list(),
                       [batch_size if use_shape else None, 2 * num_units])
    input_value = np.random.randn(batch_size, input_size)
    outputs = array_ops.stack(outputs)
    return input_value, inputs, outputs, state_fw, state_bw, sequence_length
  def _testBidirectionalRNN(self, use_gpu, use_shape):
    """With tied fw/bw weights, outputs must mirror each other in time."""
    with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalRNN(use_gpu, use_shape, True))
      variables_lib.global_variables_initializer().run()
      # Run with pre-specified sequence length of 2, 3
      out, s_fw, s_bw = sess.run(
          [outputs, state_fw, state_bw],
          feed_dict={inputs[0]: input_value,
                     sequence_length: [2, 3]})
      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 4 <= depth < 6
      #
      # First sequence in batch is length=2
      # Check that the time=0 forward output is equal to time=1 backward output
      self.assertEqual(out[0][0][0], out[1][0][3])
      self.assertEqual(out[0][0][1], out[1][0][4])
      self.assertEqual(out[0][0][2], out[1][0][5])
      # Check that the time=1 forward output is equal to time=0 backward output
      self.assertEqual(out[1][0][0], out[0][0][3])
      self.assertEqual(out[1][0][1], out[0][0][4])
      self.assertEqual(out[1][0][2], out[0][0][5])
      # Second sequence in batch is length=3
      # Check that the time=0 forward output is equal to time=2 backward output
      self.assertEqual(out[0][1][0], out[2][1][3])
      self.assertEqual(out[0][1][1], out[2][1][4])
      self.assertEqual(out[0][1][2], out[2][1][5])
      # Check that the time=1 forward output is equal to time=1 backward output
      self.assertEqual(out[1][1][0], out[1][1][3])
      self.assertEqual(out[1][1][1], out[1][1][4])
      self.assertEqual(out[1][1][2], out[1][1][5])
      # Check that the time=2 forward output is equal to time=0 backward output
      self.assertEqual(out[2][1][0], out[0][1][3])
      self.assertEqual(out[2][1][1], out[0][1][4])
      self.assertEqual(out[2][1][2], out[0][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)
  def _testBidirectionalRNNWithoutSequenceLength(self, use_gpu, use_shape):
    """Same mirror-in-time check, but with no sequence_length (full length)."""
    with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, _ = (
          self._createBidirectionalRNN(use_gpu, use_shape, False))
      variables_lib.global_variables_initializer().run()
      out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
                                 feed_dict={inputs[0]: input_value})
      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 4 <= depth < 6
      #
      # Both sequences in batch are length=8.  Check that the time=i
      # forward output is equal to time=8-1-i backward output
      # (8 == max_length inside _createBidirectionalRNN).
      for i in xrange(8):
        self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])
        self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])
        self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])
      for i in xrange(8):
        self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])
        self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])
        self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)
def testBidirectionalRNN(self):
self._testBidirectionalRNN(use_gpu=False, use_shape=False)
self._testBidirectionalRNN(use_gpu=True, use_shape=False)
self._testBidirectionalRNN(use_gpu=False, use_shape=True)
self._testBidirectionalRNN(use_gpu=True, use_shape=True)
def testBidirectionalRNNWithoutSequenceLength(self):
self._testBidirectionalRNNWithoutSequenceLength(
use_gpu=False, use_shape=False)
self._testBidirectionalRNNWithoutSequenceLength(
use_gpu=True, use_shape=False)
self._testBidirectionalRNNWithoutSequenceLength(
use_gpu=False, use_shape=True)
self._testBidirectionalRNNWithoutSequenceLength(
use_gpu=True, use_shape=True)
  def _createBidirectionalDynamicRNN(self,
                                     use_gpu,
                                     use_shape,
                                     use_state_tuple,
                                     use_time_major,
                                     use_sequence_length,
                                     scope=None):
    """Builds a bidirectional_dynamic_rnn graph over placeholder inputs.

    Both LSTM cells are created from the same seeded initializer, so the
    forward and backward passes start from identical parameters.

    Returns:
      Tuple (input_value, inputs, outputs, state_fw, state_bw,
      sequence_length): `inputs` is a list of placeholders, `input_value` is
      a random numpy array to feed into `inputs[0]`, `sequence_length` is a
      placeholder or None depending on `use_sequence_length`.
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    sequence_length = (
        array_ops.placeholder(dtypes.int64) if use_sequence_length else None)
    cell_fw = rnn_cell.LSTMCell(
        num_units, initializer=initializer, state_is_tuple=use_state_tuple)
    cell_bw = rnn_cell.LSTMCell(
        num_units, initializer=initializer, state_is_tuple=use_state_tuple)
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32,
            shape=(batch_size if use_shape else None, input_size))
    ]
    inputs_c = array_ops.stack(inputs)
    if not use_time_major:
      # Convert from time-major [T, B, D] to batch-major [B, T, D].
      inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
    outputs, states = rnn.bidirectional_dynamic_rnn(
        cell_fw,
        cell_bw,
        inputs_c,
        sequence_length,
        dtype=dtypes.float32,
        time_major=use_time_major,
        scope=scope)
    # Concatenate forward/backward outputs along the depth dimension.
    outputs = array_ops.concat(outputs, 2)
    state_fw, state_bw = states
    # Static shape check: depth doubles due to fwd/bwd concat; the first two
    # dims swap when the graph is time-major.
    outputs_shape = [None, max_length, 2 * num_units]
    if use_shape:
      outputs_shape[0] = batch_size
    if use_time_major:
      outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
    self.assertEqual(outputs.get_shape().as_list(), outputs_shape)
    input_value = np.random.randn(batch_size, input_size)
    return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testBidirectionalDynamicRNN(self, use_gpu, use_shape, use_state_tuple,
use_time_major, use_sequence_length):
with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalDynamicRNN(use_gpu, use_shape,
use_state_tuple, use_time_major,
use_sequence_length))
variables_lib.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
feed_dict = (
{sequence_length: [2, 3]} if use_sequence_length else {})
feed_dict.update({inputs[0]: input_value})
if use_state_tuple:
out, c_fw, m_fw, c_bw, m_bw = sess.run(
[outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
feed_dict=feed_dict)
s_fw = (c_fw, m_fw)
s_bw = (c_bw, m_bw)
else:
feed_dict.update({inputs[0]: input_value})
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw], feed_dict=feed_dict)
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward output has to be the same,
# but reversed in time. The format is output[time][batch][depth], and
# due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
# - backward output: out[][][depth] for 4 <= depth < 6
#
if not use_time_major:
out = np.swapaxes(out, 0, 1)
if use_sequence_length:
# First sequence in batch is length=2
# Check that the t=0 forward output is equal to t=1 backward output
self.assertEqual(out[0][0][0], out[1][0][3])
self.assertEqual(out[0][0][1], out[1][0][4])
self.assertEqual(out[0][0][2], out[1][0][5])
# Check that the t=1 forward output is equal to t=0 backward output
self.assertEqual(out[1][0][0], out[0][0][3])
self.assertEqual(out[1][0][1], out[0][0][4])
self.assertEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the t=0 forward output is equal to t=2 backward output
self.assertEqual(out[0][1][0], out[2][1][3])
self.assertEqual(out[0][1][1], out[2][1][4])
self.assertEqual(out[0][1][2], out[2][1][5])
# Check that the t=1 forward output is equal to t=1 backward output
self.assertEqual(out[1][1][0], out[1][1][3])
self.assertEqual(out[1][1][1], out[1][1][4])
self.assertEqual(out[1][1][2], out[1][1][5])
# Check that the t=2 forward output is equal to t=0 backward output
self.assertEqual(out[2][1][0], out[0][1][3])
self.assertEqual(out[2][1][1], out[0][1][4])
self.assertEqual(out[2][1][2], out[0][1][5])
# Via the reasoning above, the forward and backward final state should
# be exactly the same
self.assertAllClose(s_fw, s_bw)
else: # not use_sequence_length
max_length = 8 # from createBidirectionalDynamicRNN
for t in range(max_length):
self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])
self.assertAllClose(s_fw, s_bw)
def testBidirectionalDynamicRNN(self):
# Generate 2^5 option values
# from [True, True, True, True, True] to [False, False, False, False, False]
options = itertools.product([True, False], repeat=5)
for option in options:
self._testBidirectionalDynamicRNN(
use_gpu=option[0],
use_shape=option[1],
use_state_tuple=option[2],
use_time_major=option[3],
use_sequence_length=option[4])
  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Builds a BiRNN via `factory` and asserts all variables use `prefix`."""
    # REMARKS: factory(scope) is a function accepting a scope
    # as an argument, such scope can be None, a string
    # or a VariableScope instance.
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      # check that all the variables names starts
      # with the proper scope.
      # NOTE(review): the initializer op below is constructed but never run;
      # presumably graph construction alone suffices here -- confirm.
      variables_lib.global_variables_initializer()
      all_vars = variables_lib.global_variables()
      # With no prefix, variables fall under the default "bidirectional_rnn"
      # scope created by the bidirectional RNN functions.
      prefix = prefix or "bidirectional_rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("BiRNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      # Every global variable must live under the expected scope.
      self.assertEqual(len(scope_vars), len(all_vars))
def testBidirectionalRNNScope(self):
def factory(scope):
return self._createBidirectionalRNN(
use_gpu=True, use_shape=True, use_sequence_length=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
def testBidirectionalDynamicRNNScope(self):
def get_factory(use_time_major):
def factory(scope):
return self._createBidirectionalDynamicRNN(
use_gpu=True,
use_shape=True,
use_state_tuple=True,
use_sequence_length=True,
use_time_major=use_time_major,
scope=scope)
return factory
self._testScope(get_factory(True), use_outer_scope=True)
self._testScope(get_factory(True), use_outer_scope=False)
self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
self._testScope(get_factory(False), use_outer_scope=True)
self._testScope(get_factory(False), use_outer_scope=False)
self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
class MultiDimensionalLSTMTest(test.TestCase):
  """Checks RNN containers agree when inputs have multiple feature dims."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def testMultiDimensionalLSTMAllRNNContainers(self):
    """Runs static, dynamic, bidirectional, and state-saving RNNs over the
    same multi-dimensional inputs and asserts outputs and states agree."""
    feature_dims = (3, 4, 5)
    input_size = feature_dims
    batch_size = 2
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      inputs = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(None,) + input_size)
      ]
      inputs_using_dim = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size,) + input_size)
      ]
      inputs_c = array_ops.stack(inputs)
      # Create a cell for the whole test. This is fine because the cell has no
      # variables.
      cell = DummyMultiDimensionalLSTM(feature_dims)
      state_saver = TestStateSaver(batch_size, input_size)
      outputs_static, state_static = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length)
      outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
          cell,
          cell,
          inputs_using_dim,
          dtype=dtypes.float32,
          sequence_length=sequence_length)
      outputs_sav, state_sav = rnn.static_state_saving_rnn(
          cell,
          inputs_using_dim,
          sequence_length=sequence_length,
          state_saver=state_saver,
          state_name=("h", "c"))

      # Static-shape checks: each container's outputs must mirror its inputs'
      # shapes (bidirectional doubles the second dim via fwd/bwd concat).
      self.assertEqual(outputs_dynamic.get_shape().as_list(),
                       inputs_c.get_shape().as_list())
      for out, inp in zip(outputs_static, inputs):
        self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      for out, inp in zip(outputs_bid, inputs_using_dim):
        input_shape_list = inp.get_shape().as_list()
        # fwd and bwd activations are concatenated along the second dim.
        input_shape_list[1] *= 2
        self.assertEqual(out.get_shape().as_list(), input_shape_list)

      variables_lib.global_variables_initializer().run()

      input_total_size = (batch_size,) + input_size
      input_value = np.random.randn(*input_total_size)
      # Only the first placeholder in each list is fed; the lists repeat the
      # same placeholder object max_length times.
      outputs_static_v = sess.run(outputs_static,
                                  feed_dict={inputs[0]: input_value})
      outputs_dynamic_v = sess.run(outputs_dynamic,
                                   feed_dict={inputs[0]: input_value})
      outputs_bid_v = sess.run(outputs_bid,
                               feed_dict={inputs_using_dim[0]: input_value})
      outputs_sav_v = sess.run(outputs_sav,
                               feed_dict={inputs_using_dim[0]: input_value})

      self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
      self.assertAllEqual(outputs_static_v, outputs_sav_v)
      outputs_static_array = np.array(outputs_static_v)
      outputs_static_array_double = np.concatenate(
          (outputs_static_array, outputs_static_array), axis=2)
      outputs_bid_array = np.array(outputs_bid_v)
      self.assertAllEqual(outputs_static_array_double, outputs_bid_array)

      state_static_v = sess.run(state_static,
                                feed_dict={inputs[0]: input_value})
      state_dynamic_v = sess.run(state_dynamic,
                                 feed_dict={inputs[0]: input_value})
      state_bid_fw_v = sess.run(state_fw,
                                feed_dict={inputs_using_dim[0]: input_value})
      state_bid_bw_v = sess.run(state_bw,
                                feed_dict={inputs_using_dim[0]: input_value})
      state_sav_v = sess.run(state_sav,
                             feed_dict={inputs_using_dim[0]: input_value})
      # All containers must reach the same final state.
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class NestedLSTMTest(test.TestCase):
  """Checks RNN containers agree when inputs/outputs are nested tuples."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def testNestedIOLSTMAllRNNContainers(self):
    """Runs static/dynamic/bidirectional/state-saving RNNs over tuple-valued
    inputs and asserts their outputs and final states all agree."""
    input_size = 5
    batch_size = 2
    state_size = 6
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      state_saver = TestStateSaver(batch_size, state_size)
      # Each RNN input is a 2-tuple of placeholders (nested structure).
      single_input = (array_ops.placeholder(
          dtypes.float32, shape=(None, input_size)), array_ops.placeholder(
              dtypes.float32, shape=(None, input_size)))
      inputs = max_length * [single_input]
      inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),
                  array_ops.stack([input_[1] for input_ in inputs]))
      single_input_using_dim = (
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size, input_size)),
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size, input_size)))
      inputs_using_dim = max_length * [single_input_using_dim]

      # Create a cell for the whole test. This is fine because the cell has no
      # variables.
      cell = NestedRNNCell()
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length)
      outputs_static, state_static = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
      outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
          cell,
          cell,
          inputs_using_dim,
          dtype=dtypes.float32,
          sequence_length=sequence_length)
      outputs_sav, state_sav = rnn.static_state_saving_rnn(
          cell,
          inputs_using_dim,
          sequence_length=sequence_length,
          state_saver=state_saver,
          state_name=("h", "c"))

      def _assert_same_shape(input1, input2, double=False):
        # Flattens nested structures and compares element-wise static shapes;
        # `double=True` expects dim 1 doubled (fwd/bwd concatenation).
        flat_input1 = nest.flatten(input1)
        flat_input2 = nest.flatten(input2)
        for inp1, inp2 in zip(flat_input1, flat_input2):
          input_shape = inp1.get_shape().as_list()
          if double:
            input_shape[1] *= 2
          self.assertEqual(input_shape, inp2.get_shape().as_list())

      _assert_same_shape(inputs_c, outputs_dynamic)
      _assert_same_shape(inputs, outputs_static)
      _assert_same_shape(inputs_using_dim, outputs_sav)
      _assert_same_shape(inputs_using_dim, outputs_bid, double=True)

      variables_lib.global_variables_initializer().run()

      input_total_size = (batch_size, input_size)
      input_value = (np.random.randn(*input_total_size),
                     np.random.randn(*input_total_size))
      outputs_dynamic_v = sess.run(outputs_dynamic,
                                   feed_dict={single_input: input_value})
      outputs_static_v = sess.run(outputs_static,
                                  feed_dict={single_input: input_value})
      outputs_sav_v = sess.run(outputs_sav,
                               feed_dict={single_input_using_dim: input_value})
      outputs_bid_v = sess.run(outputs_bid,
                               feed_dict={single_input_using_dim: input_value})

      # Dynamic output is time-major; transpose to compare with static output.
      self.assertAllEqual(outputs_static_v,
                          np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
      self.assertAllEqual(outputs_static_v, outputs_sav_v)
      outputs_static_array = np.array(outputs_static_v)
      outputs_static_array_double = np.concatenate(
          (outputs_static_array, outputs_static_array), axis=3)
      outputs_bid_array = np.array(outputs_bid_v)
      self.assertAllEqual(outputs_static_array_double, outputs_bid_array)

      state_dynamic_v = sess.run(state_dynamic,
                                 feed_dict={single_input: input_value})
      state_static_v = sess.run(state_static,
                                feed_dict={single_input: input_value})
      state_bid_fw_v = sess.run(state_fw,
                                feed_dict={single_input_using_dim: input_value})
      state_bid_bw_v = sess.run(state_bw,
                                feed_dict={single_input_using_dim: input_value})
      state_sav_v = sess.run(state_sav,
                             feed_dict={single_input_using_dim: input_value})
      # All containers must reach the same final state.
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class StateSaverRNNTest(test.TestCase):
  """Scope-naming tests for static_state_saving_rnn."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Builds the RNN via `factory` and checks all variables use `prefix`."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
      # NOTE(review): the initializer op is built but never run; graph
      # construction alone appears sufficient for this check -- confirm.
      variables_lib.global_variables_initializer()
      # check that all the variables names starts
      # with the proper scope.
      all_vars = variables_lib.global_variables()
      # With no prefix, variables fall under the default "rnn" scope.
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testStateSaverRNNScope(self):
    """static_state_saving_rnn should respect scope/string/None scopes."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8

    def factory(scope):
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      state_saver = TestStateSaver(batch_size, 2 * num_units)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=False,
          initializer=initializer,
          state_is_tuple=False)
      inputs = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size, input_size))
      ]
      return rnn.static_state_saving_rnn(
          cell,
          inputs,
          state_saver=state_saver,
          state_name="save_lstm",
          scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class GRUTest(test.TestCase):
  """Smoke and scope tests for dynamic_rnn with a GRUCell."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _testDynamic(self, use_gpu):
    """Builds and runs a time-major dynamic GRU; only checks it executes."""
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2
    input_values = np.random.randn(time_steps, batch_size, input_size)
    # Per-batch lengths may include 0 (randint upper bound is exclusive).
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
    with self.test_session(use_gpu=use_gpu, graph=ops_lib.Graph()) as sess:
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      cell = rnn_cell.GRUCell(num_units=num_units)
      with variable_scope.variable_scope("dynamic_scope"):
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs=concat_inputs,
            sequence_length=sequence_length,
            time_major=True,
            dtype=dtypes.float32)
      feeds = {concat_inputs: input_values}
      # Initialize
      variables_lib.global_variables_initializer().run(feed_dict=feeds)
      # No value assertions; success == no exception.
      sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)

  def testDynamic(self):
    self._testDynamic(use_gpu=False)
    self._testDynamic(use_gpu=True)

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Builds the graph via `factory` and checks variable name prefixes."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
      # NOTE(review): initializer op is constructed but never run; graph
      # construction alone appears sufficient here -- confirm.
      variables_lib.global_variables_initializer()
      # check that all the variables names starts
      # with the proper scope.
      all_vars = variables_lib.global_variables()
      # With no prefix, variables fall under the default "rnn" scope.
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testDynamicScope(self):
    """dynamic_rnn should respect string/VariableScope/None scope args."""
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2
    sequence_length = np.random.randint(0, time_steps, size=batch_size)

    def factory(scope):
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      cell = rnn_cell.GRUCell(num_units=num_units)
      return rnn.dynamic_rnn(
          cell,
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32,
          scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class RawRNNTest(test.TestCase):
  """Tests rnn.raw_rnn: equivalence with dynamic_rnn, loop state, emit
  structures, and scope naming."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _testRawRNN(self, max_time):
    """Checks raw_rnn matches dynamic_rnn in outputs, states, and gradients
    when both share the same LSTM variables (via variable reuse)."""
    with self.test_session(graph=ops_lib.Graph()) as sess:
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = array_ops.placeholder(
          shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
      sequence_length = array_ops.placeholder(
          shape=(batch_size,), dtype=dtypes.int32)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        # raw_rnn loop function: emit the cell output unchanged and feed
        # the next input from the TensorArray until sequence_length hits.
        emit_output = cell_output  # == None for time == 0
        if cell_output is None:  # time == 0
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          next_state = cell_state  # copy state through
        elements_finished = (time_ >= sequence_length)
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      reuse_scope = variable_scope.get_variable_scope()
      outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)
      outputs = outputs_ta.stack()
      # dynamic_rnn below reuses the same LSTM variables via reuse_scope, so
      # its results must match raw_rnn's exactly.
      reuse_scope.reuse_variables()
      outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(
          cell,
          inputs,
          time_major=True,
          dtype=dtypes.float32,
          sequence_length=sequence_length,
          scope=reuse_scope)
      variables = variables_lib.trainable_variables()
      gradients = gradients_impl.gradients([outputs, final_state],
                                           [inputs] + variables)
      gradients_dynamic_rnn = gradients_impl.gradients(
          [outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)
      variables_lib.global_variables_initializer().run()
      rand_input = np.random.randn(max_time, batch_size, input_depth)
      if max_time == 0:
        rand_seq_len = np.zeros(batch_size)
      else:
        rand_seq_len = np.random.randint(max_time, size=batch_size)
      # To ensure same output lengths for dynamic_rnn and raw_rnn
      rand_seq_len[0] = max_time
      (outputs_val, outputs_dynamic_rnn_val, final_state_val,
       final_state_dynamic_rnn_val) = sess.run(
           [outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
           feed_dict={inputs: rand_input,
                      sequence_length: rand_seq_len})
      self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
      self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
      # NOTE: Because with 0 time steps, raw_rnn does not have shape
      # information about the input, it is impossible to perform
      # gradients comparisons as the gradients eval will fail. So
      # this case skips the gradients test.
      if max_time > 0:
        self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
        gradients_val = sess.run(
            gradients,
            feed_dict={inputs: rand_input,
                       sequence_length: rand_seq_len})
        gradients_dynamic_rnn_val = sess.run(
            gradients_dynamic_rnn,
            feed_dict={inputs: rand_input,
                       sequence_length: rand_seq_len})
        self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
        # Gradient w.r.t. the input tensor comes first in both lists.
        input_gradients_val = gradients_val[0]
        input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
        self.assertAllClose(input_gradients_val,
                            input_gradients_dynamic_rnn_val)
        for i in range(1, len(gradients_val)):
          self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])

  def testRawRNNZeroLength(self):
    # NOTE: Because with 0 time steps, raw_rnn does not have shape
    # information about the input, it is impossible to perform
    # gradients comparisons as the gradients eval will fail. So this
    # case skips the gradients test.
    self._testRawRNN(max_time=0)

  def testRawRNN(self):
    self._testRawRNN(max_time=10)

  def testLoopState(self):
    """Loop state threads a scalar step counter through raw_rnn; after 10
    steps the counter must read 10."""
    with self.test_session(graph=ops_lib.Graph()):
      max_time = 10
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, loop_state):
        if cell_output is None:
          loop_state = constant_op.constant([0])
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          # Increment the counter carried through as loop state.
          loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1])
          next_state = cell_state
        emit_output = cell_output  # == None for time == 0
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output,
                loop_state)

      r = rnn.raw_rnn(cell, loop_fn)
      loop_state = r[-1]
      self.assertEqual([10], loop_state.eval())

  def testLoopStateWithTensorArray(self):
    """Loop state may itself be a TensorArray; each step appends the running
    sum of step indices (1, 2, 4, 7, 11)."""
    with self.test_session(graph=ops_lib.Graph()):
      max_time = 4
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, loop_state):
        if cell_output is None:
          loop_state = tensor_array_ops.TensorArray(
              dynamic_size=True,
              size=0,
              dtype=dtypes.int32,
              clear_after_read=False)
          loop_state = loop_state.write(0, 1)
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          # Append previous accumulated value plus the current step index.
          loop_state = loop_state.write(time_,
                                        loop_state.read(time_ - 1) + time_)
          next_state = cell_state
        emit_output = cell_output  # == None for time == 0
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output,
                loop_state)

      r = rnn.raw_rnn(cell, loop_fn)
      loop_state = r[-1]
      loop_state = loop_state.stack()
      self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())

  def testEmitDifferentStructureThanCellOutput(self):
    """raw_rnn's emit structure may differ from the cell output: here each
    step emits an (int32 [2,3], int64 [1]) pair per batch entry."""
    with self.test_session(graph=ops_lib.Graph()) as sess:
      max_time = 10
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, _):
        if cell_output is None:
          # First call declares the per-element emit structure (no batch dim).
          emit_output = (array_ops.zeros(
              [2, 3], dtype=dtypes.int32), array_ops.zeros(
                  [1], dtype=dtypes.int64))
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          # Subsequent calls emit batched values matching that structure.
          emit_output = (array_ops.ones(
              [batch_size, 2, 3], dtype=dtypes.int32), array_ops.ones(
                  [batch_size, 1], dtype=dtypes.int64))
          next_state = cell_state
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      r = rnn.raw_rnn(cell, loop_fn)
      output_ta = r[0]
      self.assertEqual(2, len(output_ta))
      self.assertEqual([dtypes.int32, dtypes.int64],
                       [ta.dtype for ta in output_ta])
      output = [ta.stack() for ta in output_ta]
      output_vals = sess.run(output)
      self.assertAllEqual(
          np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
      self.assertAllEqual(
          np.ones((max_time, batch_size, 1), np.int64), output_vals[1])

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Builds the graph via `factory` and checks variable name prefixes."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
      # NOTE(review): initializer op is constructed but never run; graph
      # construction alone appears sufficient here -- confirm.
      variables_lib.global_variables_initializer()
      # check that all the variables names starts
      # with the proper scope.
      all_vars = variables_lib.global_variables()
      # With no prefix, variables fall under the default "rnn" scope.
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testRawRNNScope(self):
    """raw_rnn should respect string/VariableScope/None scope args."""
    max_time = 10
    batch_size = 16
    input_depth = 4
    num_units = 3

    def factory(scope):
      inputs = array_ops.placeholder(
          shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
      sequence_length = array_ops.placeholder(
          shape=(batch_size,), dtype=dtypes.int32)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        emit_output = cell_output  # == None for time == 0
        if cell_output is None:  # time == 0
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          next_state = cell_state
        elements_finished = (time_ >= sequence_length)
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      return rnn.raw_rnn(cell, loop_fn, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class DeviceWrapperCell(rnn_cell.RNNCell):
  """Class to ensure cell calculation happens on a specific device."""

  def __init__(self, cell, device):
    # `device` may be None, in which case placement is left unconstrained.
    self._cell = cell
    self._device = device

  @property
  def output_size(self):
    return self._cell.output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, input_, state, scope=None):
    # Guard clause: with no device constraint, delegate directly.
    if self._device is None:
      return self._cell(input_, state, scope)
    with ops_lib.device(self._device):
      return self._cell(input_, state, scope)
class TensorArrayOnCorrectDeviceTest(test.TestCase):
  """Verifies dynamic_rnn places its TensorArray ops on expected devices."""

  def _execute_rnn_on(self,
                      rnn_device=None,
                      cell_device=None,
                      input_device=None):
    """Runs a dynamic RNN with the given device pins; returns RunMetadata
    with full tracing so device placement appears in step_stats."""
    batch_size = 3
    time_steps = 7
    input_size = 5
    num_units = 10
    cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)
    gpu_cell = DeviceWrapperCell(cell, cell_device)
    inputs = np.random.randn(batch_size, time_steps,
                             input_size).astype(np.float32)
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
    if input_device is not None:
      # Pin the input constant to the requested device.
      with ops_lib.device(input_device):
        inputs = constant_op.constant(inputs)
    if rnn_device is not None:
      with ops_lib.device(rnn_device):
        outputs, _ = rnn.dynamic_rnn(
            gpu_cell,
            inputs,
            sequence_length=sequence_length,
            dtype=dtypes.float32)
    else:
      outputs, _ = rnn.dynamic_rnn(
          gpu_cell,
          inputs,
          sequence_length=sequence_length,
          dtype=dtypes.float32)
    with self.test_session(use_gpu=True) as sess:
      opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      variables_lib.global_variables_initializer().run()
      sess.run(outputs, options=opts, run_metadata=run_metadata)
    return run_metadata

  def testRNNOnCPUCellOnGPU(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU
    run_metadata = self._execute_rnn_on(
        rnn_device="/cpu:0", cell_device="/gpu:0")
    step_stats = run_metadata.step_stats
    # dev_stats may list cpu/gpu in either order; locate the gpu entry.
    ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
    gpu_stats = step_stats.dev_stats[ix].node_stats
    cpu_stats = step_stats.dev_stats[1 - ix].node_stats

    def _assert_in(op_str, in_stats, out_stats):
      # Op must appear on the expected device and nowhere else.
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))

    # Writes happen at output of RNN cell
    _assert_in("TensorArrayWrite", gpu_stats, cpu_stats)
    # Gather happens on final TensorArray
    _assert_in("TensorArrayGather", gpu_stats, cpu_stats)
    # Reads happen at input to RNN cell
    _assert_in("TensorArrayRead", cpu_stats, gpu_stats)
    # Scatters happen to get initial input into TensorArray
    _assert_in("TensorArrayScatter", cpu_stats, gpu_stats)

  def testRNNOnCPUCellOnCPU(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU
    run_metadata = self._execute_rnn_on(
        rnn_device="/cpu:0", cell_device="/cpu:0", input_device="/gpu:0")
    step_stats = run_metadata.step_stats
    ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
    gpu_stats = step_stats.dev_stats[ix].node_stats
    cpu_stats = step_stats.dev_stats[1 - ix].node_stats

    def _assert_in(op_str, in_stats, out_stats):
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))

    # All TensorArray operations happen on CPU
    _assert_in("TensorArray", cpu_stats, gpu_stats)

  def testInputOnGPUCellNotDeclared(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU
    run_metadata = self._execute_rnn_on(input_device="/gpu:0")
    step_stats = run_metadata.step_stats
    ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
    gpu_stats = step_stats.dev_stats[ix].node_stats
    cpu_stats = step_stats.dev_stats[1 - ix].node_stats

    def _assert_in(op_str, in_stats, out_stats):
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))

    # Everything happens on GPU
    _assert_in("TensorArray", gpu_stats, cpu_stats)
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.