repo_name | ref | path | copies | content |
|---|---|---|---|---|
PhilipOakley/MSYS2-pacman | refs/heads/master | test/pacman/pmtest.py | 1 | # Copyright (c) 2006 by Aurelien Foret <orelien@chez.com>
# Copyright (c) 2006-2015 Pacman Development Team <pacman-dev@archlinux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shlex
import shutil
import stat
import subprocess
import time
import pmrule
import pmdb
import pmfile
import tap
import util
from util import vprint
class pmtest(object):
"""Test object
"""
def __init__(self, name, root):
self.name = name
self.testname = os.path.basename(name).replace('.py', '')
self.root = root
self.dbver = 9
self.cachepkgs = True
self.cmd = ["pacman", "--noconfirm",
"--config", self.configfile(),
"--root", self.rootdir(),
"--dbpath", self.dbdir(),
"--cachedir", self.cachedir()]
def __str__(self):
return "name = %s\n" \
"testname = %s\n" \
"root = %s" % (self.name, self.testname, self.root)
def addpkg2db(self, treename, pkg):
        if treename not in self.db:
self.db[treename] = pmdb.pmdb(treename, self.root)
self.db[treename].pkgs.append(pkg)
def addpkg(self, pkg):
self.localpkgs.append(pkg)
def findpkg(self, name, version, allow_local=False):
"""Find a package object matching the name and version specified in
either sync databases or the local package collection. The local database
is allowed to match if allow_local is True."""
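        # Illustrative lookup (package name and version below are hypothetical):
        #     pkg = self.findpkg("dummy", "1.0-1")                    # sync dbs only
        #     pkg = self.findpkg("dummy", "1.0-1", allow_local=True)  # local db too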
for db in self.db.values():
if db.is_local and not allow_local:
continue
pkg = db.getpkg(name)
if pkg and pkg.version == version:
return pkg
for pkg in self.localpkgs:
if pkg.name == name and pkg.version == version:
return pkg
return None
def addrule(self, rulename):
rule = pmrule.pmrule(rulename)
self.rules.append(rule)
def load(self):
# Reset test parameters
self.result = {
"success": 0,
"fail": 0
}
self.args = ""
self.retcode = 0
self.db = {
"local": pmdb.pmdb("local", self.root)
}
self.localpkgs = []
self.createlocalpkgs = False
self.filesystem = []
self.description = ""
self.option = {}
# Test rules
self.rules = []
self.files = []
self.expectfailure = False
if os.path.isfile(self.name):
# all tests expect this to be available
from pmpkg import pmpkg
with open(self.name) as input:
            exec(input.read(), locals())
else:
raise IOError("file %s does not exist!" % self.name)
def generate(self, pacman):
tap.diag("==> Generating test environment")
# Cleanup leftover files from a previous test session
if os.path.isdir(self.root):
shutil.rmtree(self.root)
vprint("\t%s" % self.root)
# Create directory structure
vprint(" Creating directory structure:")
dbdir = os.path.join(self.root, util.PM_SYNCDBPATH)
cachedir = os.path.join(self.root, util.PM_CACHEDIR)
syncdir = os.path.join(self.root, util.SYNCREPO)
tmpdir = os.path.join(self.root, util.TMPDIR)
logdir = os.path.join(self.root, os.path.dirname(util.LOGFILE))
etcdir = os.path.join(self.root, os.path.dirname(util.PACCONF))
bindir = os.path.join(self.root, "bin")
shell = pacman["scriptlet-shell"][1:]
shelldir = os.path.join(self.root, os.path.dirname(shell))
sys_dirs = [dbdir, cachedir, syncdir, tmpdir, logdir, etcdir, bindir,
shelldir]
for sys_dir in sys_dirs:
if not os.path.isdir(sys_dir):
vprint("\t%s" % sys_dir[len(self.root)+1:])
os.makedirs(sys_dir, 0o755)
# Only the dynamically linked binary is needed for fakechroot
shutil.copy("/bin/sh", bindir)
if shell != "bin/sh":
shutil.copy("/bin/sh", os.path.join(self.root, shell))
# Configuration file
vprint(" Creating configuration file")
util.mkcfgfile(util.PACCONF, self.root, self.option, self.db)
# Creating packages
vprint(" Creating package archives")
for pkg in self.localpkgs:
vprint("\t%s" % os.path.join(util.TMPDIR, pkg.filename()))
pkg.finalize()
pkg.makepkg(tmpdir)
for key, value in self.db.items():
for pkg in value.pkgs:
pkg.finalize()
if key == "local" and not self.createlocalpkgs:
continue
for pkg in value.pkgs:
vprint("\t%s" % os.path.join(util.PM_CACHEDIR, pkg.filename()))
if self.cachepkgs:
pkg.makepkg(cachedir)
else:
pkg.makepkg(os.path.join(syncdir, value.treename))
pkg.md5sum = util.getmd5sum(pkg.path)
pkg.csize = os.stat(pkg.path)[stat.ST_SIZE]
# Creating sync database archives
vprint(" Creating databases")
for key, value in self.db.items():
vprint("\t" + value.treename)
value.generate()
# Filesystem
vprint(" Populating file system")
for f in self.filesystem:
vprint("\t%s" % f)
util.mkfile(self.root, f, f)
path = os.path.join(self.root, f)
if os.path.isfile(path):
os.utime(path, (355, 355))
for pkg in self.db["local"].pkgs:
vprint("\tinstalling %s" % pkg.fullname())
pkg.install_package(self.root)
if self.db["local"].pkgs and self.dbver >= 9:
path = os.path.join(self.root, util.PM_DBPATH, "local")
util.mkfile(path, "ALPM_DB_VERSION", str(self.dbver))
# Done.
vprint(" Taking a snapshot of the file system")
for filename in self.snapshots_needed():
f = pmfile.PacmanFile(self.root, filename)
self.files.append(f)
vprint("\t%s" % f.name)
def snapshots_needed(self):
files = set()
for r in self.rules:
files.update(r.snapshots_needed())
return files
def run(self, pacman):
if os.path.isfile(util.PM_LOCK):
            tap.bail("\tERROR: another pacman session is ongoing -- skipping")
return
tap.diag("==> Running test")
vprint("\tpacman %s" % self.args)
cmd = []
if pacman["gdb"]:
cmd.extend(["libtool", "execute", "gdb", "--args"])
if pacman["valgrind"]:
suppfile = os.path.join(os.path.dirname(__file__),
'..', '..', 'valgrind.supp')
cmd.extend(["libtool", "execute", "valgrind", "-q",
"--tool=memcheck", "--leak-check=full",
"--show-reachable=yes",
"--gen-suppressions=all",
"--child-silent-after-fork=yes",
"--log-file=%s" % os.path.join(self.root, "var/log/valgrind"),
"--suppressions=%s" % suppfile])
self.addrule("FILE_EMPTY=var/log/valgrind")
# replace program name with absolute path
prog = pacman["bin"]
if not prog:
prog = util.which(self.cmd[0], pacman["bindir"])
if not prog or not os.access(prog, os.X_OK):
if not prog:
tap.bail("could not locate '%s' binary" % (self.cmd[0]))
return
cmd.append(os.path.abspath(prog))
cmd.extend(self.cmd[1:])
if pacman["manual-confirm"]:
cmd.append("--confirm")
if pacman["debug"]:
cmd.append("--debug=%s" % pacman["debug"])
cmd.extend(shlex.split(self.args))
if not (pacman["gdb"] or pacman["nolog"]):
output = open(os.path.join(self.root, util.LOGFILE), 'w')
else:
output = None
vprint("\trunning: %s" % " ".join(cmd))
# Change to the tmp dir before running pacman, so that local package
# archives are made available more easily.
time_start = time.time()
self.retcode = subprocess.call(cmd,
cwd=os.path.join(self.root, util.TMPDIR),
env={'LC_ALL': 'C', 'PATH': os.environ['PATH']})
time_end = time.time()
vprint("\ttime elapsed: %.2fs" % (time_end - time_start))
if output:
output.close()
vprint("\tretcode = %s" % self.retcode)
# Check if the lock is still there
if os.path.isfile(util.PM_LOCK):
tap.diag("\tERROR: %s not removed" % util.PM_LOCK)
os.unlink(util.PM_LOCK)
# Look for a core file
if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
tap.diag("\tERROR: pacman dumped a core file")
def check(self):
tap.plan(len(self.rules))
for i in self.rules:
success = i.check(self)
if success == 1:
self.result["success"] += 1
else:
self.result["fail"] += 1
tap.ok(success, i)
def configfile(self):
return os.path.join(self.root, util.PACCONF)
def dbdir(self):
return os.path.join(self.root, util.PM_DBPATH)
def rootdir(self):
return self.root + '/'
def cachedir(self):
return os.path.join(self.root, util.PM_CACHEDIR)
# vim: set ts=4 sw=4 et:
|
hatbot-team/hatbot_resources | refs/heads/master | tests/test_determinacy.py | 1 | import unittest
import nose
__author__ = 'moskupols'
from preparation.resources import Resource
from tests.trunk_aware import trunk_parametrized, asset_cache
@trunk_parametrized()
def test_determinacy(trunk):
resource_class = Resource.resource_by_trunk(trunk)
r1, r2 = asset_cache(trunk), tuple(Resource.applied_modifiers(resource_class()))
unittest.TestCase().assertTupleEqual(r1, r2)
if __name__ == '__main__':
nose.main()
|
MJuddBooth/pandas | refs/heads/master | pandas/tests/io/json/test_readlines.py | 2 | # -*- coding: utf-8 -*-
import pytest
from pandas.compat import StringIO
import pandas as pd
from pandas import DataFrame, read_json
import pandas.util.testing as tm
from pandas.util.testing import (
assert_frame_equal, assert_series_equal, ensure_clean)
from pandas.io.json.json import JsonReader
@pytest.fixture
def lines_json_df():
df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
return df.to_json(lines=True, orient="records")
def test_read_jsonl():
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars():
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
columns=['a', 'b'])
assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_to_jsonl():
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]],
columns=["a\\", 'b'])
result = df.to_json(orient="records", lines=True)
expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n'
'{"a\\\\":"foo\\"","b":"bar"}')
assert result == expected
assert_frame_equal(read_json(result, lines=True), df)
@pytest.mark.parametrize("chunksize", [1, 1.0])
def test_readjson_chunks(lines_json_df, chunksize):
# Basic test that read_json(chunks=True) gives the same result as
# read_json(chunks=False)
# GH17048: memory usage when lines=True
unchunked = read_json(StringIO(lines_json_df), lines=True)
reader = read_json(StringIO(lines_json_df), lines=True,
chunksize=chunksize)
chunked = pd.concat(reader)
assert_frame_equal(chunked, unchunked)
def test_readjson_chunksize_requires_lines(lines_json_df):
msg = "chunksize can only be passed if lines=True"
with pytest.raises(ValueError, match=msg):
pd.read_json(StringIO(lines_json_df), lines=False, chunksize=2)
def test_readjson_chunks_series():
# Test reading line-format JSON to Series with chunksize param
s = pd.Series({'A': 1, 'B': 2})
strio = StringIO(s.to_json(lines=True, orient="records"))
unchunked = pd.read_json(strio, lines=True, typ='Series')
strio = StringIO(s.to_json(lines=True, orient="records"))
chunked = pd.concat(pd.read_json(
strio, lines=True, typ='Series', chunksize=1
))
assert_series_equal(chunked, unchunked)
def test_readjson_each_chunk(lines_json_df):
# Other tests check that the final result of read_json(chunksize=True)
# is correct. This checks the intermediate chunks.
chunks = list(
pd.read_json(StringIO(lines_json_df), lines=True, chunksize=2)
)
assert chunks[0].shape == (2, 2)
assert chunks[1].shape == (1, 2)
def test_readjson_chunks_from_file():
with ensure_clean('test.json') as path:
df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
df.to_json(path, lines=True, orient="records")
chunked = pd.concat(pd.read_json(path, lines=True, chunksize=1))
unchunked = pd.read_json(path, lines=True)
assert_frame_equal(unchunked, chunked)
@pytest.mark.parametrize("chunksize", [None, 1])
def test_readjson_chunks_closes(chunksize):
with ensure_clean('test.json') as path:
df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
df.to_json(path, lines=True, orient="records")
reader = JsonReader(
path, orient=None, typ="frame", dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=True, numpy=False,
precise_float=False, date_unit=None, encoding=None,
lines=True, chunksize=chunksize, compression=None)
reader.read()
assert reader.open_stream.closed, "didn't close stream with \
chunksize = {chunksize}".format(chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"])
def test_readjson_invalid_chunksize(lines_json_df, chunksize):
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
pd.read_json(StringIO(lines_json_df), lines=True,
chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [None, 1, 2])
def test_readjson_chunks_multiple_empty_lines(chunksize):
j = """
{"A":1,"B":4}
{"A":2,"B":5}
{"A":3,"B":6}
"""
orig = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
test = pd.read_json(j, lines=True, chunksize=chunksize)
if chunksize is not None:
test = pd.concat(test)
tm.assert_frame_equal(
orig, test, obj="chunksize: {chunksize}".format(chunksize=chunksize))
|
cdeil/sphinx-tutorial | refs/heads/master | astrospam/ham.py | 1 | """This is the ``ham.py`` docstring.
"""
class Ham(object):
"""This is the ``Ham`` class docstring.
"""
|
jamesfolberth/jupyterhub_AWS_deployment | refs/heads/master | notebooks/data8_notebooks/lab07/tests/q3_2.py | 3 | test = {
'name': '',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> # Make sure your column labels are correct.
>>> faithful_predictions.labels == ('duration', 'wait', 'predicted wait')
True
>>> abs(1 - np.mean(faithful_predictions.column(2))/100) <= 0.35
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
dgies/incubator-airflow | refs/heads/master | airflow/contrib/operators/jira_operator.py | 46 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.jira_hook import JIRAError
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class JiraOperator(BaseOperator):
"""
JiraOperator to interact and perform action on Jira issue tracking system.
This operator is designed to use Jira Python SDK: http://jira.readthedocs.io
:param jira_conn_id: reference to a pre-defined Jira Connection
:type jira_conn_id: str
:param jira_method: method name from Jira Python SDK to be called
:type jira_method: str
:param jira_method_args: required method parameters for the jira_method
:type jira_method_args: dict
:param result_processor: function to further process the response from Jira
:type result_processor: function
:param get_jira_resource_method: function or operator to get jira resource
on which the provided jira_method will be executed
:type get_jira_resource_method: function
"""
template_fields = ("jira_method_args",)
@apply_defaults
def __init__(self,
jira_conn_id='jira_default',
jira_method=None,
jira_method_args=None,
result_processor=None,
get_jira_resource_method=None,
*args,
**kwargs):
super(JiraOperator, self).__init__(*args, **kwargs)
self.jira_conn_id = jira_conn_id
self.method_name = jira_method
self.jira_method_args = jira_method_args
self.result_processor = result_processor
self.get_jira_resource_method = get_jira_resource_method
def execute(self, context):
try:
if self.get_jira_resource_method is not None:
# if get_jira_resource_method is provided, jira_method will be executed on
# resource returned by executing the get_jira_resource_method.
# This makes all the provided methods of JIRA sdk accessible and usable
# directly at the JiraOperator without additional wrappers.
# ref: http://jira.readthedocs.io/en/latest/api.html
if isinstance(self.get_jira_resource_method, JiraOperator):
resource = self.get_jira_resource_method.execute(**context)
else:
resource = self.get_jira_resource_method(**context)
else:
# Default method execution is on the top level jira client resource
hook = JiraHook(jira_conn_id=self.jira_conn_id)
resource = hook.client
# Current Jira-Python SDK (1.0.7) has issue with pickling the jira response.
# ex: self.xcom_push(context, key='operator_response', value=jira_response)
# This could potentially throw error if jira_result is not picklable
jira_result = getattr(resource, self.method_name)(**self.jira_method_args)
if self.result_processor:
return self.result_processor(context, jira_result)
return jira_result
except JIRAError as jira_error:
raise AirflowException("Failed to execute jiraOperator, error: %s"
% str(jira_error))
except Exception as e:
raise AirflowException("Jira operator error: %s" % str(e))
|
tuxfux-hlp-notes/python-batches | refs/heads/master | batch-67/19-files/myenv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py | 762 | import hashlib
import os
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
from ..cache import BaseCache
from ..controller import CacheController
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
# will open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
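# Usage sketch for the helper above (path and mode are illustrative):
#     with _secure_open_write("/tmp/cache-entry", 0o600) as fh:
#         fh.write(b"cached body")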
class FileCache(BaseCache):
def __init__(self, directory, forever=False, filemode=0o0600,
dirmode=0o0700, use_dir_lock=None, lock_class=None):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
if use_dir_lock:
lock_class = MkdirLockFile
if lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
def get(self, key):
name = self._fn(key)
if not os.path.exists(name):
return None
with open(name, 'rb') as fh:
return fh.read()
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
os.remove(name)
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
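# A hedged example of wiring FileCache into CacheControl; CacheControl() is the
# documented session wrapper, while the cache directory name is an assumption:
#     from pip._vendor import requests
#     from pip._vendor.cachecontrol import CacheControl
#     sess = CacheControl(requests.session(), cache=FileCache(".web_cache"))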
|
ojengwa/talk | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/gis/geos/tests/test_io.py | 74 | from __future__ import unicode_literals
import binascii
import unittest
from unittest import skipUnless
from django.contrib.gis import memoryview
from .. import HAS_GEOS
if HAS_GEOS:
from .. import GEOSGeometry, WKTReader, WKTWriter, WKBReader, WKBWriter, geos_version_info
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSIOTest(unittest.TestCase):
def test01_wktreader(self):
# Creating a WKTReader instance
wkt_r = WKTReader()
wkt = 'POINT (5 23)'
# read() should return a GEOSGeometry
ref = GEOSGeometry(wkt)
g1 = wkt_r.read(wkt.encode())
g2 = wkt_r.read(wkt)
for geom in (g1, g2):
self.assertEqual(ref, geom)
# Should only accept six.string_types objects.
self.assertRaises(TypeError, wkt_r.read, 1)
self.assertRaises(TypeError, wkt_r.read, memoryview(b'foo'))
def test02_wktwriter(self):
# Creating a WKTWriter instance, testing its ptr property.
wkt_w = WKTWriter()
self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type())
ref = GEOSGeometry('POINT (5 23)')
ref_wkt = 'POINT (5.0000000000000000 23.0000000000000000)'
self.assertEqual(ref_wkt, wkt_w.write(ref).decode())
def test03_wkbreader(self):
# Creating a WKBReader instance
wkb_r = WKBReader()
hex = b'000000000140140000000000004037000000000000'
wkb = memoryview(binascii.a2b_hex(hex))
ref = GEOSGeometry(hex)
# read() should return a GEOSGeometry on either a hex string or
# a WKB buffer.
g1 = wkb_r.read(wkb)
g2 = wkb_r.read(hex)
for geom in (g1, g2):
self.assertEqual(ref, geom)
bad_input = (1, 5.23, None, False)
for bad_wkb in bad_input:
self.assertRaises(TypeError, wkb_r.read, bad_wkb)
def test04_wkbwriter(self):
wkb_w = WKBWriter()
# Representations of 'POINT (5 23)' in hex -- one normal and
# the other with the byte order changed.
g = GEOSGeometry('POINT (5 23)')
hex1 = b'010100000000000000000014400000000000003740'
wkb1 = memoryview(binascii.a2b_hex(hex1))
hex2 = b'000000000140140000000000004037000000000000'
wkb2 = memoryview(binascii.a2b_hex(hex2))
self.assertEqual(hex1, wkb_w.write_hex(g))
self.assertEqual(wkb1, wkb_w.write(g))
# Ensuring bad byteorders are not accepted.
for bad_byteorder in (-1, 2, 523, 'foo', None):
# Equivalent of `wkb_w.byteorder = bad_byteorder`
self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder)
# Setting the byteorder to 0 (for Big Endian)
wkb_w.byteorder = 0
self.assertEqual(hex2, wkb_w.write_hex(g))
self.assertEqual(wkb2, wkb_w.write(g))
# Back to Little Endian
wkb_w.byteorder = 1
# Now, trying out the 3D and SRID flags.
g = GEOSGeometry('POINT (5 23 17)')
g.srid = 4326
hex3d = b'0101000080000000000000144000000000000037400000000000003140'
wkb3d = memoryview(binascii.a2b_hex(hex3d))
hex3d_srid = b'01010000A0E6100000000000000000144000000000000037400000000000003140'
wkb3d_srid = memoryview(binascii.a2b_hex(hex3d_srid))
# Ensuring bad output dimensions are not accepted
for bad_outdim in (-1, 0, 1, 4, 423, 'foo', None):
# Equivalent of `wkb_w.outdim = bad_outdim`
self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim)
# These tests will fail on 3.0.0 because of a bug that was fixed in 3.1:
# http://trac.osgeo.org/geos/ticket/216
if not geos_version_info()['version'].startswith('3.0.'):
# Now setting the output dimensions to be 3
wkb_w.outdim = 3
self.assertEqual(hex3d, wkb_w.write_hex(g))
self.assertEqual(wkb3d, wkb_w.write(g))
# Telling the WKBWriter to include the srid in the representation.
wkb_w.srid = True
self.assertEqual(hex3d_srid, wkb_w.write_hex(g))
self.assertEqual(wkb3d_srid, wkb_w.write(g))
|
WillisXChen/django-oscar | refs/heads/master | oscar/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py | 1724 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
import warnings
import re
import sys
from . import _base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import ihatexml
import lxml.etree as etree
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
comment_type = etree.Comment("asd").tag
class DocumentType(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
class Document(object):
def __init__(self):
self._elementTree = None
self._childNodes = []
def appendChild(self, element):
self._elementTree.getroot().addnext(element._element)
def _getChildNodes(self):
return self._childNodes
childNodes = property(_getChildNodes)
def testSerializer(element):
rv = []
finalText = None
infosetFilter = ihatexml.InfosetFilter()
def serializeElement(element, indent=0):
if not hasattr(element, "tag"):
if hasattr(element, "getroot"):
# Full tree case
rv.append("#document")
if element.docinfo.internalDTD:
if not (element.docinfo.public_id or
element.docinfo.system_url):
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
else:
dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
element.docinfo.root_name,
element.docinfo.public_id,
element.docinfo.system_url)
rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
next_element = element.getroot()
while next_element.getprevious() is not None:
next_element = next_element.getprevious()
while next_element is not None:
serializeElement(next_element, indent + 2)
next_element = next_element.getnext()
elif isinstance(element, str) or isinstance(element, bytes):
# Text in a fragment
assert isinstance(element, str) or sys.version_info.major == 2
rv.append("|%s\"%s\"" % (' ' * indent, element))
else:
# Fragment case
rv.append("#document-fragment")
for next_element in element:
serializeElement(next_element, indent + 2)
elif element.tag == comment_type:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
else:
assert isinstance(element, etree._Element)
nsmatch = etree_builders.tag_regexp.match(element.tag)
if nsmatch is not None:
ns = nsmatch.group(1)
tag = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append("|%s<%s %s>" % (' ' * indent, prefix,
infosetFilter.fromXmlName(tag)))
else:
rv.append("|%s<%s>" % (' ' * indent,
infosetFilter.fromXmlName(element.tag)))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
name = infosetFilter.fromXmlName(name)
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = infosetFilter.fromXmlName(name)
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
if finalText is not None:
rv.append("|%s\"%s\"" % (' ' * 2, finalText))
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
finalText = None
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
if finalText is not None:
rv.append("%s\"" % (' ' * 2, finalText))
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
implementation = etree
def __init__(self, namespaceHTMLElements, fullTree=False):
builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
infosetFilter = self.infosetFilter = ihatexml.InfosetFilter()
self.namespaceHTMLElements = namespaceHTMLElements
class Attributes(dict):
def __init__(self, element, value={}):
self._element = element
dict.__init__(self, value)
for key, value in self.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
class Element(builder.Element):
def __init__(self, name, namespace):
name = infosetFilter.coerceElement(name)
builder.Element.__init__(self, name, namespace=namespace)
self._attributes = Attributes(self)
def _setName(self, name):
self._name = infosetFilter.coerceElement(name)
self._element.tag = self._getETreeTag(
self._name, self._namespace)
def _getName(self):
return infosetFilter.fromXmlName(self._name)
name = property(_getName, _setName)
def _getAttributes(self):
return self._attributes
def _setAttributes(self, attributes):
self._attributes = Attributes(self, attributes)
attributes = property(_getAttributes, _setAttributes)
def insertText(self, data, insertBefore=None):
data = infosetFilter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
def appendChild(self, child):
builder.Element.appendChild(self, child)
class Comment(builder.Comment):
def __init__(self, data):
data = infosetFilter.coerceComment(data)
builder.Comment.__init__(self, data)
def _setData(self, data):
data = infosetFilter.coerceComment(data)
self._element.text = data
def _getData(self):
return self._element.text
data = property(_getData, _setData)
self.elementClass = Element
self.commentClass = builder.Comment
# self.fragmentClass = builder.DocumentFragment
_base.TreeBuilder.__init__(self, namespaceHTMLElements)
def reset(self):
_base.TreeBuilder.reset(self)
self.insertComment = self.insertCommentInitial
self.initial_comments = []
self.doctype = None
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._elementTree
else:
return self.document._elementTree.getroot()
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(list(element))
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
if not name:
warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
self.doctype = None
else:
coercedName = self.infosetFilter.coerceElement(name)
if coercedName != name:
warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)
doctype = self.doctypeClass(coercedName, publicId, systemId)
self.doctype = doctype
def insertCommentInitial(self, data, parent=None):
self.initial_comments.append(data)
def insertCommentMain(self, data, parent=None):
if (parent == self.document and
self.document._elementTree.getroot()[-1].tag == comment_type):
warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
super(TreeBuilder, self).insertComment(data, parent)
def insertRoot(self, token):
"""Create the document root"""
# Because of the way libxml2 works, it doesn't seem to be possible to
# alter information like the doctype after the tree has been parsed.
        # Therefore we need to use the built-in parser to create our initial
# tree, after which we can add elements like normal
docStr = ""
if self.doctype:
assert self.doctype.name
docStr += "<!DOCTYPE %s" % self.doctype.name
if (self.doctype.publicId is not None or
self.doctype.systemId is not None):
docStr += (' PUBLIC "%s" ' %
(self.infosetFilter.coercePubid(self.doctype.publicId or "")))
if self.doctype.systemId:
sysid = self.doctype.systemId
if sysid.find("'") >= 0 and sysid.find('"') >= 0:
warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
sysid = sysid.replace("'", 'U00027')
if sysid.find("'") >= 0:
docStr += '"%s"' % sysid
else:
docStr += "'%s'" % sysid
else:
docStr += "''"
docStr += ">"
if self.doctype.name != token["name"]:
warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
root = etree.fromstring(docStr)
# Append the initial comments:
for comment_token in self.initial_comments:
root.addprevious(etree.Comment(comment_token["data"]))
# Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Give the root element the right name
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
root.tag = etree_tag
# Add the root element to the internal child/open data structures
root_element = self.elementClass(name, namespace)
root_element._element = root
self.document._childNodes.append(root_element)
self.openElements.append(root_element)
# Reset to the default insert comment function
self.insertComment = self.insertCommentMain
|
mviitanen/marsmod | refs/heads/master | mcp/temp-python/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer/htmlserializer.py | 310 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
            if v not in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
        Whether to insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
        source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
# Alphabetical attributes must be last, as other filters
# could add attributes and alter the order
if self.alphabetical_attributes:
from ..filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
                if key not in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
def SerializeError(Exception):
"""Error in serialized tree"""
pass
|
Edraak/circleci-edx-platform | refs/heads/circleci-master | lms/djangoapps/instructor/tests/test_enrollment_store_provider.py | 136 | """
Tests exercising the base_store_provider file
"""
from django.test import TestCase
from instructor.enrollment_report import AbstractEnrollmentReportProvider
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
class BadImplementationAbstractEnrollmentReportProvider(AbstractEnrollmentReportProvider):
"""
Test implementation of EnrollmentProvider to assert that non-implementations of methods
raises the correct methods
"""
def get_user_profile(self, user_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_user_profile(user_id)
def get_enrollment_info(self, user, course_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_enrollment_info(user, course_id)
def get_payment_info(self, user, course_id):
"""
Fake implementation of method which calls base class, which should throw NotImplementedError
"""
super(BadImplementationAbstractEnrollmentReportProvider, self).get_payment_info(user, course_id)
class TestBaseNotificationDataProvider(TestCase):
"""
Cover the EnrollmentReportProvider class
"""
def test_cannot_create_instance(self):
"""
EnrollmentReportProvider is an abstract class and we should not be able
to create an instance of it
"""
with self.assertRaises(TypeError):
# parent of the BaseEnrollmentReportProvider is EnrollmentReportProvider
super(BadImplementationAbstractEnrollmentReportProvider, self)
def test_get_provider(self):
"""
Makes sure we get an instance of the registered enrollment provider
"""
provider = PaidCourseEnrollmentReportProvider()
self.assertIsNotNone(provider)
self.assertTrue(isinstance(provider, PaidCourseEnrollmentReportProvider))
def test_base_methods_exceptions(self):
"""
Asserts that all base-methods on the EnrollmentProvider interface will throw
an NotImplementedError
"""
bad_provider = BadImplementationAbstractEnrollmentReportProvider()
with self.assertRaises(NotImplementedError):
bad_provider.get_enrollment_info(None, None)
with self.assertRaises(NotImplementedError):
bad_provider.get_payment_info(None, None)
with self.assertRaises(NotImplementedError):
bad_provider.get_user_profile(None)
|
vikingMei/mxnet | refs/heads/master | example/rcnn/rcnn/io/rcnn.py | 16 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Fast R-CNN:
data =
{'data': [num_images, c, h, w],
'rois': [num_rois, 5]}
label =
{'label': [num_rois],
'bbox_target': [num_rois, 4 * num_classes],
'bbox_weight': [num_rois, 4 * num_classes]}
roidb extended format [image_index]
['image', 'height', 'width', 'flipped',
'boxes', 'gt_classes', 'gt_overlaps', 'max_classes', 'max_overlaps', 'bbox_targets']
"""
import numpy as np
import numpy.random as npr
from ..config import config
from ..io.image import get_image, tensor_vstack
from ..processing.bbox_transform import bbox_overlaps, bbox_transform
from ..processing.bbox_regression import expand_bbox_regression_targets
def get_rcnn_testbatch(roidb):
"""
return a dict of testbatch
:param roidb: ['image', 'flipped'] + ['boxes']
:return: data, label, im_info
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
im_rois = roidb[0]['boxes']
rois = im_rois
batch_index = 0 * np.ones((rois.shape[0], 1))
rois_array = np.hstack((batch_index, rois))[np.newaxis, :]
data = {'data': im_array,
'rois': rois_array}
label = {}
return data, label, im_info
def get_rcnn_batch(roidb):
"""
return a dict of multiple images
:param roidb: a list of dict, whose length controls batch size
['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
:return: data, label
"""
num_images = len(roidb)
imgs, roidb = get_image(roidb)
im_array = tensor_vstack(imgs)
assert config.TRAIN.BATCH_ROIS % config.TRAIN.BATCH_IMAGES == 0, \
        'BATCH_IMAGES {} must divide BATCH_ROIS {}'.format(config.TRAIN.BATCH_IMAGES, config.TRAIN.BATCH_ROIS)
rois_per_image = config.TRAIN.BATCH_ROIS / config.TRAIN.BATCH_IMAGES
fg_rois_per_image = np.round(config.TRAIN.FG_FRACTION * rois_per_image).astype(np.int)
rois_array = list()
labels_array = list()
bbox_targets_array = list()
bbox_weights_array = list()
for im_i in range(num_images):
roi_rec = roidb[im_i]
# infer num_classes from gt_overlaps
num_classes = roi_rec['gt_overlaps'].shape[1]
# label = class RoI has max overlap with
rois = roi_rec['boxes']
labels = roi_rec['max_classes']
overlaps = roi_rec['max_overlaps']
bbox_targets = roi_rec['bbox_targets']
im_rois, labels, bbox_targets, bbox_weights = \
sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes,
labels, overlaps, bbox_targets)
# project im_rois
# do not round roi
rois = im_rois
batch_index = im_i * np.ones((rois.shape[0], 1))
rois_array_this_image = np.hstack((batch_index, rois))
rois_array.append(rois_array_this_image)
# add labels
labels_array.append(labels)
bbox_targets_array.append(bbox_targets)
bbox_weights_array.append(bbox_weights)
rois_array = np.array(rois_array)
labels_array = np.array(labels_array)
bbox_targets_array = np.array(bbox_targets_array)
bbox_weights_array = np.array(bbox_weights_array)
data = {'data': im_array,
'rois': rois_array}
label = {'label': labels_array,
'bbox_target': bbox_targets_array,
'bbox_weight': bbox_weights_array}
return data, label
def sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes,
labels=None, overlaps=None, bbox_targets=None, gt_boxes=None):
"""
generate random sample of ROIs comprising foreground and background examples
:param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
:param fg_rois_per_image: foreground roi number
:param rois_per_image: total roi number
:param num_classes: number of classes
:param labels: maybe precomputed
:param overlaps: maybe precomputed (max_overlaps)
:param bbox_targets: maybe precomputed
:param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
:return: (labels, rois, bbox_targets, bbox_weights)
"""
if labels is None:
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# foreground RoI with FG_THRESH overlap
fg_indexes = np.where(overlaps >= config.TRAIN.FG_THRESH)[0]
# guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
fg_rois_per_this_image = int(np.minimum(fg_rois_per_image, fg_indexes.size))
# Sample foreground regions without replacement
if len(fg_indexes) > fg_rois_per_this_image:
fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_indexes = np.where((overlaps < config.TRAIN.BG_THRESH_HI) & (overlaps >= config.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = int(np.minimum(bg_rois_per_this_image, bg_indexes.size))
# Sample foreground regions without replacement
if len(bg_indexes) > bg_rois_per_this_image:
bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)
# indexes selected
keep_indexes = np.append(fg_indexes, bg_indexes)
neg_idx = np.where(overlaps < config.TRAIN.FG_THRESH)[0]
neg_rois = rois[neg_idx]
# pad more to ensure a fixed minibatch size
while keep_indexes.shape[0] < rois_per_image:
gap = np.minimum(len(neg_rois), rois_per_image - keep_indexes.shape[0])
gap_indexes = npr.choice(range(len(neg_rois)), size=gap, replace=False)
keep_indexes = np.append(keep_indexes, neg_idx[gap_indexes])
# select labels
labels = labels[keep_indexes]
# set labels of bg_rois to be 0
labels[fg_rois_per_this_image:] = 0
rois = rois[keep_indexes]
# load or compute bbox_target
if bbox_targets is not None:
bbox_target_data = bbox_targets[keep_indexes, :]
else:
targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = ((targets - np.array(config.TRAIN.BBOX_MEANS))
/ np.array(config.TRAIN.BBOX_STDS))
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
bbox_targets, bbox_weights = \
expand_bbox_regression_targets(bbox_target_data, num_classes)
return rois, labels, bbox_targets, bbox_weights
|
wenderen/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/testing/test_junitxml.py | 166 | # -*- coding: utf-8 -*-
from xml.dom import minidom
from _pytest.main import EXIT_NOTESTSCOLLECTED
import py
import sys
import os
from _pytest.junitxml import LogXML
import pytest
def runandparse(testdir, *args):
resultpath = testdir.tmpdir.join("junit.xml")
result = testdir.runpytest("--junitxml=%s" % resultpath, *args)
xmldoc = minidom.parse(str(resultpath))
return result, DomNode(xmldoc)
def assert_attr(node, **kwargs):
__tracebackhide__ = True
def nodeval(node, name):
anode = node.getAttributeNode(name)
if anode is not None:
return anode.value
expected = dict((name, str(value)) for name, value in kwargs.items())
on_node = dict((name, nodeval(node, name)) for name in expected)
assert on_node == expected
class DomNode(object):
def __init__(self, dom):
self.__node = dom
def __repr__(self):
return self.__node.toxml()
def find_first_by_tag(self, tag):
return self.find_nth_by_tag(tag, 0)
def _by_tag(self, tag):
return self.__node.getElementsByTagName(tag)
def find_nth_by_tag(self, tag, n):
items = self._by_tag(tag)
try:
nth = items[n]
except IndexError:
pass
else:
return type(self)(nth)
def find_by_tag(self, tag):
t = type(self)
return [t(x) for x in self.__node.getElementsByTagName(tag)]
def __getitem__(self, key):
node = self.__node.getAttributeNode(key)
if node is not None:
return node.value
def assert_attr(self, **kwargs):
__tracebackhide__ = True
return assert_attr(self.__node, **kwargs)
def toxml(self):
return self.__node.toxml()
@property
def text(self):
return self.__node.childNodes[0].wholeText
@property
def tag(self):
return self.__node.tagName
@property
    def next_sibling(self):
return type(self)(self.__node.nextSibling)
class TestPython:
def test_summing_simple(self, testdir):
testdir.makepyfile("""
import pytest
def test_pass():
pass
def test_fail():
assert 0
def test_skip():
pytest.skip("")
@pytest.mark.xfail
def test_xfail():
assert 0
@pytest.mark.xfail
def test_xpass():
assert 1
""")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=2)
def test_timing_function(self, testdir):
testdir.makepyfile("""
import time, pytest
def setup_module():
time.sleep(0.01)
def teardown_module():
time.sleep(0.01)
def test_sleep():
time.sleep(0.01)
""")
result, dom = runandparse(testdir)
node = dom.find_first_by_tag("testsuite")
tnode = node.find_first_by_tag("testcase")
val = tnode["time"]
assert round(float(val), 2) >= 0.03
def test_setup_error(self, testdir):
testdir.makepyfile("""
def pytest_funcarg__arg(request):
raise ValueError()
def test_function(arg):
pass
""")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(errors=1, tests=0)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_setup_error.py",
line="2",
classname="test_setup_error",
name="test_function")
fnode = tnode.find_first_by_tag("error")
fnode.assert_attr(message="test setup failure")
assert "ValueError" in fnode.toxml()
def test_skip_contains_name_reason(self, testdir):
testdir.makepyfile("""
import pytest
def test_skip():
pytest.skip("hello23")
""")
result, dom = runandparse(testdir)
assert result.ret == 0
node = dom.find_first_by_tag("testsuite")
node.assert_attr(skips=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_skip_contains_name_reason.py",
line="1",
classname="test_skip_contains_name_reason",
name="test_skip")
snode = tnode.find_first_by_tag("skipped")
snode.assert_attr(type="pytest.skip", message="hello23", )
def test_classname_instance(self, testdir):
testdir.makepyfile("""
class TestClass:
def test_method(self):
assert 0
""")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(failures=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_classname_instance.py",
line="1",
classname="test_classname_instance.TestClass",
name="test_method")
def test_classname_nested_dir(self, testdir):
p = testdir.tmpdir.ensure("sub", "test_hello.py")
p.write("def test_func(): 0/0")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(failures=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file=os.path.join("sub", "test_hello.py"),
line="0",
classname="sub.test_hello",
name="test_func")
def test_internal_error(self, testdir):
testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0")
testdir.makepyfile("def test_function(): pass")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(errors=1, tests=0)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(classname="pytest", name="internal")
fnode = tnode.find_first_by_tag("error")
fnode.assert_attr(message="internal error")
assert "Division" in fnode.toxml()
def test_failure_function(self, testdir):
testdir.makepyfile("""
import sys
def test_fail():
print ("hello-stdout")
sys.stderr.write("hello-stderr\\n")
raise ValueError(42)
""")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(failures=1, tests=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_failure_function.py",
line="1",
classname="test_failure_function",
name="test_fail")
fnode = tnode.find_first_by_tag("failure")
fnode.assert_attr(message="ValueError: 42")
assert "ValueError" in fnode.toxml()
systemout = fnode.next_sibling
assert systemout.tag == "system-out"
assert "hello-stdout" in systemout.toxml()
systemerr = systemout.next_sibling
assert systemerr.tag == "system-err"
assert "hello-stderr" in systemerr.toxml()
def test_failure_verbose_message(self, testdir):
testdir.makepyfile("""
import sys
def test_fail():
assert 0, "An error"
""")
result, dom = runandparse(testdir)
node = dom.find_first_by_tag("testsuite")
tnode = node.find_first_by_tag("testcase")
fnode = tnode.find_first_by_tag("failure")
fnode.assert_attr(message="AssertionError: An error assert 0")
def test_failure_escape(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.parametrize('arg1', "<&'", ids="<&'")
def test_func(arg1):
print(arg1)
assert 0
""")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(failures=3, tests=3)
for index, char in enumerate("<&'"):
tnode = node.find_nth_by_tag("testcase", index)
tnode.assert_attr(
file="test_failure_escape.py",
line="1",
classname="test_failure_escape",
name="test_func[%s]" % char)
sysout = tnode.find_first_by_tag('system-out')
text = sysout.text
assert text == '%s\n' % char
def test_junit_prefixing(self, testdir):
testdir.makepyfile("""
def test_func():
assert 0
class TestHello:
def test_hello(self):
pass
""")
result, dom = runandparse(testdir, "--junitprefix=xyz")
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(failures=1, tests=2)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_junit_prefixing.py",
line="0",
classname="xyz.test_junit_prefixing",
name="test_func")
tnode = node.find_nth_by_tag("testcase", 1)
tnode.assert_attr(
file="test_junit_prefixing.py",
line="3",
classname="xyz.test_junit_prefixing."
"TestHello",
name="test_hello")
def test_xfailure_function(self, testdir):
testdir.makepyfile("""
import pytest
def test_xfail():
pytest.xfail("42")
""")
result, dom = runandparse(testdir)
assert not result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(skips=1, tests=0)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_xfailure_function.py",
line="1",
classname="test_xfailure_function",
name="test_xfail")
fnode = tnode.find_first_by_tag("skipped")
fnode.assert_attr(message="expected test failure")
# assert "ValueError" in fnode.toxml()
def test_xfailure_xpass(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.xfail
def test_xpass():
pass
""")
result, dom = runandparse(testdir)
# assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(skips=1, tests=0)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_xfailure_xpass.py",
line="1",
classname="test_xfailure_xpass",
name="test_xpass")
fnode = tnode.find_first_by_tag("skipped")
fnode.assert_attr(message="xfail-marked test passes unexpectedly")
# assert "ValueError" in fnode.toxml()
def test_collect_error(self, testdir):
testdir.makepyfile("syntax error")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(errors=1, tests=0)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_collect_error.py",
name="test_collect_error")
assert tnode["line"] is None
fnode = tnode.find_first_by_tag("error")
fnode.assert_attr(message="collection failure")
assert "SyntaxError" in fnode.toxml()
def test_collect_skipped(self, testdir):
testdir.makepyfile("import pytest; pytest.skip('xyz')")
result, dom = runandparse(testdir)
assert result.ret == EXIT_NOTESTSCOLLECTED
node = dom.find_first_by_tag("testsuite")
node.assert_attr(skips=1, tests=0)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_collect_skipped.py",
name="test_collect_skipped")
# py.test doesn't give us a line here.
assert tnode["line"] is None
fnode = tnode.find_first_by_tag("skipped")
fnode.assert_attr(message="collection skipped")
def test_unicode(self, testdir):
value = 'hx\xc4\x85\xc4\x87\n'
testdir.makepyfile("""
# coding: latin1
def test_hello():
print (%r)
assert 0
""" % value)
result, dom = runandparse(testdir)
assert result.ret == 1
tnode = dom.find_first_by_tag("testcase")
fnode = tnode.find_first_by_tag("failure")
if not sys.platform.startswith("java"):
assert "hx" in fnode.toxml()
def test_assertion_binchars(self, testdir):
"""this test did fail when the escaping wasnt strict"""
testdir.makepyfile("""
M1 = '\x01\x02\x03\x04'
M2 = '\x01\x02\x03\x05'
def test_str_compare():
assert M1 == M2
""")
result, dom = runandparse(testdir)
print(dom.toxml())
def test_pass_captures_stdout(self, testdir):
testdir.makepyfile("""
def test_pass():
print('hello-stdout')
""")
result, dom = runandparse(testdir)
node = dom.find_first_by_tag("testsuite")
pnode = node.find_first_by_tag("testcase")
systemout = pnode.find_first_by_tag("system-out")
assert "hello-stdout" in systemout.toxml()
def test_pass_captures_stderr(self, testdir):
testdir.makepyfile("""
import sys
def test_pass():
sys.stderr.write('hello-stderr')
""")
result, dom = runandparse(testdir)
node = dom.find_first_by_tag("testsuite")
pnode = node.find_first_by_tag("testcase")
systemout = pnode.find_first_by_tag("system-err")
assert "hello-stderr" in systemout.toxml()
def test_setup_error_captures_stdout(self, testdir):
testdir.makepyfile("""
def pytest_funcarg__arg(request):
print('hello-stdout')
raise ValueError()
def test_function(arg):
pass
""")
result, dom = runandparse(testdir)
node = dom.find_first_by_tag("testsuite")
pnode = node.find_first_by_tag("testcase")
systemout = pnode.find_first_by_tag("system-out")
assert "hello-stdout" in systemout.toxml()
def test_setup_error_captures_stderr(self, testdir):
testdir.makepyfile("""
import sys
def pytest_funcarg__arg(request):
sys.stderr.write('hello-stderr')
raise ValueError()
def test_function(arg):
pass
""")
result, dom = runandparse(testdir)
node = dom.find_first_by_tag("testsuite")
pnode = node.find_first_by_tag("testcase")
systemout = pnode.find_first_by_tag("system-err")
assert "hello-stderr" in systemout.toxml()
def test_mangle_test_address():
from _pytest.junitxml import mangle_test_address
address = '::'.join(
["a/my.py.thing.py", "Class", "()", "method", "[a-1-::]"])
newnames = mangle_test_address(address)
assert newnames == ["a.my.py.thing", "Class", "method", "[a-1-::]"]
def test_dont_configure_on_slaves(tmpdir):
gotten = []
class FakeConfig:
def __init__(self):
self.pluginmanager = self
self.option = self
junitprefix = None
# XXX: shouldn't need tmpdir?
xmlpath = str(tmpdir.join('junix.xml'))
register = gotten.append
fake_config = FakeConfig()
from _pytest import junitxml
junitxml.pytest_configure(fake_config)
assert len(gotten) == 1
FakeConfig.slaveinput = None
junitxml.pytest_configure(fake_config)
assert len(gotten) == 1
class TestNonPython:
def test_summing_simple(self, testdir):
testdir.makeconftest("""
import pytest
def pytest_collect_file(path, parent):
if path.ext == ".xyz":
return MyItem(path, parent)
class MyItem(pytest.Item):
def __init__(self, path, parent):
super(MyItem, self).__init__(path.basename, parent)
self.fspath = path
def runtest(self):
raise ValueError(42)
def repr_failure(self, excinfo):
return "custom item runtest failed"
""")
testdir.tmpdir.join("myfile.xyz").write("hello")
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(errors=0, failures=1, skips=0, tests=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(name="myfile.xyz")
fnode = tnode.find_first_by_tag("failure")
fnode.assert_attr(message="custom item runtest failed")
assert "custom item runtest failed" in fnode.toxml()
def test_nullbyte(testdir):
# A null byte cannot occur in XML (see section 2.2 of the spec)
testdir.makepyfile("""
import sys
def test_print_nullbyte():
sys.stdout.write('Here the null -->' + chr(0) + '<--')
sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
assert False
""")
xmlf = testdir.tmpdir.join('junit.xml')
testdir.runpytest('--junitxml=%s' % xmlf)
text = xmlf.read()
assert '\x00' not in text
assert '#x00' in text
def test_nullbyte_replace(testdir):
# Check if the null byte gets replaced
testdir.makepyfile("""
import sys
def test_print_nullbyte():
sys.stdout.write('Here the null -->' + chr(0) + '<--')
sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
assert False
""")
xmlf = testdir.tmpdir.join('junit.xml')
testdir.runpytest('--junitxml=%s' % xmlf)
text = xmlf.read()
assert '#x0' in text
def test_invalid_xml_escape():
# Test some more invalid xml chars; the full range should really be
# tested, but let's just test the edges of the ranges instead.
# XXX This only tests low unicode character points for now as
# there are some issues with the testing infrastructure for
# the higher ones.
# XXX Testing 0xD (\r) is tricky as it overwrites the just written
# line in the output, so we skip it too.
global unichr
try:
unichr(65)
except NameError:
unichr = chr
invalid = (0x00, 0x1, 0xB, 0xC, 0xE, 0x19, 27, # issue #126
0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) # , 0x110000)
valid = (0x9, 0xA, 0x20, )
# 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)
from _pytest.junitxml import bin_xml_escape
for i in invalid:
got = bin_xml_escape(unichr(i)).uniobj
if i <= 0xFF:
expected = '#x%02X' % i
else:
expected = '#x%04X' % i
assert got == expected
for i in valid:
assert chr(i) == bin_xml_escape(unichr(i)).uniobj
def test_logxml_path_expansion(tmpdir, monkeypatch):
home_tilde = py.path.local(os.path.expanduser('~')).join('test.xml')
xml_tilde = LogXML('~%stest.xml' % tmpdir.sep, None)
assert xml_tilde.logfile == home_tilde
# this is here for when $HOME is not set correctly
monkeypatch.setenv("HOME", tmpdir)
home_var = os.path.normpath(os.path.expandvars('$HOME/test.xml'))
xml_var = LogXML('$HOME%stest.xml' % tmpdir.sep, None)
assert xml_var.logfile == home_var
def test_logxml_changingdir(testdir):
testdir.makepyfile("""
def test_func():
import os
os.chdir("a")
""")
testdir.tmpdir.mkdir("a")
result = testdir.runpytest("--junitxml=a/x.xml")
assert result.ret == 0
assert testdir.tmpdir.join("a/x.xml").check()
def test_logxml_makedir(testdir):
"""--junitxml should automatically create directories for the xml file"""
testdir.makepyfile("""
def test_pass():
pass
""")
result = testdir.runpytest("--junitxml=path/to/results.xml")
assert result.ret == 0
assert testdir.tmpdir.join("path/to/results.xml").check()
def test_escaped_parametrized_names_xml(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.parametrize('char', ["\\x00"])
def test_func(char):
assert char
""")
result, dom = runandparse(testdir)
assert result.ret == 0
node = dom.find_first_by_tag("testcase")
node.assert_attr(name="test_func[#x00]")
def test_double_colon_split_function_issue469(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.parametrize('param', ["double::colon"])
def test_func(param):
pass
""")
result, dom = runandparse(testdir)
assert result.ret == 0
node = dom.find_first_by_tag("testcase")
node.assert_attr(classname="test_double_colon_split_function_issue469")
node.assert_attr(name='test_func[double::colon]')
def test_double_colon_split_method_issue469(testdir):
testdir.makepyfile("""
import pytest
class TestClass:
@pytest.mark.parametrize('param', ["double::colon"])
def test_func(self, param):
pass
""")
result, dom = runandparse(testdir)
assert result.ret == 0
node = dom.find_first_by_tag("testcase")
node.assert_attr(
classname="test_double_colon_split_method_issue469.TestClass")
node.assert_attr(name='test_func[double::colon]')
def test_unicode_issue368(testdir):
path = testdir.tmpdir.join("test.xml")
log = LogXML(str(path), None)
ustr = py.builtin._totext("ВНИ!", "utf-8")
from _pytest.runner import BaseReport
class Report(BaseReport):
longrepr = ustr
sections = []
nodeid = "something"
location = 'tests/filename.py', 42, 'TestClass.method'
test_report = Report()
# hopefully this is not too brittle ...
log.pytest_sessionstart()
node_reporter = log._opentestcase(test_report)
node_reporter.append_failure(test_report)
node_reporter.append_collect_error(test_report)
node_reporter.append_collect_skipped(test_report)
node_reporter.append_error(test_report)
test_report.longrepr = "filename", 1, ustr
node_reporter.append_skipped(test_report)
test_report.longrepr = "filename", 1, "Skipped: 卡嘣嘣"
node_reporter.append_skipped(test_report)
test_report.wasxfail = ustr
node_reporter.append_skipped(test_report)
log.pytest_sessionfinish()
def test_record_property(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def other(record_xml_property):
record_xml_property("bar", 1)
def test_record(record_xml_property, other):
record_xml_property("foo", "<1");
""")
result, dom = runandparse(testdir, '-rw')
node = dom.find_first_by_tag("testsuite")
tnode = node.find_first_by_tag("testcase")
psnode = tnode.find_first_by_tag('properties')
pnodes = psnode.find_by_tag('property')
pnodes[0].assert_attr(name="bar", value="1")
pnodes[1].assert_attr(name="foo", value="<1")
result.stdout.fnmatch_lines('*C3*test_record_property.py*experimental*')
def test_record_property_same_name(testdir):
testdir.makepyfile("""
def test_record_with_same_name(record_xml_property):
record_xml_property("foo", "bar")
record_xml_property("foo", "baz")
""")
result, dom = runandparse(testdir, '-rw')
node = dom.find_first_by_tag("testsuite")
tnode = node.find_first_by_tag("testcase")
psnode = tnode.find_first_by_tag('properties')
pnodes = psnode.find_by_tag('property')
pnodes[0].assert_attr(name="foo", value="bar")
pnodes[1].assert_attr(name="foo", value="baz")
def test_random_report_log_xdist(testdir):
"""xdist calls pytest_runtest_logreport as they are executed by the slaves,
with nodes from several nodes overlapping, so junitxml must cope with that
to produce correct reports. #1064
"""
pytest.importorskip('xdist')
testdir.makepyfile("""
import pytest, time
@pytest.mark.parametrize('i', list(range(30)))
def test_x(i):
assert i != 22
""")
_, dom = runandparse(testdir, '-n2')
suite_node = dom.find_first_by_tag("testsuite")
failed = []
for case_node in suite_node.find_by_tag("testcase"):
if case_node.find_first_by_tag('failure'):
failed.append(case_node['name'])
assert failed == ['test_x[22]']
def test_runs_twice(testdir):
f = testdir.makepyfile('''
def test_pass():
pass
''')
result, dom = runandparse(testdir, f, f)
assert 'INTERNALERROR' not in result.stdout.str()
first, second = [x['classname'] for x in dom.find_by_tag("testcase")]
assert first == second
@pytest.mark.xfail(reason='hangs', run=False)
def test_runs_twice_xdist(testdir):
pytest.importorskip('xdist')
f = testdir.makepyfile('''
def test_pass():
pass
''')
result, dom = runandparse(
testdir, f,
'--dist', 'each', '--tx', '2*popen',)
assert 'INTERNALERROR' not in result.stdout.str()
first, second = [x['classname'] for x in dom.find_by_tag("testcase")]
assert first == second
def test_fancy_items_regression(testdir):
# issue 1259
testdir.makeconftest("""
import pytest
class FunItem(pytest.Item):
def runtest(self):
pass
class NoFunItem(pytest.Item):
def runtest(self):
pass
class FunCollector(pytest.File):
def collect(self):
return [
FunItem('a', self),
NoFunItem('a', self),
NoFunItem('b', self),
]
def pytest_collect_file(path, parent):
if path.check(ext='.py'):
return FunCollector(path, parent)
""")
testdir.makepyfile('''
def test_pass():
pass
''')
result, dom = runandparse(testdir)
assert 'INTERNALERROR' not in result.stdout.str()
items = sorted(
'%(classname)s %(name)s %(file)s' % x
for x in dom.find_by_tag("testcase"))
import pprint
pprint.pprint(items)
assert items == [
u'conftest a conftest.py',
u'conftest a conftest.py',
u'conftest b conftest.py',
u'test_fancy_items_regression a test_fancy_items_regression.py',
u'test_fancy_items_regression a test_fancy_items_regression.py',
u'test_fancy_items_regression b test_fancy_items_regression.py',
u'test_fancy_items_regression test_pass'
u' test_fancy_items_regression.py',
]
|
MOZGIII/mpv | refs/heads/master | waftools/detections/devices.py | 51 | __cdrom_devices_map__ = {
'win32': 'D:',
'cygwin': 'D:',
'darwin': '/dev/disk1',
'freebsd': '/dev/cd0',
'openbsd': '/dev/rcd0c',
'linux': '/dev/sr0',
'default': '/dev/cdrom'
}
__dvd_devices_map__ = {
'win32': 'D:',
'cygwin': 'D:',
'darwin': '/dev/rdiskN',
'freebsd': '/dev/cd0',
'openbsd': '/dev/rcd0c',
'linux': '/dev/sr0',
'default': '/dev/dvd'
}
def __default_cdrom_device__(ctx):
default = __cdrom_devices_map__['default']
return __cdrom_devices_map__.get(ctx.env.DEST_OS, default)
def __default_dvd_device__(ctx):
default = __dvd_devices_map__['default']
return __dvd_devices_map__.get(ctx.env.DEST_OS, default)
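# A minimal sketch of the fallback behaviour above (assuming waf sets
# ctx.env.DEST_OS to one of the map keys; any other value hits 'default'):
#   __cdrom_devices_map__.get('linux', __cdrom_devices_map__['default'])
#   # -> '/dev/sr0'
#   __cdrom_devices_map__.get('netbsd', __cdrom_devices_map__['default'])
#   # -> '/dev/cdrom'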
def configure(ctx):
ctx.define('DEFAULT_DVD_DEVICE', __default_dvd_device__(ctx))
ctx.define('DEFAULT_CDROM_DEVICE', __default_cdrom_device__(ctx))
|
toslunar/chainerrl | refs/heads/master | chainerrl/agents/a3c.py | 1 | from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import copy
from logging import getLogger
import chainer
from chainer import functions as F
import numpy as np
from chainerrl import agent
from chainerrl.misc import async_
from chainerrl.misc.batch_states import batch_states
from chainerrl.misc import copy_param
from chainerrl.recurrent import Recurrent
from chainerrl.recurrent import RecurrentChainMixin
from chainerrl.recurrent import state_kept
logger = getLogger(__name__)
class A3CModel(chainer.Link):
"""A3C model."""
def pi_and_v(self, obs):
"""Evaluate the policy and the V-function.
Args:
obs (Variable or ndarray): Batched observations.
Returns:
Distribution and Variable
"""
raise NotImplementedError()
def __call__(self, obs):
return self.pi_and_v(obs)
class A3CSeparateModel(chainer.Chain, A3CModel, RecurrentChainMixin):
"""A3C model that consists of a separate policy and V-function.
Args:
pi (Policy): Policy.
v (VFunction): V-function.
"""
def __init__(self, pi, v):
super().__init__(pi=pi, v=v)
def pi_and_v(self, obs):
pout = self.pi(obs)
vout = self.v(obs)
return pout, vout
class A3CSharedModel(chainer.Chain, A3CModel, RecurrentChainMixin):
"""A3C model where the policy and V-function share parameters.
Args:
shared (Link): Shared part. Nonlinearity must be included in it.
pi (Policy): Policy that receives output of shared as input.
v (VFunction): V-function that receives output of shared as input.
"""
def __init__(self, shared, pi, v):
super().__init__(shared=shared, pi=pi, v=v)
def pi_and_v(self, obs):
h = self.shared(obs)
pout = self.pi(h)
vout = self.v(h)
return pout, vout
class A3C(agent.AttributeSavingMixin, agent.AsyncAgent):
"""A3C: Asynchronous Advantage Actor-Critic.
See http://arxiv.org/abs/1602.01783
Args:
model (A3CModel): Model to train
optimizer (chainer.Optimizer): optimizer used to train the model
t_max (int): The model is updated after every t_max local steps
gamma (float): Discount factor [0,1]
beta (float): Weight coefficient for the entropy regularization term.
process_idx (int): Index of the process.
phi (callable): Feature extractor function
pi_loss_coef (float): Weight coefficient for the loss of the policy
v_loss_coef (float): Weight coefficient for the loss of the value
function
act_deterministically (bool): If set to True, choose the most probable
action in the act method.
batch_states (callable): method which makes a batch of observations.
default is `chainerrl.misc.batch_states.batch_states`
"""
process_idx = None
saved_attributes = ['model', 'optimizer']
def __init__(self, model, optimizer, t_max, gamma, beta=1e-2,
process_idx=0, phi=lambda x: x,
pi_loss_coef=1.0, v_loss_coef=0.5,
keep_loss_scale_same=False,
normalize_grad_by_t_max=False,
use_average_reward=False, average_reward_tau=1e-2,
act_deterministically=False,
average_entropy_decay=0.999,
average_value_decay=0.999,
batch_states=batch_states):
assert isinstance(model, A3CModel)
# Globally shared model
self.shared_model = model
# Thread specific model
self.model = copy.deepcopy(self.shared_model)
async_.assert_params_not_shared(self.shared_model, self.model)
self.optimizer = optimizer
self.t_max = t_max
self.gamma = gamma
self.beta = beta
self.phi = phi
self.pi_loss_coef = pi_loss_coef
self.v_loss_coef = v_loss_coef
self.keep_loss_scale_same = keep_loss_scale_same
self.normalize_grad_by_t_max = normalize_grad_by_t_max
self.use_average_reward = use_average_reward
self.average_reward_tau = average_reward_tau
self.act_deterministically = act_deterministically
self.average_value_decay = average_value_decay
self.average_entropy_decay = average_entropy_decay
self.batch_states = batch_states
self.t = 0
self.t_start = 0
self.past_action_log_prob = {}
self.past_action_entropy = {}
self.past_states = {}
self.past_rewards = {}
self.past_values = {}
self.average_reward = 0
# A3C won't use an explorer, but this attribute is referenced by run_dqn
self.explorer = None
# Stats
self.average_value = 0
self.average_entropy = 0
def sync_parameters(self):
copy_param.copy_param(target_link=self.model,
source_link=self.shared_model)
@property
def shared_attributes(self):
return ('shared_model', 'optimizer')
def update(self, statevar):
assert self.t_start < self.t
if statevar is None:
R = 0
else:
with state_kept(self.model):
_, vout = self.model.pi_and_v(statevar)
R = float(vout.array)
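# R bootstraps the return from the value estimate of the current state;
# when the episode has terminated (statevar is None) the tail return is zero.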
pi_loss = 0
v_loss = 0
for i in reversed(range(self.t_start, self.t)):
R *= self.gamma
R += self.past_rewards[i]
if self.use_average_reward:
R -= self.average_reward
v = self.past_values[i]
advantage = R - v
if self.use_average_reward:
self.average_reward += self.average_reward_tau * \
float(advantage.array)
# Accumulate gradients of policy
log_prob = self.past_action_log_prob[i]
entropy = self.past_action_entropy[i]
# Log probability is increased proportionally to advantage
pi_loss -= log_prob * float(advantage.array)
# Entropy is maximized
pi_loss -= self.beta * entropy
# Accumulate gradients of value function
v_loss += (v - R) ** 2 / 2
if self.pi_loss_coef != 1.0:
pi_loss *= self.pi_loss_coef
if self.v_loss_coef != 1.0:
v_loss *= self.v_loss_coef
# Normalize the loss of sequences truncated by terminal states
if self.keep_loss_scale_same and \
self.t - self.t_start < self.t_max:
factor = self.t_max / (self.t - self.t_start)
pi_loss *= factor
v_loss *= factor
if self.normalize_grad_by_t_max:
pi_loss /= self.t - self.t_start
v_loss /= self.t - self.t_start
if self.process_idx == 0:
logger.debug('pi_loss:%s v_loss:%s', pi_loss.array, v_loss.array)
total_loss = F.squeeze(pi_loss) + F.squeeze(v_loss)
# Compute gradients using thread-specific model
self.model.zerograds()
total_loss.backward()
# Copy the gradients to the globally shared model
self.shared_model.zerograds()
copy_param.copy_grad(
target_link=self.shared_model, source_link=self.model)
# Update the globally shared model
if self.process_idx == 0:
norm = sum(np.sum(np.square(param.grad))
for param in self.optimizer.target.params())
logger.debug('grad norm:%s', norm)
self.optimizer.update()
if self.process_idx == 0:
logger.debug('update')
self.sync_parameters()
if isinstance(self.model, Recurrent):
self.model.unchain_backward()
self.past_action_log_prob = {}
self.past_action_entropy = {}
self.past_states = {}
self.past_rewards = {}
self.past_values = {}
self.t_start = self.t
def act_and_train(self, obs, reward):
statevar = self.batch_states([obs], np, self.phi)
self.past_rewards[self.t - 1] = reward
if self.t - self.t_start == self.t_max:
self.update(statevar)
self.past_states[self.t] = statevar
pout, vout = self.model.pi_and_v(statevar)
action = pout.sample().array # Do not backprop through sampled actions
self.past_action_log_prob[self.t] = pout.log_prob(action)
self.past_action_entropy[self.t] = pout.entropy
self.past_values[self.t] = vout
self.t += 1
action = action[0]
if self.process_idx == 0:
logger.debug('t:%s r:%s a:%s pout:%s',
self.t, reward, action, pout)
# Update stats
self.average_value += (
(1 - self.average_value_decay) *
(float(vout.array[0]) - self.average_value))
self.average_entropy += (
(1 - self.average_entropy_decay) *
(float(pout.entropy.array[0]) - self.average_entropy))
return action
def act(self, obs):
# Use the process-local model for acting
with chainer.no_backprop_mode():
statevar = self.batch_states([obs], np, self.phi)
pout, _ = self.model.pi_and_v(statevar)
if self.act_deterministically:
return pout.most_probable.array[0]
else:
return pout.sample().array[0]
def stop_episode_and_train(self, state, reward, done=False):
self.past_rewards[self.t - 1] = reward
if done:
self.update(None)
else:
statevar = self.batch_states([state], np, self.phi)
self.update(statevar)
if isinstance(self.model, Recurrent):
self.model.reset_state()
def stop_episode(self):
if isinstance(self.model, Recurrent):
self.model.reset_state()
def load(self, dirname):
super().load(dirname)
copy_param.copy_param(target_link=self.shared_model,
source_link=self.model)
def get_statistics(self):
return [
('average_value', self.average_value),
('average_entropy', self.average_entropy),
]
|
LANGFAN/ardupilot | refs/heads/ekf2_34 | Tools/scripts/magfit_flashlog.py | 278 | #!/usr/bin/env python
''' fit best estimate of magnetometer offsets from ArduCopter flashlog
using the algorithm from Bill Premerlani
'''
import sys, time, os, math
# command line option handling
from optparse import OptionParser
parser = OptionParser("magfit_flashlog.py [options]")
parser.add_option("--verbose", action='store_true', default=False, help="verbose offset output")
parser.add_option("--gain", type='float', default=0.01, help="algorithm gain")
parser.add_option("--noise", type='float', default=0, help="noise to add")
parser.add_option("--max-change", type='float', default=10, help="max step change")
parser.add_option("--min-diff", type='float', default=50, help="min mag vector delta")
parser.add_option("--history", type='int', default=20, help="how many points to keep")
parser.add_option("--repeat", type='int', default=1, help="number of repeats through the data")
(opts, args) = parser.parse_args()
from rotmat import Vector3, Matrix3
if len(args) < 1:
print("Usage: magfit_flashlog.py [options] <LOGFILE...>")
sys.exit(1)
def noise():
'''a noise vector'''
from random import gauss
v = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
v.normalize()
return v * opts.noise
def find_offsets(data, ofs):
'''find mag offsets by applying Bill's "offsets revisited" algorithm
on the data
This is an implementation of the algorithm from:
http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf
'''
# a limit on the maximum change in each step
max_change = opts.max_change
# the gain factor for the algorithm
gain = opts.gain
data2 = []
for d in data:
d = d.copy() + noise()
d.x = float(int(d.x + 0.5))
d.y = float(int(d.y + 0.5))
d.z = float(int(d.z + 0.5))
data2.append(d)
data = data2
history_idx = 0
mag_history = data[0:opts.history]
for i in range(opts.history, len(data)):
B1 = mag_history[history_idx] + ofs
B2 = data[i] + ofs
diff = B2 - B1
diff_length = diff.length()
if diff_length <= opts.min_diff:
# the mag vector hasn't changed enough - we don't get any
# information from this
history_idx = (history_idx+1) % opts.history
continue
mag_history[history_idx] = data[i]
history_idx = (history_idx+1) % opts.history
# equation 6 of Bill's paper
delta = diff * (gain * (B2.length() - B1.length()) / diff_length)
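# i.e. delta = gain * (|B2| - |B1|) * (B2 - B1) / |B2 - B1|: the offsets
# are nudged so the corrected field magnitudes of the two samples converge.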
# limit the change from any one reading. This is to prevent
# single crazy readings from throwing off the offsets for a long
# time
delta_length = delta.length()
if max_change != 0 and delta_length > max_change:
delta *= max_change / delta_length
# set the new offsets
ofs = ofs - delta
if opts.verbose:
print(ofs)
return ofs
def plot_corrected_field(filename, data, offsets):
f = open(filename, mode='w')
for d in data:
corrected = d + offsets
f.write("%.1f\n" % corrected.length())
f.close()
def magfit(filename):
'''find best magnetometer offset fit to a log file'''
print("Processing log %s" % filename)
# open the log file
flog = open(filename, mode='r')
data = []
data_no_motors = []
mag = None
offsets = None
# now gather all the data
for line in flog:
if not line.startswith('COMPASS,'):
continue
line = line.rstrip()
line = line.replace(' ', '')
a = line.split(',')
ofs = Vector3(float(a[4]), float(a[5]), float(a[6]))
if offsets is None:
initial_offsets = ofs
offsets = ofs
motor_ofs = Vector3(float(a[7]), float(a[8]), float(a[9]))
mag = Vector3(float(a[1]), float(a[2]), float(a[3]))
mag = mag - offsets
data.append(mag)
data_no_motors.append(mag - motor_ofs)
print("Extracted %u data points" % len(data))
print("Current offsets: %s" % initial_offsets)
# run the fitting algorithm
ofs = initial_offsets
for r in range(opts.repeat):
ofs = find_offsets(data, ofs)
plot_corrected_field('plot.dat', data, ofs)
plot_corrected_field('initial.dat', data, initial_offsets)
plot_corrected_field('zero.dat', data, Vector3(0,0,0))
plot_corrected_field('hand.dat', data, Vector3(-25,-8,-2))
plot_corrected_field('zero-no-motors.dat', data_no_motors, Vector3(0,0,0))
print('Loop %u offsets %s' % (r, ofs))
sys.stdout.flush()
print("New offsets: %s" % ofs)
total = 0.0
for filename in args:
magfit(filename)
|
jswope00/griffinx | refs/heads/master | common/djangoapps/static_replace/management/commands/clear_collectstatic_cache.py | 114 | ###
### Script for clearing the 'staticfiles' cache used by collectstatic
###
from django.core.management.base import NoArgsCommand
from django.core.cache import get_cache
class Command(NoArgsCommand):
help = 'Clear the staticfiles cache populated by collectstatic'
def handle_noargs(self, **options):
staticfiles_cache = get_cache('staticfiles')
staticfiles_cache.clear()
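# Assumed invocation (the command name comes from this module's filename):
#   python manage.py clear_collectstatic_cache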
|
ahmed-mahran/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geos/tests/test_io.py | 105 | from __future__ import unicode_literals
import binascii
import unittest
from django.contrib.gis import memoryview
from django.utils.unittest import skipUnless
from .. import HAS_GEOS
if HAS_GEOS:
from .. import GEOSGeometry, WKTReader, WKTWriter, WKBReader, WKBWriter, geos_version_info
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSIOTest(unittest.TestCase):
def test01_wktreader(self):
# Creating a WKTReader instance
wkt_r = WKTReader()
wkt = 'POINT (5 23)'
# read() should return a GEOSGeometry
ref = GEOSGeometry(wkt)
g1 = wkt_r.read(wkt.encode())
g2 = wkt_r.read(wkt)
for geom in (g1, g2):
self.assertEqual(ref, geom)
# Should only accept six.string_types objects.
self.assertRaises(TypeError, wkt_r.read, 1)
self.assertRaises(TypeError, wkt_r.read, memoryview(b'foo'))
def test02_wktwriter(self):
# Creating a WKTWriter instance, testing its ptr property.
wkt_w = WKTWriter()
self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type())
ref = GEOSGeometry('POINT (5 23)')
ref_wkt = 'POINT (5.0000000000000000 23.0000000000000000)'
self.assertEqual(ref_wkt, wkt_w.write(ref).decode())
def test03_wkbreader(self):
# Creating a WKBReader instance
wkb_r = WKBReader()
hex = b'000000000140140000000000004037000000000000'
wkb = memoryview(binascii.a2b_hex(hex))
ref = GEOSGeometry(hex)
# read() should return a GEOSGeometry on either a hex string or
# a WKB buffer.
g1 = wkb_r.read(wkb)
g2 = wkb_r.read(hex)
for geom in (g1, g2):
self.assertEqual(ref, geom)
bad_input = (1, 5.23, None, False)
for bad_wkb in bad_input:
self.assertRaises(TypeError, wkb_r.read, bad_wkb)
def test04_wkbwriter(self):
wkb_w = WKBWriter()
# Representations of 'POINT (5 23)' in hex -- one normal and
# the other with the byte order changed.
g = GEOSGeometry('POINT (5 23)')
hex1 = b'010100000000000000000014400000000000003740'
wkb1 = memoryview(binascii.a2b_hex(hex1))
hex2 = b'000000000140140000000000004037000000000000'
wkb2 = memoryview(binascii.a2b_hex(hex2))
self.assertEqual(hex1, wkb_w.write_hex(g))
self.assertEqual(wkb1, wkb_w.write(g))
# Ensuring bad byteorders are not accepted.
for bad_byteorder in (-1, 2, 523, 'foo', None):
# Equivalent of `wkb_w.byteorder = bad_byteorder`
self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder)
# Setting the byteorder to 0 (for Big Endian)
wkb_w.byteorder = 0
self.assertEqual(hex2, wkb_w.write_hex(g))
self.assertEqual(wkb2, wkb_w.write(g))
# Back to Little Endian
wkb_w.byteorder = 1
# Now, trying out the 3D and SRID flags.
g = GEOSGeometry('POINT (5 23 17)')
g.srid = 4326
hex3d = b'0101000080000000000000144000000000000037400000000000003140'
wkb3d = memoryview(binascii.a2b_hex(hex3d))
hex3d_srid = b'01010000A0E6100000000000000000144000000000000037400000000000003140'
wkb3d_srid = memoryview(binascii.a2b_hex(hex3d_srid))
# Ensuring bad output dimensions are not accepted
for bad_outdim in (-1, 0, 1, 4, 423, 'foo', None):
# Equivalent of `wkb_w.outdim = bad_outdim`
self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim)
# These tests will fail on 3.0.0 because of a bug that was fixed in 3.1:
# http://trac.osgeo.org/geos/ticket/216
if not geos_version_info()['version'].startswith('3.0.'):
# Now setting the output dimensions to be 3
wkb_w.outdim = 3
self.assertEqual(hex3d, wkb_w.write_hex(g))
self.assertEqual(wkb3d, wkb_w.write(g))
# Telling the WKBWriter to include the srid in the representation.
wkb_w.srid = True
self.assertEqual(hex3d_srid, wkb_w.write_hex(g))
self.assertEqual(wkb3d_srid, wkb_w.write(g))
|
sporkchops81/titleplant | refs/heads/master | lib/python3.5/site-packages/setuptools/command/upload_docs.py | 390 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
from distutils.command.upload import upload
import os
import socket
import zipfile
import tempfile
import sys
import shutil
from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3
from pkg_resources import iter_entry_points
errors = 'surrogateescape' if PY3 else 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
if isinstance(s, unicode):
return s.encode(encoding, errors)
return s
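# For example: on Python 3, b('text') -> b'text' via UTF-8 encoding while
# byte strings pass through unchanged; on Python 2, str is already bytes
# and is returned as-is, so only unicode objects get encoded.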
class upload_docs(upload):
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
raise DistutilsOptionError(
"no files found in upload directory '%s'"
% self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
def upload_file(self, filename):
f = open(filename, 'rb')
content = f.read()
f.close()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = b(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b('\n--') + b(boundary)
end_boundary = sep_boundary + b('--')
body = []
for key, values in iteritems(data):
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = b(value)
body.append(sep_boundary)
body.append(b(title))
body.append(b("\n\n"))
body.append(value)
if value and value[-1:] == b('\r'):
body.append(b('\n')) # write an extra newline (lurve Macs)
body.append(end_boundary)
body.append(b("\n"))
body = b('').join(body)
self.announce("Submitting documentation to %s" % (self.repository),
log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = httplib.HTTPConnection(netloc)
elif schema == 'https':
conn = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = 'multipart/form-data; boundary=%s' % boundary
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
self.announce('Upload successful. Visit %s' % location,
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
|
demon-ru/iml-crm | refs/heads/master | addons/sale/sales_team.py | 61 | # -*- coding: utf-8 -*-
import calendar
from datetime import date
from dateutil import relativedelta
import json
from openerp import tools
from openerp.osv import fields, osv
class crm_case_section(osv.osv):
_inherit = 'crm.case.section'
def _get_sale_orders_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['sale.order']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
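# The window spans the last _period_number calendar months: from the first
# day of the earliest month up to the last day of the current month.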
res = {}
for id in ids:
res[id] = {}
created_domain = [('section_id', '=', id), ('state', '=', 'draft'), ('date_order', '>=', date_begin), ('date_order', '<=', date_end)]
validated_domain = [('section_id', '=', id), ('state', 'not in', ['draft', 'sent', 'cancel']), ('date_order', '>=', date_begin), ('date_order', '<=', date_end)]
res[id]['monthly_quoted'] = json.dumps(self.__get_bar_values(cr, uid, obj, created_domain, ['amount_total', 'date_order'], 'amount_total', 'date_order', context=context))
res[id]['monthly_confirmed'] = json.dumps(self.__get_bar_values(cr, uid, obj, validated_domain, ['amount_total', 'date_order'], 'amount_total', 'date_order', context=context))
return res
def _get_invoices_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['account.invoice.report']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
res = {}
for id in ids:
created_domain = [('section_id', '=', id), ('state', 'not in', ['draft', 'cancel']), ('date', '>=', date_begin), ('date', '<=', date_end)]
res[id] = json.dumps(self.__get_bar_values(cr, uid, obj, created_domain, ['price_total', 'date'], 'price_total', 'date', context=context))
return res
_columns = {
'use_quotations': fields.boolean('Quotations', help="Check this box to manage quotations in this sales team."),
'invoiced_forecast': fields.integer(string='Invoice Forecast',
help="Forecast of the invoice revenue for the current month. This is the amount the sales \n"
"team should invoice this month. It is used to compute the progression ratio \n"
" of the current and forecast revenue on the kanban view."),
'invoiced_target': fields.integer(string='Invoice Target',
help="Target of invoice revenue for the current month. This is the amount the sales \n"
"team estimates to be able to invoice this month."),
'monthly_quoted': fields.function(_get_sale_orders_data,
type='char', readonly=True, multi='_get_sale_orders_data',
string='Rate of created quotation per duration'),
'monthly_confirmed': fields.function(_get_sale_orders_data,
type='char', readonly=True, multi='_get_sale_orders_data',
string='Rate of validate sales orders per duration'),
'monthly_invoiced': fields.function(_get_invoices_data,
type='char', readonly=True,
string='Rate of sent invoices per duration'),
}
_defaults = {
'use_quotations': True,
}
def action_forecast(self, cr, uid, id, value, context=None):
return self.write(cr, uid, [id], {'invoiced_forecast': round(float(value))}, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
consulo/consulo-python | refs/heads/master | plugin/src/test/resources/intentions/beforeConvertStaticMethodToFunction.py | 83 | class MyClass(object):
"""
My class to show intention.
"""
def __init__(self):
self.a = 1
@staticmethod
def my_<caret>static_method():
import code
import time
time.sleep(100)
print code
|
zouyapeng/horizon-newtouch | refs/heads/master | openstack_dashboard/dashboards/project/data_processing/data_sources/tabs.py | 36 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class GeneralTab(tabs.Tab):
name = _("General Info")
slug = "data_source_details_tab"
template_name = ("project/data_processing.data_sources/_details.html")
def get_context_data(self, request):
data_source_id = self.tab_group.kwargs['data_source_id']
try:
data_source = saharaclient.data_source_get(request,
data_source_id)
except Exception:
exceptions.handle(self.tab_group.request,
_("Unable to retrieve data source details"))
data_source = {}
return {"data_source": data_source}
class DataSourceDetailsTabs(tabs.TabGroup):
slug = "data_source_details"
tabs = (GeneralTab,)
sticky = True
|
westernx/vee | refs/heads/master | vee/commands/relocate.py | 1 | import os
import stat
from vee.cli import style
from vee.commands.main import command, argument
from vee.home import PRIMARY_REPO
from vee import libs
from vee import log
@command(
argument('-n', '--dry-run', action='store_true'),
argument('--scan', action='store_true', help='look for installed libraries'),
argument('--rescan', action='store_true', help='redo previous scans'),
argument('--spec', default='AUTO'),
argument('path', nargs='?'),
help='relocate a package',
group='plumbing',
)
def relocate(args):
home = args.assert_home()
if args.scan or args.rescan:
rows = list(home.db.execute(
'''SELECT id, install_path FROM packages
ORDER BY created_at DESC
'''))
seen_paths = set()
for row in rows:
package_id, install_path = row
if not os.path.exists(install_path):
continue
if install_path in seen_paths:
continue
seen_paths.add(install_path)
print(install_path)
found = libs.get_installed_shared_libraries(home.db.connect(), package_id, install_path, rescan=args.rescan)
for lib in found:
print(' ' + lib)
return
if args.path:
con = home.db.connect()
target_cache = {}
libs.relocate(os.path.abspath(args.path), con, spec=args.spec, dry_run=args.dry_run, target_cache=target_cache)
|
annarev/tensorflow | refs/heads/master | tensorflow/python/saved_model/function_deserialization.py | 3 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for deserializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from absl import logging
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import saved_object_graph_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as function_lib
from tensorflow.python.framework import func_graph as func_graph_lib
from tensorflow.python.framework import function_def_to_graph as function_def_lib
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_tensor(t):
return isinstance(t, (ops.Tensor, resource_variable_ops.BaseResourceVariable))
# TODO(edloper): Update this to just use ConcreteFunction.__call__ with the
# structured signature.
def _call_concrete_function(function, inputs):
"""Calls a restored Function with structured inputs.
This differs from `function.__call__` in that inputs and outputs are
structured and that it casts inputs to tensors if needed.
Note: this does not check that non-tensor inputs match. That should be
done before via `_concrete_function_callable_with`.
Args:
function: ConcreteFunction to call.
inputs: Structured inputs compatible with
`function.graph.structured_input_signature`.
Returns:
The structured function output.
"""
expected_structure = function.graph.structured_input_signature
flatten_inputs = nest.flatten_up_to(
expected_structure, inputs, expand_composites=True)
flatten_expected = nest.flatten(expected_structure, expand_composites=True)
tensor_inputs = []
for arg, expected in zip(flatten_inputs, flatten_expected):
if isinstance(expected, tensor_spec.TensorSpec):
tensor_inputs.append(
ops.convert_to_tensor(arg, dtype_hint=expected.dtype))
result = function._call_flat(tensor_inputs, function._captured_inputs) # pylint: disable=protected-access
if isinstance(result, ops.Operation):
return None
return result
def _try_convert_to_tensor_spec(arg, dtype_hint):
"""Returns None or TensorSpec obtained if `arg` is converted to tensor."""
try:
# Note: try conversion in a FuncGraph to avoid polluting current context.
with func_graph_lib.FuncGraph(name="guess_conversion").as_default():
result = ops.convert_to_tensor(arg, dtype_hint=dtype_hint)
return tensor_spec.TensorSpec(shape=result.shape, dtype=result.dtype)
except (TypeError, ValueError):
return None
def _concrete_function_callable_with(function, inputs, allow_conversion):
"""Returns whether concrete `function` can be called with `inputs`."""
expected_structure = function.graph.structured_input_signature
try:
flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
except (TypeError, ValueError):
return False
for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
if isinstance(expected, tensor_spec.TensorSpec):
if allow_conversion:
arg = _try_convert_to_tensor_spec(arg, dtype_hint=expected.dtype)
if not _is_tensor(arg) and not isinstance(arg, tensor_spec.TensorSpec):
return False
if arg.dtype != expected.dtype:
return False
if not expected.shape.is_compatible_with(arg.shape):
return False
elif isinstance(expected, type_spec.TypeSpec):
if not expected.is_compatible_with(arg):
return False
elif _is_tensor(arg):
if id(arg) != id(expected):
return False
else:
if arg != expected:
return False
return True
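# A hypothetical example of the two-pass matching: for a trace taken with
# TensorSpec([None], tf.float32), a call with a Python list like [1.0, 2.0]
# fails the strict pass (allow_conversion=False) but matches once
# _try_convert_to_tensor_spec promotes the list to a compatible TensorSpec.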
def _deserialize_function_spec_as_nonmethod(function_spec_proto, coder):
"""Deserialize a FunctionSpec object from its proto representation."""
typeless_fullargspec = coder.decode_proto(function_spec_proto.fullargspec)
# Convert a method function into a non method.
if function_spec_proto.is_method:
if not typeless_fullargspec.args:
raise NotImplementedError(
"Missing support to deserialize a method function without a named "
"'self' argument.")
args = typeless_fullargspec.args[1:]
else:
args = typeless_fullargspec.args
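# Dropping the leading 'self' (when the original was a method) yields a
# plain function argspec; the remaining fields are copied verbatim below.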
fullargspec = tf_inspect.FullArgSpec(
args=args,
varargs=typeless_fullargspec.varargs,
varkw=typeless_fullargspec.varkw,
defaults=typeless_fullargspec.defaults,
kwonlyargs=typeless_fullargspec.kwonlyargs,
kwonlydefaults=typeless_fullargspec.kwonlydefaults,
annotations=typeless_fullargspec.annotations)
input_signature = coder.decode_proto(function_spec_proto.input_signature)
# See `tf.function` and the JitCompile proto for details.
jit_compile = {
saved_object_graph_pb2.FunctionSpec.JitCompile.DEFAULT: None,
saved_object_graph_pb2.FunctionSpec.JitCompile.ON: True,
saved_object_graph_pb2.FunctionSpec.JitCompile.OFF: False,
}.get(function_spec_proto.jit_compile)
return function_lib.FunctionSpec(fullargspec=fullargspec,
is_method=False,
input_signature=input_signature,
jit_compile=jit_compile)
# TODO(allenl): The fact that we can't derive ConcreteFunction calling
# conventions from the serialized input spec right now is unfortunate. Merging
# these would be good, maybe by adding TensorSpec names to cache keys so renamed
# keyword arguments would yield different ConcreteFunctions.
def setup_bare_concrete_function(saved_bare_concrete_function,
concrete_functions):
"""Makes a restored bare concrete function callable."""
concrete_function = concrete_functions[
saved_bare_concrete_function.concrete_function_name]
# pylint: disable=protected-access
concrete_function._arg_keywords = (
saved_bare_concrete_function.argument_keywords)
concrete_function._num_positional_args = (
saved_bare_concrete_function.allowed_positional_arguments)
if saved_bare_concrete_function.HasField("function_spec"):
coder = nested_structure_coder.StructureCoder()
function_spec = _deserialize_function_spec_as_nonmethod(
saved_bare_concrete_function.function_spec,
coder)
concrete_function._set_function_spec(function_spec)
# pylint: enable=protected-access
concrete_function.add_to_graph()
return concrete_function
class RestoredFunction(def_function.Function):
"""Wrapper class for a function that has been restored from saved state.
See `def_function.Function`.
"""
def __init__(self, python_function, name, function_spec, concrete_functions):
# TODO(mdan): We may enable autograph once exceptions are supported.
super(RestoredFunction, self).__init__(
python_function, name, autograph=False,
jit_compile=function_spec.jit_compile)
self.concrete_functions = concrete_functions
self._function_spec = function_spec
# Prevent RestoredFunction from spamming users with frequent tracing
# warnings.
self._omit_frequent_tracing_warning = True
@property
def _run_functions_eagerly(self):
# We do not have access to the original python function, and thus, we
# cannot meaningfully do anything but call our concrete function graphs
# under the hood.
#
# Attempting to call our bespoke python function (i.e.
# `restored_function_body`) will work so long as the user passes in all
# required and optional arguments. If an optional argument is missing,
# however, the call will break. For this reason, we instead skip the
# eager call path altogether if a user has enabled eager function execution
# via `tf.config.run_functions_eagerly`.
return False
def _list_all_concrete_functions_for_serialization(self):
return self.concrete_functions
def _defun_with_scope(self, scope):
func = super(RestoredFunction, self)._defun_with_scope(scope)
func._function_spec = self._function_spec # pylint: disable=protected-access
return func
def recreate_function(saved_function, concrete_functions):
"""Creates a `Function` from a `SavedFunction`.
Args:
saved_function: `SavedFunction` proto.
concrete_functions: map from function name to `ConcreteFunction`.
As a side effect of this function, the `FunctionSpec` from
`saved_function` is added to each `ConcreteFunction` in this map.
Returns:
A `Function`.
"""
# TODO(andresp): Construct a `Function` with the cache populated
# instead of creating a new `Function` backed by a Python layer to
# glue things together. Current approach is nesting functions deeper for each
# serialization cycle.
coder = nested_structure_coder.StructureCoder()
  # Note: handling method functions is tricky since make_decorator does not
  # allow control of "ismethod". Additionally, since restored functions do
  # not behave as methods (i.e. they always use the same captured tensors
  # independent of the object they are bound to), there is little value in
  # propagating that correctly.
#
# Ideally this conversion should happen at serialization time. But since
# there are SavedModels which have "ismethod" populated and have an extra
# argument that they expect to be ignored, we do it at deserialization.
function_spec = _deserialize_function_spec_as_nonmethod(
saved_function.function_spec,
coder)
def restored_function_body(*args, **kwargs):
"""Calls a restored function or raises an error if no matching function."""
if not saved_function.concrete_functions:
raise ValueError("Found zero restored functions for caller function.")
# This is the format of function.graph.structured_input_signature. At this
# point, the args and kwargs have already been canonicalized.
inputs = (args, kwargs)
# First try to find a concrete function that can be called without input
# conversions. This allows one to pick a more specific trace in case there
# was also a more expensive one that supported tensors.
for allow_conversion in [False, True]:
for function_name in saved_function.concrete_functions:
function = concrete_functions[function_name]
if _concrete_function_callable_with(function, inputs, allow_conversion):
return _call_concrete_function(function, inputs)
signature_descriptions = []
def _pretty_format_positional(positional):
return "Positional arguments ({} total):\n * {}".format(
len(positional), "\n * ".join(str(a) for a in positional))
for index, function_name in enumerate(saved_function.concrete_functions):
concrete_function = concrete_functions[function_name]
positional, keyword = concrete_function.structured_input_signature
signature_descriptions.append(
"Option {}:\n {}\n Keyword arguments: {}"
.format(index + 1, _pretty_format_positional(positional), keyword))
raise ValueError(
"Could not find matching function to call loaded from the SavedModel. "
"Got:\n {}\n Keyword arguments: {}\n\nExpected "
"these arguments to match one of the following {} option(s):\n\n{}"
.format(_pretty_format_positional(args), kwargs,
len(saved_function.concrete_functions),
"\n\n".join(signature_descriptions)))
concrete_function_objects = []
for concrete_function_name in saved_function.concrete_functions:
concrete_function_objects.append(concrete_functions[concrete_function_name])
for cf in concrete_function_objects:
cf._set_function_spec(function_spec) # pylint: disable=protected-access
restored_function = RestoredFunction(
restored_function_body,
restored_function_body.__name__,
function_spec,
concrete_function_objects)
return tf_decorator.make_decorator(
restored_function_body,
restored_function,
decorator_argspec=function_spec.fullargspec)
def load_function_def_library(library, load_shared_name_suffix=None):
"""Load a set of functions as concrete functions without captured inputs.
  Function names are manipulated during load such that they do not overlap
  with previously created ones.
Args:
library: FunctionDefLibrary proto message.
load_shared_name_suffix: If specified, used to uniquify shared
names. Otherwise, a unique name is generated.
Returns:
Map of original function names in the library to instances of
`ConcreteFunction` without captured inputs.
Raises:
    ValueError: if function dependencies have a cycle.
"""
library_function_names = set(fdef.signature.name for fdef in library.function)
functions = {}
renamed_functions = {}
# Our graph building code currently requires functions to be registered with
# some tf.Graph in order to import functions using the
# op-name-is-function-name calling convention. To avoid leaking memory into
# the global default graph when executing eagerly, we create a temporary
# Graph.
#
# TODO(allenl): Make this Graph creation unnecessary when executing eagerly by
# fixing function_def_to_graph_def.
if ops.executing_eagerly_outside_functions():
graph = ops.Graph()
else:
graph = ops.get_default_graph()
if load_shared_name_suffix is None:
load_shared_name_suffix = "_load_{}".format(ops.uid())
for fdef in _sort_function_defs(library, library_function_names):
copy = _fix_fdef(fdef, functions, load_shared_name_suffix)
    # There is no need to copy all functions into the function def graph. It
    # leads to an O(n^2) increase in memory when importing functions, and the
    # extra function definitions are a no-op since they were already imported
    # as functions before and are passed in explicitly (due to the topological
    # sort import).
with graph.as_default():
func_graph = function_def_lib.function_def_to_graph(copy)
_restore_gradient_functions(func_graph, renamed_functions)
for dep in _list_function_deps(fdef, library_function_names):
functions[dep].add_to_graph(func_graph)
    # We do not initialize the new ConcreteFunction's function_spec and/or
    # arg_keywords here (which are used to parse the structured and flat
    # signatures, respectively). ConcreteFunctions that are part of a saved
    # function are set up later by recreate_function(); bare ConcreteFunctions
    # are set up by setup_bare_concrete_function().
func = function_lib.ConcreteFunction(func_graph)
func.add_to_graph(graph)
functions[fdef.signature.name] = func
renamed_functions[func.name] = func
if any(op.type == "TRTEngineOp" for op in func_graph.get_operations()):
# TODO(b/150708051): Remove this hack once TensorRT SavedModel integration
# is fixed. Currently it's leaking memory to maintain bug compatibility
# with previous behavior.
func.add_to_graph(ops.get_default_graph())
return functions
def _restore_gradient_functions(func_graph, renamed_functions):
"""Populate function op's _gradient_function with default gradient."""
for op in func_graph.get_operations():
# TODO(andresp): This code assumes that the gradient registered for this
# function call is the default gradient for the function and not a custom
# one.
if op.type in ["StatefulPartitionedCall", "PartitionedCall"]:
function = renamed_functions[compat.as_bytes(
op.node_def.attr["f"].func.name)]
op._gradient_function = function._get_gradient_function() # pylint: disable=protected-access
def _sort_function_defs(library, library_function_names):
  """Return a topological sort of the FunctionDefs in a library."""
edges = collections.defaultdict(list)
in_count = collections.defaultdict(lambda: 0)
for fdef in library.function:
for dep in _list_function_deps(fdef, library_function_names):
edges[dep].append(fdef.signature.name)
in_count[fdef.signature.name] += 1
ready = [
fdef.signature.name
for fdef in library.function
if in_count[fdef.signature.name] == 0
]
output = []
while ready:
node = ready.pop()
output.append(node)
for dest in edges[node]:
in_count[dest] -= 1
if not in_count[dest]:
ready.append(dest)
if len(output) != len(library.function):
failed_to_resolve = sorted(set(in_count.keys()) - set(output))
    raise ValueError("There is a cyclic dependency between functions. "
                     "Could not resolve %r." % (failed_to_resolve,))
reverse = {fdef.signature.name: fdef for fdef in library.function}
return [reverse[x] for x in output]
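# A minimal, self-contained sketch (illustrative only, not used by the loader)
# of the Kahn-style topological sort performed by _sort_function_defs, on a
# plain name->deps dict instead of FunctionDef protos:
def _toposort_sketch(deps):
  """Orders names so that every dependency precedes its dependents."""
  edges = collections.defaultdict(list)
  in_count = collections.defaultdict(lambda: 0)
  for name, needed in deps.items():
    for dep in needed:
      edges[dep].append(name)
      in_count[name] += 1
  ready = [name for name in deps if in_count[name] == 0]
  output = []
  while ready:
    node = ready.pop()
    output.append(node)
    for dest in edges[node]:
      in_count[dest] -= 1
      if not in_count[dest]:
        ready.append(dest)
  # e.g. _toposort_sketch({"outer": ["inner"], "inner": []}) returns
  # ["inner", "outer"]: dependencies come out before their callers.
  return output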
def _check_op_has_custom_gradients(node_def):
"""Returns True if op has custom gradients."""
return ("_gradient_op_type" in node_def.attr and
node_def.op not in ["StatefulPartitionedCall", "PartitionedCall"])
def fix_node_def(node_def, functions, shared_name_suffix):
"""Replace functions calls and shared names in `node_def`."""
if node_def.op in functions:
node_def.op = functions[node_def.op].name
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
attr_value.func.name = functions[attr_value.func.name].name
elif attr_value.WhichOneof("value") == "list":
for fn in attr_value.list.func:
fn.name = functions[fn.name].name
# Fix old table creation bug.
if node_def.op == "HashTableV2":
if ("use_node_name_sharing" not in node_def.attr or
not node_def.attr["use_node_name_sharing"].b):
node_def.attr["use_node_name_sharing"].b = True
      # We are turning on node name sharing, so we have to make sure we don't
      # accidentally share a table resource.
shared_name_suffix += "_{}".format(ops.uid())
# TODO(b/124205571): Avoid accidental sharing and destruction of restored
# resources. For now uniquify "shared_name" when loading functions to avoid
# sharing.
# TODO: Add regression test for b/150826922.
op_def = op_def_registry.get(node_def.op)
if op_def:
attr = next((a for a in op_def.attr if a.name == "shared_name"), None)
if attr:
shared_name = None
if "shared_name" in node_def.attr and node_def.attr["shared_name"].s:
shared_name = node_def.attr["shared_name"].s
elif attr.default_value.s:
shared_name = compat.as_bytes(attr.default_value.s)
if not shared_name:
shared_name = compat.as_bytes(node_def.name)
node_def.attr["shared_name"].s = (
shared_name + compat.as_bytes(shared_name_suffix))
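# Illustrative only: a made-up node showing the uniquification above. A
# VarHandleOp declares a "shared_name" attr; since none is set on the node,
# the node's own name is used, so the shared name becomes b"v_load_7".
def _fix_node_def_sketch():
  fdef = function_pb2.FunctionDef()
  node = fdef.node_def.add()
  node.name = "v"
  node.op = "VarHandleOp"
  fix_node_def(node, functions={}, shared_name_suffix="_load_7")
  return node.attr["shared_name"].s  # b"v_load_7"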
def _fix_fdef(orig_fdef, functions, shared_name_suffix):
"""Fixes a FunctionDef proto to be loaded in current context.
  In particular, when loading a function library into an eager context, one
  must rename the functions to avoid conflicts with existing functions.
Args:
orig_fdef: FunctionDef proto to fix. It is not modified.
functions: map from function name to a ConcreteFunction instance.
shared_name_suffix: A unique string for this load which helps to avoid
`shared_name` collisions across loads. Two functions from the same load
using the same `shared_name` still need to share, but functions from
different loads with the same `shared_name` should not.
Returns:
A fixed copy of the original FunctionDef.
"""
fdef = function_pb2.FunctionDef()
fdef.CopyFrom(orig_fdef)
contains_custom_gradients = False
for node_def in fdef.node_def:
fix_node_def(node_def, functions, shared_name_suffix)
if not contains_custom_gradients:
contains_custom_gradients = _check_op_has_custom_gradients(node_def)
if contains_custom_gradients:
logging.warning(
"Importing a function (%s) with ops with custom gradients. Will likely "
"fail if a gradient is requested.", fdef.signature.name)
fdef.signature.name = _clean_function_name(fdef.signature.name)
return fdef
def _list_function_deps(fdef, library_function_names):
"""Find functions referenced in `fdef`."""
# TODO(andresp): Recurse into list attributes and into NameAttrList attrs both
# when listing deps and when fixing them. `function_def_to_graph` also
# requires fixes.
deps = set()
for node_def in fdef.node_def:
if node_def.op in library_function_names:
deps.add(node_def.op)
else:
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
deps.add(attr_value.func.name)
elif attr_value.WhichOneof("value") == "list":
for fn in attr_value.list.func:
deps.add(fn.name)
return deps
_FUNCTION_WRAPPER_NAME_REGEX = r"^%s(.*)_\d+$" % (
    function_lib._INFERENCE_PREFIX)  # pylint:disable=protected-access
def _clean_function_name(name):
"""Vanity function to keep the function names comprehensible."""
# Note: each time a function is wrapped into `function_lib.ConcreteFunction`
# its name becomes "__inference_<orig>_xyz".
match = re.search(_FUNCTION_WRAPPER_NAME_REGEX, name)
if match:
return match.group(1)
else:
return name
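# Illustrative only, assuming function_lib._INFERENCE_PREFIX is
# "__inference_": a serialized wrapper name cleans back to the original
# function name, while names that don't match the pattern pass through.
def _clean_function_name_sketch():
  assert _clean_function_name("__inference_my_fn_42") == "my_fn"
  assert _clean_function_name("my_fn") == "my_fn"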
|
curoverse/libcloud | refs/heads/trunk | libcloud/common/cloudstack.py | 25 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import copy
import hmac
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode
from libcloud.utils.py3 import urlquote
from libcloud.utils.py3 import b
from libcloud.common.types import ProviderError
from libcloud.common.base import ConnectionUserAndKey, PollingConnection
from libcloud.common.base import JsonResponse
from libcloud.common.types import MalformedResponseError
from libcloud.compute.types import InvalidCredsError
class CloudStackResponse(JsonResponse):
def parse_error(self):
if self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError('Invalid provider credentials')
value = None
body = self.parse_body()
if hasattr(body, 'values'):
values = list(body.values())[0]
if 'errortext' in values:
value = values['errortext']
if value is None:
value = self.body
if not value:
value = 'WARNING: error message text sent by provider was empty.'
error = ProviderError(value=value, http_code=self.status,
driver=self.connection.driver)
raise error
class CloudStackConnection(ConnectionUserAndKey, PollingConnection):
responseCls = CloudStackResponse
poll_interval = 1
request_method = '_sync_request'
timeout = 600
ASYNC_PENDING = 0
ASYNC_SUCCESS = 1
ASYNC_FAILURE = 2
def encode_data(self, data):
"""
        Most of the data is sent as part of query params (eeww), but in
        newer versions the userdata argument can be sent as urlencoded data
        in the request body.
"""
if data:
data = urlencode(data)
return data
def _make_signature(self, params):
signature = [(k.lower(), v) for k, v in list(params.items())]
signature.sort(key=lambda x: x[0])
pairs = []
for pair in signature:
key = urlquote(str(pair[0]), safe='[]')
value = urlquote(str(pair[1]), safe='[]')
item = '%s=%s' % (key, value)
            pairs.append(item)
signature = '&'.join(pairs)
signature = signature.lower().replace('+', '%20')
signature = hmac.new(b(self.key), msg=b(signature),
digestmod=hashlib.sha1)
return base64.b64encode(b(signature.digest()))
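    def _make_signature_example(self):
        """Illustrative only: signs a made-up request the same way
        _make_signature does; these are toy values, not real CloudStack
        credentials. Keys are lowercased and sorted, pairs urlencoded and
        joined with '&', the query string lowercased, then HMAC-SHA1 signed
        with self.key and base64 encoded."""
        params = {'apiKey': 'APIKEY', 'command': 'listZones',
                  'response': 'json'}
        return self._make_signature(params)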
def add_default_params(self, params):
params['apiKey'] = self.user_id
params['response'] = 'json'
return params
def pre_connect_hook(self, params, headers):
params['signature'] = self._make_signature(params)
return params, headers
def _async_request(self, command, action=None, params=None, data=None,
headers=None, method='GET', context=None):
if params:
context = copy.deepcopy(params)
else:
context = {}
# Command is specified as part of GET call
context['command'] = command
result = super(CloudStackConnection, self).async_request(
action=action, params=params, data=data, headers=headers,
method=method, context=context)
return result['jobresult']
def get_request_kwargs(self, action, params=None, data='', headers=None,
method='GET', context=None):
command = context['command']
request_kwargs = {'command': command, 'action': action,
'params': params, 'data': data,
'headers': headers, 'method': method}
return request_kwargs
def get_poll_request_kwargs(self, response, context, request_kwargs):
job_id = response['jobid']
params = {'jobid': job_id}
kwargs = {'command': 'queryAsyncJobResult', 'params': params}
return kwargs
def has_completed(self, response):
status = response.get('jobstatus', self.ASYNC_PENDING)
if status == self.ASYNC_FAILURE:
msg = response.get('jobresult', {}).get('errortext', status)
raise Exception(msg)
return status == self.ASYNC_SUCCESS
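    def _has_completed_example(self):
        """Illustrative only: made-up poll responses for the job states
        above; real responses come from queryAsyncJobResult."""
        assert not self.has_completed({'jobstatus': self.ASYNC_PENDING})
        assert self.has_completed({'jobstatus': self.ASYNC_SUCCESS})
        # A jobstatus of ASYNC_FAILURE raises, using the job's 'errortext'
        # when present.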
def _sync_request(self, command, action=None, params=None, data=None,
headers=None, method='GET'):
"""
This method handles synchronous calls which are generally fast
information retrieval requests and thus return 'quickly'.
"""
# command is always sent as part of "command" query parameter
if params:
params = copy.deepcopy(params)
else:
params = {}
params['command'] = command
result = self.request(action=self.driver.path, params=params,
data=data, headers=headers, method=method)
command = command.lower()
        # Workaround for older versions which don't include the "response"
        # suffix in the delete ingress rule response command name
        if not (command == 'revokesecuritygroupingress' and
                'revokesecuritygroupingressresponse' not in result.object):
            command = command + 'response'
if command not in result.object:
raise MalformedResponseError(
"Unknown response format",
body=result.body,
driver=self.driver)
result = result.object[command]
return result
class CloudStackDriverMixIn(object):
host = None
path = None
connectionCls = CloudStackConnection
def __init__(self, key, secret=None, secure=True, host=None, port=None):
host = host or self.host
super(CloudStackDriverMixIn, self).__init__(key, secret, secure, host,
port)
def _sync_request(self, command, action=None, params=None, data=None,
headers=None, method='GET'):
return self.connection._sync_request(command=command, action=action,
params=params, data=data,
headers=headers, method=method)
def _async_request(self, command, action=None, params=None, data=None,
headers=None, method='GET', context=None):
return self.connection._async_request(command=command, action=action,
params=params, data=data,
headers=headers, method=method,
context=context)
|
Arakmar/Sick-Beard | refs/heads/development | lib/hachoir_parser/guess.py | 90 | """
Parser list management:
- createParser() finds the best parser for a file.
"""
import os
from lib.hachoir_core.error import warning, info, HACHOIR_ERRORS
from lib.hachoir_parser import ValidateError, HachoirParserList
from lib.hachoir_core.stream import FileInputStream
from lib.hachoir_core.i18n import _
class QueryParser(object):
fallback = None
other = None
def __init__(self, tags):
self.validate = True
self.use_fallback = False
self.parser_args = None
self.db = HachoirParserList.getInstance()
self.parsers = set(self.db)
parsers = []
for tag in tags:
if not self.parsers:
break
parsers += self._getByTag(tag)
if self.fallback is None:
self.fallback = len(parsers) == 1
if self.parsers:
other = len(parsers)
parsers += list(self.parsers)
self.other = parsers[other]
self.parsers = parsers
def __iter__(self):
return iter(self.parsers)
def translate(self, name, value):
if name == "filename":
filename = os.path.basename(value).split(".")
if len(filename) <= 1:
value = ""
else:
value = filename[-1].lower()
name = "file_ext"
return name, value
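    def translate_example(self):
        """Illustrative only: "filename" tags are rewritten as lowercased
        "file_ext" tags; a name without an extension yields an empty value."""
        assert self.translate("filename", u"movie.AVI") == ("file_ext", "avi")
        assert self.translate("filename", u"README") == ("file_ext", "")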
def _getByTag(self, tag):
if tag is None:
self.parsers.clear()
return []
elif callable(tag):
parsers = [ parser for parser in self.parsers if tag(parser) ]
for parser in parsers:
self.parsers.remove(parser)
elif tag[0] == "class":
self.validate = False
return [ tag[1] ]
elif tag[0] == "args":
self.parser_args = tag[1]
return []
else:
tag = self.translate(*tag)
parsers = []
if tag is not None:
key = tag[0]
byname = self.db.bytag.get(key,{})
if tag[1] is None:
values = byname.itervalues()
else:
values = byname.get(tag[1],()),
if key == "id" and values:
self.validate = False
for value in values:
for parser in value:
if parser in self.parsers:
parsers.append(parser)
self.parsers.remove(parser)
return parsers
def parse(self, stream, fallback=True):
fb = None
warn = warning
for parser in self.parsers:
try:
parser_obj = parser(stream, validate=self.validate)
if self.parser_args:
for key, value in self.parser_args.iteritems():
setattr(parser_obj, key, value)
return parser_obj
except ValidateError, err:
res = unicode(err)
if fallback and self.fallback:
fb = parser
except HACHOIR_ERRORS, err:
res = unicode(err)
if warn:
if parser == self.other:
warn = info
warn(_("Skip parser '%s': %s") % (parser.__name__, res))
fallback = False
if self.use_fallback and fb:
warning(_("Force use of parser '%s'") % fb.__name__)
return fb(stream)
def guessParser(stream):
return QueryParser(stream.tags).parse(stream)
def createParser(filename, real_filename=None, tags=None):
"""
Create a parser from a file or returns None on error.
Options:
- filename (unicode): Input file name ;
- real_filename (str|unicode): Real file name.
"""
if not tags:
tags = []
stream = FileInputStream(filename, real_filename, tags=tags)
return guessParser(stream)
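# Illustrative usage (the path below is hypothetical):
#   parser = createParser(u"/tmp/movie.avi")
#   if parser is None:
#       print "Unable to guess a parser for this file"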
|
FoxerLee/iOS_sitp | refs/heads/master | Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/pylib/gyp/xcode_emulation.py | 36 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
"""Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
mapping = {'$(ARCHS_STANDARD)': archs}
if archs_including_64_bit:
mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
return mapping
class XcodeArchsDefault(object):
"""A class to resolve ARCHS variable from xcode_settings, resolving Xcode
macros and implementing filtering by VALID_ARCHS. The expansion of macros
depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
on the version of Xcode.
"""
# Match variable like $(ARCHS_STANDARD).
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
def __init__(self, default, mac, iphonesimulator, iphoneos):
self._default = (default,)
self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
def _VariableMapping(self, sdkroot):
"""Returns the dictionary of variable mapping depending on the SDKROOT."""
sdkroot = sdkroot.lower()
if 'iphoneos' in sdkroot:
return self._archs['ios']
elif 'iphonesimulator' in sdkroot:
return self._archs['iossim']
else:
return self._archs['mac']
  def _ExpandArchs(self, archs, sdkroot):
    """Expands variable references in ARCHS and removes duplicates."""
variable_mapping = self._VariableMapping(sdkroot)
expanded_archs = []
for arch in archs:
if self.variable_pattern.match(arch):
variable = arch
try:
variable_expansion = variable_mapping[variable]
for arch in variable_expansion:
if arch not in expanded_archs:
expanded_archs.append(arch)
except KeyError as e:
print 'Warning: Ignoring unsupported variable "%s".' % variable
elif arch not in expanded_archs:
expanded_archs.append(arch)
return expanded_archs
  def ActiveArchs(self, archs, valid_archs, sdkroot):
    """Expands variable references in ARCHS, and filters by VALID_ARCHS if it
    is defined (if not set, Xcode accepts any value in ARCHS; otherwise, only
    values present in VALID_ARCHS are kept)."""
expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
if valid_archs:
filtered_archs = []
for arch in expanded_archs:
if arch in valid_archs:
filtered_archs.append(arch)
expanded_archs = filtered_archs
return expanded_archs
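# Illustrative only: a hypothetical XcodeArchsDefault showing how ARCHS
# variables expand per SDKROOT and are then filtered by VALID_ARCHS.
def _ActiveArchsExample():
  default = XcodeArchsDefault(
      '$(ARCHS_STANDARD)',
      XcodeArchsVariableMapping(['x86_64']),          # mac
      XcodeArchsVariableMapping(['i386', 'x86_64']),  # iphonesimulator
      XcodeArchsVariableMapping(['armv7', 'arm64']))  # iphoneos
  # For an 'iphoneos' SDKROOT, $(ARCHS_STANDARD) expands to
  # ['armv7', 'arm64']; VALID_ARCHS then keeps only 'arm64'.
  return default.ActiveArchs(['$(ARCHS_STANDARD)'], ['arm64'], 'iphoneos')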
def GetXcodeArchsDefault():
"""Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depends on the version of Xcode used.
  All versions prior to Xcode 5.0, and versions from Xcode 5.1 onward, use
  $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 use
  $(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added in Xcode 5.0
  and deprecated with Xcode 5.1.
For "macosx" SDKROOT, all version starting with Xcode 5.0 includes 64-bit
architecture as part of $(ARCHS_STANDARD) and default to only building it.
For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
are also part of $(ARCHS_STANDARD).
  All those rules are encoded in the construction of the |XcodeArchsDefault|
  object to use depending on the version of Xcode detected. The object is
  cached for performance reasons."""
global XCODE_ARCHS_DEFAULT_CACHE
if XCODE_ARCHS_DEFAULT_CACHE:
return XCODE_ARCHS_DEFAULT_CACHE
xcode_version, _ = XcodeVersion()
if xcode_version < '0500':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['armv7']))
elif xcode_version < '0510':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD_INCLUDING_64_BIT)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s'],
['armv7', 'armv7s', 'arm64']))
else:
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s', 'arm64'],
['armv7', 'armv7s', 'arm64']))
return XCODE_ARCHS_DEFAULT_CACHE
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
  def _GetBundleBinaryPath(self):
    """Returns the name of the bundle binary produced by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
    if self.spec['type'] == 'shared_library' or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
  def GetExecutablePath(self):
    """Returns the qualified path to the executable of the bundle represented
    by this target. E.g. Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
    # These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
        # TODO: In this case, it depends on the target. Xcode passes
        # -mdynamic-no-pic by default for executables and possibly static libs
        # according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't map c++0x to c++11 so that c++0x can still be used with
    # older clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
      product_dir: The directory where products such as static and dynamic
          libraries are placed. This is added to the library search path.
      gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self.spec['type'] == 'loadable_module' and self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and self.spec['type'] == "executable"):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
def _GetIOSCodeSignIdentityKey(self, settings):
identity = settings.get('CODE_SIGN_IDENTITY')
if not identity:
return None
if identity not in XcodeSettings._codesigning_key_cache:
output = subprocess.check_output(
['security', 'find-identity', '-p', 'codesigning', '-v'])
for line in output.splitlines():
if identity in line:
fingerprint = line.split()[1]
cache = XcodeSettings._codesigning_key_cache
assert identity not in cache or fingerprint == cache[identity], (
"Multiple codesigning fingerprints for identity: %s" % identity)
XcodeSettings._codesigning_key_cache[identity] = fingerprint
return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
return l.replace('$(SDKROOT)', sdk_root)
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
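  def _AdjustLibrariesExample(self):
    """Illustrative only: the transformations AdjustLibraries applies.
    'Cocoa.framework' becomes '-framework Cocoa' and 'libcrypto.dylib'
    becomes '-lcrypto'; entries containing '$(SDKROOT)' keep their path with
    the SDK root substituted in."""
    return self.AdjustLibraries(['Cocoa.framework', 'libcrypto.dylib'])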
def _BuildMachineOSBuild(self):
return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
sdk_root = self._SdkRoot(configname)
if not sdk_root:
sdk_root = self._DefaultSdkRoot()
cache['DTSDKName'] = sdk_root
if xcode >= '0430':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductBuildVersion')
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['DTPlatformName'] = cache['DTSDKName']
if configname.endswith("iphoneos"):
cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductVersion')
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |sources[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
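# Illustrative result (paths hypothetical, arch=None); the gch path comes
# from gyp_path_to_build_output plus the '.gch' suffix added by _Gch:
#   GetObjDependencies(['a.c', 'b.mm'], ['a.o', 'b.o'])
#     -> [('a.c', 'a.o', 'out/pch-c.gch'), ('b.mm', 'b.o', 'out/pch-mm.gch')]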
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
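# Sketch of the expected shapes (illustrative; the 'out/pch-*' paths are
# assumed outputs of gyp_path_to_build_output, not real values):
#   GetInclude('c')       -> '-include out/pch-c'
#   GetPchBuildCommands() -> [('out/pch-c.gch', '-x c-header', 'c', header),
#                             ('out/pch-cc.gch', '-x c++-header', 'cc', header),
#                             ...]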
def XcodeVersion():
"""Returns a tuple of version and build version of installed Xcode."""
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
global XCODE_VERSION_CACHE
if XCODE_VERSION_CACHE:
return XCODE_VERSION_CACHE
try:
version_list = GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
# a bogus path set via xcode-select.
# In that case this may be a CLT-only install so fall back to
# checking that version.
if len(version_list) < 2:
raise GypError("xcodebuild returned unexpected results")
except:
version = CLTVersion()
if version:
version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
else:
raise GypError("No Xcode or CLT version detected!")
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XCODE_VERSION_CACHE = (version, build)
return XCODE_VERSION_CACHE
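# Worked example of the normalization above (illustrative):
#   'Xcode 4.6.3' -> '463' -> right-pad to 3 chars -> zfill(4) -> '0463'
#   'Xcode 4.2'   -> '42'  -> '420'                -> '0420'
# so lexicographic comparisons such as version >= '0500' behave as intended.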
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
"""Returns the version of command-line tools from pkgutil."""
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def GetStdout(cmdlist):
"""Returns the content of standard output returned by invoking |cmdlist|.
Raises |GypError| if the command returns with a non-zero return code."""
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
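# Illustrative merge (hypothetical dicts, not from a real gyp file):
#   global xcode_settings: {'SDKROOT': 'macosx', 'ARCHS': 'x86_64'}
#   local xcode_settings:  {'ARCHS': 'i386'}
#   merged result:         {'SDKROOT': 'macosx', 'ARCHS': 'i386'}  # local wins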
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
# to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
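# Example mappings (assuming GetBundleResourceFolder() yields
# 'MyApp.app/Contents/Resources'; the resource names are hypothetical):
#   'images/icon.png'   -> 'MyApp.app/Contents/Resources/icon.png'
#   'en.lproj/Main.xib' -> 'MyApp.app/Contents/Resources/en.lproj/Main.nib'
# Note that directory prefixes other than '*.lproj' are dropped.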
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
gyp_path_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
# to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on an as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if XcodeVersion() >= '0500' and not env.get('SDKROOT'):
sdk_root = xcode_settings._SdkRoot(configuration)
if not sdk_root:
sdk_root = xcode_settings._XcodeSdkPath('')
env['SDKROOT'] = sdk_root
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
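# Shape of the returned environment (illustrative values for a bundle
# target; the exact keys depend on the target type, as coded above):
#   {'BUILT_PRODUCTS_DIR': '/abs/out/Debug', 'CONFIGURATION': 'Debug',
#    'PRODUCT_NAME': 'MyApp', 'WRAPPER_NAME': 'MyApp.app',
#    'INFOPLIST_PATH': 'MyApp.app/Contents/Info.plist', ...}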
def _NormalizeEnvVarReferences(s):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
s = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', s)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', s)
for match in matches:
to_replace, variable = match
assert '$(' not in variable, '$($(FOO)) variables not supported: ' + to_replace
s = s.replace(to_replace, '${' + variable + '}')
return s
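# Illustrative normalizations performed above:
#   '$FOO/bar'    -> '${FOO}/bar'
#   '$(FOO)-$BAZ' -> '${FOO}-${BAZ}'
#   '${FOO}'      -> '${FOO}'   (already normalized, unchanged)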
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in the
expansions list -- until no such variables are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
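# Illustrative expansion (the expansions list is hypothetical, shaped like
# the output of GetSortedXcodeEnv below):
#   expansions = [('A', 'x'), ('B', '${A}/y')]
#   ExpandEnvVars('$(B)/z', expansions)
# walks the list in reverse, so '$(B)/z' -> '${A}/y/z' -> 'x/y/z'.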
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
# Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError as e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
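# Illustrative ordering (hypothetical env):
#   env = {'A': 'plain', 'B': '${A}/sub', 'C': '${B}'}
# returns ['A', 'B', 'C']: every key appears after the keys it refers to.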
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
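# Illustrative output for a hypothetical spec with one postbuild named
# 'strip_binary' whose action is ['strip', 'mylib.dylib']:
#   ['echo POSTBUILD\\(mytarget\\) strip_binary',
#    'strip mylib.dylib']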
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
|
cadyyan/git-achievements-rewrite | refs/heads/gh-pages | GitAchievements/achievements/merchant.py | 1 | """
Merchant
Added an external repository with git remote add
"""
from GitAchievements.achievements import UsageLeveledAchievement
class MerchantAchievement(UsageLeveledAchievement):
"""
Merchant
Added an external repository with git remote add
"""
name = 'Merchant'
description = 'Added an external repository with git remote add'
cmd = 'remote add'
|
chhsiao90/gviewer | refs/heads/master | gviewer/store.py | 1 | class BaseDataStore(object):
""" Base absctract class for data store
Attributes:
walkers: list of Walkers, that would listener any message received from data store
"""
def __init__(self):
self.walkers = []
def on_message(self, message):
transformed_msg = self.transform(message)
for walker in self.walkers:
walker.recv(transformed_msg)
def register(self, walker):
self.walkers.append(walker)
def unregister(self, walker):
self.walkers.remove(walker)
def transform(self, msg):
return msg
def setup(self):
raise NotImplementedError
class StaticDataStore(BaseDataStore):
"""
Used for static, unmodified data that is loaded once during setup
Attributes:
messages: list of messages (any type)
"""
def __init__(self, messages):
super(StaticDataStore, self).__init__()
self.messages = messages
def setup(self):
for message in self.messages:
self.on_message(message)
class AsyncDataStore(BaseDataStore):
"""
Used for async data
Attributes:
register_func: callable that accepts a callable to register as the on_message callback
"""
def __init__(self, register_func):
super(AsyncDataStore, self).__init__()
self.register_func = register_func
def setup(self):
self.register_func(self.on_message)
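# Minimal usage sketch (not part of the original module); EchoWalker is a
# hypothetical stand-in for gviewer's walker type, which only needs recv():
if __name__ == '__main__':
    class EchoWalker(object):
        def recv(self, message):
            print(message)
    store = StaticDataStore(['first', 'second'])
    store.register(EchoWalker())
    store.setup()  # replays both static messages to the registered walker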
|
vmanoria/bluemix-hue-filebrowser | refs/heads/master | hue-3.8.1-bluemix/desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/tests/urls.py | 44 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from django.http import HttpResponse
from django.conf.urls.defaults import *
def get_user(request):
return HttpResponse(request.user.username)
urlpatterns = patterns('',
(r'^getuser/$', get_user),
(r'^openid/', include('django_openid_auth.urls')),
)
|
40223137/w1717 | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/unittest/test/test_loader.py | 738 | import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('unittest.sdasfasfasdf')
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('', unittest)
except AttributeError as e:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''], unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest.TestCase):
@staticmethod
def foo():
return testcase_1
m.Foo = Foo
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest.TestSuite()])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
# Since cmp is now defunct, we simply verify that the results
# occur in the same order as they would with the default sort.
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
class Foo(unittest.TestCase):
def test_2(self): pass
def test_3(self): pass
def test_1(self): pass
test_names = ['test_2', 'test_3', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.suiteClass is unittest.TestSuite)
|
benspaulding/django | refs/heads/master | tests/regressiontests/forms/tests/util.py | 6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms.util import flatatt, ErrorDict, ErrorList
from django.test import TestCase
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy
class FormsUtilTestCase(TestCase):
# Tests for forms/util.py module.
def test_flatatt(self):
###########
# flatatt #
###########
self.assertEqual(flatatt({'id': "header"}), ' id="header"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this"}), ' class="news" title="Read this"')
self.assertEqual(flatatt({}), '')
def test_validation_error(self):
###################
# ValidationError #
###################
# Can take a string.
self.assertHTMLEqual(str(ErrorList(ValidationError("There was an error.").messages)),
'<ul class="errorlist"><li>There was an error.</li></ul>')
# Can take a unicode string.
self.assertHTMLEqual(unicode(ErrorList(ValidationError("Not \u03C0.").messages)),
'<ul class="errorlist"><li>Not π.</li></ul>')
# Can take a lazy string.
self.assertHTMLEqual(str(ErrorList(ValidationError(ugettext_lazy("Error.")).messages)),
'<ul class="errorlist"><li>Error.</li></ul>')
# Can take a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["Error one.", "Error two."]).messages)),
'<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>')
# Can take a mixture in a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["First error.", "Not \u03C0.", ugettext_lazy("Error.")]).messages)),
'<ul class="errorlist"><li>First error.</li><li>Not π.</li><li>Error.</li></ul>')
class VeryBadError:
def __unicode__(self): return "A very bad error."
# Can take a non-string.
self.assertHTMLEqual(str(ErrorList(ValidationError(VeryBadError()).messages)),
'<ul class="errorlist"><li>A very bad error.</li></ul>')
# Escapes non-safe input but not input marked safe.
example = 'Example of link: <a href="http://www.example.com/">example</a>'
self.assertHTMLEqual(str(ErrorList([example])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorList([mark_safe(example)])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': example})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': mark_safe(example)})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
|
joe-antognini/euler | refs/heads/master | 061/figuratives.py | 1 | #! /usr/bin/env python
#
# Project Euler
# Problem 61
#
# Find the sum of the only ordered set of six cyclic 4-digit numbers for
# which each polygonal type is represented by a different number in the set.
#
import time
start_time = time.time()
# First generate the figurate numbers
N = 150 # Some large number...
lb = 1000
ub = 10000
triangles = [n*(n+1)/2 for n in range(N)]
triangles = [elem for elem in triangles if lb <= elem < ub]
squares = [n*n for n in range(N)]
squares = [elem for elem in squares if lb <= elem < ub]
pentagons = [n*(3*n-1)/2 for n in range(N)]
pentagons = [elem for elem in pentagons if lb <= elem < ub]
hexagons = [n*(2*n-1) for n in range(N)]
hexagons = [elem for elem in hexagons if lb <= elem < ub]
heptagons = [n*(5*n-3)/2 for n in range(N)]
heptagons = [elem for elem in heptagons if lb <= elem < ub]
octagons = [n*(3*n-2) for n in range(N)]
octagons = [elem for elem in octagons if lb <= elem < ub]
figures = [squares, pentagons, hexagons, heptagons, octagons]
#figures = [squares, pentagons]
def split_integer(n):
'''Return a list with the first two digits and last two digits.'''
if n < lb or n >= ub:
raise ValueError('split_integer(): n must be four digits long')
first = n / 100
last = n % 100
return [first, last]
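# An illustrative doctest for the splitting step (example value is arbitrary):
# >>> split_integer(8128)
# [81, 28]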
def find_index(lst, n):
'''Return the index of the first element of the list whose leading two
digits are greater than or equal to n. Returns len(lst) when no such
element exists.'''
# Standard iterative lower-bound search; the recursive version could land
# on a later duplicate and skip earlier candidates with the same prefix.
lo, hi = 0, len(lst)
while lo < hi:
mid = (lo + hi) / 2
if split_integer(lst[mid])[0] < n:
lo = mid + 1
else:
hi = mid
return lo
def next_link(cur_figures, prev, first_half):
if len(cur_figures) == 0 and prev == first_half:
return True
elif len(cur_figures) == 0 and prev != first_half:
return False
# Find the next link
for figure in cur_figures:
i = find_index(figure, prev)
other_figures = [elem for elem in cur_figures if elem != figure]
while i < len(figure) and split_integer(figure[i])[0] == prev: # guard against running off the end
next = split_integer(figure[i])[1]
if next_link(other_figures, next, first_half):
global total
total += figure[i]
return True
i += 1
return False
for elem in triangles:
total = 0
prev, next = split_integer(elem)
if next_link(figures, next, prev):
total += elem
break
print "Solution:", total
print "Running time:", time.time() - start_time, "s"
|
rkarpuzov/Aurumcoin-0.12 | refs/heads/master | qa/rpc-tests/test_framework/bignum.py | 230 | #
#
# bignum.py
#
# This file is copied from python-bitcoinlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# bitcoin-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
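# An illustrative round-trip doctest: converting to the little-endian vch
# format and back is lossless, including for negative values.
# >>> vch2bn(bn2vch(1000))
# 1000
# >>> vch2bn(bn2vch(-1000))
# -1000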
|
jtg-gg/skia | refs/heads/dev12-m41 | platform_tools/android/tests/android_framework_gyp_tests.py | 145 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test android_framework_gyp.py
"""
import os
import shutil
import sys
import tempfile
import test_variables
import unittest
sys.path.append(test_variables.ANDROID_DIR)
import gyp_gen.android_framework_gyp
GYPD_SUFFIX = ".gypd"
GYP_SUFFIX = ".gyp"
GYPI_SUFFIX = ".gypi"
OTHER_SUFFIX = ".txt"
class CleanGypdTest(unittest.TestCase):
def setUp(self):
self.__tmp_dir = tempfile.mkdtemp()
self.__num_files = 10
# Fill the dir with four types of files. .gypd files should be deleted by
# clean_gypd_files(), while the rest should be left alone.
for i in range(self.__num_files):
self.create_file('%s%s' % (str(i), GYPD_SUFFIX))
self.create_file('%s%s' % (str(i), GYPI_SUFFIX))
self.create_file('%s%s' % (str(i), GYP_SUFFIX))
self.create_file('%s%s' % (str(i), OTHER_SUFFIX))
def create_file(self, basename):
"""Create a file named 'basename' in self.__tmp_dir.
"""
f = tempfile.mkstemp(dir=self.__tmp_dir)
os.rename(f[1], os.path.join(self.__tmp_dir, basename))
self.assert_file_exists(basename)
def assert_file_exists(self, basename):
"""Assert that 'basename' exists in self.__tmp_dir.
"""
full_name = os.path.join(self.__tmp_dir, basename)
self.assertTrue(os.path.exists(full_name))
def assert_file_does_not_exist(self, basename):
"""Assert that 'basename' does not exist in self.__tmp_dir.
"""
full_name = os.path.join(self.__tmp_dir, basename)
self.assertFalse(os.path.exists(full_name))
def test_clean(self):
"""Test that clean_gypd_files() deletes .gypd files, and leaves others.
"""
gyp_gen.android_framework_gyp.clean_gypd_files(self.__tmp_dir)
for i in range(self.__num_files):
self.assert_file_exists('%s%s' % (str(i), GYPI_SUFFIX))
self.assert_file_exists('%s%s' % (str(i), GYP_SUFFIX))
self.assert_file_exists('%s%s' % (str(i), OTHER_SUFFIX))
# Only the GYPD files should have been deleted.
self.assert_file_does_not_exist('%s%s' % (str(i), GYPD_SUFFIX))
def tearDown(self):
shutil.rmtree(self.__tmp_dir)
def main():
loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(CleanGypdTest)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
main()
|
netroby/vitess | refs/heads/master | test/topo_flavor/server.py | 19 | #!/usr/bin/env python
# Copyright 2014, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import logging
class TopoServer(object):
"""Base class that defines the required interface."""
def setup(self):
"""Initialize the topo server."""
raise NotImplementedError()
def teardown(self):
"""Teardown the topo server."""
raise NotImplementedError()
def flags(self):
"""Return a list of args that tell a Vitess process to use this topo server."""
raise NotImplementedError()
def wipe(self):
"""Wipe the Vitess paths in the topo server."""
raise NotImplementedError()
def flavor(self):
"""Return the name of this topo server flavor."""
return self.flavor_name
flavor_map = {}
__server = None
def topo_server():
return __server
def set_topo_server_flavor(flavor):
global __server
if flavor in flavor_map:
__server = flavor_map[flavor]
logging.debug("Using topo server flavor '%s'", flavor)
elif not flavor:
if len(flavor_map) == 1:
(flavor, __server) = flavor_map.iteritems().next()
logging.debug("Using default topo server flavor '%s'", flavor)
else:
logging.error(
"No --topo-server-flavor specified. Registered flavors: [%s]",
",".join(flavor_map.keys()))
return
else:
logging.error(
"Unknown topo server flavor '%s'. Registered flavors: [%s]", flavor,
",".join(flavor_map.keys()))
return
__server.flavor_name = flavor
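# An illustrative sketch of how a flavor plugs in (the class and flavor name
# below are hypothetical): a concrete flavor module subclasses TopoServer,
# implements setup/teardown/flags/wipe, and registers an instance, e.g.
#
# class ExampleTopoServer(TopoServer):
# ... # implement setup(), teardown(), flags() and wipe()
# flavor_map['example'] = ExampleTopoServer()
#
# after which a test run selects it via set_topo_server_flavor('example').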
|
chrism333/xpcc | refs/heads/develop | tools/system_design/builder/cpp_identifier.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009, Roboterclub Aachen e.V.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Roboterclub Aachen e.V. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ROBOTERCLUB AACHEN E.V. ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ROBOTERCLUB AACHEN E.V. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import os
import builder_base
import filter.cpp as filter
class IdentifierBuilder(builder_base.Builder):
VERSION = "0.1"
def setup(self, optparser):
optparser.add_option(
"--namespace",
dest = "namespace",
default = "robot",
help = "Namespace of the generated identifiers.")
def generate(self):
# check the commandline options
if not self.options.outpath:
raise builder_base.BuilderException("You need to provide an output path!")
if self.options.namespace:
namespace = self.options.namespace
else:
raise builder_base.BuilderException("You need to provide a namespace!")
cppFilter = {
'enumElement': filter.enumElement,
'enumValue': filter.toHexValue,
}
template = self.template('templates/robot_identifier.tpl', filter=cppFilter)
components = []
for component in self.tree.components.iter(abstract=False):
components.append(component.flattened())
substitutions = {
'domains' : self.tree.domains,
'components': components,
'actions': self.tree.components.actions,
'events': self.tree.events,
'namespace': namespace
}
if os.path.splitext(self.options.outpath)[1] == '':
file = os.path.join(self.options.outpath, 'identifier.hpp')
else:
file = self.options.outpath
self.write(file, template.render(substitutions) + "\n")
# -----------------------------------------------------------------------------
if __name__ == '__main__':
IdentifierBuilder().run()
|
abhattad4/Digi-Menu | refs/heads/master | digimenu2/tests/view_tests/models.py | 160 | """
Regression tests for Django built-in views.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
def get_absolute_url(self):
return '/authors/%s/' % self.id
@python_2_unicode_compatible
class BaseArticle(models.Model):
"""
An abstract article Model so that we can create article models with and
without a get_absolute_url method (for create_update generic views tests).
"""
title = models.CharField(max_length=100)
slug = models.SlugField()
author = models.ForeignKey(Author)
class Meta:
abstract = True
def __str__(self):
return self.title
class Article(BaseArticle):
date_created = models.DateTimeField()
class UrlArticle(BaseArticle):
"""
An Article class with a get_absolute_url defined.
"""
date_created = models.DateTimeField()
def get_absolute_url(self):
return '/urlarticles/%s/' % self.slug
get_absolute_url.purge = True
class DateArticle(BaseArticle):
"""
An article Model with a DateField instead of DateTimeField,
for testing #7602
"""
date_created = models.DateField()
|
nerdless/lifelines | refs/heads/master | lifelines/utils/__init__.py | 3 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import warnings
from datetime import datetime
import numpy as np
from numpy.linalg import inv
import pandas as pd
from pandas import to_datetime
class StatError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
def qth_survival_times(q, survival_functions):
"""
This can be done much better.
Parameters:
q: a float between 0 and 1.
survival_functions: a (n,d) dataframe or numpy array.
If dataframe, will return index values (actual times)
If numpy array, will return indices.
Returns:
v: if d==1, returns a float, np.inf if infinity.
if d > 1, a DataFrame containing the first times the value was crossed.
"""
q = pd.Series(q)
assert (q <= 1).all() and (0 <= q).all(), 'q must be between 0 and 1'
survival_functions = pd.DataFrame(survival_functions)
if survival_functions.shape[1] == 1 and q.shape == (1,):
return survival_functions.apply(lambda s: qth_survival_time(q[0], s)).ix[0]
else:
return pd.DataFrame({_q: survival_functions.apply(lambda s: qth_survival_time(_q, s)) for _q in q})
def qth_survival_time(q, survival_function):
"""
Expects a Pandas series, returns the time when the qth probability is reached.
"""
if survival_function.iloc[-1] > q:
return np.inf
v = (survival_function <= q).idxmax(0)
return v
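# An illustrative doctest on a small synthetic survival curve:
# >>> sf = pd.Series([1.0, 0.75, 0.5, 0.25], index=[0, 1, 2, 3])
# >>> qth_survival_time(0.5, sf)
# 2
# >>> qth_survival_time(0.1, sf) # the curve never drops to 0.1
# inf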
def median_survival_times(survival_functions):
return qth_survival_times(0.5, survival_functions)
def group_survival_table_from_events(groups, durations, event_observed, birth_times=None, limit=-1):
"""
Joins multiple event series together into dataframes. A generalization of
`survival_table_from_events` to data with groups. Previously called `group_event_series` pre 0.2.3.
Parameters:
groups: a (n,) array of individuals' group ids.
durations: a (n,) array of durations of each individual
event_observed: a (n,) array of event observations, 1 if observed, 0 else.
birth_times: a (n,) array of numbers representing
when the subject was first observed. A subject's death event is then at [birth times + duration observed].
Normally set to all zeros, but can be positive or negative.
Output:
- np.array of unique groups
- dataframe of removal count data at event_times for each group, column names are 'removed:<group name>'
- dataframe of observed count data at event_times for each group, column names are 'observed:<group name>'
- dataframe of censored count data at event_times for each group, column names are 'censored:<group name>'
Example:
#input
group_survival_table_from_events(waltonG, waltonT, np.ones_like(waltonT)) #data available in test_suite.py
#output
[
array(['control', 'miR-137'], dtype=object),
removed:control removed:miR-137
event_at
6 0 1
7 2 0
9 0 3
13 0 3
15 0 2
,
observed:control observed:miR-137
event_at
6 0 1
7 2 0
9 0 3
13 0 3
15 0 2
,
censored:control censored:miR-137
event_at
6 0 0
7 0 0
9 0 0
13 0 0
15 0 0
,
]
"""
n = np.max(groups.shape)
assert n == np.max(durations.shape) == np.max(event_observed.shape), "inputs must be of the same length."
if birth_times is None:
# Create some birth times
birth_times = np.zeros(np.max(durations.shape))
birth_times[:] = np.min(durations)
assert n == np.max(birth_times.shape), "inputs must be of the same length."
groups, durations, event_observed, birth_times = [pd.Series(np.reshape(data, (n,))) for data in [groups, durations, event_observed, birth_times]]
unique_groups = groups.unique()
for i, group in enumerate(unique_groups):
ix = groups == group
T = durations[ix]
C = event_observed[ix]
B = birth_times[ix]
group_name = str(group)
columns = [event_name + ":" + group_name for event_name in ['removed', 'observed', 'censored', 'entrance', 'at_risk']]
if i == 0:
data = survival_table_from_events(T, C, B, columns=columns)
else:
data = data.join(survival_table_from_events(T, C, B, columns=columns), how='outer')
data = data.fillna(0)
# hmmm pandas, it's too bad I can't do data.ix[:limit] and leave out the if.
if int(limit) != -1:
data = data.ix[:limit]
return unique_groups, data.filter(like='removed:'), data.filter(like='observed:'), data.filter(like='censored:')
def survival_table_from_events(death_times, event_observed, birth_times=None,
columns=["removed", "observed", "censored", "entrance", "at_risk"],
weights=None):
"""
Parameters:
death_times: (n,) array of event times
event_observed: (n,) boolean array, 1 if observed event, 0 is censored event.
birth_times: a (n,) array of numbers representing
when the subject was first observed. A subject's death event is then at [birth times + duration observed].
If None (default), birth_times are set to be the first observation or 0, which ever is smaller.
columns: a 5-length array of names for, in order, the removed, observed,
censored, entrance, and at-risk columns.
weights: Default None, otherwise (n,1) array. Optional argument to use weights for individuals.
Returns:
Pandas DataFrame with index as the unique times in event_times. The columns named
'removed' refers to the number of individuals who were removed from the population
by the end of the period. The column 'observed' refers to the number of removed
individuals who were observed to have died (i.e. not censored.) The column
'censored' is defined as 'removed' - 'observed' (the number of individuals who
left the population without an observed death, i.e. were censored)
Example:
removed observed censored entrance at_risk
event_at
0 0 0 0 11 11
6 1 1 0 0 11
7 2 2 0 0 10
9 3 3 0 0 8
13 3 3 0 0 5
15 2 2 0 0 2
"""
removed, observed, censored, entrance, at_risk = columns
death_times = np.asarray(death_times)
if birth_times is None:
birth_times = min(0, death_times.min()) * np.ones(death_times.shape[0])
else:
birth_times = np.asarray(birth_times)
if np.any(birth_times > death_times):
raise ValueError('birth time must be less than time of death.')
# deal with deaths and censorships
df = pd.DataFrame(death_times, columns=["event_at"])
df[removed] = 1 if weights is None else weights
df[observed] = np.asarray(event_observed)
death_table = df.groupby("event_at").sum()
death_table[censored] = (death_table[removed] - death_table[observed]).astype(int)
# deal with late births
births = pd.DataFrame(birth_times, columns=['event_at'])
births[entrance] = 1
births_table = births.groupby('event_at').sum()
event_table = death_table.join(births_table, how='outer', sort=True).fillna(0) # http://wesmckinney.com/blog/?p=414
event_table[at_risk] = event_table[entrance].cumsum() - event_table[removed].cumsum().shift(1).fillna(0)
return event_table.astype(float)
def survival_events_from_table(event_table, observed_deaths_col="observed", censored_col="censored"):
"""
This is the inverse of the function ``survival_table_from_events``.
Parameters
event_table: a pandas DataFrame with index as the durations (!!) and columns "observed" and "censored", referring to
the number of individuals that died and were censored at time t.
Returns
T: a np.array of durations of observation -- one element for each individual in the population.
C: a np.array of event observations -- one element for each individual in the population. 1 if observed, 0 else.
Ex: The survival table, as a pandas DataFrame:
observed censored
index
1 1 0
2 0 1
3 1 0
4 1 1
5 0 1
would return
T = np.array([ 1., 2., 3., 4., 4., 5.]),
C = np.array([ 1., 0., 1., 1., 0., 0.])
"""
columns = [observed_deaths_col, censored_col]
N = event_table[columns].sum().sum()
T = np.empty(N)
C = np.empty(N)
i = 0
for event_time, row in event_table.iterrows():
n = row[columns].sum()
T[i:i + n] = event_time
C[i:i + n] = np.r_[np.ones(row[columns[0]]), np.zeros(row[columns[1]])]
i += n
return T, C
def datetimes_to_durations(start_times, end_times, fill_date=datetime.today(), freq='D', dayfirst=False, na_values=None):
"""
This is a very flexible function for transforming arrays of start_times and end_times
to the proper format for lifelines: duration and event observation arrays.
Parameters:
start_times: an array, series or dataframe of start times. These can be strings, or datetimes.
end_times: an array, series or dataframe of end times. These can be strings, or datetimes.
These values can be None, or an empty string, which corresponds to censorship.
fill_date: the date to use if an entry of end_times is None or an empty string. This corresponds
to the last date of observation; anything after this date is also censored.
Default: datetime.today() (note: the default is evaluated once, at import time).
freq: the units of time to use. See pandas 'freq'. Default 'D' for days.
dayfirst: convert assuming European-style dates, i.e. day/month/year.
na_values : list of values to recognize as NA/NaN. Ex: ['', 'NaT']
Returns:
T: a array of floats representing the durations with time units given by freq.
C: a boolean array of event observations: 1 if death observed, 0 else.
"""
fill_date = pd.to_datetime(fill_date)
freq_string = 'timedelta64[%s]' % freq
start_times = pd.Series(start_times).copy()
end_times = pd.Series(end_times).copy()
C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""]))
end_times[~C] = fill_date
start_times_ = to_datetime(start_times, dayfirst=dayfirst)
end_times_ = to_datetime(end_times, dayfirst=dayfirst, coerce=True)
deaths_after_cutoff = end_times_ > fill_date
C[deaths_after_cutoff] = False
T = (end_times_ - start_times_).map(lambda x: x.astype(freq_string).astype(float))
if (T < 0).sum():
warnings.warn("Warning: some values of start_times are after end_times")
return T.values, C.values
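# An illustrative example; the dates and fill_date below are made up:
# >>> start = ['2013-10-10', '2013-10-09', '2013-10-10']
# >>> end = ['2013-10-13', '2013-10-10', None]
# >>> T, C = datetimes_to_durations(start, end, fill_date='2013-10-15')
# T is then [3., 1., 5.] (days) and C is [True, True, False]: the third
# subject has no end date and is censored at the fill_date.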
def l1_log_loss(event_times, predicted_event_times, event_observed=None):
"""
Calculates the l1 log-loss of predicted event times to true event times for *non-censored*
individuals only.
1/N \sum_{i} |log(t_i) - log(q_i)|
Parameters:
event_times: a (n,) array of observed survival times.
predicted_event_times: a (n,) array of predicted survival times.
event_observed: a (n,) array of censorship flags, 1 if observed,
0 if not. Default None assumes all observed.
Returns:
l1-log-loss: a scalar
"""
if event_observed is None:
event_observed = np.ones_like(event_times)
ix = event_observed.astype(bool)
return np.abs(np.log(event_times[ix]) - np.log(predicted_event_times[ix])).mean()
def l2_log_loss(event_times, predicted_event_times, event_observed=None):
"""
Calculates the l2 log-loss of predicted event times to true event times for *non-censored*
individuals only.
1/N \sum_{i} (log(t_i) - log(q_i))**2
Parameters:
event_times: a (n,) array of observed survival times.
predicted_event_times: a (n,) array of predicted survival times.
event_observed: a (n,) array of censorship flags, 1 if observed,
0 if not. Default None assumes all observed.
Returns:
l2-log-loss: a scalar
"""
if event_observed is None:
event_observed = np.ones_like(event_times)
ix = event_observed.astype(bool)
return np.power(np.log(event_times[ix]) - np.log(predicted_event_times[ix]), 2).mean()
def concordance_index(event_times, predicted_event_times, event_observed=None):
"""
Calculates the concordance index (C-index) between two series
of event times. The first is the real survival times from
the experimental data, and the other is the predicted survival
times from a model of some kind.
The concordance index is a value between 0 and 1 where,
0.5 is the expected result from random predictions,
1.0 is perfect concordance and,
0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0)
Score is usually 0.6-0.7 for survival models.
See:
Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in
developing models, evaluating assumptions and adequacy, and measuring and
reducing errors. Statistics in Medicine 1996;15(4):361-87.
Parameters:
event_times: a (n,) array of observed survival times.
predicted_event_times: a (n,) array of predicted survival times.
event_observed: a (n,) array of censorship flags, 1 if observed,
0 if not. Default None assumes all observed.
Returns:
c-index: a value between 0 and 1.
"""
event_times = np.array(event_times, dtype=float)
predicted_event_times = np.array(predicted_event_times, dtype=float)
# Allow for (n, 1) or (1, n) arrays
if event_times.ndim == 2 and (event_times.shape[0] == 1 or
event_times.shape[1] == 1):
# Flatten array
event_times = event_times.ravel()
# Allow for (n, 1) or (1, n) arrays
if (predicted_event_times.ndim == 2 and
(predicted_event_times.shape[0] == 1 or
predicted_event_times.shape[1] == 1)):
# Flatten array
predicted_event_times = predicted_event_times.ravel()
if event_times.shape != predicted_event_times.shape:
raise ValueError("Event times and predictions must have the same shape")
if event_times.ndim != 1:
raise ValueError("Event times can only be 1-dimensional: (n,)")
if event_observed is None:
event_observed = np.ones(event_times.shape[0], dtype=float)
else:
if event_observed.shape != event_times.shape:
raise ValueError("Observed events must be 1-dimensional of same length as event times")
event_observed = np.array(event_observed, dtype=float).ravel()
return _concordance_index(event_times,
predicted_event_times,
event_observed)
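# An illustrative doctest on fully-observed synthetic data:
# >>> T = np.array([1., 2., 3., 4., 5.])
# >>> concordance_index(T, T) # perfectly concordant predictions
# 1.0
# >>> concordance_index(T, -T) # perfectly anti-concordant predictions
# 0.0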
def coalesce(*args):
return next(s for s in args if s is not None)
def inv_normal_cdf(p):
def AandS_approximation(p):
# Formula 26.2.23 from A&S and help from John Cook ;)
# http://www.johndcook.com/normal_cdf_inverse.html
c_0 = 2.515517
c_1 = 0.802853
c_2 = 0.010328
d_1 = 1.432788
d_2 = 0.189269
d_3 = 0.001308
t = np.sqrt(-2 * np.log(p))
return t - (c_0 + c_1 * t + c_2 * t ** 2) / (1 + d_1 * t + d_2 * t * t + d_3 * t ** 3)
if p < 0.5:
return -AandS_approximation(p)
else:
return AandS_approximation(1 - p)
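# For reference: formula 26.2.23 is accurate to roughly 4.5e-4, e.g.
# inv_normal_cdf(0.975) returns ~1.9604 against the true value ~1.95996.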
def k_fold_cross_validation(fitters, df, duration_col, event_col=None,
k=5, evaluation_measure=concordance_index,
predictor="predict_median", predictor_kwargs={}):
"""
Perform cross validation on a dataset. If multiple models are provided,
all models will train on each of the k subsets.
fitter(s): one or several objects which possess a method:
fit(self, data, duration_col, event_col)
Note that the last two arguments will be given as keyword arguments,
and that event_col is optional. The objects must also have
the "predictor" method defined below.
df: a Pandas dataframe with necessary columns `duration_col` and `event_col`, plus
other covariates. `duration_col` refers to the lifetimes of the subjects. `event_col`
refers to whether the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: the column in the dataframe that contains the subjects' lifetimes.
event_col: the column in dataframe that contains the subject's death observation. If left
as None, assumes all individuals are non-censored.
k: the number of folds to perform. n/k of the data will be withheld for testing.
evaluation_measure: a function that accepts either (event_times, predicted_event_times),
or (event_times, predicted_event_times, event_observed)
and returns something (could be anything).
Default: statistics.concordance_index: (C-index)
between two series of event times
predictor: a string that matches a prediction method on the fitter instances.
For example, "predict_expectation" or "predict_percentile".
Default is "predict_median"
The interface for the method is:
predict(self, data, **optional_kwargs)
predictor_kwargs: keyword args to pass into predictor-method.
Returns:
(k,1) list of scores for each fold. The scores can be anything.
"""
# Make sure fitters is a list
try:
fitters = list(fitters)
except TypeError:
fitters = [fitters]
# Each fitter has its own scores
fitterscores = [[] for _ in fitters]
n, d = df.shape
df = df.copy()
if event_col is None:
event_col = 'E'
df[event_col] = 1.
df = df.reindex(np.random.permutation(df.index)).sort(event_col)
assignments = np.array((n // k + 1) * list(range(1, k + 1)))
assignments = assignments[:n]
testing_columns = df.columns - [duration_col, event_col]
for i in range(1, k + 1):
ix = assignments == i
training_data = df.ix[~ix]
testing_data = df.ix[ix]
T_actual = testing_data[duration_col].values
E_actual = testing_data[event_col].values
X_testing = testing_data[testing_columns]
for fitter, scores in zip(fitters, fitterscores):
# fit the fitter to the training data
fitter.fit(training_data, duration_col=duration_col, event_col=event_col)
T_pred = getattr(fitter, predictor)(X_testing, **predictor_kwargs).values
try:
scores.append(evaluation_measure(T_actual, T_pred, E_actual))
except TypeError:
scores.append(evaluation_measure(T_actual, T_pred))
# If a single fitter was given as argument, return a single result
if len(fitters) == 1:
return fitterscores[0]
else:
return fitterscores
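# An illustrative sketch; `df` and the column names 'T' and 'E' below are
# hypothetical, and any fitter exposing the fit/predict interface works:
# >>> from lifelines import CoxPHFitter
# >>> scores = k_fold_cross_validation(CoxPHFitter(), df,
# ... duration_col='T', event_col='E', k=5)
# >>> len(scores)
# 5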
def normalize(X, mean=None, std=None):
'''
Normalize X. If mean OR std is None, normalizes
X to have mean 0 and std 1.
'''
if mean is None or std is None:
mean = X.mean(0)
std = X.std(0)
return (X - mean) / std
def unnormalize(X, mean, std):
'''
Reverse a normalization. Requires the original mean and
standard deviation of the data set.
'''
return X * std + mean
def epanechnikov_kernel(t, T, bandwidth=1.):
M = 0.75 * (1 - ((t - T) / bandwidth) ** 2)
M[abs((t - T)) >= bandwidth] = 0
return M
def significance_code(p):
if p < 0.001:
return '***'
elif p < 0.01:
return '**'
elif p < 0.05:
return '*'
elif p < 0.1:
return '.'
else:
return ' '
def ridge_regression(X, Y, c1=0.0, c2=0.0, offset=None):
"""
Also known as Tikhonov regularization. This solves the minimization problem:
min_{beta} ||X beta - Y||^2 + c1 ||beta||^2 + c2 ||beta - offset||^2
One can find more information here: http://en.wikipedia.org/wiki/Tikhonov_regularization
Parameters:
X: a (n,d) numpy array
Y: a (n,) numpy array
c1: a scalar
c2: a scalar
offset: a (d,) numpy array.
Returns:
beta_hat: the solution to the minimization problem.
V = (X^T X + (c1+c2)I)^{-1} X^T
"""
n, d = X.shape
X = X.astype(float)
penalizer_matrix = (c1 + c2) * np.eye(d)
if offset is None:
offset = np.zeros((d,))
V_1 = inv(np.dot(X.T, X) + penalizer_matrix)
V_2 = (np.dot(X.T, Y) + c2 * offset)
beta = np.dot(V_1, V_2)
return beta, np.dot(V_1, X.T)
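# An illustrative doctest: with c1 = c2 = 0 this reduces to ordinary least
# squares, so an exactly-fittable system is recovered exactly.
# >>> X = np.array([[1., 0.], [0., 1.], [1., 1.]])
# >>> Y = np.array([1., 2., 3.])
# >>> beta, V = ridge_regression(X, Y)
# >>> np.allclose(beta, [1., 2.])
# True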
def _smart_search(minimizing_function, n, *args):
from scipy.optimize import fmin_powell
x = np.ones(n)
return fmin_powell(minimizing_function, x, args=args, disp=False)
def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse):
"""
Called to compute the Kaplan Meier and Nelson-Aalen estimates.
"""
if reverse:
events = events.sort_index(ascending=False)
at_risk = events['entrance'].sum() - events['removed'].cumsum().shift(1).fillna(0)
deaths = events['observed']
estimate_ = np.cumsum(_additive_f(at_risk, deaths)).sort_index().shift(-1).fillna(0)
var_ = np.cumsum(_additive_var(at_risk, deaths)).sort_index().shift(-1).fillna(0)
else:
deaths = events['observed']
at_risk = events['at_risk']
estimate_ = np.cumsum(_additive_f(at_risk, deaths))
var_ = np.cumsum(_additive_var(at_risk, deaths))
timeline = sorted(timeline)
estimate_ = estimate_.reindex(timeline, method='pad').fillna(0)
var_ = var_.reindex(timeline, method='pad')
var_.index.name = 'timeline'
estimate_.index.name = 'timeline'
return estimate_, var_
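# For reference: the Nelson-Aalen estimator supplies _additive_f = deaths / at_risk
# here, while Kaplan-Meier accumulates log-survival terms and exponentiates
# the resulting cumulative sum.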
def _preprocess_inputs(durations, event_observed, timeline, entry):
"""
Cleans and confirms input to what lifelines expects downstream
"""
n = len(durations)
durations = np.asarray(durations).reshape((n,))
# set to all observed if event_observed is none
if event_observed is None:
event_observed = np.ones(n, dtype=int)
else:
event_observed = np.asarray(event_observed).reshape((n,)).copy().astype(int)
if entry is not None:
entry = np.asarray(entry).reshape((n,))
event_table = survival_table_from_events(durations, event_observed, entry)
if timeline is None:
timeline = event_table.index.values
else:
timeline = np.asarray(timeline)
return durations, event_observed, timeline.astype(float), entry, event_table
def _get_index(X):
if isinstance(X, pd.DataFrame):
index = list(X.index)
else:
# If it's not a dataframe, order is up to user
index = list(range(X.shape[0]))
return index
class _BTree(object):
"""A simple balanced binary order statistic tree to help compute the concordance.
When computing the concordance, we know all the values the tree will ever contain. That
condition simplifies this tree a lot. It means that instead of crazy AVL/red-black shenanigans
we can simply do the following:
- Store the final tree in flattened form in an array (so node i's children are 2i+1, 2i+2)
- Additionally, store the current size of each subtree in another array with the same indices
- To insert a value, just find its index, increment the size of the subtree at that index and
propagate
- To get the rank of an element, you add up a bunch of subtree counts
"""
def __init__(self, values):
"""
Parameters:
values: List of sorted (ascending), unique values that will be inserted.
"""
self._tree = self._treeify(values)
self._counts = np.zeros_like(self._tree, dtype=int)
@staticmethod
def _treeify(values):
"""Convert the np.ndarray `values` into a complete balanced tree.
Assumes `values` is sorted ascending. Returns a list `t` of the same length in which t[i] >
t[2i+1] and t[i] < t[2i+2] for all i."""
if len(values) == 1: # this case causes problems later
return values
tree = np.empty_like(values)
# Tree indices work as follows:
# 0 is the root
# 2n+1 is the left child of n
# 2n+2 is the right child of n
# So we now rearrange `values` into that format...
# The first step is to remove the bottom row of leaves, which might not be exactly full
last_full_row = int(np.log2(len(values) + 1) - 1)
len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)
if len_ragged_row > 0:
bottom_row_ix = np.s_[:2 * len_ragged_row:2]
tree[-len_ragged_row:] = values[bottom_row_ix]
values = np.delete(values, bottom_row_ix)
# Now `values` is length 2**n - 1, so can be packed efficiently into a tree
# Last row of nodes is indices 0, 2, ..., 2**n - 2
# Second-last row is indices 1, 5, ..., 2**n - 3
# nth-last row is indices (2**n - 1)::(2**(n+1))
values_start = 0
values_space = 2
values_len = 2 ** last_full_row
while values_start < len(values):
tree[values_len - 1:2 * values_len - 1] = values[values_start::values_space]
values_start += int(values_space / 2)
values_space *= 2
values_len = int(values_len / 2)
return tree
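# An illustrative doctest of the flattened layout:
# >>> _BTree._treeify(np.array([1, 2, 3, 4, 5, 6, 7]))
# array([4, 2, 6, 1, 3, 5, 7])
# i.e. the root 4 sits at index 0, with children 2 and 6 at indices 1 and 2.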
def insert(self, value):
"""Insert an occurrence of `value` into the btree."""
i = 0
n = len(self._tree)
while i < n:
cur = self._tree[i]
self._counts[i] += 1
if value < cur:
i = 2 * i + 1
elif value > cur:
i = 2 * i + 2
else:
return
raise ValueError("Value %s not contained in tree."
"Also, the counts are now messed up." % value)
def __len__(self):
return self._counts[0]
def rank(self, value):
"""Returns the rank and count of the value in the btree."""
i = 0
n = len(self._tree)
rank = 0
count = 0
while i < n:
cur = self._tree[i]
if value < cur:
i = 2 * i + 1
continue
elif value > cur:
rank += self._counts[i]
# subtract off the right tree if exists
nexti = 2 * i + 2
if nexti < n:
rank -= self._counts[nexti]
i = nexti
continue
else:
return (rank, count)
else: # value == cur
count = self._counts[i]
lefti = 2 * i + 1
if lefti < n:
nleft = self._counts[lefti]
count -= nleft
rank += nleft
righti = lefti + 1
if righti < n:
count -= self._counts[righti]
return (rank, count)
return (rank, count)
def _concordance_index(event_times, predicted_event_times, event_observed):
"""Find the concordance index in n * log(n) time.
Assumes the data has been verified by lifelines.utils.concordance_index first.
"""
# Here's how this works.
#
# It would be pretty easy to do if we had no censored data and no ties. There, the basic idea
# would be to iterate over the cases in order of their true event time (from least to greatest),
# while keeping track of a pool of *predicted* event times for all cases previously seen (= all
# cases that we know should be ranked lower than the case we're looking at currently).
#
# If the pool has O(log n) insert and O(log n) RANK (i.e., "how many things in the pool have
# value less than x"), then the following algorithm is n log n:
#
# Sort the times and predictions by time, increasing
# n_pairs, n_correct := 0
# pool := {}
# for each prediction p:
# n_pairs += len(pool)
# n_correct += rank(pool, p)
# add p to pool
#
# There are three complications: tied ground truth values, tied predictions, and censored
# observations.
#
# - To handle tied true event times, we modify the inner loop to work in *batches* of observations
# p_1, ..., p_n whose true event times are tied, and then add them all to the pool
# simultaneously at the end.
#
# - To handle tied predictions, which should each count for 0.5, we switch to
# n_correct += min_rank(pool, p)
# n_tied += count(pool, p)
#
# - To handle censored observations, we handle each batch of tied, censored observations just
# after the batch of observations that died at the same time (since those censored observations
# are comparable with all the observations that died at the same time or previously). However, we do
# NOT add them to the pool at the end, because they are NOT comparable with any observations
# that leave the study afterward--whether or not those observations get censored.
died_mask = event_observed.astype(bool)
# TODO: is event_times already sorted? That would be nice...
died_truth = event_times[died_mask]
ix = np.argsort(died_truth)
died_truth = died_truth[ix]
died_pred = predicted_event_times[died_mask][ix]
censored_truth = event_times[~died_mask]
ix = np.argsort(censored_truth)
censored_truth = censored_truth[ix]
censored_pred = predicted_event_times[~died_mask][ix]
censored_ix = 0
died_ix = 0
times_to_compare = _BTree(np.unique(died_pred))
num_pairs = 0
num_correct = 0
num_tied = 0
def handle_pairs(truth, pred, first_ix):
"""
Handle all pairs that exited at the same time as truth[first_ix].
Returns:
(pairs, correct, tied, next_ix)
new_pairs: The number of new comparisons performed
new_correct: The number of comparisons correctly predicted
next_ix: The next index that needs to be handled
"""
next_ix = first_ix
while next_ix < len(truth) and truth[next_ix] == truth[first_ix]:
next_ix += 1
pairs = len(times_to_compare) * (next_ix - first_ix)
correct = 0
tied = 0
for i in range(first_ix, next_ix):
rank, count = times_to_compare.rank(pred[i])
correct += rank
tied += count
return (pairs, correct, tied, next_ix)
# we iterate through cases sorted by exit time:
# - First, all cases that died at time t0. We add these to the sortedlist of died times.
# - Then, all cases that were censored at time t0. We DON'T add these since they are NOT
# comparable to subsequent elements.
while True:
has_more_censored = censored_ix < len(censored_truth)
has_more_died = died_ix < len(died_truth)
# Should we look at some censored indices next, or died indices?
if has_more_censored and (not has_more_died
or died_truth[died_ix] > censored_truth[censored_ix]):
pairs, correct, tied, next_ix = handle_pairs(censored_truth, censored_pred, censored_ix)
censored_ix = next_ix
elif has_more_died and (not has_more_censored
or died_truth[died_ix] <= censored_truth[censored_ix]):
pairs, correct, tied, next_ix = handle_pairs(died_truth, died_pred, died_ix)
for pred in died_pred[died_ix:next_ix]:
times_to_compare.insert(pred)
died_ix = next_ix
else:
assert not (has_more_died or has_more_censored)
break
num_pairs += pairs
num_correct += correct
num_tied += tied
return (num_correct + num_tied / 2) / num_pairs
def _naive_concordance_index(event_times, predicted_event_times, event_observed):
"""
Fallback, simpler method to compute concordance.
Assumes the data has been verified by lifelines.utils.concordance_index first.
"""
def valid_comparison(time_a, time_b, event_a, event_b):
"""True if times can be compared."""
if time_a == time_b:
# Ties are only informative if exactly one event happened
return event_a != event_b
elif event_a and event_b:
return True
elif event_a and time_a < time_b:
return True
elif event_b and time_b < time_a:
return True
else:
return False
def concordance_value(time_a, time_b, pred_a, pred_b):
if pred_a == pred_b:
# Same as random
return 0.5
elif pred_a < pred_b:
return (time_a < time_b) or (time_a == time_b and event_a and not event_b)
else: # pred_a > pred_b
return (time_a > time_b) or (time_a == time_b and not event_a and event_b)
paircount = 0.0
csum = 0.0
for a in range(0, len(event_times)):
time_a = event_times[a]
pred_a = predicted_event_times[a]
event_a = event_observed[a]
# Don't want to double count
for b in range(a + 1, len(event_times)):
time_b = event_times[b]
pred_b = predicted_event_times[b]
event_b = event_observed[b]
if valid_comparison(time_a, time_b, event_a, event_b):
paircount += 1.0
csum += concordance_value(time_a, time_b, pred_a, pred_b)
return csum / paircount
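# An illustrative cross-check on random synthetic censored data: the
# O(n log n) and naive implementations compute the same statistic.
# >>> np.random.seed(0)
# >>> T = np.random.exponential(1., size=100)
# >>> P = np.random.exponential(1., size=100)
# >>> E = np.random.binomial(1, 0.7, size=100)
# >>> abs(_concordance_index(T, P, E) - _naive_concordance_index(T, P, E)) < 1e-10
# True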
|
emedvedev/st2 | refs/heads/master | st2common/tests/unit/test_rbac_resolvers_action.py | 6 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.action import Action
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.action import ActionDB
from st2common.models.api.action import ActionAPI
from st2common.rbac.resolvers import ActionPermissionsResolver
from tests.unit.test_rbac_resolvers import BasePermissionsResolverTestCase
__all__ = [
'ActionPermissionsResolverTestCase'
]
class ActionPermissionsResolverTestCase(BasePermissionsResolverTestCase):
def setUp(self):
super(ActionPermissionsResolverTestCase, self).setUp()
# Create some mock users
user_1_db = UserDB(name='1_role_action_pack_grant')
user_1_db = User.add_or_update(user_1_db)
self.users['custom_role_action_pack_grant'] = user_1_db
user_2_db = UserDB(name='1_role_action_grant')
user_2_db = User.add_or_update(user_2_db)
self.users['custom_role_action_grant'] = user_2_db
user_3_db = UserDB(name='custom_role_pack_action_all_grant')
user_3_db = User.add_or_update(user_3_db)
self.users['custom_role_pack_action_all_grant'] = user_3_db
user_4_db = UserDB(name='custom_role_action_all_grant')
user_4_db = User.add_or_update(user_4_db)
self.users['custom_role_action_all_grant'] = user_4_db
user_5_db = UserDB(name='custom_role_action_execute_grant')
user_5_db = User.add_or_update(user_5_db)
self.users['custom_role_action_execute_grant'] = user_5_db
user_6_db = UserDB(name='action_pack_action_create_grant')
user_6_db = User.add_or_update(user_6_db)
self.users['action_pack_action_create_grant'] = user_6_db
user_7_db = UserDB(name='action_pack_action_all_grant')
user_7_db = User.add_or_update(user_7_db)
self.users['action_pack_action_all_grant'] = user_7_db
user_8_db = UserDB(name='action_action_create_grant')
user_8_db = User.add_or_update(user_8_db)
self.users['action_action_create_grant'] = user_8_db
user_9_db = UserDB(name='action_action_all_grant')
user_9_db = User.add_or_update(user_9_db)
self.users['action_action_all_grant'] = user_9_db
user_10_db = UserDB(name='custom_role_action_list_grant')
user_10_db = User.add_or_update(user_10_db)
self.users['custom_role_action_list_grant'] = user_10_db
# Create some mock resources on which permissions can be granted
action_1_db = ActionDB(pack='test_pack_1', name='action1', entry_point='',
runner_type={'name': 'run-local'})
action_1_db = Action.add_or_update(action_1_db)
self.resources['action_1'] = action_1_db
action_2_db = ActionDB(pack='test_pack_1', name='action2', entry_point='',
runner_type={'name': 'run-local'})
action_2_db = Action.add_or_update(action_2_db)
self.resources['action_2'] = action_2_db
action_3_db = ActionDB(pack='test_pack_2', name='action3', entry_point='',
runner_type={'name': 'run-local'})
action_3_db = Action.add_or_update(action_3_db)
self.resources['action_3'] = action_3_db
# Create some mock roles with associated permission grants
# Custom role 2 - one grant on parent pack
# "action_view" on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.ACTION_VIEW])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_3_db = RoleDB(name='custom_role_action_pack_grant',
permission_grants=permission_grants)
role_3_db = Role.add_or_update(role_3_db)
self.roles['custom_role_action_pack_grant'] = role_3_db
# Custom role 4 - one grant on action
# "action_view" on action_3
grant_db = PermissionGrantDB(resource_uid=self.resources['action_3'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_VIEW])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_4_db = RoleDB(name='custom_role_action_grant', permission_grants=permission_grants)
role_4_db = Role.add_or_update(role_4_db)
self.roles['custom_role_action_grant'] = role_4_db
# Custom role - "action_all" grant on a parent action pack
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.ACTION_ALL])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_4_db = RoleDB(name='custom_role_pack_action_all_grant',
permission_grants=permission_grants)
role_4_db = Role.add_or_update(role_4_db)
self.roles['custom_role_pack_action_all_grant'] = role_4_db
# Custom role - "action_all" grant on action
grant_db = PermissionGrantDB(resource_uid=self.resources['action_1'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_ALL])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_4_db = RoleDB(name='custom_role_action_all_grant', permission_grants=permission_grants)
role_4_db = Role.add_or_update(role_4_db)
self.roles['custom_role_action_all_grant'] = role_4_db
# Custom role - "action_execute" on action_1
grant_db = PermissionGrantDB(resource_uid=self.resources['action_1'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_EXECUTE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_5_db = RoleDB(name='custom_role_action_execute_grant',
permission_grants=permission_grants)
role_5_db = Role.add_or_update(role_5_db)
self.roles['custom_role_action_execute_grant'] = role_5_db
# Custom role - "action_create" grant on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.ACTION_CREATE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_6_db = RoleDB(name='action_pack_action_create_grant',
permission_grants=permission_grants)
role_6_db = Role.add_or_update(role_6_db)
self.roles['action_pack_action_create_grant'] = role_6_db
# Custom role - "action_all" grant on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.ACTION_ALL])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_7_db = RoleDB(name='action_pack_action_all_grant',
permission_grants=permission_grants)
role_7_db = Role.add_or_update(role_7_db)
self.roles['action_pack_action_all_grant'] = role_7_db
# Custom role - "action_create" grant on action_1
grant_db = PermissionGrantDB(resource_uid=self.resources['action_1'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_CREATE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_8_db = RoleDB(name='action_action_create_grant',
permission_grants=permission_grants)
role_8_db = Role.add_or_update(role_8_db)
self.roles['action_action_create_grant'] = role_8_db
# Custom role - "action_all" grant on action_1
grant_db = PermissionGrantDB(resource_uid=self.resources['action_1'].get_uid(),
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_ALL])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_9_db = RoleDB(name='action_action_all_grant',
permission_grants=permission_grants)
role_9_db = Role.add_or_update(role_9_db)
self.roles['action_action_all_grant'] = role_9_db
# Custom role - "action_list" grant
grant_db = PermissionGrantDB(resource_uid=None,
resource_type=None,
permission_types=[PermissionType.ACTION_LIST])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_10_db = RoleDB(name='custom_role_action_list_grant',
permission_grants=permission_grants)
role_10_db = Role.add_or_update(role_10_db)
self.roles['custom_role_action_list_grant'] = role_10_db
# Create some mock role assignments
user_db = self.users['custom_role_action_pack_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_action_pack_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_action_grant']
role_assignment_db = UserRoleAssignmentDB(user=user_db.name,
role=self.roles['custom_role_action_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_pack_action_all_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_pack_action_all_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_action_all_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_action_all_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_action_execute_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_action_execute_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['action_pack_action_create_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['action_pack_action_create_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['action_pack_action_all_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['action_pack_action_all_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['action_action_create_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['action_action_create_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['action_action_all_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['action_action_all_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_action_list_grant']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['custom_role_action_list_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
def test_user_has_permission(self):
resolver = ActionPermissionsResolver()
# Admin user, should always return true
user_db = self.users['admin']
self.assertUserHasPermission(resolver=resolver,
user_db=user_db,
permission_type=PermissionType.ACTION_LIST)
# Observer, should always return true for VIEW permissions
user_db = self.users['observer']
self.assertUserHasPermission(resolver=resolver,
user_db=user_db,
permission_type=PermissionType.ACTION_LIST)
# No roles, should return false for everything
user_db = self.users['no_roles']
self.assertUserDoesntHavePermission(resolver=resolver,
user_db=user_db,
permission_type=PermissionType.ACTION_LIST)
# Custom role with no permission grants, should return false for everything
user_db = self.users['1_custom_role_no_permissions']
self.assertUserDoesntHavePermission(resolver=resolver,
user_db=user_db,
permission_type=PermissionType.ACTION_LIST)
# Custom role with "action_list" grant
user_db = self.users['custom_role_action_list_grant']
self.assertUserHasPermission(resolver=resolver,
user_db=user_db,
permission_type=PermissionType.ACTION_LIST)
def test_user_has_resource_api_permission(self):
resolver = ActionPermissionsResolver()
# Admin user, should always return true
user_db = self.users['admin']
resource_db = self.resources['action_1']
resource_api = ActionAPI.from_model(resource_db)
self.assertUserHasResourceApiPermission(
resolver=resolver,
user_db=user_db,
resource_api=resource_api,
permission_type=PermissionType.ACTION_CREATE)
# Observer, should return false
user_db = self.users['observer']
resource_db = self.resources['action_1']
resource_api = ActionAPI.from_model(resource_db)
self.assertUserDoesntHaveResourceApiPermission(
resolver=resolver,
user_db=user_db,
resource_api=resource_api,
permission_type=PermissionType.ACTION_CREATE)
# No roles, should return false
user_db = self.users['no_roles']
resource_db = self.resources['action_1']
resource_api = ActionAPI.from_model(resource_db)
self.assertUserDoesntHaveResourceApiPermission(
resolver=resolver,
user_db=user_db,
resource_api=resource_api,
permission_type=PermissionType.ACTION_CREATE)
# Custom role with no permission grants, should return false
user_db = self.users['1_custom_role_no_permissions']
resource_db = self.resources['action_1']
resource_api = ActionAPI.from_model(resource_db)
self.assertUserDoesntHaveResourceApiPermission(
resolver=resolver,
user_db=user_db,
resource_api=resource_api,
permission_type=PermissionType.ACTION_CREATE)
# Custom role with "action_create" grant on parent pack
user_db = self.users['action_pack_action_create_grant']
resource_db = self.resources['action_1']
resource_api = ActionAPI.from_model(resource_db)
self.assertUserHasResourceApiPermission(
resolver=resolver,
user_db=user_db,
resource_api=resource_api,
permission_type=PermissionType.ACTION_CREATE)
# Custom role with "action_all" grant on the parent pack
user_db = self.users['action_pack_action_all_grant']
resource_db = self.resources['action_1']
resource_api = ActionAPI.from_model(resource_db)
self.assertUserHasResourceApiPermission(
resolver=resolver,
user_db=user_db,
resource_api=resource_api,
permission_type=PermissionType.ACTION_CREATE)
# Custom role with "action_create" grant directly on the resource
user_db = self.users['action_action_create_grant']
resource_db = self.resources['action_1']
resource_api = ActionAPI.from_model(resource_db)
self.assertUserHasResourceApiPermission(
resolver=resolver,
user_db=user_db,
resource_api=resource_api,
permission_type=PermissionType.ACTION_CREATE)
# Custom role with "action_all" grant directly on the resource
user_db = self.users['action_action_all_grant']
resource_db = self.resources['action_1']
resource_api = ActionAPI.from_model(resource_db)
self.assertUserHasResourceApiPermission(
resolver=resolver,
user_db=user_db,
resource_api=resource_api,
permission_type=PermissionType.ACTION_CREATE)
def test_user_has_resource_db_permission(self):
resolver = ActionPermissionsResolver()
all_permission_types = PermissionType.get_valid_permissions_for_resource_type(
ResourceType.ACTION)
# Admin user, should always return true
resource_db = self.resources['action_1']
user_db = self.users['admin']
self.assertUserHasResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Observer, should always return true for VIEW permission
user_db = self.users['observer']
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_MODIFY)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_DELETE)
# No roles, should return false for everything
user_db = self.users['no_roles']
self.assertUserDoesntHaveResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Custom role with no permission grants, should return false for everything
user_db = self.users['1_custom_role_no_permissions']
self.assertUserDoesntHaveResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Custom role with unrelated permission grant to parent pack
user_db = self.users['custom_role_pack_grant']
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_EXECUTE)
        # Custom role with grant on the parent pack
user_db = self.users['custom_role_action_pack_grant']
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_1'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_EXECUTE)
# Custom role with a direct grant on action
user_db = self.users['custom_role_action_grant']
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_3'],
permission_type=PermissionType.ACTION_VIEW)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_2'],
permission_type=PermissionType.ACTION_EXECUTE)
self.assertUserDoesntHaveResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=self.resources['action_3'],
permission_type=PermissionType.ACTION_EXECUTE)
# Custom role - "action_all" grant on the action parent pack
user_db = self.users['custom_role_pack_action_all_grant']
resource_db = self.resources['action_1']
self.assertUserHasResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Custom role - "action_all" grant on the action
user_db = self.users['custom_role_action_all_grant']
resource_db = self.resources['action_1']
self.assertUserHasResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=all_permission_types)
# Custom role - "action_execute" grant on action_1
user_db = self.users['custom_role_action_execute_grant']
resource_db = self.resources['action_1']
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_type=PermissionType.ACTION_EXECUTE)
# "execute" also grants "view"
self.assertUserHasResourceDbPermission(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_type=PermissionType.ACTION_VIEW)
permission_types = [
PermissionType.ACTION_CREATE,
PermissionType.ACTION_MODIFY,
PermissionType.ACTION_DELETE
]
self.assertUserDoesntHaveResourceDbPermissions(
resolver=resolver,
user_db=user_db,
resource_db=resource_db,
permission_types=permission_types)
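        # Taken together, the assertions above encode the resolution rules
        # under test: "admin" short-circuits to True for everything,
        # "observer" is granted only view-style permissions, a grant on a
        # pack cascades to the actions it contains, a grant on an action
        # applies to that action alone, ACTION_ALL expands to every action
        # permission type, and ACTION_EXECUTE also implies ACTION_VIEW.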
|
plotly/plotly.py | refs/heads/master | packages/python/plotly/plotly/validators/scatterternary/_hoverinfo.py | 1 | import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="hoverinfo", parent_name="scatterternary", **kwargs):
super(HoverinfoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
extras=kwargs.pop("extras", ["all", "none", "skip"]),
flags=kwargs.pop("flags", ["a", "b", "c", "text", "name"]),
**kwargs
)
|
pranjalpatil/django-allauth | refs/heads/master | allauth/socialaccount/providers/flickr/provider.py | 50 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
class FlickrAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data \
.get('person').get('profileurl').get('_content')
def get_avatar_url(self):
return self.account.extra_data.get('picture-url')
def to_str(self):
dflt = super(FlickrAccount, self).to_str()
name = self.account.extra_data \
.get('person').get('realname').get('_content', dflt)
return name
class FlickrProvider(OAuthProvider):
id = 'flickr'
name = 'Flickr'
package = 'allauth.socialaccount.providers.flickr'
account_class = FlickrAccount
def get_default_scope(self):
scope = []
return scope
def get_auth_params(self, request, action):
ret = super(FlickrProvider, self).get_auth_params(request,
action)
if 'perms' not in ret:
ret['perms'] = 'read'
return ret
def get_profile_fields(self):
default_fields = ['id',
'first-name',
'last-name',
'email-address',
'picture-url',
'public-profile-url']
fields = self.get_settings().get('PROFILE_FIELDS',
default_fields)
return fields
def extract_uid(self, data):
return data['person']['nsid']
def extract_common_fields(self, data):
person = data.get('person', {})
name = person.get('realname', {}).get('_content')
username = person.get('username', {}).get('_content')
return dict(email=data.get('email-address'),
name=name,
username=username)
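# For reference, a minimal sketch of the Flickr profile payload shape the
# accessors above assume (field names inferred from this code, not taken
# from the Flickr API documentation; values are hypothetical):
#
#     data = {
#         'person': {
#             'nsid': '12345678@N00',
#             'realname': {'_content': 'Jane Doe'},
#             'username': {'_content': 'janedoe'},
#             'profileurl': {'_content': 'https://www.flickr.com/people/janedoe/'},
#         },
#         'email-address': 'jane@example.com',
#         'picture-url': 'https://live.staticflickr.com/buddyicon.jpg',
#     }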
providers.registry.register(FlickrProvider)
|
hajuuk/R7000 | refs/heads/master | ap/gpl/timemachine/libxml2-2.7.2/python/tests/reader6.py | 87 | #!/usr/bin/python -u
#
# this tests RelaxNG validation with the XmlTextReader interface
#
import sys
import StringIO
import libxml2
schema="""<element name="foo" xmlns="http://relaxng.org/ns/structure/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<oneOrMore>
<element name="label">
<text/>
</element>
<optional>
<element name="opt">
<empty/>
</element>
</optional>
<element name="item">
<data type="byte"/>
</element>
</oneOrMore>
</element>
"""
# Memory debug specific
libxml2.debugMemory(1)
#
# Parse the Relax NG Schemas
#
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
del rngp
#
# Parse and validate the correct document
#
docstr="""<foo>
<label>some text</label>
<item>100</item>
</foo>"""
f = StringIO.StringIO(docstr)
input = libxml2.inputBuffer(f)
reader = input.newTextReader("correct")
reader.RelaxNGSetSchema(rngs)
ret = reader.Read()
while ret == 1:
ret = reader.Read()
if ret != 0:
print "Error parsing the document"
sys.exit(1)
if reader.IsValid() != 1:
print "Document failed to validate"
sys.exit(1)
#
# Parse and validate the incorrect document
#
docstr="""<foo>
<label>some text</label>
<item>1000</item>
</foo>"""
err=""
# RNG errors are not as good as before, TODO
#expect="""RNG validity error: file error line 3 element text
#Type byte doesn't allow value '1000'
#RNG validity error: file error line 3 element text
#Error validating datatype byte
#RNG validity error: file error line 3 element text
#Element item failed to validate content
#"""
expect="""Type byte doesn't allow value '1000'
Error validating datatype byte
Element item failed to validate content
"""
def callback(ctx, str):
global err
err = err + "%s" % (str)
libxml2.registerErrorHandler(callback, "")
f = StringIO.StringIO(docstr)
input = libxml2.inputBuffer(f)
reader = input.newTextReader("error")
reader.RelaxNGSetSchema(rngs)
ret = reader.Read()
while ret == 1:
ret = reader.Read()
if ret != 0:
print "Error parsing the document"
sys.exit(1)
if reader.IsValid() != 0:
print "Document failed to detect the validation error"
sys.exit(1)
if err != expect:
print "Did not get the expected error message:"
print err
sys.exit(1)
#
# cleanup
#
del f
del input
del reader
del rngs
libxml2.relaxNGCleanupTypes()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
|
tvtsoft/odoo8 | refs/heads/master | openerp/tests/addons/test_translation_import/__openerp__.py | 352 | # -*- coding: utf-8 -*-
{
'name': 'test-translation-import',
'version': '0.1',
'category': 'Tests',
'description': """A module to test translation import.""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['view.xml'],
'test': ['tests.yml'],
'installable': True,
'auto_install': False,
}
|
shawnps/xhtml2pdf | refs/heads/master | demo/tgpisa/setup.py | 168 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from turbogears.finddata import find_package_data
import os
execfile(os.path.join("tgpisa", "release.py"))
packages=find_packages()
package_data = find_package_data(where='tgpisa',
package='tgpisa')
if os.path.isdir('locales'):
packages.append('locales')
package_data.update(find_package_data(where='locales',
exclude=('*.po',), only_in_packages=False))
setup(
name="tgpisa",
version=version,
# uncomment the following lines if you fill them out in release.py
#description=description,
#author=author,
#author_email=email,
#url=url,
#download_url=download_url,
#license=license,
install_requires=[
"TurboGears >= 1.0.4.3",
"SQLObject>=0.8,<=0.10.0"
],
zip_safe=False,
packages=packages,
package_data=package_data,
keywords=[
# Use keywords if you'll be adding your package to the
# Python Cheeseshop
# if this has widgets, uncomment the next line
# 'turbogears.widgets',
# if this has a tg-admin command, uncomment the next line
# 'turbogears.command',
# if this has identity providers, uncomment the next line
# 'turbogears.identity.provider',
# If this is a template plugin, uncomment the next line
# 'python.templating.engines',
# If this is a full application, uncomment the next line
# 'turbogears.app',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: TurboGears',
# if this is an application that you'll distribute through
# the Cheeseshop, uncomment the next line
# 'Framework :: TurboGears :: Applications',
# if this is a package that includes widgets that you'll distribute
# through the Cheeseshop, uncomment the next line
# 'Framework :: TurboGears :: Widgets',
],
test_suite='nose.collector',
entry_points = {
'console_scripts': [
'start-tgpisa = tgpisa.commands:start',
],
},
# Uncomment next line and create a default.cfg file in your project dir
# if you want to package a default configuration in your egg.
#data_files = [('config', ['default.cfg'])],
)
|
fighterCui/L4ReFiascoOC | refs/heads/master | l4/pkg/python/contrib/Mac/Modules/osa/setup.py | 39 | # This is a temporary setup script to allow distribution of
# MacPython 2.4 modules for MacPython 2.3.
from distutils.core import Extension, setup
setup(name="OSA", version="0.1",
ext_modules=[
Extension('_OSA', ['_OSAmodule.c'],
extra_link_args=['-framework', 'Carbon'])
],
py_modules=['OSA.OSA', 'OSA.OSAconst'],
package_dir={'OSA':'../../../Lib/plat-mac/Carbon'}
)
|
mdaniel/intellij-community | refs/heads/master | python/testData/inspections/ChainedComparisons.py | 83 | <weak_warning descr="Simplify chained comparison">a < b <caret>and b < c</weak_warning> |
kave/collab | refs/heads/master | core/migrations/0004_move_apps_to_core.py | 5 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, DatabaseError
class Migration(SchemaMigration):
def forwards(self, orm):
try:
# Check if front exists
db.execute('select * from front_person limit 1')
models = ['person', 'orggroup',
'wikihighlight', 'alert', 'officelocation']
for model in models:
# Delete core tables if they exist
try:
db.delete_table('core_' + model)
except:
pass
db.rename_table('front_' + model, 'core_' + model)
if not db.dry_run:
orm['contenttypes.ContentType'].objects.filter(
app_label='core').delete()
orm['contenttypes.ContentType'].objects.filter(
app_label='front').update(app_label='core')
except DatabaseError:
pass
def backwards(self, orm):
models = ['person', 'orggroup',
'wikihighlight', 'alert', 'officelocation']
for model in models:
db.rename_table('core_' + model, 'front_' + model)
if not db.dry_run:
orm['contenttypes.ContentType'].objects.filter(
app_label='front').delete()
orm['contenttypes.ContentType'].objects.filter(
app_label='core').update(app_label='front')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.alert': {
'Meta': {'object_name': 'Alert'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'core.app': {
'Meta': {'object_name': 'App'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'icon_file': ('core.thumbs.ImageWithThumbsField', [], {'default': "'app_icons/default.jpg'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'stub': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'core.officelocation': {
'Meta': {'object_name': 'OfficeLocation'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '56'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '12', 'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '56'}),
'suite': ('django.db.models.fields.CharField', [], {'max_length': '56', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'core.orggroup': {
'Meta': {'object_name': 'OrgGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.OrgGroup']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.person': {
'Meta': {'object_name': 'Person'},
'allow_tagging': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'current_projects': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desk_location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'office_location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.OfficeLocation']", 'null': 'True', 'blank': 'True'}),
'office_phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'org_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.OrgGroup']", 'null': 'True', 'blank': 'True'}),
'photo_file': ('core.thumbs.ImageWithThumbsField', [], {'default': "'avatars/default.jpg'", 'max_length': '100'}),
'schools_i_attended': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'stub': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'stuff_ive_done': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'things_im_good_at': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'what_i_do': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'core.wikihighlight': {
'Meta': {'object_name': 'WikiHighlight'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'posted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '2048'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.tagcategory': {
'Meta': {'object_name': 'TagCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
'create_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"}),
'tag_category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['taggit.TagCategory']", 'null': 'True'}),
'tag_creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_related'", 'null': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['core']
|
AZed/duplicity | refs/heads/master | testing/tests/selectiontest.py | 4 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import types
import helper
import StringIO, unittest, sys
from duplicity.selection import * #@UnusedWildImport
from duplicity.lazy import * #@UnusedWildImport
helper.setup()
class MatchingTest(unittest.TestCase):
"""Test matching of file names against various selection functions"""
def setUp(self):
assert not os.system("tar xzf testfiles.tar.gz > /dev/null 2>&1")
self.root = Path("testfiles/select")
self.Select = Select(self.root)
def tearDown(self):
assert not os.system("rm -rf testfiles tempdir temp2.tar")
def makeext(self, path):
return self.root.new_index(tuple(path.split("/")))
def testRegexp(self):
"""Test regular expression selection func"""
sf1 = self.Select.regexp_get_sf(".*\.py", 1)
assert sf1(self.makeext("1.py")) == 1
assert sf1(self.makeext("usr/foo.py")) == 1
assert sf1(self.root.append("1.doc")) == None
sf2 = self.Select.regexp_get_sf("hello", 0)
assert sf2(Path("hello")) == 0
assert sf2(Path("foohello_there")) == 0
assert sf2(Path("foo")) == None
def testTupleInclude(self):
"""Test include selection function made from a regular filename"""
self.assertRaises(FilePrefixError,
self.Select.glob_get_filename_sf, "foo", 1)
sf2 = self.Select.glob_get_sf("testfiles/select/usr/local/bin/", 1)
assert sf2(self.makeext("usr")) == 1
assert sf2(self.makeext("usr/local")) == 1
assert sf2(self.makeext("usr/local/bin")) == 1
assert sf2(self.makeext("usr/local/doc")) == None
assert sf2(self.makeext("usr/local/bin/gzip")) == 1
assert sf2(self.makeext("usr/local/bingzip")) == None
def testTupleExclude(self):
"""Test exclude selection function made from a regular filename"""
self.assertRaises(FilePrefixError,
self.Select.glob_get_filename_sf, "foo", 0)
sf2 = self.Select.glob_get_sf("testfiles/select/usr/local/bin/", 0)
assert sf2(self.makeext("usr")) == None
assert sf2(self.makeext("usr/local")) == None
assert sf2(self.makeext("usr/local/bin")) == 0
assert sf2(self.makeext("usr/local/doc")) == None
assert sf2(self.makeext("usr/local/bin/gzip")) == 0
assert sf2(self.makeext("usr/local/bingzip")) == None
def testGlobStarInclude(self):
"""Test a few globbing patterns, including **"""
sf1 = self.Select.glob_get_sf("**", 1)
assert sf1(self.makeext("foo")) == 1
assert sf1(self.makeext("")) == 1
sf2 = self.Select.glob_get_sf("**.py", 1)
assert sf2(self.makeext("foo")) == 2
assert sf2(self.makeext("usr/local/bin")) == 2
assert sf2(self.makeext("what/ever.py")) == 1
assert sf2(self.makeext("what/ever.py/foo")) == 1
def testGlobStarExclude(self):
"""Test a few glob excludes, including **"""
sf1 = self.Select.glob_get_sf("**", 0)
assert sf1(self.makeext("/usr/local/bin")) == 0
sf2 = self.Select.glob_get_sf("**.py", 0)
assert sf2(self.makeext("foo")) == None, sf2(self.makeext("foo"))
assert sf2(self.makeext("usr/local/bin")) == None
assert sf2(self.makeext("what/ever.py")) == 0
assert sf2(self.makeext("what/ever.py/foo")) == 0
def testFilelistInclude(self):
"""Test included filelist"""
fp = StringIO.StringIO("""
testfiles/select/1/2
testfiles/select/1
testfiles/select/1/2/3
testfiles/select/3/3/2""")
sf = self.Select.filelist_get_sf(fp, 1, "test")
assert sf(self.root) == 1
assert sf(self.makeext("1")) == 1
assert sf(self.makeext("1/1")) == None
assert sf(self.makeext("1/2/3")) == 1
assert sf(self.makeext("2/2")) == None
assert sf(self.makeext("3")) == 1
assert sf(self.makeext("3/3")) == 1
assert sf(self.makeext("3/3/3")) == None
def testFilelistIncludeNullSep(self):
"""Test included filelist but with null_separator set"""
fp = StringIO.StringIO("""\0testfiles/select/1/2\0testfiles/select/1\0testfiles/select/1/2/3\0testfiles/select/3/3/2\0testfiles/select/hello\nthere\0""")
globals.null_separator = 1
sf = self.Select.filelist_get_sf(fp, 1, "test")
assert sf(self.root) == 1
assert sf(self.makeext("1")) == 1
assert sf(self.makeext("1/1")) == None
assert sf(self.makeext("1/2/3")) == 1
assert sf(self.makeext("2/2")) == None
assert sf(self.makeext("3")) == 1
assert sf(self.makeext("3/3")) == 1
assert sf(self.makeext("3/3/3")) == None
assert sf(self.makeext("hello\nthere")) == 1
globals.null_separator = 0
def testFilelistExclude(self):
"""Test included filelist"""
fp = StringIO.StringIO("""
testfiles/select/1/2
testfiles/select/1
this is a badly formed line which should be ignored
testfiles/select/1/2/3
testfiles/select/3/3/2""")
sf = self.Select.filelist_get_sf(fp, 0, "test")
assert sf(self.root) == None
assert sf(self.makeext("1")) == 0
assert sf(self.makeext("1/1")) == 0
assert sf(self.makeext("1/2/3")) == 0
assert sf(self.makeext("2/2")) == None
assert sf(self.makeext("3")) == None
assert sf(self.makeext("3/3/2")) == 0
assert sf(self.makeext("3/3/3")) == None
def testFilelistInclude2(self):
"""testFilelistInclude2 - with modifiers"""
fp = StringIO.StringIO("""
testfiles/select/1/1
- testfiles/select/1/2
+ testfiles/select/1/3
- testfiles/select/3""")
sf = self.Select.filelist_get_sf(fp, 1, "test1")
assert sf(self.makeext("1")) == 1
assert sf(self.makeext("1/1")) == 1
assert sf(self.makeext("1/1/2")) == None
assert sf(self.makeext("1/2")) == 0
assert sf(self.makeext("1/2/3")) == 0
assert sf(self.makeext("1/3")) == 1
assert sf(self.makeext("2")) == None
assert sf(self.makeext("3")) == 0
def testFilelistExclude2(self):
"""testFilelistExclude2 - with modifiers"""
fp = StringIO.StringIO("""
testfiles/select/1/1
- testfiles/select/1/2
+ testfiles/select/1/3
- testfiles/select/3""")
sf = self.Select.filelist_get_sf(fp, 0, "test1")
sf_val1 = sf(self.root)
assert sf_val1 == 1 or sf_val1 == None # either is OK
sf_val2 = sf(self.makeext("1"))
assert sf_val2 == 1 or sf_val2 == None
assert sf(self.makeext("1/1")) == 0
assert sf(self.makeext("1/1/2")) == 0
assert sf(self.makeext("1/2")) == 0
assert sf(self.makeext("1/2/3")) == 0
assert sf(self.makeext("1/3")) == 1
assert sf(self.makeext("2")) == None
assert sf(self.makeext("3")) == 0
def testGlobRE(self):
"""testGlobRE - test translation of shell pattern to regular exp"""
assert self.Select.glob_to_re("hello") == "hello"
assert self.Select.glob_to_re(".e?ll**o") == "\\.e[^/]ll.*o"
r = self.Select.glob_to_re("[abc]el[^de][!fg]h")
assert r == "[abc]el[^de][^fg]h", r
r = self.Select.glob_to_re("/usr/*/bin/")
assert r == "\\/usr\\/[^/]*\\/bin\\/", r
assert self.Select.glob_to_re("[a.b/c]") == "[a.b/c]"
r = self.Select.glob_to_re("[a*b-c]e[!]]")
assert r == "[a*b-c]e[^]]", r
def testGlobSFException(self):
"""testGlobSFException - see if globbing errors returned"""
self.assertRaises(GlobbingError, self.Select.glob_get_normal_sf,
"testfiles/select/hello//there", 1)
self.assertRaises(FilePrefixError,
self.Select.glob_get_sf, "testfiles/whatever", 1)
self.assertRaises(FilePrefixError,
self.Select.glob_get_sf, "testfiles/?hello", 0)
assert self.Select.glob_get_normal_sf("**", 1)
def testIgnoreCase(self):
"""testIgnoreCase - try a few expressions with ignorecase:"""
sf = self.Select.glob_get_sf("ignorecase:testfiles/SeLect/foo/bar", 1)
assert sf(self.makeext("FOO/BAR")) == 1
assert sf(self.makeext("foo/bar")) == 1
assert sf(self.makeext("fOo/BaR")) == 1
self.assertRaises(FilePrefixError, self.Select.glob_get_sf,
"ignorecase:tesfiles/sect/foo/bar", 1)
def testRoot(self):
"""testRoot - / may be a counterexample to several of these.."""
root = Path("/")
select = Select(root)
assert select.glob_get_sf("/", 1)(root) == 1
assert select.glob_get_sf("/foo", 1)(root) == 1
assert select.glob_get_sf("/foo/bar", 1)(root) == 1
assert select.glob_get_sf("/", 0)(root) == 0
assert select.glob_get_sf("/foo", 0)(root) == None
assert select.glob_get_sf("**.py", 1)(root) == 2
assert select.glob_get_sf("**", 1)(root) == 1
assert select.glob_get_sf("ignorecase:/", 1)(root) == 1
assert select.glob_get_sf("**.py", 0)(root) == None
assert select.glob_get_sf("**", 0)(root) == 0
assert select.glob_get_sf("/foo/*", 0)(root) == None
assert select.filelist_get_sf(StringIO.StringIO("/"), 1, "test")(root) == 1
assert select.filelist_get_sf(StringIO.StringIO("/foo/bar"), 1, "test")(root) == 1
assert select.filelist_get_sf(StringIO.StringIO("/"), 0, "test")(root) == 0
assert select.filelist_get_sf(StringIO.StringIO("/foo/bar"), 0,
"test")(root) == None
def testOtherFilesystems(self):
"""Test to see if --exclude-other-filesystems works correctly"""
root = Path("/")
select = Select(root)
sf = select.other_filesystems_get_sf(0)
assert sf(root) is None
if os.path.ismount("/usr/bin"):
sfval = 0
else:
sfval = None
assert sf(Path("/usr/bin")) == sfval, \
"Assumption: /usr/bin is on the same filesystem as /"
if os.path.ismount("/dev"):
sfval = 0
else:
sfval = None
assert sf(Path("/dev")) == sfval, \
"Assumption: /dev is on a different filesystem"
if os.path.ismount("/proc"):
sfval = 0
else:
sfval = None
assert sf(Path("/proc")) == sfval, \
"Assumption: /proc is on a different filesystem"
class ParseArgsTest(unittest.TestCase):
"""Test argument parsing"""
def setUp(self):
assert not os.system("tar xzf testfiles.tar.gz > /dev/null 2>&1")
def tearDown(self):
assert not os.system("rm -rf testfiles tempdir temp2.tar")
root = None
    def ParseTest(self, tuplelist, indices, filelists=[]):
        """No error if running select on tuple goes over indices"""
        if not self.root:
            self.root = Path("testfiles/select")
        self.Select = Select(self.root)
        self.Select.ParseArgs(tuplelist, self.remake_filelists(filelists))
        self.Select.set_iter()
        assert Iter.equal(Iter.map(lambda path: path.index, self.Select),
                          iter(indices), verbose=1)
def remake_filelists(self, filelist):
"""Turn strings in filelist into fileobjs"""
new_filelists = []
for f in filelist:
if type(f) is types.StringType:
new_filelists.append(StringIO.StringIO(f))
else:
new_filelists.append(f)
return new_filelists
def testParse(self):
"""Test just one include, all exclude"""
self.ParseTest([("--include", "testfiles/select/1/1"),
("--exclude", "**")],
[(), ('1',), ("1", "1"), ("1", '1', '1'),
('1', '1', '2'), ('1', '1', '3')])
def testParse2(self):
"""Test three level include/exclude"""
self.ParseTest([("--exclude", "testfiles/select/1/1/1"),
("--include", "testfiles/select/1/1"),
("--exclude", "testfiles/select/1"),
("--exclude", "**")],
[(), ('1',), ('1', '1'), ('1', '1', '2'),
('1', '1', '3')])
def test_globbing_filelist(self):
"""Filelist glob test similar to above testParse2"""
self.ParseTest([("--include-globbing-filelist", "file")],
[(), ('1',), ('1', '1'), ('1', '1', '2'),
('1', '1', '3')],
["""
- testfiles/select/1/1/1
testfiles/select/1/1
- testfiles/select/1
- **
"""])
def testGlob(self):
"""Test globbing expression"""
self.ParseTest([("--exclude", "**[3-5]"),
("--include", "testfiles/select/1"),
("--exclude", "**")],
[(), ('1',), ('1', '1'),
('1', '1', '1'), ('1', '1', '2'),
('1', '2'), ('1', '2', '1'), ('1', '2', '2')])
self.ParseTest([("--include", "testfiles/select**/2"),
("--exclude", "**")],
[(), ('1',), ('1', '1'),
('1', '1', '2'),
('1', '2'),
('1', '2', '1'), ('1', '2', '2'), ('1', '2', '3'),
('1', '3'),
('1', '3', '2'),
('2',), ('2', '1'),
('2', '1', '1'), ('2', '1', '2'), ('2', '1', '3'),
('2', '2'),
('2', '2', '1'), ('2', '2', '2'), ('2', '2', '3'),
('2', '3'),
('2', '3', '1'), ('2', '3', '2'), ('2', '3', '3'),
('3',), ('3', '1'),
('3', '1', '2'),
('3', '2'),
('3', '2', '1'), ('3', '2', '2'), ('3', '2', '3'),
('3', '3'),
('3', '3', '2')])
def test_globbing_filelist2(self):
"""Filelist glob test similar to above testGlob"""
self.ParseTest([("--exclude-globbing-filelist", "asoeuth")],
[(), ('1',), ('1', '1'),
('1', '1', '1'), ('1', '1', '2'),
('1', '2'), ('1', '2', '1'), ('1', '2', '2')],
["""
**[3-5]
+ testfiles/select/1
**
"""])
self.ParseTest([("--include-globbing-filelist", "file")],
[(), ('1',), ('1', '1'),
('1', '1', '2'),
('1', '2'),
('1', '2', '1'), ('1', '2', '2'), ('1', '2', '3'),
('1', '3'),
('1', '3', '2'),
('2',), ('2', '1'),
('2', '1', '1'), ('2', '1', '2'), ('2', '1', '3'),
('2', '2'),
('2', '2', '1'), ('2', '2', '2'), ('2', '2', '3'),
('2', '3'),
('2', '3', '1'), ('2', '3', '2'), ('2', '3', '3'),
('3',), ('3', '1'),
('3', '1', '2'),
('3', '2'),
('3', '2', '1'), ('3', '2', '2'), ('3', '2', '3'),
('3', '3'),
('3', '3', '2')],
["""
testfiles/select**/2
- **
"""])
def testGlob2(self):
"""Test more globbing functions"""
self.ParseTest([("--include", "testfiles/select/*foo*/p*"),
("--exclude", "**")],
[(), ('efools',), ('efools', 'ping'),
('foobar',), ('foobar', 'pong')])
self.ParseTest([("--exclude", "testfiles/select/1/1/*"),
("--exclude", "testfiles/select/1/2/**"),
("--exclude", "testfiles/select/1/3**"),
("--include", "testfiles/select/1"),
("--exclude", "**")],
[(), ('1',), ('1', '1'), ('1', '2')])
def testGlob3(self):
""" regression test for bug 25230 """
self.ParseTest([("--include", "testfiles/select/**1"),
("--include", "testfiles/select/**2"),
("--exclude", "**")],
[(), ('1',), ('1', '1'),
('1', '1', '1'), ('1', '1', '2'), ('1', '1', '3'),
('1', '2'),
('1', '2', '1'), ('1', '2', '2'), ('1', '2', '3'),
('1', '3'),
('1', '3', '1'), ('1', '3', '2'), ('1', '3', '3'),
('2',), ('2', '1'),
('2', '1', '1'), ('2', '1', '2'), ('2', '1', '3'),
('2', '2'),
('2', '2', '1'), ('2', '2', '2'), ('2', '2', '3'),
('2', '3'),
('2', '3', '1'), ('2', '3', '2'), ('2', '3', '3'),
('3',), ('3', '1'),
('3', '1', '1'), ('3', '1', '2'), ('3', '1', '3'),
('3', '2'),
('3', '2', '1'), ('3', '2', '2'), ('3', '2', '3'),
('3', '3'),
('3', '3', '1'), ('3', '3', '2')])
def testAlternateRoot(self):
"""Test select with different root"""
self.root = Path("testfiles/select/1")
self.ParseTest([("--exclude", "testfiles/select/1/[23]")],
[(), ('1',), ('1', '1'), ('1', '2'), ('1', '3')])
self.root = Path("/")
self.ParseTest([("--exclude", "/home/*"),
("--include", "/home"),
("--exclude", "/")],
[(), ("home",)])
if __name__ == "__main__":
unittest.main()
|
ivan-fedorov/intellij-community | refs/heads/master | python/testData/psi/Await.py | 19 | async def f(x):
await x
await = 1
|
benhamner/FacebookRecruitingCompetition | refs/heads/master | Benchmarks/utilities.py | 1 | import csv
def edges_generator(file_name):
"""
Generator that returns edges given a 2-column csv graph file
"""
f = open(file_name)
reader = csv.reader(f)
# Ignore the header
reader.next()
for edges in reader:
nodes = [int(node) for node in edges]
yield nodes
f.close()
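# For reference, a hypothetical input file in the 2-column format the
# readers in this module expect (a header row followed by one directed
# edge per line):
#
#     source_node,destination_node
#     1,2
#     1,3
#     2,3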
def read_graph(file_name):
"""
Reads a sparsely represented directed graph into a dictionary
"""
# Store the graph as a dictionary of edges
graph = {}
def initialize_node(node):
if node not in graph:
graph[node] = []
for nodes in edges_generator(file_name):
for node in nodes:
initialize_node(node)
graph[nodes[0]].append(nodes[1])
return graph
def read_nodes_list(test_file):
"""
    Reads a single-column list of nodes
"""
f = open(test_file)
reader = csv.reader(f)
reader.next() # ignore header
nodes = []
for row in reader:
nodes.append(int(row[0]))
    f.close()
    return nodes
def write_submission_file(submission_file, test_nodes, test_predictions):
"""
Writes the submission file
"""
f = open(submission_file, "w")
writer = csv.writer(f)
writer.writerow(["source_node", "destination_nodes"])
for source_node, dest_nodes in zip(test_nodes, test_predictions):
writer.writerow([str(source_node),
" ".join([str(n) for n in dest_nodes])])
f.close()
|
sdague/home-assistant | refs/heads/dev | homeassistant/components/bbox/device_tracker.py | 19 | """Support for French FAI Bouygues Bbox routers."""
from collections import namedtuple
from datetime import timedelta
import logging
from typing import List
import pybbox
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "192.168.1.254"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string}
)
def get_scanner(hass, config):
"""Validate the configuration and return a Bbox scanner."""
scanner = BboxDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple("Device", ["mac", "name", "ip", "last_update"])
class BboxDeviceScanner(DeviceScanner):
"""This class scans for devices connected to the bbox."""
def __init__(self, config):
"""Get host from config."""
self.host = config[CONF_HOST]
"""Initialize the scanner."""
self.last_results: List[Device] = []
self.success_init = self._update_info()
_LOGGER.info("Scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [
result.name for result in self.last_results if result.mac == device
]
if filter_named:
return filter_named[0]
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""Check the Bbox for devices.
        Returns a boolean indicating whether scanning succeeded.
"""
_LOGGER.info("Scanning...")
box = pybbox.Bbox(ip=self.host)
result = box.get_all_connected_devices()
now = dt_util.now()
last_results = []
for device in result:
if device["active"] != 1:
continue
last_results.append(
Device(
device["macaddress"], device["hostname"], device["ipaddress"], now
)
)
self.last_results = last_results
_LOGGER.info("Scan successful")
return True
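# For reference, a hypothetical configuration.yaml entry that would load this
# platform (the host shown is DEFAULT_HOST above):
#
#     device_tracker:
#       - platform: bbox
#         host: 192.168.1.254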
|
HewlettPackard/oneview-ansible | refs/heads/master | library/oneview_power_device.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_power_device
short_description: Manage OneView Power Device resources.
description:
- "Provides an interface to manage Power delivery devices resources. Can add, update, remove, change power state,
change UID state and refresh state."
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 3.1.0"
author: "Gustavo Hennig (@GustavoHennig)"
options:
state:
description:
- Indicates the desired state for the Power Device resource.
C(present) will ensure data properties are compliant with OneView.
C(discovered) will add an iPDU to the OneView.
C(absent) will remove the resource from OneView, if it exists.
C(power_state_set) will set the power state of the Power Device.
C(refresh_state_set) will set the refresh state of the Power Device.
C(uid_state_set) will set the UID state of the Power Device.
choices: ['present', 'discovered', 'absent', 'power_state_set', 'refresh_state_set', 'uid_state_set']
required: true
data:
description:
            - Dict with the Power Device properties and their associated states.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Add a Power Device
oneview_power_device:
config: "{{ config }}"
state: present
data:
name: 'Power Device Name'
ratedCapacity: 40
delegate_to: localhost
- name: Add an iPDU
oneview_power_device:
config: "{{ config }}"
state: discovered
data:
hostname : '{{ power_device_hostname }}'
username : '{{ power_device_username }}'
password : '{{ power_device_password }}'
force : false
delegate_to: localhost
- name: Power off the Power Device
oneview_power_device:
config: "{{ config }}"
state: power_state_set
data:
name: 'Power Device Name'
powerStateData:
powerState: "Off"
delegate_to: localhost
- name: Refresh the Power Device
oneview_power_device:
config: "{{ config }}"
state: refresh_state_set
data:
name: 'Power Device Name'
refreshStateData:
refreshState : "RefreshPending"
delegate_to: localhost
- name: Set UID light state of the Power Device on
oneview_power_device:
config: "{{ config }}"
state: uid_state_set
data:
name: 'Power Device Name'
uidStateData:
uidState: "On"
delegate_to: localhost
- name: Remove the Power Device by its name
oneview_power_device:
config: "{{ config }}"
state: absent
data:
name: 'Power Device Name'
delegate_to: localhost
'''
RETURN = '''
power_device:
description: Has the OneView facts about the Power Device.
returned: On states 'present', 'discovered', 'power_state_set', 'refresh_state_set', 'uid_state_set'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError, OneViewModuleResourceNotFound
class PowerDeviceModule(OneViewModuleBase):
MSG_CREATED = 'Power Device added successfully.'
MSG_IPDU_ADDED = 'iPDU added successfully.'
MSG_ALREADY_PRESENT = 'Power Device is already present.'
MSG_DELETED = 'Power Device deleted successfully.'
MSG_UPDATED = 'Power Device updated successfully.'
MSG_ALREADY_ABSENT = 'Power Device is already absent.'
MSG_MANDATORY_FIELD_MISSING = "Mandatory field was not informed: data.name"
MSG_POWER_STATE_UPDATED = 'Power Device power state changed successfully.'
MSG_REFRESH_STATE_UPDATED = 'Power Device refresh state changed successfully.'
MSG_UID_STATE_UPDATED = 'Power Device UID state changed successfully.'
MSG_NOT_FOUND = 'Power Device was not found for this operation.'
argument_spec = dict(
state=dict(
required=True,
choices=['present', 'absent', 'power_state_set', 'refresh_state_set', 'uid_state_set', 'discovered']
),
data=dict(required=True, type='dict')
)
def __init__(self):
super(PowerDeviceModule, self).__init__(additional_arg_spec=self.argument_spec,
validate_etag_support=True)
self.resource_client = self.oneview_client.power_devices
def execute_module(self):
changed, msg, ansible_facts = False, '', {}
if self.state == 'discovered':
changed, msg, ansible_facts = self.__discover(self.data)
else:
if not self.data.get('name'):
raise OneViewModuleValueError(self.MSG_MANDATORY_FIELD_MISSING)
resource = self.get_by_name(self.data['name'])
if self.state == 'present':
return self.resource_present(resource, 'power_device', 'add')
elif self.state == 'absent':
return self.resource_absent(resource, 'remove')
elif self.state == 'power_state_set':
changed, msg, ansible_facts = self.__set_power_state(self.data, resource)
elif self.state == 'refresh_state_set':
changed, msg, ansible_facts = self.__set_refresh_state(self.data, resource)
elif self.state == 'uid_state_set':
changed, msg, ansible_facts = self.__set_uid_state(self.data, resource)
return dict(changed=changed,
msg=msg,
ansible_facts=ansible_facts)
def __discover(self, data):
resource = self.oneview_client.power_devices.add_ipdu(data)
return True, self.MSG_IPDU_ADDED, dict(power_device=resource)
def __check_resource(self, resource):
if not resource:
raise OneViewModuleResourceNotFound(self.MSG_NOT_FOUND)
def __set_power_state(self, data, resource):
self.__check_resource(resource)
resource = self.oneview_client.power_devices.update_power_state(resource['uri'], data['powerStateData'])
return True, self.MSG_POWER_STATE_UPDATED, dict(power_device=resource)
def __set_uid_state(self, data, resource):
self.__check_resource(resource)
resource = self.oneview_client.power_devices.update_uid_state(resource['uri'], data['uidStateData'])
return True, self.MSG_UID_STATE_UPDATED, dict(power_device=resource)
def __set_refresh_state(self, data, resource):
self.__check_resource(resource)
resource = self.oneview_client.power_devices.update_refresh_state(resource['uri'], data['refreshStateData'])
return True, self.MSG_REFRESH_STATE_UPDATED, dict(power_device=resource)
def main():
PowerDeviceModule().run()
if __name__ == '__main__':
main()
|
lobachevzky/NLP-Project | refs/heads/master | env/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.py | 488 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import os
import sys
import errno
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
""" Lockfile implemented as a Unix PID file.
The lock file is a normal file named by the attribute `path`.
A lock's PID file contains a single line of text, containing
the process ID (PID) of the process that acquired the lock.
>>> lock = PIDLockFile('somefile')
>>> lock = PIDLockFile('somefile')
"""
def __init__(self, path, threaded=False, timeout=None):
# pid lockfiles don't support threaded operation, so always force
# False as the threaded arg.
LockBase.__init__(self, path, False, timeout)
self.unique_name = self.path
def read_pid(self):
""" Get the PID from the lock file.
"""
return read_pid_from_pidfile(self.path)
def is_locked(self):
""" Test if the lock is currently held.
The lock is held if the PID file for this lock exists.
"""
return os.path.exists(self.path)
def i_am_locking(self):
""" Test if the lock is held by the current process.
Returns ``True`` if the current process ID matches the
number stored in the PID file.
"""
return self.is_locked() and os.getpid() == self.read_pid()
def acquire(self, timeout=None):
""" Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
"""
# honour an explicit timeout of 0; the old and/or idiom silently
# replaced it with self.timeout
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
try:
write_pid_to_pidfile(self.path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# The lock creation failed. Maybe sleep a bit.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout / 10.0 if timeout else 0.1)
else:
raise LockFailed("failed to create %s" % self.path)
else:
return
def release(self):
""" Release the lock.
Removes the PID file to release the lock, or raises an
error if the current process does not hold the lock.
"""
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
remove_existing_pidfile(self.path)
def break_lock(self):
""" Break an existing lock.
Removes the PID file if it already exists, otherwise does
nothing.
"""
remove_existing_pidfile(self.path)
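# A usage sketch, not part of the library (the path is an assumption):
#
#   lock = PIDLockFile('/var/run/myapp.pid', timeout=5)
#   lock.acquire()         # raises LockTimeout/AlreadyLocked on contention
#   try:
#       pass               # exclusive work goes here
#   finally:
#       lock.release()     # raises NotMyLock if another process holds it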
def read_pid_from_pidfile(pidfile_path):
""" Read the PID recorded in the named PID file.
Read and return the numeric PID recorded as text in the named
PID file. If the PID file cannot be read, or if the content is
not a valid PID, return ``None``.
"""
pid = None
try:
pidfile = open(pidfile_path, 'r')
except IOError:
pass
else:
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character.
#
# Programs that read PID files should be somewhat flexible
# in what they accept; i.e., they should ignore extra
# whitespace, leading zeroes, absence of the trailing
# newline, or additional lines in the PID file.
line = pidfile.readline().strip()
try:
pid = int(line)
except ValueError:
pass
pidfile.close()
return pid
def write_pid_to_pidfile(pidfile_path):
""" Write the PID in the named PID file.
Get the numeric process ID (“PID”) of the current process
and write it to the named file as a line of text.
"""
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
open_mode = 0o644
pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
pidfile = os.fdopen(pidfile_fd, 'w')
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. For
# example, if crond was process number 25, /var/run/crond.pid
# would contain three characters: two, five, and newline.
pid = os.getpid()
line = "%(pid)d\n" % vars()
pidfile.write(line)
pidfile.close()
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
|
LingxiaoJIA/gem5 | refs/heads/master | util/stats/db.py | 90 | # Copyright (c) 2003-2004 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import MySQLdb, re, string
def statcmp(a, b):
v1 = a.split('.')
v2 = b.split('.')
last = min(len(v1), len(v2)) - 1
for i,j in zip(v1[0:last], v2[0:last]):
if i != j:
return cmp(i, j)
# Special compare for last element.
if len(v1) == len(v2):
return cmp(v1[last], v2[last])
else:
return cmp(len(v1), len(v2))
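# For example (illustrative stat names):
#   sorted(['cpu.dcache.misses', 'cpu.dcache', 'cpu.icache.misses'], statcmp)
# yields ['cpu.dcache', 'cpu.dcache.misses', 'cpu.icache.misses']: shorter
# dotted paths sort before their children, and the final component is only
# compared when the lengths match.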
class RunData:
def __init__(self, row):
self.run = int(row[0])
self.name = row[1]
self.user = row[2]
self.project = row[3]
class SubData:
def __init__(self, row):
self.stat = int(row[0])
self.x = int(row[1])
self.y = int(row[2])
self.name = row[3]
self.descr = row[4]
class Data:
def __init__(self, row):
if len(row) != 5:
raise ValueError('stat db error')
self.stat = int(row[0])
self.run = int(row[1])
self.x = int(row[2])
self.y = int(row[3])
self.data = float(row[4])
def __repr__(self):
return '''Data(['%d', '%d', '%d', '%d', '%f'])''' % ( self.stat,
self.run, self.x, self.y, self.data)
class StatData(object):
def __init__(self, row):
self.stat = int(row[0])
self.name = row[1]
self.desc = row[2]
self.type = row[3]
self.prereq = int(row[5])
self.precision = int(row[6])
import flags
self.flags = 0
if int(row[4]): self.flags |= flags.printable
if int(row[7]): self.flags |= flags.nozero
if int(row[8]): self.flags |= flags.nonan
if int(row[9]): self.flags |= flags.total
if int(row[10]): self.flags |= flags.pdf
if int(row[11]): self.flags |= flags.cdf
if self.type == 'DIST' or self.type == 'VECTORDIST':
self.min = float(row[12])
self.max = float(row[13])
self.bktsize = float(row[14])
self.size = int(row[15])
if self.type == 'FORMULA':
self.formula = self.db.allFormulas[self.stat]
class Node(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class Result(object):
def __init__(self, x, y):
self.data = {}
self.x = x
self.y = y
def __contains__(self, run):
return run in self.data
def __getitem__(self, run):
if run not in self.data:
self.data[run] = [ [ 0.0 ] * self.y for i in xrange(self.x) ]
return self.data[run]
class Database(object):
def __init__(self):
self.host = 'zizzer.pool'
self.user = ''
self.passwd = ''
self.db = 'm5stats'
self.cursor = None
self.allStats = []
self.allStatIds = {}
self.allStatNames = {}
self.allSubData = {}
self.allRuns = []
self.allRunIds = {}
self.allRunNames = {}
self.allFormulas = {}
self.stattop = {}
self.statdict = {}
self.statlist = []
self.mode = 'sum'
self.runs = None
self.ticks = None
self.method = 'sum'
self._method = type(self).sum
def get(self, job, stat, system=None):
run = self.allRunNames.get(str(job), None)
if run is None:
return None
from info import ProxyError, scalar, vector, value, values, total, len
if system is None and hasattr(job, 'system'):
system = job.system
if system is not None:
stat.system = self[system]
try:
if scalar(stat):
return value(stat, run.run)
if vector(stat):
return values(stat, run.run)
except ProxyError:
return None
return None
def query(self, sql):
self.cursor.execute(sql)
def update_dict(self, dict):
dict.update(self.stattop)
def append(self, stat):
statname = re.sub(':', '__', stat.name)
path = string.split(statname, '.')
pathtop = path[0]
fullname = ''
x = self
while len(path) > 1:
name = path.pop(0)
if not x.__dict__.has_key(name):
x.__dict__[name] = Node(fullname + name)
x = x.__dict__[name]
fullname = '%s%s.' % (fullname, name)
name = path.pop(0)
x.__dict__[name] = stat
self.stattop[pathtop] = self.__dict__[pathtop]
self.statdict[statname] = stat
self.statlist.append(statname)
def connect(self):
# connect
self.thedb = MySQLdb.connect(db=self.db,
host=self.host,
user=self.user,
passwd=self.passwd)
# create a cursor
self.cursor = self.thedb.cursor()
self.query('''select rn_id,rn_name,rn_sample,rn_user,rn_project
from runs''')
for result in self.cursor.fetchall():
run = RunData(result)
self.allRuns.append(run)
self.allRunIds[run.run] = run
self.allRunNames[run.name] = run
self.query('select sd_stat,sd_x,sd_y,sd_name,sd_descr from subdata')
for result in self.cursor.fetchall():
subdata = SubData(result)
if self.allSubData.has_key(subdata.stat):
self.allSubData[subdata.stat].append(subdata)
else:
self.allSubData[subdata.stat] = [ subdata ]
self.query('select * from formulas')
for id,formula in self.cursor.fetchall():
self.allFormulas[int(id)] = formula.tostring()
StatData.db = self
self.query('select * from stats')
import info
for result in self.cursor.fetchall():
stat = info.NewStat(self, StatData(result))
self.append(stat)
self.allStats.append(stat)
self.allStatIds[stat.stat] = stat
self.allStatNames[stat.name] = stat
# Name: listRuns
# Desc: Prints all runs belonging to the given user; if no user
# is given, all runs are printed
def listRuns(self, user=None):
print '%-40s %-10s %-5s' % ('run name', 'user', 'id')
print '-' * 62
for run in self.allRuns:
if user == None or user == run.user:
print '%-40s %-10s %-10d' % (run.name, run.user, run.run)
# Name: listTicks
# Desc: Prints all sample ticks for the given runs; if no runs are
# given, ticks from every run are printed
def listTicks(self, runs=None):
print "tick"
print "----------------------------------------"
sql = 'select distinct dt_tick from data where dt_stat=1180'
if runs:
val = ' or '.join([ 'dt_run=%s' % run.run for run in runs ])
sql += ' and (%s)' % val
self.query(sql)
for r in self.cursor.fetchall():
print r[0]
# Name: retTicks
# Desc: Returns all sample ticks for the given runs; if no runs are
# given, ticks from every run are returned
def retTicks(self, runs=None):
sql = 'select distinct dt_tick from data where dt_stat=1180'
if runs:
val = ' or '.join([ 'dt_run=%s' % run.run for run in runs ])
sql += ' and (%s)' % val
self.query(sql)
ret = []
for r in self.cursor.fetchall():
ret.append(r[0])
return ret
# Name: listStats
# Desc: Prints all statistics that appear in the database;
# the optional argument is a regular expression that can
# be used to prune the result set
def listStats(self, regex=None):
print '%-60s %-8s %-10s' % ('stat name', 'id', 'type')
print '-' * 80
rx = None
if regex != None:
rx = re.compile(regex)
stats = [ stat.name for stat in self.allStats ]
stats.sort(statcmp)
for stat in stats:
stat = self.allStatNames[stat]
if rx == None or rx.match(stat.name):
print '%-60s %-8s %-10s' % (stat.name, stat.stat, stat.type)
# Name: listFormulas
# Desc: Prints all formulas that appear in the database;
# the optional argument is a regular expression that can
# be used to prune the result set
def listFormulas(self, regex=None):
print '%-60s %s' % ('formula name', 'formula')
print '-' * 80
rx = None
if regex != None:
rx = re.compile(regex)
stats = [ stat.name for stat in self.allStats ]
stats.sort(statcmp)
for stat in stats:
stat = self.allStatNames[stat]
if stat.type == 'FORMULA' and (rx == None or rx.match(stat.name)):
print '%-60s %s' % (stat.name, self.allFormulas[stat.stat])
def getStat(self, stats):
if type(stats) is not list:
stats = [ stats ]
ret = []
for stat in stats:
if type(stat) is int:
ret.append(self.allStatIds[stat])
if type(stat) is str:
rx = re.compile(stat)
for stat in self.allStats:
if rx.match(stat.name):
ret.append(stat)
return ret
#########################################
# get the data
#
# Name: build_query
# Desc: builds (but does not execute) the SQL for an aggregated data
# query; query(sql) above executes the result
def build_query(self, op, stat, ticks, group=False):
sql = 'select '
sql += 'dt_stat as stat, '
sql += 'dt_run as run, '
sql += 'dt_x as x, '
sql += 'dt_y as y, '
if group:
sql += 'dt_tick as tick, '
sql += '%s(dt_data) as data ' % op
sql += 'from data '
sql += 'where '
if isinstance(stat, list):
val = ' or '.join([ 'dt_stat=%d' % s.stat for s in stat ])
sql += ' (%s)' % val
else:
sql += ' dt_stat=%d' % stat.stat
if self.runs != None and len(self.runs):
val = ' or '.join([ 'dt_run=%d' % r for r in self.runs ])
sql += ' and (%s)' % val
if ticks != None and len(ticks):
val = ' or '.join([ 'dt_tick=%d' % s for s in ticks ])
sql += ' and (%s)' % val
sql += ' group by dt_stat,dt_run,dt_x,dt_y'
if group:
sql += ',dt_tick'
return sql
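# For instance (a sketch; the stat id and ticks are made up), with
# self.runs unset, build_query('sum', stat, [100, 200]) returns roughly:
#   select dt_stat as stat, dt_run as run, dt_x as x, dt_y as y,
#   sum(dt_data) as data from data where dt_stat=42
#   and (dt_tick=100 or dt_tick=200) group by dt_stat,dt_run,dt_x,dt_y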
# Name: sum
# Desc: given a run, a stat and an array of samples, total the samples
def sum(self, *args, **kwargs):
return self.build_query('sum', *args, **kwargs)
# Name: avg
# Desc: given a run, a stat and an array of samples, average the samples
def avg(self, *args, **kwargs):
return self.build_query('avg', *args, **kwargs)
# Name: stdev
# Desc: given a run, a stat and an array of samples, get the standard
# deviation
def stdev(self, *args, **kwargs):
return self.build_query('stddev', *args, **kwargs)
def __setattr__(self, attr, value):
super(Database, self).__setattr__(attr, value)
if attr != 'method':
return
# store unbound methods: data() invokes self._method(self, ...)
if value == 'sum':
self._method = type(self).sum
elif value == 'avg':
self._method = type(self).avg
elif value == 'stdev':
self._method = type(self).stdev
else:
raise AttributeError, "method must be one of: sum | avg | stdev"
def data(self, stat, ticks=None):
if ticks is None:
ticks = self.ticks
sql = self._method(self, stat, ticks)
self.query(sql)
runs = {}
xmax = 0
ymax = 0
for x in self.cursor.fetchall():
data = Data(x)
if not runs.has_key(data.run):
runs[data.run] = {}
if not runs[data.run].has_key(data.x):
runs[data.run][data.x] = {}
xmax = max(xmax, data.x)
ymax = max(ymax, data.y)
runs[data.run][data.x][data.y] = data.data
results = Result(xmax + 1, ymax + 1)
for run,data in runs.iteritems():
result = results[run]
for x,ydata in data.iteritems():
for y,data in ydata.iteritems():
result[x][y] = data
return results
def __getitem__(self, key):
return self.stattop[key]
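# A usage sketch (the stat name is an assumption; connect() needs a
# reachable MySQL server configured above):
#
#   db = Database()
#   db.connect()                # load runs, stats, subdata and formulas
#   db.listRuns()               # dump the run name/user/id table
#   db.method = 'avg'           # aggregate samples with avg() instead of sum()
#   results = db.data(db.getStat('sim_ticks')[0])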
|
AutorestCI/azure-sdk-for-python | refs/heads/master | azure-mgmt-logic/azure/mgmt/logic/models/integration_account_sku.py | 4 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IntegrationAccountSku(Model):
"""The integration account sku.
:param name: The sku name. Possible values include: 'NotSpecified',
'Free', 'Standard'
:type name: str or :class:`IntegrationAccountSkuName
<azure.mgmt.logic.models.IntegrationAccountSkuName>`
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'IntegrationAccountSkuName'},
}
def __init__(self, name):
self.name = name
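# A construction sketch; 'Standard' is one of the values documented above:
#
#   sku = IntegrationAccountSku(name='Standard')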
|
mspark93/VTK | refs/heads/master | ThirdParty/Twisted/twisted/test/test_threadable.py | 34 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.threadable}.
"""
from __future__ import division, absolute_import
import sys, pickle
try:
import threading
except ImportError:
threadingSkip = "Platform lacks thread support"
else:
threadingSkip = None
from twisted.python.compat import _PY3
from twisted.trial import unittest
from twisted.python import threadable
class TestObject:
synchronized = ['aMethod']
x = -1
y = 1
def aMethod(self):
for i in range(10):
self.x, self.y = self.y, self.x
self.z = self.x + self.y
assert self.z == 0, "z == %d, not 0 as expected" % (self.z,)
threadable.synchronize(TestObject)
class SynchronizationTestCase(unittest.SynchronousTestCase):
def setUp(self):
"""
Reduce the CPython check interval so that thread switches happen much
more often, hopefully exercising more possible race conditions. Also,
delay actual test startup until the reactor has been started.
"""
if _PY3:
if getattr(sys, 'getswitchinterval', None) is not None:
self.addCleanup(sys.setswitchinterval, sys.getswitchinterval())
sys.setswitchinterval(0.0000001)
else:
if getattr(sys, 'getcheckinterval', None) is not None:
self.addCleanup(sys.setcheckinterval, sys.getcheckinterval())
sys.setcheckinterval(7)
def test_synchronizedName(self):
"""
The name of a synchronized method is unaffected by the synchronization
decorator.
"""
self.assertEqual("aMethod", TestObject.aMethod.__name__)
def test_isInIOThread(self):
"""
L{threadable.isInIOThread} returns C{True} if and only if it is called
in the same thread as L{threadable.registerAsIOThread}.
"""
threadable.registerAsIOThread()
foreignResult = []
t = threading.Thread(
target=lambda: foreignResult.append(threadable.isInIOThread()))
t.start()
t.join()
self.assertFalse(
foreignResult[0], "Non-IO thread reported as IO thread")
self.assertTrue(
threadable.isInIOThread(), "IO thread reported as not IO thread")
def testThreadedSynchronization(self):
o = TestObject()
errors = []
def callMethodLots():
try:
for i in range(1000):
o.aMethod()
except AssertionError as e:
errors.append(str(e))
threads = []
for x in range(5):
t = threading.Thread(target=callMethodLots)
threads.append(t)
t.start()
for t in threads:
t.join()
if errors:
raise unittest.FailTest(errors)
if threadingSkip is not None:
testThreadedSynchronization.skip = threadingSkip
test_isInIOThread.skip = threadingSkip
def testUnthreadedSynchronization(self):
o = TestObject()
for i in range(1000):
o.aMethod()
class SerializationTestCase(unittest.SynchronousTestCase):
def testPickling(self):
lock = threadable.XLock()
lockType = type(lock)
lockPickle = pickle.dumps(lock)
newLock = pickle.loads(lockPickle)
self.assertTrue(isinstance(newLock, lockType))
if threadingSkip is not None:
testPickling.skip = threadingSkip
def testUnpickling(self):
lockPickle = b'ctwisted.python.threadable\nunpickle_lock\np0\n(tp1\nRp2\n.'
lock = pickle.loads(lockPickle)
newPickle = pickle.dumps(lock, 2)
newLock = pickle.loads(newPickle)
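# no explicit assertion: the test passes if the canned pickle and the
# re-pickled lock both load without raising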
|
utecuy/edx-platform | refs/heads/master | lms/djangoapps/ccx/tests/test_overrides.py | 33 | # coding=UTF-8
"""
tests for overrides
"""
import datetime
import mock
import pytz
from nose.plugins.attrib import attr
from courseware.field_overrides import OverrideFieldData # pylint: disable=import-error
from django.test.utils import override_settings
from request_cache.middleware import RequestCache
from student.tests.factories import AdminFactory # pylint: disable=import-error
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from ..models import CustomCourseForEdX
from ..overrides import override_field_for_ccx
from .test_views import flatten, iter_blocks
@attr('shard_1')
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
'ccx.overrides.CustomCoursesForEdxOverrideProvider',))
class TestFieldOverrides(ModuleStoreTestCase):
"""
Make sure field overrides behave in the expected manner.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""
Set up tests
"""
super(TestFieldOverrides, self).setUp()
self.course = course = CourseFactory.create()
self.course.enable_ccx = True
# Create a course outline
self.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
self.mooc_due = due = datetime.datetime(
2010, 7, 7, 0, 0, tzinfo=pytz.UTC)
chapters = [ItemFactory.create(start=start, parent=course)
for _ in xrange(2)]
sequentials = flatten([
[ItemFactory.create(parent=chapter) for _ in xrange(2)]
for chapter in chapters])
verticals = flatten([
[ItemFactory.create(due=due, parent=sequential) for _ in xrange(2)]
for sequential in sequentials])
blocks = flatten([ # pylint: disable=unused-variable
[ItemFactory.create(parent=vertical) for _ in xrange(2)]
for vertical in verticals])
self.ccx = ccx = CustomCourseForEdX(
course_id=course.id,
display_name='Test CCX',
coach=AdminFactory.create())
ccx.save()
patch = mock.patch('ccx.overrides.get_current_ccx')
self.get_ccx = get_ccx = patch.start()
get_ccx.return_value = ccx
self.addCleanup(patch.stop)
self.addCleanup(RequestCache.clear_request_cache)
# Apparently the test harness doesn't use LmsFieldStorage, and I'm not
# sure if there's a way to poke the test harness to do so. So, we'll
# just inject the override field storage in this brute force manner.
OverrideFieldData.provider_classes = None
for block in iter_blocks(ccx.course):
block._field_data = OverrideFieldData.wrap( # pylint: disable=protected-access
AdminFactory.create(), course, block._field_data) # pylint: disable=protected-access
def cleanup_provider_classes():
"""
After everything is done, clean up by un-doing the change to the
OverrideFieldData object that is done during the wrap method.
"""
OverrideFieldData.provider_classes = None
self.addCleanup(cleanup_provider_classes)
def test_override_start(self):
"""
Test that overriding start date on a chapter works.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
self.assertEquals(chapter.start, ccx_start)
def test_override_num_queries(self):
"""
Test that overriding and accessing a field produce same number of queries.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
with self.assertNumQueries(3):
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
dummy = chapter.start
def test_overriden_field_access_produces_no_extra_queries(self):
"""
Test no extra queries when accessing an overridden field more than once.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
with self.assertNumQueries(3):
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
dummy1 = chapter.start
dummy2 = chapter.start
dummy3 = chapter.start
def test_override_is_inherited(self):
"""
Test that sequentials inherit overridden start date from chapter.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
self.assertEquals(chapter.get_children()[0].start, ccx_start)
self.assertEquals(chapter.get_children()[1].start, ccx_start)
def test_override_is_inherited_even_if_set_in_mooc(self):
"""
Test that a due date set on a chapter is inherited by grandchildren
(verticals) even if a due date is set explicitly on grandchildren in
the mooc.
"""
ccx_due = datetime.datetime(2015, 1, 1, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
chapter.display_name = 'itsme!'
override_field_for_ccx(self.ccx, chapter, 'due', ccx_due)
vertical = chapter.get_children()[0].get_children()[0]
self.assertEqual(vertical.due, ccx_due)
|
Orav/kbengine | refs/heads/master | kbe/src/lib/python/Doc/tools/sphinxext/suspicious.py | 1 | """
Try to detect suspicious constructs, resembling markup
that has leaked into the final output.
Suspicious lines are reported in a comma-separated file,
``suspicious.csv``, located in the output directory.
The file is utf-8 encoded, and each line contains four fields:
* document name (normalized)
* line number in the source document
* problematic text
* complete line showing the problematic text in context
It is common to find many false positives. To avoid reporting them
again and again, they may be added to the ``ignored.csv`` file
(located in the configuration directory). The file has the same
format as ``suspicious.csv`` with a few differences:
- each line defines a rule; if the rule matches, the issue
is ignored.
- line number may be empty (that is, nothing between the
commas: ",,"). In this case, line numbers are ignored (the
rule matches anywhere in the file).
- the last field does not have to be a complete line; some
surrounding text (never more than a line) is enough for
context.
Rules are processed sequentially. A rule matches when:
* document names are the same
* problematic texts are the same
* line numbers are close to each other (5 lines up or down)
* the rule text is completely contained into the source line
The simplest way to create the ignored.csv file is by copying
undesired entries from suspicious.csv (possibly trimming the last
field.)
Copyright 2009 Gabriel A. Genellina
"""
import os
import re
import csv
import sys
from docutils import nodes
from sphinx.builders import Builder
detect_all = re.compile(r'''
::(?=[^=])| # two :: (but NOT ::=)
:[a-zA-Z][a-zA-Z0-9]+| # :foo
`| # ` (seldom used by itself)
(?<!\.)\.\.[ \t]*\w+: # .. foo: (but NOT ... else:)
''', re.UNICODE | re.VERBOSE).finditer
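# Fragments the pattern flags (illustrative): "foo:: bar", ":meth:", a stray
# backquote and ".. note:" -- but not "::=" or "... else:".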
py3 = sys.version_info >= (3, 0)
class Rule:
def __init__(self, docname, lineno, issue, line):
"""A rule for ignoring issues"""
self.docname = docname # document to which this rule applies
self.lineno = lineno # line number in the original source;
# this rule matches only near that.
# None -> don't care
self.issue = issue # the markup fragment that triggered this rule
self.line = line # text of the container element (single line only)
self.used = False
def __repr__(self):
return '{0.docname},,{0.issue},{0.line}'.format(self)
class dialect(csv.excel):
"""Our dialect: uses only linefeed as newline."""
lineterminator = '\n'
class CheckSuspiciousMarkupBuilder(Builder):
"""
Checks for possibly invalid markup that may leak into the output.
"""
name = 'suspicious'
def init(self):
# create output file
self.log_file_name = os.path.join(self.outdir, 'suspicious.csv')
open(self.log_file_name, 'w').close()
# load database of previously ignored issues
self.load_rules(os.path.join(os.path.dirname(__file__),
'susp-ignored.csv'))
def get_outdated_docs(self):
return self.env.found_docs
def get_target_uri(self, docname, typ=None):
return ''
def prepare_writing(self, docnames):
pass
def write_doc(self, docname, doctree):
# set when any issue is encountered in this document
self.any_issue = False
self.docname = docname
visitor = SuspiciousVisitor(doctree, self)
doctree.walk(visitor)
def finish(self):
unused_rules = [rule for rule in self.rules if not rule.used]
if unused_rules:
self.warn('Found %s/%s unused rules:' %
(len(unused_rules), len(self.rules)))
for rule in unused_rules:
self.info(repr(rule))
return
def check_issue(self, line, lineno, issue):
if not self.is_ignored(line, lineno, issue):
self.report_issue(line, lineno, issue)
def is_ignored(self, line, lineno, issue):
"""Determine whether this issue should be ignored."""
docname = self.docname
for rule in self.rules:
if rule.docname != docname: continue
if rule.issue != issue: continue
# Both lines must match *exactly*. This is rather strict,
# and probably should be improved.
# Doing fuzzy matches with levenshtein distance could work,
# but that means bringing other libraries...
# Ok, relax that requirement: just check if the rule fragment
# is contained in the document line
if rule.line not in line: continue
# Check both line numbers. If they're "near"
# this rule matches. (lineno=None means "don't care")
if (rule.lineno is not None) and \
abs(rule.lineno - lineno) > 5: continue
# if it came this far, the rule matched
rule.used = True
return True
return False
def report_issue(self, text, lineno, issue):
if not self.any_issue: self.info()
self.any_issue = True
self.write_log_entry(lineno, issue, text)
if py3:
self.warn('[%s:%d] "%s" found in "%-.120s"' %
(self.docname, lineno, issue, text))
else:
self.warn('[%s:%d] "%s" found in "%-.120s"' % (
self.docname.encode(sys.getdefaultencoding(),'replace'),
lineno,
issue.encode(sys.getdefaultencoding(),'replace'),
text.strip().encode(sys.getdefaultencoding(),'replace')))
self.app.statuscode = 1
def write_log_entry(self, lineno, issue, text):
if py3:
f = open(self.log_file_name, 'a')
writer = csv.writer(f, dialect)
writer.writerow([self.docname, lineno, issue, text.strip()])
f.close()
else:
f = open(self.log_file_name, 'ab')
writer = csv.writer(f, dialect)
writer.writerow([self.docname.encode('utf-8'),
lineno,
issue.encode('utf-8'),
text.strip().encode('utf-8')])
f.close()
def load_rules(self, filename):
"""Load database of previously ignored issues.
A csv file, with exactly the same format as suspicious.csv
Fields: document name (normalized), line number, issue, surrounding text
"""
self.info("loading ignore rules... ", nonl=1)
self.rules = rules = []
try:
if py3:
f = open(filename, 'r')
else:
f = open(filename, 'rb')
except IOError:
return
for i, row in enumerate(csv.reader(f)):
if len(row) != 4:
raise ValueError(
"wrong format in %s, line %d: %s" % (filename, i+1, row))
docname, lineno, issue, text = row
if lineno:
lineno = int(lineno)
else:
lineno = None
if not py3:
docname = docname.decode('utf-8')
issue = issue.decode('utf-8')
text = text.decode('utf-8')
rule = Rule(docname, lineno, issue, text)
rules.append(rule)
f.close()
self.info('done, %d rules loaded' % len(self.rules))
def get_lineno(node):
"""Obtain line number information for a node."""
lineno = None
while lineno is None and node:
node = node.parent
lineno = node.line
return lineno
def extract_line(text, index):
"""text may be a multiline string; extract
only the line containing the given character index.
>>> extract_line("abc\ndefgh\ni", 6)
'defgh'
>>> for i in (0, 2, 3, 4, 10):
... print extract_line("abc\ndefgh\ni", i)
abc
abc
abc
defgh
i
"""
p = text.rfind('\n', 0, index) + 1
q = text.find('\n', index)
if q < 0:
q = len(text)
return text[p:q]
class SuspiciousVisitor(nodes.GenericNodeVisitor):
lastlineno = 0
def __init__(self, document, builder):
nodes.GenericNodeVisitor.__init__(self, document)
self.builder = builder
def default_visit(self, node):
if isinstance(node, (nodes.Text, nodes.image)): # direct text containers
text = node.astext()
# lineno seems to go backwards sometimes (?)
self.lastlineno = lineno = max(get_lineno(node) or 0, self.lastlineno)
seen = set() # don't report the same issue more than once per line
for match in detect_all(text):
issue = match.group()
line = extract_line(text, match.start())
if (issue, line) not in seen:
self.builder.check_issue(line, lineno, issue)
seen.add((issue, line))
unknown_visit = default_visit
def visit_document(self, node):
self.lastlineno = 0
def visit_comment(self, node):
# ignore comments -- too many false positives.
# (although doing this could miss some errors;
# there were two sections "commented-out" by mistake
# in the Python docs that would not have been caught)
raise nodes.SkipNode
|
melon-li/openstack-dashboard | refs/heads/master | openstack_dashboard/dashboards/project/containers/tests.py | 13 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import tempfile
import django
from django.core.files.uploadedfile import InMemoryUploadedFile # noqa
from django import http
from django.utils import http as utils_http
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.containers import forms
from openstack_dashboard.dashboards.project.containers import tables
from openstack_dashboard.dashboards.project.containers import utils
from openstack_dashboard.dashboards.project.containers import views
from openstack_dashboard.test import helpers as test
from horizon.utils.urlresolvers import reverse # noqa
CONTAINER_NAME_1 = u"container one%\u6346"
CONTAINER_NAME_2 = u"container_two\u6346"
CONTAINER_NAME_1_QUOTED = utils_http.urlquote(CONTAINER_NAME_1)
CONTAINER_NAME_2_QUOTED = utils_http.urlquote(CONTAINER_NAME_2)
INVALID_CONTAINER_NAME_1 = utils_http.urlquote(CONTAINER_NAME_1_QUOTED)
INVALID_CONTAINER_NAME_2 = utils_http.urlquote(CONTAINER_NAME_2_QUOTED)
CONTAINER_INDEX_URL = reverse('horizon:project:containers:index')
INVALID_PATHS = []
def invalid_paths():
if not INVALID_PATHS:
for x in (CONTAINER_NAME_1_QUOTED, CONTAINER_NAME_2_QUOTED):
y = reverse('horizon:project:containers:index',
args=(utils.wrap_delimiter(x), ))
INVALID_PATHS.append(y)
for x in (CONTAINER_NAME_1, CONTAINER_NAME_2):
INVALID_PATHS.append(CONTAINER_INDEX_URL + x)
return INVALID_PATHS
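# invalid_paths() caches URLs built from the already-quoted container names
# (which reverse() quotes a second time) plus the raw unquoted names; seeing
# any of them in a response would mean a container name was urlquote()d
# twice or not at all.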
class SwiftTests(test.TestCase):
def _test_invalid_paths(self, response):
for x in invalid_paths():
self.assertNotContains(response, x)
@test.create_stubs({api.swift: ('swift_get_containers',)})
def test_index_no_container_selected(self):
containers = self.containers.list()
api.swift.swift_get_containers(IsA(http.HttpRequest), marker=None) \
.AndReturn((containers, False))
self.mox.ReplayAll()
res = self.client.get(CONTAINER_INDEX_URL)
self.assertTemplateUsed(res, 'project/containers/index.html')
self.assertIn('table', res.context)
resp_containers = res.context['table'].data
self.assertEqual(len(resp_containers), len(containers))
@test.create_stubs({api.swift: ('swift_delete_container', )})
def test_delete_container(self):
for container in self.containers.list():
self.mox.ResetAll() # mandatory in a for loop
api.swift.swift_delete_container(IsA(http.HttpRequest),
container.name)
self.mox.ReplayAll()
action_string = u"containers__delete__%s" % container.name
form_data = {"action": action_string}
req = self.factory.post(CONTAINER_INDEX_URL, form_data)
table = tables.ContainersTable(req, self.containers.list())
handled = table.maybe_handle()
self.assertEqual(handled['location'], CONTAINER_INDEX_URL)
@test.create_stubs({api.swift: ('swift_get_objects', )})
def test_delete_container_nonempty(self):
container = self.containers.first()
objects = self.objects.list()
api.swift.swift_get_objects(IsA(http.HttpRequest),
container.name).AndReturn([objects, False])
self.mox.ReplayAll()
action_string = u"containers__delete__%s" % container.name
form_data = {"action": action_string}
req = self.factory.post(CONTAINER_INDEX_URL, form_data)
req.META['HTTP_REFERER'] = '%s/%s' % (CONTAINER_INDEX_URL,
container.name)
table = tables.ContainersTable(req, self.containers.list())
handled = table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(six.text_type(list(req._messages)[0].message),
u"The container cannot be deleted "
u"since it is not empty.")
def test_create_container_get(self):
res = self.client.get(reverse('horizon:project:containers:create'))
self.assertTemplateUsed(res, 'project/containers/create.html')
@test.create_stubs({api.swift: ('swift_create_container',)})
def test_create_container_post(self):
for container in self.containers.list():
self.mox.ResetAll() # mandatory in a for loop
api.swift.swift_create_container(IsA(http.HttpRequest),
container.name,
metadata=({'is_public': False}))
self.mox.ReplayAll()
formData = {'name': container.name,
'access': "private",
'method': forms.CreateContainer.__name__}
res = self.client.post(
reverse('horizon:project:containers:create'), formData)
args = (utils.wrap_delimiter(container.name),)
url = reverse('horizon:project:containers:index', args=args)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.swift: ('swift_update_container', )})
def test_update_container_to_public(self):
container = self.containers.get(name=u"container one%\u6346")
api.swift.swift_update_container(IsA(http.HttpRequest),
container.name,
metadata=({'is_public': True}))
self.mox.ReplayAll()
action_string = u"containers__make_public__%s" % container.name
form_data = {"action": action_string}
req = self.factory.post(CONTAINER_INDEX_URL, form_data)
table = tables.ContainersTable(req, self.containers.list())
handled = table.maybe_handle()
self.assertEqual(handled['location'], CONTAINER_INDEX_URL)
@test.create_stubs({api.swift: ('swift_update_container', )})
def test_update_container_to_private(self):
container = self.containers.get(name=u"container_two\u6346")
api.swift.swift_update_container(IsA(http.HttpRequest),
container.name,
metadata=({'is_public': False}))
self.mox.ReplayAll()
action_string = u"containers__make_private__%s" % container.name
form_data = {"action": action_string}
req = self.factory.post(CONTAINER_INDEX_URL, form_data)
table = tables.ContainersTable(req, self.containers.list())
handled = table.maybe_handle()
self.assertEqual(handled['location'], CONTAINER_INDEX_URL)
@test.create_stubs({api.swift: ('swift_get_containers',
'swift_get_objects')})
def test_index_container_selected(self):
containers = (self.containers.list(), False)
ret = (self.objects.list(), False)
api.swift.swift_get_containers(IsA(http.HttpRequest),
marker=None).AndReturn(containers)
api.swift.swift_get_objects(IsA(http.HttpRequest),
self.containers.first().name,
marker=None,
prefix=None).AndReturn(ret)
self.mox.ReplayAll()
container_name = self.containers.first().name
res = self.client.get(
reverse('horizon:project:containers:index',
args=[utils.wrap_delimiter(container_name)]))
self.assertTemplateUsed(res, 'project/containers/index.html')
# UTF8 encoding here to ensure there aren't problems with Nose output.
expected = [obj.name.encode('utf8') for obj in self.objects.list()]
self.assertQuerysetEqual(res.context['objects_table'].data,
expected,
lambda obj: obj.name.encode('utf8'))
# Check if the two forms' URL are properly 'urlquote()d'.
form_action = ' action="%s%s/" ' % (CONTAINER_INDEX_URL,
CONTAINER_NAME_1_QUOTED)
self.assertContains(res, form_action, count=2)
self._test_invalid_paths(res)
@test.create_stubs({api.swift: ('swift_upload_object',)})
def test_upload(self):
container = self.containers.first()
obj = self.objects.first()
OBJECT_DATA = 'objectData'
temp_file = tempfile.TemporaryFile()
temp_file.write(OBJECT_DATA)
temp_file.flush()
temp_file.seek(0)
api.swift.swift_upload_object(IsA(http.HttpRequest),
container.name,
obj.name,
IsA(InMemoryUploadedFile)).AndReturn(obj)
self.mox.ReplayAll()
upload_url = reverse('horizon:project:containers:object_upload',
args=[container.name])
res = self.client.get(upload_url)
self.assertTemplateUsed(res, 'project/containers/upload.html')
self.assertContains(res, 'enctype="multipart/form-data"')
self._test_invalid_paths(res)
formData = {'method': forms.UploadObject.__name__,
'container_name': container.name,
'name': obj.name,
'object_file': temp_file}
res = self.client.post(upload_url, formData)
args = (utils.wrap_delimiter(container.name),)
index_url = reverse('horizon:project:containers:index', args=args)
self.assertRedirectsNoFollow(res, index_url)
@test.create_stubs({api.swift: ('swift_upload_object',)})
def test_upload_without_file(self):
container = self.containers.first()
obj = self.objects.first()
api.swift.swift_upload_object(IsA(http.HttpRequest),
container.name,
obj.name,
None).AndReturn(obj)
self.mox.ReplayAll()
upload_url = reverse('horizon:project:containers:object_upload',
args=[container.name])
res = self.client.get(upload_url)
self.assertTemplateUsed(res, 'project/containers/upload.html')
res = self.client.get(upload_url)
self.assertContains(res, 'enctype="multipart/form-data"')
self.assertNotContains(res, INVALID_CONTAINER_NAME_1)
self.assertNotContains(res, INVALID_CONTAINER_NAME_2)
formData = {'method': forms.UploadObject.__name__,
'container_name': container.name,
'name': obj.name,
'object_file': None}
res = self.client.post(upload_url, formData)
args = (utils.wrap_delimiter(container.name),)
index_url = reverse('horizon:project:containers:index', args=args)
self.assertRedirectsNoFollow(res, index_url)
@test.create_stubs({api.swift: ('swift_create_pseudo_folder',)})
def test_create_pseudo_folder(self):
container = self.containers.first()
obj = self.objects.first()
api.swift.swift_create_pseudo_folder(IsA(http.HttpRequest),
container.name,
obj.name + "/").AndReturn(obj)
self.mox.ReplayAll()
create_pseudo_folder_url = reverse('horizon:project:containers:'
'create_pseudo_folder',
args=[container.name])
res = self.client.get(create_pseudo_folder_url)
self.assertTemplateUsed(res,
'project/containers/create_pseudo_folder.html')
self._test_invalid_paths(res)
formData = {'method': forms.CreatePseudoFolder.__name__,
'container_name': container.name,
'name': obj.name}
res = self.client.post(create_pseudo_folder_url, formData)
index_url = reverse('horizon:project:containers:index',
args=[utils.wrap_delimiter(container.name)])
self.assertRedirectsNoFollow(res, index_url)
@test.create_stubs({api.swift: ('swift_delete_object',)})
def test_delete(self):
container = self.containers.first()
obj = self.objects.first()
args = (utils.wrap_delimiter(container.name),)
index_url = reverse('horizon:project:containers:index', args=args)
api.swift.swift_delete_object(IsA(http.HttpRequest),
container.name,
obj.name)
self.mox.ReplayAll()
action_string = "objects__delete_object__%s" % obj.name
form_data = {"action": action_string}
req = self.factory.post(index_url, form_data)
kwargs = {"container_name": container.name}
table = tables.ObjectsTable(req, self.objects.list(), **kwargs)
handled = table.maybe_handle()
self.assertEqual(handled['location'], index_url)
@test.create_stubs({api.swift: ('swift_delete_object',)})
def test_delete_pseudo_folder(self):
container = self.containers.first()
folder = self.folder.first()
args = (utils.wrap_delimiter(container.name),)
index_url = reverse('horizon:project:containers:index', args=args)
api.swift.swift_delete_object(IsA(http.HttpRequest),
container.name,
folder.name + '/')
self.mox.ReplayAll()
action_string = "objects__delete_object__%s/%s" % (container.name,
folder.name)
form_data = {"action": action_string}
req = self.factory.post(index_url, form_data)
kwargs = {"container_name": container.name}
table = tables.ObjectsTable(req, self.folder.list(), **kwargs)
handled = table.maybe_handle()
self.assertEqual(handled['location'], index_url)
@test.create_stubs({api.swift: ('swift_get_object',)})
def test_download(self):
for container in self.containers.list():
for obj in self.objects.list():
self.mox.ResetAll() # mandatory in a for loop
obj = copy.copy(obj)
_data = obj.data
def make_iter():
yield _data
obj.data = make_iter()
api.swift.swift_get_object(
IsA(http.HttpRequest),
container.name,
obj.name,
resp_chunk_size=api.swift.CHUNK_SIZE).AndReturn(obj)
self.mox.ReplayAll()
download_url = reverse(
'horizon:project:containers:object_download',
args=[container.name, obj.name])
res = self.client.get(download_url)
self.assertTrue(res.has_header('Content-Disposition'))
if django.VERSION >= (1, 5):
self.assertEqual(b''.join(res.streaming_content), _data)
self.assertNotContains(res, INVALID_CONTAINER_NAME_1)
self.assertNotContains(res, INVALID_CONTAINER_NAME_2)
else:
self.assertEqual(res.content, _data)
self.assertNotContains(res, INVALID_CONTAINER_NAME_1)
self.assertNotContains(res, INVALID_CONTAINER_NAME_2)
# Check that the returned Content-Disposition filename is well
# surrounded by double quotes and with commas removed
expected_name = '"%s"' % obj.name.replace(
',', '').encode('utf-8')
self.assertEqual(
res.get('Content-Disposition'),
'attachment; filename=%s' % expected_name
)
@test.create_stubs({api.swift: ('swift_get_containers',)})
def test_copy_index(self):
ret = (self.containers.list(), False)
api.swift.swift_get_containers(IsA(http.HttpRequest)).AndReturn(ret)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:containers:object_copy',
args=[self.containers.first().name,
self.objects.first().name]))
self.assertTemplateUsed(res, 'project/containers/copy.html')
self.assertNotContains(res, INVALID_CONTAINER_NAME_1)
self.assertNotContains(res, INVALID_CONTAINER_NAME_2)
@test.create_stubs({api.swift: ('swift_get_containers',
'swift_copy_object')})
def test_copy(self):
container_1 = self.containers.get(name=CONTAINER_NAME_1)
container_2 = self.containers.get(name=CONTAINER_NAME_2)
obj = self.objects.first()
ret = (self.containers.list(), False)
api.swift.swift_get_containers(IsA(http.HttpRequest)).AndReturn(ret)
api.swift.swift_copy_object(IsA(http.HttpRequest),
container_1.name,
obj.name,
container_2.name,
obj.name)
self.mox.ReplayAll()
formData = {'method': forms.CopyObject.__name__,
'new_container_name': container_2.name,
'new_object_name': obj.name,
'orig_container_name': container_1.name,
'orig_object_name': obj.name}
copy_url = reverse('horizon:project:containers:object_copy',
args=[container_1.name, obj.name])
res = self.client.post(copy_url, formData)
args = (utils.wrap_delimiter(container_2.name),)
index_url = reverse('horizon:project:containers:index', args=args)
self.assertRedirectsNoFollow(res, index_url)
@test.create_stubs({api.swift: ('swift_get_containers',
'swift_copy_object')})
def test_copy_get(self):
original_name = u"test.txt"
copy_name = u"test.copy.txt"
container = self.containers.first()
obj = self.objects.get(name=original_name)
ret = (self.containers.list(), False)
api.swift.swift_get_containers(IsA(http.HttpRequest)).AndReturn(ret)
self.mox.ReplayAll()
copy_url = reverse('horizon:project:containers:object_copy',
args=[container.name, obj.name])
res = self.client.get(copy_url)
# The copy's name must appear in initial data
pattern = ('<input id="id_new_object_name" value="%s" '
'name="new_object_name" type="text" '
'class="form-control" maxlength="255" />' % copy_name)
self.assertContains(res, pattern, html=True)
def test_get_copy_name(self):
self.assertEqual(views.CopyView.get_copy_name('test.txt'),
'test.copy.txt')
self.assertEqual(views.CopyView.get_copy_name('test'),
'test.copy')
@test.create_stubs({api.swift: ('swift_upload_object',)})
def test_update_with_file(self):
container = self.containers.first()
obj = self.objects.first()
OBJECT_DATA = 'objectData'
temp_file = tempfile.TemporaryFile()
temp_file.write(OBJECT_DATA)
temp_file.flush()
temp_file.seek(0)
api.swift.swift_upload_object(IsA(http.HttpRequest),
container.name,
obj.name,
IsA(InMemoryUploadedFile)).AndReturn(obj)
self.mox.ReplayAll()
update_url = reverse('horizon:project:containers:object_update',
args=[container.name, obj.name])
res = self.client.get(update_url)
self.assertTemplateUsed(res, 'project/containers/update.html')
self.assertContains(res, 'enctype="multipart/form-data"')
self._test_invalid_paths(res)
formData = {'method': forms.UpdateObject.__name__,
'container_name': container.name,
'name': obj.name,
'object_file': temp_file}
res = self.client.post(update_url, formData)
args = (utils.wrap_delimiter(container.name),)
index_url = reverse('horizon:project:containers:index', args=args)
self.assertRedirectsNoFollow(res, index_url)
@test.create_stubs({api.swift: ('swift_upload_object',)})
def test_update_without_file(self):
container = self.containers.first()
obj = self.objects.first()
self.mox.ReplayAll()
update_url = reverse('horizon:project:containers:object_update',
args=[container.name, obj.name])
res = self.client.get(update_url)
self.assertTemplateUsed(res, 'project/containers/update.html')
self.assertContains(res, 'enctype="multipart/form-data"')
self._test_invalid_paths(res)
formData = {'method': forms.UpdateObject.__name__,
'container_name': container.name,
'name': obj.name}
res = self.client.post(update_url, formData)
args = (utils.wrap_delimiter(container.name),)
index_url = reverse('horizon:project:containers:index', args=args)
self.assertRedirectsNoFollow(res, index_url)
@test.create_stubs({api.swift: ('swift_get_container', )})
def test_view_container(self):
for container in self.containers.list():
self.mox.ResetAll() # mandatory in a for loop
api.swift.swift_get_container(IsA(http.HttpRequest),
container.name,
with_data=False) \
.AndReturn(container)
self.mox.ReplayAll()
view_url = reverse('horizon:project:containers:container_detail',
args=[container.name])
res = self.client.get(view_url)
self.assertTemplateUsed(res,
'project/containers/container_detail.html')
self.assertContains(res, container.name, 1, 200)
self.assertNotContains(res, INVALID_CONTAINER_NAME_1)
self.assertNotContains(res, INVALID_CONTAINER_NAME_2)
@test.create_stubs({api.swift: ('swift_get_object', )})
def test_view_object(self):
for container in self.containers.list():
for obj in self.objects.list():
self.mox.ResetAll() # mandatory in a for loop
api.swift.swift_get_object(IsA(http.HttpRequest),
container.name,
obj.name,
with_data=False) \
.AndReturn(obj)
self.mox.ReplayAll()
view_url = reverse('horizon:project:containers:object_detail',
args=[container.name, obj.name])
res = self.client.get(view_url)
self.assertTemplateUsed(
res, 'project/containers/object_detail.html')
self.assertContains(res, obj.name, 1, 200)
self._test_invalid_paths(res)
def test_wrap_delimiter(self):
expected = {
'containerA': 'containerA/',
'containerB%': 'containerB%/', # no urlquote() should occur
'containerC/': 'containerC/', # already wrapped name
'containerD/objectA': 'containerD/objectA/'
}
for name, expected_name in expected.items():
self.assertEqual(utils.wrap_delimiter(name), expected_name)
|
hip-odoo/odoo | refs/heads/10.0 | addons/mail/tests/test_mail_gateway.py | 8 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import socket
from odoo.addons.mail.tests.common import TestMail
from odoo.tools import mute_logger
MAIL_TEMPLATE = """Return-Path: <whatever-2a840@postmaster.twitter.com>
To: {to}
cc: {cc}
Received: by mail1.openerp.com (Postfix, from userid 10002)
id 5DF9ABFB2A; Fri, 10 Aug 2012 16:16:39 +0200 (CEST)
From: {email_from}
Subject: {subject}
MIME-Version: 1.0
Content-Type: multipart/alternative;
boundary="----=_Part_4200734_24778174.1344608186754"
Date: Fri, 10 Aug 2012 14:16:26 +0000
Message-ID: {msg_id}
{extra}
------=_Part_4200734_24778174.1344608186754
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: quoted-printable
Please call me as soon as possible this afternoon!
--
Sylvie
------=_Part_4200734_24778174.1344608186754
Content-Type: text/html; charset=utf-8
Content-Transfer-Encoding: quoted-printable
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>=20
<meta http-equiv=3D"Content-Type" content=3D"text/html; charset=3Dutf-8" />
</head>=20
<body style=3D"margin: 0; padding: 0; background: #ffffff;-webkit-text-size-adjust: 100%;">=20
<p>Please call me as soon as possible this afternoon!</p>
<p>--<br/>
Sylvie
<p>
</body>
</html>
------=_Part_4200734_24778174.1344608186754--
"""
MAIL_TEMPLATE_PLAINTEXT = """Return-Path: <whatever-2a840@postmaster.twitter.com>
To: {to}
Received: by mail1.openerp.com (Postfix, from userid 10002)
id 5DF9ABFB2A; Fri, 10 Aug 2012 16:16:39 +0200 (CEST)
From: Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>
Subject: {subject}
MIME-Version: 1.0
Content-Type: text/plain
Date: Fri, 10 Aug 2012 14:16:26 +0000
Message-ID: {msg_id}
{extra}
Please call me as soon as possible this afternoon!
--
Sylvie
"""
MAIL_MULTIPART_MIXED = """Return-Path: <ignasse.carambar@gmail.com>
X-Original-To: raoul@grosbedon.fr
Delivered-To: raoul@grosbedon.fr
Received: by mail1.grosbedon.com (Postfix, from userid 10002)
id E8166BFACA; Fri, 23 Aug 2013 13:18:01 +0200 (CEST)
X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail1.grosbedon.com
X-Spam-Level:
X-Spam-Status: No, score=-2.6 required=5.0 tests=BAYES_00,FREEMAIL_FROM,
HTML_MESSAGE,RCVD_IN_DNSWL_LOW autolearn=unavailable version=3.3.1
Received: from mail-ie0-f173.google.com (mail-ie0-f173.google.com [209.85.223.173])
by mail1.grosbedon.com (Postfix) with ESMTPS id 9BBD7BFAAA
for <raoul@openerp.fr>; Fri, 23 Aug 2013 13:17:55 +0200 (CEST)
Received: by mail-ie0-f173.google.com with SMTP id qd12so575130ieb.4
for <raoul@grosbedon.fr>; Fri, 23 Aug 2013 04:17:54 -0700 (PDT)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
d=gmail.com; s=20120113;
h=mime-version:date:message-id:subject:from:to:content-type;
bh=dMNHV52EC7GAa7+9a9tqwT9joy9z+1950J/3A6/M/hU=;
b=DGuv0VjegdSrEe36ADC8XZ9Inrb3Iu+3/52Bm+caltddXFH9yewTr0JkCRQaJgMwG9
qXTQgP8qu/VFEbCh6scu5ZgU1hknzlNCYr3LT+Ih7dAZVUEHUJdwjzUU1LFV95G2RaCd
/Lwff6CibuUvrA+0CBO7IRKW0Sn5j0mukYu8dbaKsm6ou6HqS8Nuj85fcXJfHSHp6Y9u
dmE8jBh3fHCHF/nAvU+8aBNSIzl1FGfiBYb2jCoapIuVFitKR4q5cuoodpkH9XqqtOdH
DG+YjEyi8L7uvdOfN16eMr7hfUkQei1yQgvGu9/5kXoHg9+Gx6VsZIycn4zoaXTV3Nhn
nu4g==
MIME-Version: 1.0
X-Received: by 10.50.124.65 with SMTP id mg1mr1144467igb.43.1377256674216;
Fri, 23 Aug 2013 04:17:54 -0700 (PDT)
Received: by 10.43.99.71 with HTTP; Fri, 23 Aug 2013 04:17:54 -0700 (PDT)
Date: Fri, 23 Aug 2013 13:17:54 +0200
Message-ID: <CAP76m_V4BY2F7DWHzwfjteyhW8L2LJswVshtmtVym+LUJ=rASQ@mail.gmail.com>
Subject: Test mail multipart/mixed
From: =?ISO-8859-1?Q?Raoul Grosbedon=E9e?= <ignasse.carambar@gmail.com>
To: Followers of ASUSTeK-Joseph-Walters <raoul@grosbedon.fr>
Content-Type: multipart/mixed; boundary=089e01536c4ed4d17204e49b8e96
--089e01536c4ed4d17204e49b8e96
Content-Type: multipart/alternative; boundary=089e01536c4ed4d16d04e49b8e94
--089e01536c4ed4d16d04e49b8e94
Content-Type: text/plain; charset=ISO-8859-1
Should create a multipart/mixed: from gmail, *bold*, with attachment.
--
Marcel Boitempoils.
--089e01536c4ed4d16d04e49b8e94
Content-Type: text/html; charset=ISO-8859-1
<div dir="ltr">Should create a multipart/mixed: from gmail, <b>bold</b>, with attachment.<br clear="all"><div><br></div>-- <br>Marcel Boitempoils.</div>
--089e01536c4ed4d16d04e49b8e94--
--089e01536c4ed4d17204e49b8e96
Content-Type: text/plain; charset=US-ASCII; name="test.txt"
Content-Disposition: attachment; filename="test.txt"
Content-Transfer-Encoding: base64
X-Attachment-Id: f_hkpb27k00
dGVzdAo=
--089e01536c4ed4d17204e49b8e96--"""
MAIL_MULTIPART_MIXED_TWO = """X-Original-To: raoul@grosbedon.fr
Delivered-To: raoul@grosbedon.fr
Received: by mail1.grosbedon.com (Postfix, from userid 10002)
id E8166BFACA; Fri, 23 Aug 2013 13:18:01 +0200 (CEST)
From: "Bruce Wayne" <bruce@wayneenterprises.com>
Content-Type: multipart/alternative;
boundary="Apple-Mail=_9331E12B-8BD2-4EC7-B53E-01F3FBEC9227"
Message-Id: <6BB1FAB2-2104-438E-9447-07AE2C8C4A92@sexample.com>
Mime-Version: 1.0 (Mac OS X Mail 7.3 \(1878.6\))
--Apple-Mail=_9331E12B-8BD2-4EC7-B53E-01F3FBEC9227
Content-Transfer-Encoding: 7bit
Content-Type: text/plain;
charset=us-ascii
First and second part
--Apple-Mail=_9331E12B-8BD2-4EC7-B53E-01F3FBEC9227
Content-Type: multipart/mixed;
boundary="Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F"
--Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F
Content-Transfer-Encoding: 7bit
Content-Type: text/html;
charset=us-ascii
<html><head></head><body>First part</body></html>
--Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F
Content-Disposition: inline;
filename=thetruth.pdf
Content-Type: application/pdf;
name="thetruth.pdf"
Content-Transfer-Encoding: base64
SSBhbSB0aGUgQmF0TWFuCg==
--Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F
Content-Transfer-Encoding: 7bit
Content-Type: text/html;
charset=us-ascii
<html><head></head><body>Second part</body></html>
--Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F--
--Apple-Mail=_9331E12B-8BD2-4EC7-B53E-01F3FBEC9227--
"""
MAIL_MULTIPART_IMAGE = """X-Original-To: raoul@example.com
Delivered-To: micheline@example.com
Received: by mail1.example.com (Postfix, from userid 99999)
id 9DFB7BF509; Thu, 17 Dec 2015 15:22:56 +0100 (CET)
X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on mail1.example.com
X-Spam-Level: *
X-Spam-Status: No, score=1.1 required=5.0 tests=FREEMAIL_FROM,
HTML_IMAGE_ONLY_08,HTML_MESSAGE,RCVD_IN_DNSWL_LOW,RCVD_IN_MSPIKE_H3,
RCVD_IN_MSPIKE_WL,T_DKIM_INVALID autolearn=no autolearn_force=no version=3.4.0
Received: from mail-lf0-f44.example.com (mail-lf0-f44.example.com [209.85.215.44])
by mail1.example.com (Postfix) with ESMTPS id 1D80DBF509
for <micheline@example.com>; Thu, 17 Dec 2015 15:22:56 +0100 (CET)
Authentication-Results: mail1.example.com; dkim=pass
reason="2048-bit key; unprotected key"
header.d=example.com header.i=@example.com header.b=kUkTIIlt;
dkim-adsp=pass; dkim-atps=neutral
Received: by mail-lf0-f44.example.com with SMTP id z124so47959461lfa.3
for <micheline@example.com>; Thu, 17 Dec 2015 06:22:56 -0800 (PST)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
d=example.com; s=20120113;
h=mime-version:date:message-id:subject:from:to:content-type;
bh=GdrEuMrz6vxo/Z/F+mJVho/1wSe6hbxLx2SsP8tihzw=;
b=kUkTIIlt6fe4dftKHPNBkdHU2rO052o684R0e2bqH7roGUQFb78scYE+kqX0wo1zlk
zhKPVBR1TqTsYlqcHu+D3aUzai7L/Q5m40sSGn7uYGkZJ6m1TwrWNqVIgTZibarqvy94
NWhrjjK9gqd8segQdSjCgTipNSZME4bJCzPyBg/D5mqe07FPBJBGoF9SmIzEBhYeqLj1
GrXjb/D8J11aOyzmVvyt+bT+oeLUJI8E7qO5g2eQkMncyu+TyIXaRofOOBA14NhQ+0nS
w5O9rzzqkKuJEG4U2TJ2Vi2nl2tHJW2QPfTtFgcCzGxQ0+5n88OVlbGTLnhEIJ/SYpem
O5EA==
MIME-Version: 1.0
X-Received: by 10.25.167.197 with SMTP id q188mr22222517lfe.129.1450362175493;
Thu, 17 Dec 2015 06:22:55 -0800 (PST)
Received: by 10.25.209.145 with HTTP; Thu, 17 Dec 2015 06:22:55 -0800 (PST)
Date: Thu, 17 Dec 2015 15:22:55 +0100
Message-ID: <CAP76m_UB=aLqWEFccnq86AhkpwRB3aZoGL9vMffX7co3YEro_A@mail.gmail.com>
Subject: {subject}
From: =?UTF-8?Q?Thibault_Delavall=C3=A9e?= <raoul@example.com>
To: {to}
Content-Type: multipart/related; boundary=001a11416b9e9b229a05272b7052
--001a11416b9e9b229a05272b7052
Content-Type: multipart/alternative; boundary=001a11416b9e9b229805272b7051
--001a11416b9e9b229805272b7051
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: quoted-printable
Premi=C3=A8re image, orang=C3=A9e.
[image: Inline image 1]
Seconde image, rosa=C3=A7=C3=A9e.
[image: Inline image 2]
Troisi=C3=A8me image, verte!=C2=B5
[image: Inline image 3]
J'esp=C3=A8re que tout se passera bien.
--=20
Thibault Delavall=C3=A9e
--001a11416b9e9b229805272b7051
Content-Type: text/html; charset=UTF-8
Content-Transfer-Encoding: quoted-printable
<div dir=3D"ltr"><div>Premi=C3=A8re image, orang=C3=A9e.</div><div><br></di=
v><div><img src=3D"cid:ii_151b519fc025fdd3" alt=3D"Inline image 1" width=3D=
"2" height=3D"2"><br></div><div><br></div><div>Seconde image, rosa=C3=A7=C3=
=A9e.</div><div><br></div><div><img src=3D"cid:ii_151b51a290ed6a91" alt=3D"=
Inline image 2" width=3D"2" height=3D"2"></div><div><br></div><div>Troisi=
=C3=A8me image, verte!=C2=B5</div><div><br></div><div><img src=3D"cid:ii_15=
1b51a37e5eb7a6" alt=3D"Inline image 3" width=3D"10" height=3D"10"><br></div=
><div><br></div><div>J'esp=C3=A8re que tout se passera bien.</div>-- <b=
r><div class=3D"gmail_signature">Thibault Delavall=C3=A9e</div>
</div>
--001a11416b9e9b229805272b7051--
--001a11416b9e9b229a05272b7052
Content-Type: image/gif; name="=?UTF-8?B?b3JhbmfDqWUuZ2lm?="
Content-Disposition: inline; filename="=?UTF-8?B?b3JhbmfDqWUuZ2lm?="
Content-Transfer-Encoding: base64
Content-ID: <ii_151b519fc025fdd3>
X-Attachment-Id: ii_151b519fc025fdd3
R0lGODdhAgACALMAAAAAAP///wAAAP//AP8AAP+AAAD/AAAAAAAA//8A/wAAAAAAAAAAAAAAAAAA
AAAAACwAAAAAAgACAAAEA7DIEgA7
--001a11416b9e9b229a05272b7052
Content-Type: image/gif; name="=?UTF-8?B?dmVydGUhwrUuZ2lm?="
Content-Disposition: inline; filename="=?UTF-8?B?dmVydGUhwrUuZ2lm?="
Content-Transfer-Encoding: base64
Content-ID: <ii_151b51a37e5eb7a6>
X-Attachment-Id: ii_151b51a37e5eb7a6
R0lGODlhCgAKALMAAAAAAIAAAACAAICAAAAAgIAAgACAgMDAwICAgP8AAAD/AP//AAAA//8A/wD/
/////ywAAAAACgAKAAAEClDJSau9OOvNe44AOw==
--001a11416b9e9b229a05272b7052
Content-Type: image/gif; name="=?UTF-8?B?cm9zYcOnw6llLmdpZg==?="
Content-Disposition: inline; filename="=?UTF-8?B?cm9zYcOnw6llLmdpZg==?="
Content-Transfer-Encoding: base64
Content-ID: <ii_151b51a290ed6a91>
X-Attachment-Id: ii_151b51a290ed6a91
R0lGODdhAgACALMAAAAAAP///wAAAP//AP8AAP+AAAD/AAAAAAAA//8A/wAAAP+AgAAAAAAAAAAA
AAAAACwAAAAAAgACAAAEA3DJFQA7
--001a11416b9e9b229a05272b7052--
"""
class TestMailgateway(TestMail):
def setUp(self):
super(TestMailgateway, self).setUp()
# groups@.. will cause the creation of new mail.channels
self.mail_channel_model = self.env['ir.model'].search([('model', '=', 'mail.channel')], limit=1)
self.alias = self.env['mail.alias'].create({
'alias_name': 'groups',
'alias_user_id': False,
'alias_model_id': self.mail_channel_model.id,
'alias_contact': 'everyone'})
# test@.. will cause the creation of new mail.test
self.mail_test_model = self.env['ir.model'].search([('model', '=', 'mail.test')], limit=1)
self.alias_2 = self.env['mail.alias'].create({
'alias_name': 'test',
'alias_user_id': False,
'alias_model_id': self.mail_test_model.id,
'alias_contact': 'everyone'})
# Set a first message on public group to test update and hierarchy
self.fake_email = self.env['mail.message'].create({
'model': 'mail.channel',
'res_id': self.group_public.id,
'subject': 'Public Discussion',
'message_type': 'email',
'author_id': self.partner_1.id,
'message_id': '<123456-openerp-%s-mail.channel@%s>' % (self.group_public.id, socket.gethostname()),
})
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_parse(self):
""" Test parsing of various scenarios of incoming emails """
res = self.env['mail.thread'].message_parse(MAIL_TEMPLATE_PLAINTEXT)
self.assertIn('Please call me as soon as possible this afternoon!',
res.get('body', ''),
'message_parse: missing text in text/plain body after parsing')
res = self.env['mail.thread'].message_parse(MAIL_TEMPLATE)
self.assertIn('<p>Please call me as soon as possible this afternoon!</p>',
res.get('body', ''),
'message_parse: missing html in multipart/alternative body after parsing')
res = self.env['mail.thread'].message_parse(MAIL_MULTIPART_MIXED)
self.assertNotIn('Should create a multipart/mixed: from gmail, *bold*, with attachment',
res.get('body', ''),
'message_parse: text version should not be in body after parsing multipart/mixed')
self.assertIn('<div dir="ltr">Should create a multipart/mixed: from gmail, <b>bold</b>, with attachment.<br clear="all"><div><br></div>',
res.get('body', ''),
'message_parse: html version should be in body after parsing multipart/mixed')
res = self.env['mail.thread'].message_parse(MAIL_MULTIPART_MIXED_TWO)
self.assertNotIn('First and second part',
res.get('body', ''),
'message_parse: text version should not be in body after parsing multipart/mixed')
self.assertIn('First part',
res.get('body', ''),
'message_parse: first part of the html version should be in body after parsing multipart/mixed')
self.assertIn('Second part',
res.get('body', ''),
'message_parse: second part of the html version should be in body after parsing multipart/mixed')
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_cid(self):
new_groups = self.format_and_process(MAIL_MULTIPART_IMAGE, subject='My Frogs', to='groups@example.com')
message = new_groups.message_ids[0]
for attachment in message.attachment_ids:
self.assertIn('/web/image/%s' % attachment.id, message.body)
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_alias_basic(self):
""" Incoming email on an alias creating a new record + message_new + message details """
new_groups = self.format_and_process(MAIL_TEMPLATE, subject='My Frogs', to='groups@example.com, other@gmail.com')
# Test: one group created by mailgateway administrator
self.assertEqual(len(new_groups), 1, 'message_process: a new mail.channel should have been created')
res = new_groups.get_metadata()[0].get('create_uid') or [None]
self.assertEqual(res[0], self.env.uid,
'message_process: group should have been created by uid as alias_user_id is False on the alias')
# Test: one message that is the incoming email
self.assertEqual(len(new_groups.message_ids), 1,
'message_process: newly created group should have the incoming email in message_ids')
msg = new_groups.message_ids[0]
self.assertEqual(msg.subject, 'My Frogs',
'message_process: newly created group should have the incoming email as first message')
self.assertIn('Please call me as soon as possible this afternoon!', msg.body,
'message_process: newly created group should have the incoming email as first message')
self.assertEqual(msg.message_type, 'email',
'message_process: newly created group should have an email as first message')
self.assertEqual(msg.subtype_id, self.env.ref('mail.mt_comment'),
'message_process: newly created group should not have a log first message but an email')
# Test: sent emails: no-one
self.assertEqual(len(self._mails), 0,
'message_process: should create emails without any follower added')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_alias_user_id(self):
""" Test alias ownership """
self.alias.write({'alias_user_id': self.user_employee.id})
new_groups = self.format_and_process(MAIL_TEMPLATE, to='groups@example.com, other@gmail.com')
# Test: one group created by mailgateway administrator
self.assertEqual(len(new_groups), 1, 'message_process: a new mail.channel should have been created')
res = new_groups.get_metadata()[0].get('create_uid') or [None]
self.assertEqual(res[0], self.user_employee.id,
'message_process: group should have been created by alias_user_id')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_email_email_from(self):
""" Incoming email: not recognized author: email_from, no author_id, no followers """
new_groups = self.format_and_process(MAIL_TEMPLATE, to='groups@example.com, other@gmail.com')
self.assertFalse(new_groups.message_ids[0].author_id,
'message_process: unrecognized email -> no author_id')
self.assertIn('test.sylvie.lelitre@agrolait.com', new_groups.message_ids[0].email_from,
'message_process: unrecognized email -> email_from')
self.assertEqual(len(new_groups.message_partner_ids), 0,
'message_process: newly created group should not have any follower')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_email_author(self):
""" Incoming email: recognized author: email_from, author_id, added as follower """
new_groups = self.format_and_process(MAIL_TEMPLATE, email_from='Valid Lelitre <valid.lelitre@agrolait.com>', to='groups@example.com, valid.other@gmail.com')
self.assertEqual(new_groups.message_ids[0].author_id, self.partner_1,
'message_process: recognized email -> author_id')
self.assertIn('Valid Lelitre <valid.lelitre@agrolait.com>', new_groups.message_ids[0].email_from,
'message_process: recognized email -> email_from')
# TODO : the author of a message post on mail.channel should not be added as follower
# FAIL ON recognized email -> added as follower')
# self.assertEqual(new_groups.message_partner_ids, self.partner_1,
# 'message_process: recognized email -> added as follower')
self.assertEqual(len(self._mails), 0,
'message_process: no bounce or notification email should be sent with follower = author')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models', 'odoo.addons.mail.models.mail_mail')
def test_message_process_alias_partners_bounce(self):
""" Incoming email from an unknown partner on a Partners only alias -> bounce """
self.alias.write({'alias_contact': 'partners'})
# Test: no group created, email bounced
new_groups = self.format_and_process(MAIL_TEMPLATE, subject='New Frogs', to='groups@example.com, other@gmail.com')
self.assertTrue(len(new_groups) == 0)
self.assertEqual(len(self._mails), 1,
'message_process: incoming email on Partners alias should send a bounce email')
self.assertIn('New Frogs', self._mails[0].get('subject'),
'message_process: bounce email on Partners alias should contain the original subject')
self.assertIn('whatever-2a840@postmaster.twitter.com', self._mails[0].get('email_to'),
'message_process: bounce email on Partners alias should go to Return-Path address')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models', 'odoo.addons.mail.models.mail_mail')
def test_message_process_alias_followers_bounce(self):
""" Incoming email from unknown partner / not follower partner on a Followers only alias -> bounce """
self.alias.write({
'alias_contact': 'followers',
'alias_parent_model_id': self.mail_channel_model.id,
'alias_parent_thread_id': self.group_pigs.id})
# Test: unknown on followers alias -> bounce
new_groups = self.format_and_process(MAIL_TEMPLATE, to='groups@example.com, other@gmail.com')
self.assertEqual(len(new_groups), 0, 'message_process: should have bounced')
self.assertEqual(len(self._mails), 1,
'message_process: incoming email on Followers alias should send a bounce email')
# Test: partner on followers alias -> bounce
self._init_mock_build_email()
new_groups = self.format_and_process(MAIL_TEMPLATE, email_from='Valid Lelitre <valid.lelitre@agrolait.com>', to='groups@example.com, other@gmail.com')
self.assertTrue(len(new_groups) == 0, 'message_process: should have bounced')
self.assertEqual(len(self._mails), 1,
'message_process: incoming email on Followers alias should send a bounce email')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_alias_partner(self):
""" Incoming email from a known partner on a Partners alias -> ok (+ test on alias.user_id) """
self.alias.write({'alias_contact': 'partners'})
new_groups = self.format_and_process(MAIL_TEMPLATE, email_from='Valid Lelitre <valid.lelitre@agrolait.com>', to='groups@example.com, valid.other@gmail.com')
# Test: one group created by alias user
self.assertEqual(len(new_groups), 1, 'message_process: a new mail.channel should have been created')
# Test: one message that is the incoming email
self.assertEqual(len(new_groups.message_ids), 1,
'message_process: newly created group should have the incoming email in message_ids')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_alias_followers(self):
""" Incoming email from a parent document follower on a Followers only alias -> ok """
self.alias.write({
'alias_contact': 'followers',
'alias_parent_model_id': self.mail_channel_model.id,
'alias_parent_thread_id': self.group_pigs.id})
self.group_pigs.message_subscribe(partner_ids=[self.partner_1.id])
new_groups = self.format_and_process(MAIL_TEMPLATE, email_from='Valid Lelitre <valid.lelitre@agrolait.com>', to='groups@example.com, other6@gmail.com')
# Test: one group created by Raoul (or Sylvie maybe, if we implement it)
self.assertEqual(len(new_groups), 1, 'message_process: a new mail.channel should have been created')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models', 'odoo.addons.mail.models.mail_mail')
def test_message_process_alias_update(self):
""" Incoming email update discussion + notification email """
self.alias.write({'alias_force_thread_id': self.group_public.id})
self.group_public.message_subscribe(partner_ids=[self.partner_1.id])
new_groups = self.format_and_process(
MAIL_TEMPLATE, email_from='valid.other@gmail.com',
msg_id='<1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>',
to='groups@example.com>', subject='Re: cats')
# Test: no new group + new message
self.assertEqual(len(new_groups), 0,
'message_process: reply on Frogs should not have created a new group with new subject')
self.assertEqual(len(self.group_public.message_ids), 2, 'message_process: group should contain one new message')
# Test: sent emails: 1 (Sylvie copy of the incoming email)
self.assertEqual(len(self._mails), 1,
'message_process: one email should have been generated')
self.assertIn('valid.lelitre@agrolait.com', self._mails[0].get('email_to')[0],
'message_process: email should be sent to Sylvie')
# TODO : the author of a message post on mail.channel should not be added as follower
# FAIL ON 'message_process: after reply, group should have 2 followers') ` AssertionError: res.partner(104,) != res.partner(104, 105) : message_process: after reply, group should have 2 followers
# Test: author (and not recipient) added as follower
# self.assertEqual(self.group_public.message_partner_ids, self.partner_1 | self.partner_2,
# 'message_process: after reply, group should have 2 followers')
# self.assertEqual(self.group_public.message_channel_ids, self.env['mail.channel'],
# 'message_process: after reply, group should have 2 followers (0 channels)')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_in_reply_to(self):
""" Incoming email using in-rely-to should go into the right destination even with a wrong destination """
self.format_and_process(
MAIL_TEMPLATE, email_from='valid.other@gmail.com',
msg_id='<1198923581.41972151344608186800.JavaMail.diff1@agrolait.com>',
to='erroneous@example.com>', subject='Re: news',
extra='In-Reply-To:\r\n\t%s\n' % self.fake_email.message_id)
self.assertEqual(len(self.group_public.message_ids), 2, 'message_process: group should contain one new message')
self.assertEqual(len(self.fake_email.child_ids), 1, 'message_process: the new message should be a child of the existing one')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_references(self):
""" Incoming email using references should go into the right destination even with a wrong destination """
self.format_and_process(
MAIL_TEMPLATE, to='erroneous@example.com',
extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id,
msg_id='<1198923581.41972151344608186800.JavaMail.4@agrolait.com>')
self.assertEqual(len(self.group_public.message_ids), 2, 'message_process: group should contain one new message')
self.assertEqual(len(self.fake_email.child_ids), 1, 'message_process: the new message should be a child of the existing one')
def test_message_process_references_external(self):
""" Incoming email being a reply to an external email processed by odoo should update thread accordingly """
new_message_id = '<ThisIsTooMuchFake.MonsterEmail.789@agrolait.com>'
self.fake_email.write({
'message_id': new_message_id
})
self.format_and_process(
MAIL_TEMPLATE, to='erroneous@example.com',
extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id,
msg_id='<1198923581.41972151344608186800.JavaMail.4@agrolait.com>')
self.assertEqual(len(self.group_public.message_ids), 2, 'message_process: group should contain one new message')
self.assertEqual(len(self.fake_email.child_ids), 1, 'message_process: the new message should be a child of the existing one')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_references_forward(self):
""" Incoming email using references but with alias forward should not go into references destination """
res_test = self.format_and_process(
MAIL_TEMPLATE, to='test@example.com',
subject='My Dear Forward',
extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id,
msg_id='<1198923581.41972151344608186800.JavaMail.4@agrolait.com>',
target_model='mail.test')
self.assertEqual(len(self.group_public.message_ids), 1, 'message_process: group should not contain new message')
self.assertEqual(len(self.fake_email.child_ids), 0, 'message_process: original email should not have any children')
self.assertEqual(res_test.name, 'My Dear Forward')
self.assertEqual(len(res_test.message_ids), 1)
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_references_forward_cc(self):
""" Incoming email using references but with alias forward should not go into references destination """
self.format_and_process(
MAIL_TEMPLATE, to='erroneous@example.com', cc='test@example.com',
subject='My Dear Forward',
extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id,
msg_id='<1198923581.41972151344608186800.JavaMail.4@agrolait.com>',
target_model='mail.test')
self.assertEqual(len(self.group_public.message_ids), 2, 'message_process: group should contain one new message')
self.assertEqual(len(self.fake_email.child_ids), 1, 'message_process: the new message should be a child of the existing one')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_model_res_id(self):
""" Incoming email with ref holding model / res_id but that does not match any message in the thread: must raise since OpenERP saas-3 """
self.assertRaises(ValueError,
self.format_and_process,
MAIL_TEMPLATE, email_from='valid.lelitre@agrolait.com',
to='noone@example.com', subject='spam',
extra='In-Reply-To: <12321321-openerp-%d-mail.channel@%s>' % (self.group_public.id, socket.gethostname()),
msg_id='<1198923581.41972151344608186802.JavaMail.diff1@agrolait.com>')
# when 6.1 messages are present, compat mode is available
# Odoo 10 update: compat mode has been removed and should not work anymore
self.fake_email.write({'message_id': False})
# Do: compat mode accepts partial-matching emails
self.assertRaises(
ValueError,
self.format_and_process,
MAIL_TEMPLATE, email_from='other5@gmail.com',
msg_id='<1.2.JavaMail.new@agrolait.com>',
to='noone@example.com>', subject='spam',
extra='In-Reply-To: <12321321-openerp-%d-mail.channel@%s>' % (self.group_public.id, socket.gethostname()))
# 3''. 6.1 compat mode should not work if hostname does not match!
# Odoo 10 update: compat mode has been removed and should not work anymore and does not depend from hostname
self.assertRaises(ValueError,
self.format_and_process,
MAIL_TEMPLATE, email_from='other5@gmail.com',
msg_id='<1.3.JavaMail.new@agrolait.com>',
to='noone@example.com>', subject='spam',
extra='In-Reply-To: <12321321-openerp-%d-mail.channel@neighbor.com>' % self.group_public.id)
# Test created messages
self.assertEqual(len(self.group_public.message_ids), 1)
self.assertEqual(len(self.group_public.message_ids[0].child_ids), 0)
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_duplicate(self):
""" Duplicate emails (same message_id) are not processed """
self.alias.write({'alias_force_thread_id': self.group_public.id,})
# Post a base message
frog_groups = self.format_and_process(
MAIL_TEMPLATE, email_from='valid.other@gmail.com', subject='Re: super cats',
msg_id='<1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>')
# Do: due to some issue, same email goes back into the mailgateway
frog_groups = self.format_and_process(
MAIL_TEMPLATE, email_from='other4@gmail.com', subject='Re: news',
msg_id='<1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>',
extra='In-Reply-To: <1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>\n')
# Test: no group 'Re: news' created, still only 1 Frogs group
self.assertEqual(len(frog_groups), 0,
'message_process: reply on Frogs should not have created a new group with new subject')
# Test: no new message
self.assertEqual(len(self.group_public.message_ids), 2, 'message_process: message with already existing message_id should not have been duplicated')
# Test: message_id is still unique
no_of_msg = self.env['mail.message'].search_count([('message_id', 'ilike', '<1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>')])
self.assertEqual(no_of_msg, 1,
'message_process: message with already existing message_id should not have been duplicated')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_partner_find(self):
""" Finding the partner based on email, based on partner / user / follower """
from_1 = self.env['res.partner'].create({'name': 'A', 'email': 'from.test@example.com'})
self.format_and_process(MAIL_TEMPLATE, to='public@example.com', msg_id='<1>', email_from='Brice Denisse <from.test@example.com>')
self.assertEqual(self.group_public.message_ids[0].author_id, from_1, 'message_process: email_from -> author_id wrong')
self.group_public.message_unsubscribe([from_1.id])
from_2 = self.env['res.users'].with_context({'no_reset_password': True}).create({'name': 'B', 'login': 'B', 'email': 'from.test@example.com'})
self.format_and_process(MAIL_TEMPLATE, to='public@example.com', msg_id='<2>', email_from='Brice Denisse <from.test@example.com>')
self.assertEqual(self.group_public.message_ids[0].author_id, from_2.partner_id, 'message_process: email_from -> author_id wrong')
self.group_public.message_unsubscribe([from_2.partner_id.id])
from_3 = self.env['res.partner'].create({'name': 'C', 'email': 'from.test@example.com'})
self.group_public.message_subscribe([from_3.id])
self.format_and_process(MAIL_TEMPLATE, to='public@example.com', msg_id='<3>', email_from='Brice Denisse <from.test@example.com>')
self.assertEqual(self.group_public.message_ids[0].author_id, from_3, 'message_process: email_from -> author_id wrong')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_crash_wrong_model(self):
""" Incoming email with model that does not accepts incoming emails must raise """
self.assertRaises(ValueError,
self.format_and_process,
MAIL_TEMPLATE,
to='noone@example.com', subject='spam', extra='', model='res.country',
msg_id='<1198923581.41972151344608186760.JavaMail.new4@agrolait.com>')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_crash_no_data(self):
""" Incoming email without model and without alias must raise """
self.assertRaises(ValueError,
self.format_and_process,
MAIL_TEMPLATE,
to='noone@example.com', subject='spam', extra='',
msg_id='<1198923581.41972151344608186760.JavaMail.new5@agrolait.com>')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_fallback(self):
""" Incoming email with model that accepting incoming emails as fallback """
frog_groups = self.format_and_process(
MAIL_TEMPLATE, to='noone@example.com', subject='Spammy', extra='', model='mail.channel',
msg_id='<1198923581.41972151344608186760.JavaMail.new6@agrolait.com>')
self.assertEqual(len(frog_groups), 1,
'message_process: erroneous email but with a fallback model should have created a new mail.channel')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_plain_text(self):
""" Incoming email in plaintext should be stored as html """
frog_groups = self.format_and_process(
MAIL_TEMPLATE_PLAINTEXT, to='groups@example.com', subject='Frogs Return', extra='',
msg_id='<deadcafe.1337@smtp.agrolait.com>')
self.assertEqual(len(frog_groups), 1, 'message_process: a new mail.channel should have been created')
msg = frog_groups.message_ids[0]
# signature recognition -> Sylvie should be in a span
self.assertIn('<pre>\nPlease call me as soon as possible this afternoon!\n<span data-o-mail-quote="1">\n--\nSylvie\n</span></pre>', msg.body,
'message_process: plaintext incoming email incorrectly parsed')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models', 'odoo.addons.mail.models.mail_mail')
def test_private_discussion(self):
""" Testing private discussion between partners. """
msg1_pids = [self.env.user.partner_id.id, self.partner_1.id]
# Do: Raoul writes to Bert and Administrator, with a thread_model in context that should not be taken into account
msg1 = self.env['mail.thread'].with_context({
'thread_model': 'mail.channel'
}).sudo(self.user_employee).message_post(partner_ids=msg1_pids, subtype='mail.mt_comment')
# Test: message recipients
msg = self.env['mail.message'].browse(msg1.id)
self.assertEqual(msg.partner_ids, self.env.user.partner_id | self.partner_1,
'message_post: private discussion: incorrect recipients')
self.assertEqual(msg.model, False,
'message_post: private discussion: context key "thread_model" not correctly ignored when having no res_id')
# Test: message-id
self.assertIn('openerp-private', msg.message_id, 'message_post: private discussion: message-id should contain the private keyword')
# Do: Bert replies through mailgateway (is a customer)
self.format_and_process(
MAIL_TEMPLATE, to='not_important@mydomain.com', email_from='valid.lelitre@agrolait.com',
extra='In-Reply-To: %s' % msg.message_id, msg_id='<test30.JavaMail.0@agrolait.com>')
# Test: last mail_message created
msg2 = self.env['mail.message'].search([], limit=1)
# Test: message recipients
self.assertEqual(msg2.author_id, self.partner_1,
'message_post: private discussion: wrong author through mailgateway based on email')
self.assertEqual(msg2.partner_ids, self.user_employee.partner_id | self.env.user.partner_id,
'message_post: private discussion: incorrect recipients when replying')
# Do: Bert replies through chatter (is a customer)
msg3 = self.env['mail.thread'].message_post(author_id=self.partner_1.id, parent_id=msg1.id, subtype='mail.mt_comment')
# Test: message recipients
msg = self.env['mail.message'].browse(msg3.id)
self.assertEqual(msg.partner_ids, self.user_employee.partner_id | self.env.user.partner_id,
'message_post: private discussion: incorrect recipients when replying')
self.assertEqual(msg.needaction_partner_ids, self.user_employee.partner_id | self.env.user.partner_id,
'message_post: private discussion: incorrect notified recipients when replying')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models', 'odoo.addons.mail.models.mail_mail')
def test_forward_parent_id(self):
msg = self.group_pigs.sudo(self.user_employee).message_post(no_auto_thread=True, subtype='mail.mt_comment')
self.assertNotIn(msg.model, msg.message_id)
self.assertNotIn('-%d-' % msg.res_id, msg.message_id)
self.assertIn('reply_to', msg.message_id)
# forward it to a new thread AND an existing thread
fw_msg_id = '<THIS.IS.A.FW.MESSAGE.1@bert.fr>'
fw_message = MAIL_TEMPLATE.format(to='groups@example.com',
cc='',
subject='FW: Re: 1',
email_from='b.t@example.com',
extra='In-Reply-To: %s' % msg.message_id,
msg_id=fw_msg_id)
self.env['mail.thread'].message_process(None, fw_message)
msg_fw = self.env['mail.message'].search([('message_id', '=', fw_msg_id)])
self.assertEqual(len(msg_fw), 1)
channel = self.env['mail.channel'].search([('name', "=", msg_fw.subject)])
self.assertEqual(len(channel), 1)
self.assertEqual(msg_fw.model, 'mail.channel')
self.assertFalse(msg_fw.parent_id)
self.assertTrue(msg_fw.res_id == channel.id)
fw_msg_id = '<THIS.IS.A.FW.MESSAGE.2@bert.fr>'
fw_message = MAIL_TEMPLATE.format(to='public@example.com',
cc='',
subject='FW: Re: 2',
email_from='b.t@example.com',
extra='In-Reply-To: %s' % msg.message_id,
msg_id=fw_msg_id)
self.env['mail.thread'].message_process(None, fw_message)
msg_fw = self.env['mail.message'].search([('message_id', '=', fw_msg_id)])
self.assertEqual(len(msg_fw), 1)
channel = self.env['mail.channel'].search([('name', "=", msg_fw.subject)])
self.assertEqual(len(channel), 0)
self.assertEqual(msg_fw.model, 'mail.channel')
self.assertFalse(msg_fw.parent_id)
self.assertTrue(msg_fw.res_id == self.group_public.id)
|
youprofit/zato | refs/heads/master | code/zato-web-admin/src/zato/admin/web/forms/security/wss.py | 6 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Django
from django import forms
class CreateForm(forms.Form):
id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:90%"}))
is_active = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={'checked':'checked'}))
username = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:90%"}))
reject_empty_nonce_creat = forms.BooleanField(widget=forms.CheckboxInput(attrs={'checked':'checked'}))
reject_stale_tokens = forms.BooleanField(widget=forms.CheckboxInput(attrs={'checked':'checked'}))
reject_expiry_limit = forms.IntegerField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:20%"}))
nonce_freshness_time = forms.IntegerField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:20%"}))
class EditForm(CreateForm):
is_active = forms.BooleanField(required=False, widget=forms.CheckboxInput())
reject_empty_nonce_creat = forms.BooleanField(widget=forms.CheckboxInput())
reject_stale_tokens = forms.BooleanField(widget=forms.CheckboxInput())
|
EDUlib/edx-platform | refs/heads/master | lms/djangoapps/verify_student/tests/test_utils.py | 4 | """
Tests for verify_student utility functions.
"""
import unittest
from datetime import timedelta
from unittest import mock
from unittest.mock import patch
import ddt
from django.conf import settings
from django.utils import timezone
from pytest import mark
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.verify_student.models import ManualVerification, SoftwareSecurePhotoVerification, SSOVerification
from lms.djangoapps.verify_student.utils import (
most_recent_verification,
submit_request_to_ss,
verification_for_datetime
)
FAKE_SETTINGS = {
"DAYS_GOOD_FOR": 10,
}
@ddt.ddt
@patch.dict(settings.VERIFY_STUDENT, FAKE_SETTINGS)
@mark.django_db
class TestVerifyStudentUtils(unittest.TestCase):
"""
Tests for utility functions in verify_student.
"""
def test_verification_for_datetime(self):
user = UserFactory.create()
now = timezone.now()
# No attempts in the query set, so should return None
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = verification_for_datetime(now, query)
assert result is None
# Should also return None if no deadline specified
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = verification_for_datetime(None, query)
assert result is None
# Make an attempt
attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Before the created date, should get no results
before = attempt.created_at - timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = verification_for_datetime(before, query)
assert result is None
# Immediately after the created date, should get the attempt
after_created = attempt.created_at + timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = verification_for_datetime(after_created, query)
assert result == attempt
# If no deadline specified, should return first available
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = verification_for_datetime(None, query)
assert result == attempt
# Immediately before the expiration date, should get the attempt
expiration = attempt.expiration_datetime + timedelta(days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"])
before_expiration = expiration - timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = verification_for_datetime(before_expiration, query)
assert result == attempt
# Immediately after the expiration date, should not get the attempt
attempt.expiration_date = now - timedelta(seconds=1)
attempt.save()
after = now + timedelta(days=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = verification_for_datetime(after, query)
assert result is None
# Create a second attempt in the same window
second_attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Now we should get the newer attempt
deadline = second_attempt.created_at + timedelta(days=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = verification_for_datetime(deadline, query)
assert result == second_attempt
@ddt.data(
(False, False, False, None, None),
(True, False, False, None, 'photo'),
(False, True, False, None, 'sso'),
(False, False, True, None, 'manual'),
(True, True, True, 'photo', 'sso'),
(True, True, True, 'sso', 'photo'),
(True, True, True, 'manual', 'photo')
)
@ddt.unpack
def test_most_recent_verification(
self,
create_photo_verification,
create_sso_verification,
create_manual_verification,
first_verification,
expected_verification):
user = UserFactory.create()
photo_verification = None
sso_verification = None
manual_verification = None
if not first_verification:
if create_photo_verification:
photo_verification = SoftwareSecurePhotoVerification.objects.create(user=user)
if create_sso_verification:
sso_verification = SSOVerification.objects.create(user=user)
if create_manual_verification:
manual_verification = ManualVerification.objects.create(user=user)
elif first_verification == 'photo':
photo_verification = SoftwareSecurePhotoVerification.objects.create(user=user)
sso_verification = SSOVerification.objects.create(user=user)
elif first_verification == 'sso':
sso_verification = SSOVerification.objects.create(user=user)
photo_verification = SoftwareSecurePhotoVerification.objects.create(user=user)
else:
manual_verification = ManualVerification.objects.create(user=user)
photo_verification = SoftwareSecurePhotoVerification.objects.create(user=user)
most_recent = most_recent_verification(
SoftwareSecurePhotoVerification.objects.all(),
SSOVerification.objects.all(),
ManualVerification.objects.all(),
'created_at'
)
if not expected_verification:
assert most_recent is None
elif expected_verification == 'photo':
assert most_recent == photo_verification
elif expected_verification == 'sso':
assert most_recent == sso_verification
else:
assert most_recent == manual_verification
@mock.patch('lms.djangoapps.verify_student.utils.log')
@mock.patch(
'lms.djangoapps.verify_student.tasks.send_request_to_ss_for_user.delay', mock.Mock(side_effect=Exception('error')) # lint-amnesty, pylint: disable=line-too-long
)
def test_submit_request_to_ss(self, mock_log):
"""Tests that we log appropriate information when celery task creation fails."""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
attempt.mark_ready()
submit_request_to_ss(user_verification=attempt, copy_id_photo_from=None)
mock_log.error.assert_called_with(
"Software Secure submit request %r failed, result: %s",
user.username,
'error'
)
assert attempt.status == SoftwareSecurePhotoVerification.STATUS.must_retry
|
tomaslu/map_editor | refs/heads/master | lib/svg.py | 1 | '''
Created on Jan 5, 2014
'''
from lxml import etree
import subprocess
class SVG(object):
def __init__(self, svg_path):
self.svg_path = svg_path
with open(self.svg_path, 'r') as f:
content = f.read()
try:
self.tree = etree.fromstring(content)
self.width = int(float(self.tree.get('width')))
self.height = int(float(self.tree.get('height')))
except Exception as e:
print(e)
def convert(self, new_file, width, height=None):
if not height:
height = self.height*width/self.width
subprocess.call(['inkscape', '-z', '-e', new_file,
'-w {}'.format(width), '-h {}'.format(height),
self.svg_path])
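# Minimal usage sketch (file names are illustrative, and inkscape must be on
# the PATH since convert() shells out to it):
#   svg = SVG('images/marker.svg')
#   svg.convert('images/marker.png', 64)  # height scales proportionally when omitted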
|
msfrank/terane | refs/heads/master | terane/inputs/__init__.py | 1 | # Copyright 2010,2011,2012 Michael Frank <msfrank@syntaxjockey.com>
#
# This file is part of Terane.
#
# Terane is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Terane is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Terane. If not, see <http://www.gnu.org/licenses/>.
from twisted.application.service import IService, Service
from terane.plugins import ILoadable
from terane.signals import Signal
from terane.bier.event import Event
class IInput(IService, ILoadable):
def configure(section):
"Configure the input."
def getContract():
"Return a Contract describing the fields which the Input emits."
def getDispatcher():
"Return an Dispatcher which the input uses to signal new events."
class Input(Service):
"""
The Input base implementation.
"""
def __init__(self, plugin, name, eventfactory):
self.plugin = plugin
self.name = name
def configure(self, section):
pass
def startService(self):
Service.startService(self)
def stopService(self):
return Service.stopService(self)
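# A concrete input would subclass Input and fill in the IInput contract,
# for example (hypothetical sketch, not part of this module):
#   class FileInput(Input):
#       def getContract(self):
#           ...  # return a Contract describing the fields this input emits
#       def getDispatcher(self):
#           ...  # return the dispatcher used to signal new events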
|
hsaputra/tensorflow | refs/heads/master | tensorflow/python/keras/preprocessing/sequence/__init__.py | 70 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for sequence data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.preprocessing.sequence import make_sampling_table
from tensorflow.python.keras._impl.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras._impl.keras.preprocessing.sequence import skipgrams
del absolute_import
del division
del print_function
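# Example (illustrative): with the default 'pre' padding,
#   pad_sequences([[1, 2], [3, 4, 5]], maxlen=3)
# returns [[0, 1, 2], [3, 4, 5]] as a numpy array of dtype int32.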
|
armink/rt-thread | refs/heads/master | tools/wizard.py | 16 | #! /usr/bin/env python
#coding=utf-8
#
# File : wizard.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
"""
wizard.py - a script to generate SConscript in RT-Thread RTOS.
`wizard --component name' to generate SConscript for name component.
`wizard --bridge' to generate SConscript as a bridge to connect each
SConscript script file of sub-directory.
"""
import sys
SConscript_com = '''# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.cpp')
CPPPATH = [cwd]
group = DefineGroup('COMPONENT_NAME', src, depend = [''], CPPPATH = CPPPATH)
Return('group')
'''
SConscript_bridge = '''# RT-Thread building script for bridge
import os
from building import *
cwd = GetCurrentDir()
objs = []
list = os.listdir(cwd)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
Return('objs')
'''
def usage():
print('wizard --component name')
print('wizard --bridge')
def gen_component(name):
print('generate SConscript for ' + name)
text = SConscript_com.replace('COMPONENT_NAME', name)
f = open('SConscript', 'w')
f.write(text)
f.close()
def gen_bridge():
print('generate SConscript for bridge')
f = open('SConscript', 'w')
f.write(SConscript_bridge)
f.close()
if __name__ == '__main__':
if len(sys.argv) == 1:
usage()
sys.exit(2)
if sys.argv[1] == '--component':
gen_component(sys.argv[2])
elif sys.argv[1] == '--bridge':
gen_bridge()
else:
usage()
|
andyclymer/ControlBoard | refs/heads/master | Examples/ExampleScripts/SetZoom.py | 1 | import vanilla
from mojo.events import addObserver, removeObserver
from mojo.UI import CurrentGlyphWindow
class ZoomWindow:
def __init__(self):
"""
ControlBoard
"Set Zoom" demo
Use a potentiometer to set the zoom scale of the CurrentGlyphWindow.
"""
self.w = vanilla.Window((100, 100), "Zoom")
self.w.bind("close", self.windowClosed)
self.w.open()
# Add an observer to watch for a component's state to change
addObserver(self, "controlChanged", "ControlBoardInput")
self.controlName = "Zoom"
def controlChanged(self, info):
# Check that the control that changed has the name we're looking for:
if info["name"] == self.controlName:
# If there's a glyph window open
w = CurrentGlyphWindow()
if w:
# Set the zoom scale of the window.
# Scale the incoming "value" of the control to somewhere between 0.15% and 3000%
scale = (info["value"] * 30) + 0.15
w.setGlyphViewScale(scale)
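# For example, a potentiometer reading of 0.5 maps to
# scale = 0.5 * 30 + 0.15 = 15.15, i.e. roughly 1515% zoom.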
def windowClosed(self, sender):
removeObserver(self, "ControlBoardInput")
ZoomWindow() |
vipul-sharma20/oh-mainline | refs/heads/master | vendor/packages/Django/tests/regressiontests/context_processors/urls.py | 150 | from __future__ import absolute_import
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
(r'^request_attrs/$', views.request_processor),
)
|
evaautomation/glibc-linaro | refs/heads/master | localedata/unicode-gen/utf8_compatibility.py | 4 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <http://www.gnu.org/licenses/>.
'''
This script is useful for checking the backward compatibility of a newly
generated UTF-8 file from the utf8_gen.py script.
To see how this script is used, call it with the “-h” option:
$ ./utf8_compatibility.py -h
… prints usage message …
'''
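# A typical invocation (file names are illustrative) looks like:
#   ./utf8_compatibility.py -o UTF-8.old -n UTF-8.new -m -c -a
# which reports removed, changed and added characters between the two files.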
import sys
import re
import argparse
import unicode_utils
def create_charmap_dictionary(file_name):
'''Create a dictionary for all code points found in the CHARMAP
section of a file
'''
with open(file_name, mode='r') as utf8_file:
charmap_dictionary = {}
for line in utf8_file:
if line.startswith('CHARMAP'):
break
for line in utf8_file:
if line.startswith('END CHARMAP'):
return charmap_dictionary
if line.startswith('%'):
continue
match = re.match(
r'^<U(?P<codepoint1>[0-9A-F]{4,8})>'
+r'(:?\.\.<U(?P<codepoint2>[0-9-A-F]{4,8})>)?'
+r'\s+(?P<hexutf8>(/x[0-9a-f]{2}){1,4})',
line)
if not match:
continue
codepoint1 = match.group('codepoint1')
codepoint2 = match.group('codepoint2')
if not codepoint2:
codepoint2 = codepoint1
for i in range(int(codepoint1, 16),
int(codepoint2, 16) + 1):
charmap_dictionary[i] = match.group('hexutf8')
sys.stderr.write('No “CHARMAP” or no “END CHARMAP” found in %s\n'
%file_name)
exit(1)
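# For illustration (not taken from a real file), a CHARMAP entry such as
#   <U0041> /x41 LATIN CAPITAL LETTER A
# adds charmap_dictionary[0x41] = '/x41', and a <U..>..<U..> range entry
# maps every code point in the range to the same /x sequence.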
def check_charmap(original_file_name, new_file_name):
'''Report differences in the CHARMAP section between the old and the
new file
'''
print('************************************************************')
print('Report on CHARMAP:')
ocharmap = create_charmap_dictionary(original_file_name)
ncharmap = create_charmap_dictionary(new_file_name)
print('------------------------------------------------------------')
print('Total removed characters in newly generated CHARMAP: %d'
%len(set(ocharmap)-set(ncharmap)))
if ARGS.show_missing_characters:
for key in sorted(set(ocharmap)-set(ncharmap)):
print('removed: {:s} {:s} {:s}'.format(
unicode_utils.ucs_symbol(key),
ocharmap[key],
unicode_utils.UNICODE_ATTRIBUTES[key]['name'] \
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None'))
print('------------------------------------------------------------')
changed_charmap = {}
for key in set(ocharmap).intersection(set(ncharmap)):
if ocharmap[key] != ncharmap[key]:
changed_charmap[key] = (ocharmap[key], ncharmap[key])
print('Total changed characters in newly generated CHARMAP: %d'
%len(changed_charmap))
if ARGS.show_changed_characters:
for key in sorted(changed_charmap):
print('changed: {:s} {:s}->{:s} {:s}'.format(
unicode_utils.ucs_symbol(key),
changed_charmap[key][0],
changed_charmap[key][1],
unicode_utils.UNICODE_ATTRIBUTES[key]['name'] \
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None'))
print('------------------------------------------------------------')
print('Total added characters in newly generated CHARMAP: %d'
%len(set(ncharmap)-set(ocharmap)))
if ARGS.show_added_characters:
for key in sorted(set(ncharmap)-set(ocharmap)):
print('added: {:s} {:s} {:s}'.format(
unicode_utils.ucs_symbol(key),
ncharmap[key],
unicode_utils.UNICODE_ATTRIBUTES[key]['name'] \
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None'))
def create_width_dictionary(file_name):
'''Create a dictionary for all code points found in the WIDTH
section of a file
'''
with open(file_name, mode='r') as utf8_file:
width_dictionary = {}
for line in utf8_file:
if line.startswith('WIDTH'):
break
for line in utf8_file:
if line.startswith('END WIDTH'):
return width_dictionary
match = re.match(
r'^<U(?P<codepoint1>[0-9A-F]{4,8})>'
+r'(:?\.\.\.<U(?P<codepoint2>[0-9-A-F]{4,8})>)?'
+r'\s+(?P<width>[02])',
line)
if not match:
continue
codepoint1 = match.group('codepoint1')
codepoint2 = match.group('codepoint2')
if not codepoint2:
codepoint2 = codepoint1
for i in range(int(codepoint1, 16),
int(codepoint2, 16) + 1):
width_dictionary[i] = int(match.group('width'))
sys.stderr.write('No “WIDTH” or no “END WIDTH” found in %s\n' %file_name)
exit(1)
def check_width(original_file_name, new_file_name):
'''Report differences in the WIDTH section between the old and the new
file
'''
print('************************************************************')
print('Report on WIDTH:')
owidth = create_width_dictionary(original_file_name)
nwidth = create_width_dictionary(new_file_name)
print('------------------------------------------------------------')
print('Total removed characters in newly generated WIDTH: %d'
%len(set(owidth)-set(nwidth)))
print('(Characters not in WIDTH get width 1 by default, '
+ 'i.e. these have width 1 now.)')
if ARGS.show_missing_characters:
for key in sorted(set(owidth)-set(nwidth)):
print('removed: {:s} '.format(unicode_utils.ucs_symbol(key))
+ '{:d} : '.format(owidth[key])
+ 'eaw={:s} '.format(
unicode_utils.EAST_ASIAN_WIDTHS[key]
if key in unicode_utils.EAST_ASIAN_WIDTHS else 'None')
+ 'category={:2s} '.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['category']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None')
+ 'bidi={:3s} '.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['bidi']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None')
+ 'name={:s}'.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['name']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None'))
print('------------------------------------------------------------')
changed_width = {}
for key in set(owidth).intersection(set(nwidth)):
if owidth[key] != nwidth[key]:
changed_width[key] = (owidth[key], nwidth[key])
print('Total changed characters in newly generated WIDTH: %d'
%len(changed_width))
if ARGS.show_changed_characters:
for key in sorted(changed_width):
print('changed width: {:s} '.format(unicode_utils.ucs_symbol(key))
+ '{:d}->{:d} : '.format(changed_width[key][0],
changed_width[key][1])
+ 'eaw={:s} '.format(
unicode_utils.EAST_ASIAN_WIDTHS[key]
if key in unicode_utils.EAST_ASIAN_WIDTHS else 'None')
+ 'category={:2s} '.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['category']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None')
+ 'bidi={:3s} '.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['bidi']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None')
+ 'name={:s}'.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['name']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None'))
print('------------------------------------------------------------')
print('Total added characters in newly generated WIDTH: %d'
%len(set(nwidth)-set(owidth)))
print('(Characters not in WIDTH get width 1 by default, '
+ 'i.e. these had width 1 before.)')
if ARGS.show_added_characters:
for key in sorted(set(nwidth)-set(owidth)):
print('added: {:s} '.format(unicode_utils.ucs_symbol(key))
+ '{:d} : '.format(nwidth[key])
+ 'eaw={:s} '.format(
unicode_utils.EAST_ASIAN_WIDTHS[key]
if key in unicode_utils.EAST_ASIAN_WIDTHS else 'None')
+ 'category={:2s} '.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['category']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None')
+ 'bidi={:3s} '.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['bidi']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None')
+ 'name={:s}'.format(
unicode_utils.UNICODE_ATTRIBUTES[key]['name']
if key in unicode_utils.UNICODE_ATTRIBUTES else 'None'))
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(
description='''
Compare the contents of LC_CTYPE in two files and check for errors.
''')
PARSER.add_argument(
'-o', '--old_utf8_file',
nargs='?',
required=True,
type=str,
help='The old UTF-8 file.')
PARSER.add_argument(
'-n', '--new_utf8_file',
nargs='?',
required=True,
type=str,
help='The new UTF-8 file.')
PARSER.add_argument(
'-u', '--unicode_data_file',
nargs='?',
type=str,
help='The UnicodeData.txt file to read.')
PARSER.add_argument(
'-e', '--east_asian_width_file',
nargs='?',
type=str,
help='The EastAsianWidth.txt file to read.')
PARSER.add_argument(
'-a', '--show_added_characters',
action='store_true',
help='Show characters which were added in detail.')
PARSER.add_argument(
'-m', '--show_missing_characters',
action='store_true',
help='Show characters which were removed in detail.')
PARSER.add_argument(
'-c', '--show_changed_characters',
action='store_true',
help='Show characters whose width was changed in detail.')
ARGS = PARSER.parse_args()
if ARGS.unicode_data_file:
unicode_utils.fill_attributes(ARGS.unicode_data_file)
if ARGS.east_asian_width_file:
unicode_utils.fill_east_asian_widths(ARGS.east_asian_width_file)
check_charmap(ARGS.old_utf8_file, ARGS.new_utf8_file)
check_width(ARGS.old_utf8_file, ARGS.new_utf8_file)
|
Wingless-Archangel/CustomCommand | refs/heads/master | Python Practice/learn_django/lib/python3.6/site-packages/setuptools/command/bdist_wininst.py | 991 | import distutils.command.bdist_wininst as orig
class bdist_wininst(orig.bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
"""
Supplement reinitialize_command to work around
http://bugs.python.org/issue20819
"""
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None
return cmd
def run(self):
self._is_running = True
try:
orig.bdist_wininst.run(self)
finally:
self._is_running = False
|
fivejjs/PTVS | refs/heads/master | Python/Product/PythonTools/Templates/Projects/StarterBottleProject/routes.py | 19 | """
Routes and views for the bottle application.
"""
from bottle import route, view
from datetime import datetime
@route('/')
@route('/home')
@view('index')
def home():
"""Renders the home page."""
return dict(
year=datetime.now().year
)
@route('/contact')
@view('contact')
def contact():
"""Renders the contact page."""
return dict(
title='Contact',
message='Your contact page.',
year=datetime.now().year
)
@route('/about')
@view('about')
def about():
"""Renders the about page."""
return dict(
title='About',
message='Your application description page.',
year=datetime.now().year
)
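# To serve these routes during development, one might add (host and port are
# illustrative): from bottle import run; run(host='localhost', port=5555)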
|
cmlasu/smm_gem5 | refs/heads/smm | src/python/m5/simulate.py | 23 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Steve Reinhardt
import atexit
import os
import sys
# import the SWIG-wrapped main C++ functions
import internal
import core
import stats
import SimObject
import ticks
import objects
from m5.util.dot_writer import do_dot
from m5.internal.stats import updateEvents as updateStatEvents
from util import fatal
from util import attrdict
# define a MaxTick parameter, unsigned 64 bit
MaxTick = 2**64 - 1
_memory_modes = {
"atomic" : objects.params.atomic,
"timing" : objects.params.timing,
"atomic_noncaching" : objects.params.atomic_noncaching,
}
# The final hook to generate .ini files. Called from the user script
# once the config is built.
def instantiate(ckpt_dir=None):
from m5 import options
root = objects.Root.getInstance()
if not root:
fatal("Need to instantiate Root() before calling instantiate()")
# we need to fix the global frequency
ticks.fixGlobalFrequency()
# Make sure SimObject-valued params are in the configuration
# hierarchy so we catch them with future descendants() walks
for obj in root.descendants(): obj.adoptOrphanParams()
# Unproxy in sorted order for determinism
for obj in root.descendants(): obj.unproxyParams()
if options.dump_config:
ini_file = file(os.path.join(options.outdir, options.dump_config), 'w')
# Print ini sections in sorted order for easier diffing
for obj in sorted(root.descendants(), key=lambda o: o.path()):
obj.print_ini(ini_file)
ini_file.close()
if options.json_config:
try:
import json
json_file = file(os.path.join(options.outdir, options.json_config), 'w')
d = root.get_config_as_dict()
json.dump(d, json_file, indent=4)
json_file.close()
except ImportError:
pass
do_dot(root, options.outdir, options.dot_config)
# Initialize the global statistics
stats.initSimStats()
# Create the C++ sim objects and connect ports
for obj in root.descendants(): obj.createCCObject()
for obj in root.descendants(): obj.connectPorts()
# Do a second pass to finish initializing the sim objects
for obj in root.descendants(): obj.init()
# Do a third pass to initialize statistics
for obj in root.descendants(): obj.regStats()
# Do a fourth pass to initialize probe points
for obj in root.descendants(): obj.regProbePoints()
# Do a fifth pass to connect probe listeners
for obj in root.descendants(): obj.regProbeListeners()
# We're done registering statistics. Enable the stats package now.
stats.enable()
# Restore checkpoint (if any)
if ckpt_dir:
ckpt = internal.core.getCheckpoint(ckpt_dir)
internal.core.unserializeGlobals(ckpt)
for obj in root.descendants(): obj.loadState(ckpt)
need_resume.append(root)
else:
for obj in root.descendants(): obj.initState()
# Check to see if any of the stat events are in the past after resuming from
# a checkpoint. If so, this call will shift them to be at a valid time.
updateStatEvents()
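# Illustrative call order (added sketch; the concrete Root configuration is
# hypothetical): a user script builds the object tree, then calls
# instantiate() exactly once before the first simulate():
#
#     import m5
#     from m5.objects import Root
#     root = Root(full_system=False)   # plus system/CPU configuration
#     m5.instantiate()                 # emits config.ini, builds C++ objects
#     exit_event = m5.simulate()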
need_resume = []
need_startup = True
def simulate(*args, **kwargs):
global need_resume, need_startup
if need_startup:
root = objects.Root.getInstance()
for obj in root.descendants(): obj.startup()
need_startup = False
# Python exit handlers happen in reverse order.
# We want to dump stats last.
atexit.register(stats.dump)
# register our C++ exit callback function with Python
atexit.register(internal.core.doExitCleanup)
# Reset to put the stats in a consistent state.
stats.reset()
for root in need_resume:
resume(root)
need_resume = []
return internal.event.simulate(*args, **kwargs)
# Export curTick to user script.
def curTick():
return internal.core.curTick()
# Drain the system in preparation of a checkpoint or memory mode
# switch.
def drain(root):
# Try to drain all objects. Draining might not be completed unless
# all objects return that they are drained on the first call. This
# is because as objects drain they may cause other objects to no
# longer be drained.
def _drain():
all_drained = False
dm = internal.drain.createDrainManager()
unready_objs = sum(obj.drain(dm) for obj in root.descendants())
# If we've got some objects that can't drain immediately, then simulate
if unready_objs > 0:
dm.setCount(unready_objs)
#WARNING: if a valid exit event occurs while draining, it will not
# get returned to the user script
exit_event = simulate()
while exit_event.getCause() != 'Finished drain':
exit_event = simulate()
else:
all_drained = True
internal.drain.cleanupDrainManager(dm)
return all_drained
all_drained = _drain()
while (not all_drained):
all_drained = _drain()
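# (added note) drain() is the primitive used below by checkpoint() and
# _changeMemoryMode(); callers are expected to pair it with resume(root)
# once the drained operation has finished.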
def memWriteback(root):
for obj in root.descendants():
obj.memWriteback()
def memInvalidate(root):
for obj in root.descendants():
obj.memInvalidate()
def resume(root):
for obj in root.descendants(): obj.drainResume()
def checkpoint(dir):
root = objects.Root.getInstance()
if not isinstance(root, objects.Root):
raise TypeError, "Checkpoint must be called on a root object."
drain(root)
memWriteback(root)
print "Writing checkpoint"
internal.core.serializeAll(dir)
resume(root)
def _changeMemoryMode(system, mode):
if not isinstance(system, (objects.Root, objects.System)):
raise TypeError, "Parameter of type '%s'. Must be type %s or %s." % \
(type(system), objects.Root, objects.System)
if system.getMemoryMode() != mode:
drain(system)
system.setMemoryMode(mode)
else:
print "System already in target mode. Memory mode unchanged."
def switchCpus(system, cpuList, do_drain=True, verbose=True):
"""Switch CPUs in a system.
By default, this method drains and resumes the system. This
behavior can be disabled by setting the keyword argument
'do_drain' to false, which might be desirable if multiple
operations requiring a drained system are going to be performed in
sequence.
Note: This method may switch the memory mode of the system if that
is required by the CPUs. It may also flush all caches in the
system.
Arguments:
system -- Simulated system.
cpuList -- (old_cpu, new_cpu) tuples
Keyword Arguments:
do_drain -- Perform a drain/resume of the system when switching.
"""
if verbose:
print "switching cpus"
if not isinstance(cpuList, list):
raise RuntimeError, "Must pass a list to this function"
for item in cpuList:
if not isinstance(item, tuple) or len(item) != 2:
raise RuntimeError, "List must have tuples of (oldCPU,newCPU)"
old_cpus = [old_cpu for old_cpu, new_cpu in cpuList]
new_cpus = [new_cpu for old_cpu, new_cpu in cpuList]
old_cpu_set = set(old_cpus)
memory_mode_name = new_cpus[0].memory_mode()
for old_cpu, new_cpu in cpuList:
if not isinstance(old_cpu, objects.BaseCPU):
raise TypeError, "%s is not of type BaseCPU" % old_cpu
if not isinstance(new_cpu, objects.BaseCPU):
raise TypeError, "%s is not of type BaseCPU" % new_cpu
if new_cpu in old_cpu_set:
raise RuntimeError, \
"New CPU (%s) is in the list of old CPUs." % (old_cpu,)
if not new_cpu.switchedOut():
raise RuntimeError, \
"New CPU (%s) is already active." % (new_cpu,)
if not new_cpu.support_take_over():
raise RuntimeError, \
"New CPU (%s) does not support CPU handover." % (old_cpu,)
if new_cpu.memory_mode() != memory_mode_name:
raise RuntimeError, \
"%s and %s require different memory modes." % (new_cpu,
new_cpus[0])
if old_cpu.switchedOut():
raise RuntimeError, \
"Old CPU (%s) is inactive." % (new_cpu,)
if not old_cpu.support_take_over():
raise RuntimeError, \
"Old CPU (%s) does not support CPU handover." % (old_cpu,)
try:
memory_mode = _memory_modes[memory_mode_name]
except KeyError:
raise RuntimeError, "Invalid memory mode (%s)" % memory_mode_name
if do_drain:
drain(system)
# Now all of the CPUs are ready to be switched out
for old_cpu, new_cpu in cpuList:
old_cpu.switchOut()
# Change the memory mode if required. We check if this is needed
# to avoid printing a warning if no switch was performed.
if system.getMemoryMode() != memory_mode:
# Flush the memory system if we are switching to a memory mode
# that disables caches. This typically happens when switching to a
# hardware virtualized CPU.
if memory_mode == objects.params.atomic_noncaching:
memWriteback(system)
memInvalidate(system)
_changeMemoryMode(system, memory_mode)
for old_cpu, new_cpu in cpuList:
new_cpu.takeOverFrom(old_cpu)
if do_drain:
resume(system)
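# Typical invocation (added sketch; the attribute names are assumptions):
# the new CPU must have been created switched out, e.g. with
# switched_out=True, before handing over:
#
#     switchCpus(system, [(system.cpu, system.switch_cpu)])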
from internal.core import disableAllListeners
|
efortuna/AndroidSDKClone | refs/heads/master | ndk/prebuilt/linux-x86_64/lib/python2.7/distutils/tests/test_config.py | 95 | """Tests for distutils.pypirc.pypirc."""
import sys
import os
import unittest
import tempfile
import shutil
from distutils.core import PyPIRCCommand
from distutils.core import Distribution
from distutils.log import set_threshold
from distutils.log import WARN
from distutils.tests import support
from test.test_support import run_unittest
PYPIRC = """\
[distutils]
index-servers =
server1
server2
[server1]
username:me
password:secret
[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/
"""
PYPIRC_OLD = """\
[server-login]
username:tarek
password:secret
"""
WANTED = """\
[distutils]
index-servers =
pypi
[pypi]
username:tarek
password:xxx
"""
class PyPIRCCommandTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def setUp(self):
"""Patches the environment."""
super(PyPIRCCommandTestCase, self).setUp()
self.tmp_dir = self.mkdtemp()
os.environ['HOME'] = self.tmp_dir
self.rc = os.path.join(self.tmp_dir, '.pypirc')
self.dist = Distribution()
class command(PyPIRCCommand):
def __init__(self, dist):
PyPIRCCommand.__init__(self, dist)
def initialize_options(self):
pass
finalize_options = initialize_options
self._cmd = command
self.old_threshold = set_threshold(WARN)
def tearDown(self):
"""Removes the patch."""
set_threshold(self.old_threshold)
super(PyPIRCCommandTestCase, self).tearDown()
def test_server_registration(self):
# This test makes sure PyPIRCCommand knows how to:
# 1. handle several sections in .pypirc
# 2. handle the old format
# new format
self.write_file(self.rc, PYPIRC)
cmd = self._cmd(self.dist)
config = cmd._read_pypirc()
config = config.items()
config.sort()
waited = [('password', 'secret'), ('realm', 'pypi'),
('repository', 'http://pypi.python.org/pypi'),
('server', 'server1'), ('username', 'me')]
self.assertEqual(config, waited)
# old format
self.write_file(self.rc, PYPIRC_OLD)
config = cmd._read_pypirc()
config = config.items()
config.sort()
waited = [('password', 'secret'), ('realm', 'pypi'),
('repository', 'http://pypi.python.org/pypi'),
('server', 'server-login'), ('username', 'tarek')]
self.assertEqual(config, waited)
def test_server_empty_registration(self):
cmd = self._cmd(self.dist)
rc = cmd._get_rc_file()
self.assertTrue(not os.path.exists(rc))
cmd._store_pypirc('tarek', 'xxx')
self.assertTrue(os.path.exists(rc))
f = open(rc)
try:
content = f.read()
self.assertEqual(content, WANTED)
finally:
f.close()
def test_suite():
return unittest.makeSuite(PyPIRCCommandTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
peterfpeterson/mantid | refs/heads/master | scripts/SliceViewAnimator.py | 3 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=too-many-arguments
import numpy
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
from PyQt4.QtCore import QByteArray, QBuffer
def animate_slice(sliceviewer, name, start, end, filename, num_frames=10, font_size=24):
"""Generate an animated gif of a 2D slice moving through a third dimension.
Args:
sliceviewer (SliceViewer): A sliceviewer instance.
name (str): The name of the third dimension to use.
start (float): The starting value of the third dimension.
end (float): The end value of the third dimension.
filename (str): The file to save the gif to.
Kwargs:
num_frames (int): The number of frames the gif should contain.
font_size: (int): The size of the caption.
Example:
ws = CreateMDWorkspace(3, Extents=[-10,10,-10,10,-10,10], Names=["X","Y","Z"], Units=["u","u","u"])
FakeMDEventData(ws, PeakParams=[10000,0,0,0,1])
sv = plotSlice(ws)
#Resize and configure the slice viewer how you want the output to look
sv.setNormalization(1) # We need to normalize by volume in this case, or the data won't show up
#This will create a gif iterating from Z = -1 to Z = 1
animate_slice(sv, "Z", -1, 1, "output.gif")
"""
#Generate all the individual frames
images = []
for slice_point in numpy.linspace(start, end, num_frames):
sliceviewer.setSlicePoint(name, slice_point)
sliceviewer.refreshRebin()
qimg = sliceviewer.getImage().toImage()
data = QByteArray()
buf = QBuffer(data)
qimg.save(buf, "PNG")
image = Image(blob=str(data))
captionstrip_size = font_size + 10
#To add whitespace to the top, we add a vertical border,
#then crop out the bottom border
image.border(Color("#fff"), 0, captionstrip_size)
image.crop(0, 0, image.width, image.height - captionstrip_size)
#Write the caption into the whitespace
draw = Drawing()
draw.font_size = font_size
draw.text(5, font_size,"%s = %g" % (name,slice_point))
draw(image)
images.append(image)
#Create a new image with the right dimensions
animation = Image(width=images[0].width, height=images[0].height)
#Copy in the frames from all the generated images
for image in images:
animation.sequence.append(image.sequence[0])
#Drop the initial blank frame
del animation.sequence[0]
#Write the animation to disk
animation.save(filename=filename)
|
sqggles/sqlalchemy_dremio | refs/heads/master | sqlalchemy_dremio/requirements.py | 1 | from sqlalchemy.testing.requirements import SuiteRequirements
class Requirements(SuiteRequirements):
pass
|
allotria/intellij-community | refs/heads/master | python/testData/requirement/generation/baseFileUpdate/main.py | 20 | import requests
import Django
import pandas
import cookiecutter |
salmanwahed/django-tutorial-pollapp | refs/heads/master | polls/models.py | 1 | import datetime
from django.utils import timezone
from django.db import models
# Create your models here.
class Poll(models.Model):
def __unicode__(self):
return self.question
question = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
def __unicode__(self):
return self.choice_text
poll = models.ForeignKey(Poll)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0) |
sjsucohort6/openstack | refs/heads/master | python/venv/lib/python2.7/site-packages/pytz/lazy.py | 514 | from threading import RLock
try:
from UserDict import DictMixin
except ImportError:
from collections import Mapping as DictMixin
# With lazy loading, we might end up with multiple threads triggering
# it at the same time. We need a lock.
_fill_lock = RLock()
class LazyDict(DictMixin):
"""Dictionary populated on first use."""
data = None
def __getitem__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data[key.upper()]
def __contains__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return key in self.data
def __iter__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return iter(self.data)
def __len__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return len(self.data)
def keys(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data.keys()
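# Sketch of the intended subclassing pattern (added; the subclass and data
# are hypothetical): concrete mappings override _fill() to populate
# self.data exactly once, under _fill_lock:
#
#     class _CountryNames(LazyDict):
#         def _fill(self):
#             self.data = {'US': 'United States', 'GB': 'United Kingdom'}
#
#     names = _CountryNames()
#     names['us']     # keys are upper-cased on lookup, so this returns
#                     # 'United States'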
class LazyList(list):
"""List populated on first use."""
_props = [
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
'__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__reversed__', '__getslice__', '__setslice__', '__delslice__']
def __new__(cls, fill_iter=None):
if fill_iter is None:
return list()
# We need a new class as we will be dynamically messing with its
# methods.
class LazyList(list):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
list.extend(self, fill_iter.pop())
for method_name in cls._props:
delattr(LazyList, method_name)
finally:
_fill_lock.release()
return getattr(list, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazyList, name, lazy(name))
new_list = LazyList()
return new_list
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
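# Usage sketch (added; values are illustrative): the iterable passed to
# LazyList is only consumed on first access, after which the proxy methods
# are removed and the object behaves like a plain list:
#
#     lst = LazyList(iter(range(3)))
#     len(lst)        # triggers the fill; lst is now [0, 1, 2]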
class LazySet(set):
"""Set populated on first use."""
_props = (
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__sub__', '__and__', '__xor__', '__or__',
'__rsub__', '__rand__', '__rxor__', '__ror__',
'__isub__', '__iand__', '__ixor__', '__ior__',
'add', 'clear', 'copy', 'difference', 'difference_update',
'discard', 'intersection', 'intersection_update', 'isdisjoint',
'issubset', 'issuperset', 'pop', 'remove',
'symmetric_difference', 'symmetric_difference_update',
'union', 'update')
def __new__(cls, fill_iter=None):
if fill_iter is None:
return set()
class LazySet(set):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
for i in fill_iter.pop():
set.add(self, i)
for method_name in cls._props:
delattr(LazySet, method_name)
finally:
_fill_lock.release()
return getattr(set, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazySet, name, lazy(name))
new_set = LazySet()
return new_set
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
|
stephen144/odoo | refs/heads/9.0 | addons/mail/controllers/main.py | 7 | import base64
from operator import itemgetter
import psycopg2
import werkzeug
from werkzeug import url_encode
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.exceptions import AccessError
from openerp.http import request
from openerp.addons.web.controllers.main import binary_content
class MailController(http.Controller):
_cp_path = '/mail'
def _redirect_to_messaging(self):
messaging_action = request.env['mail.thread']._get_inbox_action_xml_id()
url = '/web#%s' % url_encode({'action': messaging_action})
return werkzeug.utils.redirect(url)
@http.route('/mail/receive', type='json', auth='none')
def receive(self, req):
""" End-point to receive mail from an external SMTP server. """
dbs = req.jsonrequest.get('databases')
for db in dbs:
message = dbs[db].decode('base64')
try:
registry = openerp.registry(db)
with registry.cursor() as cr:
mail_thread = registry['mail.thread']
mail_thread.message_process(cr, SUPERUSER_ID, None, message)
except psycopg2.Error:
pass
return True
@http.route('/mail/read_followers', type='json', auth='user')
def read_followers(self, follower_ids):
result = []
is_editable = request.env.user.has_group('base.group_no_one')
for follower in request.env['mail.followers'].browse(follower_ids):
result.append({
'id': follower.id,
'name': follower.partner_id.name or follower.channel_id.name,
'email': follower.partner_id.email if follower.partner_id else None,
'res_model': 'res.partner' if follower.partner_id else 'mail.channel',
'res_id': follower.partner_id.id or follower.channel_id.id,
'is_editable': is_editable,
'is_uid': request.env.user.partner_id == follower.partner_id,
})
return result
@http.route('/mail/read_subscription_data', type='json', auth='user')
def read_subscription_data(self, res_model, res_id, follower_id=None):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
# find the document followers, update the data
followers = request.env['mail.followers']
if not follower_id:
followers = followers.search([
('partner_id', '=', request.env.user.partner_id.id),
('res_id', '=', res_id),
('res_model', '=', res_model),
])
else:
followers = followers.browse(follower_id)
# find current model subtypes, add them to a dictionary
subtypes = request.env['mail.message.subtype'].search(['&', ('hidden', '=', False), '|', ('res_model', '=', res_model), ('res_model', '=', False)])
subtypes_list = [{
'name': subtype.name,
'res_model': subtype.res_model,
'sequence': subtype.sequence,
'default': subtype.default,
'internal': subtype.internal,
'followed': subtype.id in followers.mapped('subtype_ids').ids,
'parent_model': subtype.parent_id and subtype.parent_id.res_model or False,
'id': subtype.id
} for subtype in subtypes]
subtypes_list = sorted(subtypes_list, key=itemgetter('parent_model', 'res_model', 'internal', 'sequence'))
return subtypes_list
@http.route('/mail/view', type='http', auth='none')
def mail_action_view(self, model=None, res_id=None, message_id=None):
""" Generic access point from notification emails. The heuristic to
choose where to redirect the user is the following :
- find a public URL
- if none found
- users with a read access are redirected to the document
- users without read access are redirected to the Messaging
- not logged users are redirected to the login page
"""
uid = request.session.uid
if message_id:
try:
message = request.env['mail.message'].sudo().browse(int(message_id)).exists()
except:
message = request.env['mail.message']
if message:
model, res_id = message.model, message.res_id
else:
# either a wrong message_id, either someone trying ids -> just go to messaging
return self._redirect_to_messaging()
elif res_id and isinstance(res_id, basestring):
res_id = int(res_id)
# no model / res_id, meaning no possible record -> redirect to login
if not model or not res_id or model not in request.env:
return self._redirect_to_messaging()
# find the access action using sudo to have the details about the access link
RecordModel = request.env[model]
record_sudo = RecordModel.sudo().browse(res_id).exists()
if not record_sudo:
# record does not seem to exist -> redirect to login
return self._redirect_to_messaging()
record_action = record_sudo.get_access_action()
# the record has an URL redirection: use it directly
if record_action['type'] == 'ir.actions.act_url':
return werkzeug.utils.redirect(record_action['url'])
# other choice: act_window (no support of anything else currently)
elif not record_action['type'] == 'ir.actions.act_window':
return self._redirect_to_messaging()
# the record has a window redirection: check access rights
if not RecordModel.sudo(uid).check_access_rights('read', raise_exception=False):
return self._redirect_to_messaging()
try:
RecordModel.sudo(uid).browse(res_id).exists().check_access_rule('read')
except AccessError:
return self._redirect_to_messaging()
query = {}
url_params = {
'view_type': record_action['view_type'],
'model': model,
'id': res_id,
'active_id': res_id,
'view_id': record_sudo.get_formview_id(),
'action': record_action.get('id'),
}
url = '/web?%s#%s' % (url_encode(query), url_encode(url_params))
return werkzeug.utils.redirect(url)
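# Example of the kind of link this endpoint handles (added; the identifiers
# are hypothetical): notification emails embed URLs such as
#
#     /mail/view?model=res.partner&res_id=7
#     /mail/view?message_id=42
#
# and the heuristic above resolves them to a record view, the Messaging
# client or the login page depending on access rights.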
@http.route('/mail/follow', type='http', auth='user')
def mail_action_follow(self, model, res_id):
if model not in request.env:
return self._redirect_to_messaging()
Model = request.env[model]
try:
Model.browse(res_id).message_subscribe_users()
except:
return self._redirect_to_messaging()
return werkzeug.utils.redirect('/mail/view?%s' % url_encode({'model': model, 'res_id': res_id}))
@http.route('/mail/unfollow', type='http', auth='user')
def mail_action_unfollow(self, model, res_id):
if model not in request.env:
return self._redirect_to_messaging()
Model = request.env[model]
try:
Model.browse(res_id).sudo().message_unsubscribe_users([request.uid])
except:
return self._redirect_to_messaging()
return werkzeug.utils.redirect('/mail/view?%s' % url_encode({'model': model, 'res_id': res_id}))
@http.route('/mail/new', type='http', auth='user')
def mail_action_new(self, model, res_id, **kwargs):
if model not in request.env:
return self._redirect_to_messaging()
params = {'view_type': 'form', 'model': model}
if kwargs.get('action_id'):
params['action'] = kwargs['action_id']
return werkzeug.utils.redirect('/web?#%s' % url_encode(params))
@http.route('/mail/method', type='http', auth='user')
def mail_action_method(self, model, res_id, method, **kwargs):
# only public methods / check exists
if method.strip().startswith('_') or model not in request.env:
return self._redirect_to_messaging()
Model = request.env[model]
try:
record = Model.browse(int(res_id)).exists()
getattr(record, method)()
except:
return self._redirect_to_messaging()
return werkzeug.utils.redirect('/mail/view?%s' % url_encode({'model': model, 'res_id': res_id}))
@http.route('/mail/assign', type='http', auth='user')
def mail_action_assign(self, model, res_id, **kwargs):
if model not in request.env:
return self._redirect_to_messaging()
Model = request.env[model]
try:
Model.browse(int(res_id)).exists().write({'user_id': request.uid})
except:
return self._redirect_to_messaging()
return werkzeug.utils.redirect('/mail/view?%s' % url_encode({'model': model, 'res_id': res_id}))
@http.route('/mail/workflow', type='http', auth='user')
def mail_action_workflow(self, model, res_id, signal, **kwargs):
if model not in request.env:
return self._redirect_to_messaging()
Model = request.env[model]
try:
Model.browse(int(res_id)).exists().signal_workflow(signal)
except:
return self._redirect_to_messaging()
return werkzeug.utils.redirect('/mail/view?%s' % url_encode({'model': model, 'res_id': res_id}))
@http.route('/mail/<string:res_model>/<int:res_id>/avatar/<int:partner_id>', type='http', auth='public')
def avatar(self, res_model, res_id, partner_id):
headers = [('Content-Type', 'image/png')]
status = 200  # default status when the record lookup below is skipped or denied
content = 'R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' # default image is one white pixel
if res_model in request.env:
try:
# if the current user has access to the document, get the partner avatar as sudo()
request.env[res_model].browse(res_id).check_access_rule('read')
if partner_id in request.env[res_model].browse(res_id).sudo().exists().message_ids.mapped('author_id').ids:
status, headers, content = binary_content(model='res.partner', id=partner_id, field='image_medium', default_mimetype='image/png', env=request.env(user=openerp.SUPERUSER_ID))
if status == 304:
return werkzeug.wrappers.Response(status=304)
except AccessError:
pass
image_base64 = base64.b64decode(content)
headers.append(('Content-Length', len(image_base64)))
response = request.make_response(image_base64, headers)
response.status = str(status)
return response
@http.route('/mail/needaction', type='json', auth='user')
def needaction(self):
return request.env['res.partner'].get_needaction_count()
@http.route('/mail/client_action', type='json', auth='user')
def mail_client_action(self):
values = {
'needaction_inbox_counter': request.env['res.partner'].get_needaction_count(),
'channel_slots': request.env['mail.channel'].channel_fetch_slot(),
'mention_partner_suggestions': request.env['res.partner'].get_static_mention_suggestions(),
}
return values
|
webmasterraj/FogOrNot | refs/heads/master | flask/lib/python2.7/site-packages/numpy/distutils/tests/gen_ext/setup.py | 135 | #!/usr/bin/env python
from __future__ import division, print_function
fib3_f = '''
C FILE: FIB3.F
SUBROUTINE FIB(A,N)
C
C CALCULATE FIRST N FIBONACCI NUMBERS
C
INTEGER N
REAL*8 A(N)
Cf2py intent(in) n
Cf2py intent(out) a
Cf2py depend(n) a
DO I=1,N
IF (I.EQ.1) THEN
A(I) = 0.0D0
ELSEIF (I.EQ.2) THEN
A(I) = 1.0D0
ELSE
A(I) = A(I-1) + A(I-2)
ENDIF
ENDDO
END
C END FILE FIB3.F
'''
def source_func(ext, build_dir):
import os
from distutils.dep_util import newer
target = os.path.join(build_dir, 'fib3.f')
if newer(__file__, target):
f = open(target, 'w')
f.write(fib3_f)
f.close()
return [target]
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('gen_ext', parent_package, top_path)
config.add_extension('fib3',
[source_func]
)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
|
grzes/djangae | refs/heads/master | djangae/models.py | 13 | from django.db import models
from djangae import patches
class CounterShard(models.Model):
count = models.PositiveIntegerField()
label = models.CharField(max_length=500)
class Meta:
app_label = "djangae"
|
dcjohnson1989/selenium | refs/heads/master | py/test/selenium/webdriver/common/cookie_tests.py | 61 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import calendar
import time
import unittest
import random
import pytest
from selenium.test.selenium.webdriver.common import utils
class CookieTest(unittest.TestCase):
def setUp(self):
self._loadPage("simpleTest")
# Set the cookie to expire in 30 minutes
timestamp = calendar.timegm(time.gmtime()) + (30 * 60)
self.COOKIE_A = {"name": "foo",
"value": "bar",
"path": "/",
"secure": False,
"expiry": timestamp}
def tearDown(self):
self.driver.delete_all_cookies()
def testAddCookie(self):
self.driver.execute_script("return document.cookie")
self.driver.add_cookie(self.COOKIE_A)
cookie_returned = str(self.driver.execute_script("return document.cookie"))
self.assertTrue(self.COOKIE_A["name"] in cookie_returned)
def testAddingACookieThatExpiredInThePast(self):
if self.driver.name == 'internet explorer':
pytest.skip("Issue needs investigating")
cookie = self.COOKIE_A.copy()
cookie["expiry"] = calendar.timegm(time.gmtime()) - 1
self.driver.add_cookie(cookie)
cookies = self.driver.get_cookies()
self.assertEquals(0, len(cookies))
def testDeleteAllCookie(self):
self.driver.add_cookie(utils.convert_cookie_to_json(self.COOKIE_A))
self.driver.delete_all_cookies()
self.assertFalse(self.driver.get_cookies())
def testDeleteCookie(self):
self.driver.add_cookie(utils.convert_cookie_to_json(self.COOKIE_A))
self.driver.delete_cookie("foo")
self.assertFalse(self.driver.get_cookies())
def testShouldGetCookieByName(self):
key = "key_%d" % int(random.random()*10000000)
self.driver.execute_script("document.cookie = arguments[0] + '=set';", key)
cookie = self.driver.get_cookie(key)
self.assertEquals("set", cookie["value"])
def testGetAllCookies(self):
key1 = "key_%d" % int(random.random()*10000000)
key2 = "key_%d" % int(random.random()*10000000)
cookies = self.driver.get_cookies()
count = len(cookies)
one = {"name" :key1,
"value": "value"}
two = {"name":key2,
"value": "value"}
self.driver.add_cookie(one)
self.driver.add_cookie(two)
self._loadPage("simpleTest")
cookies = self.driver.get_cookies()
self.assertEquals(count + 2, len(cookies))
def testShouldNotDeleteCookiesWithASimilarName(self):
cookieOneName = "fish"
cookie1 = {"name" :cookieOneName,
"value":"cod"}
cookie2 = {"name" :cookieOneName + "x",
"value": "earth"}
self.driver.add_cookie(cookie1)
self.driver.add_cookie(cookie2)
self.driver.delete_cookie(cookieOneName)
cookies = self.driver.get_cookies()
self.assertFalse(cookie1["name"] == cookies[0]["name"], msg=str(cookies))
self.assertEquals(cookie2["name"] , cookies[0]["name"], msg=str(cookies))
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
|
okin/nikola | refs/heads/master | nikola/plugins/command/github_deploy.py | 1 | # -*- coding: utf-8 -*-
# Copyright © 2014-2019 Puneeth Chaganti and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Deploy site to GitHub Pages."""
import os
import subprocess
from textwrap import dedent
from nikola.plugin_categories import Command
from nikola.plugins.command.check import real_scan_files
from nikola.utils import req_missing, clean_before_deployment
from nikola.__main__ import main
from nikola import __version__
def uni_check_output(*args, **kwargs):
"""Run command and return output as Unicode (UTf-8)."""
o = subprocess.check_output(*args, **kwargs)
return o.decode('utf-8')
def check_ghp_import_installed():
"""Check if ghp-import is installed."""
try:
subprocess.check_output(['ghp-import', '-h'])
except OSError:
# req_missing defaults to `python=True` — and it’s meant to be like this.
# `ghp-import` is installed via pip, but the only way to use it is by executing the script it installs.
req_missing(['ghp-import2'], 'deploy the site to GitHub Pages')
class CommandGitHubDeploy(Command):
"""Deploy site to GitHub Pages."""
name = 'github_deploy'
doc_usage = '[-m COMMIT_MESSAGE]'
doc_purpose = 'deploy the site to GitHub Pages'
doc_description = dedent(
"""\
This command can be used to deploy your site to GitHub Pages. It uses ghp-import to do this task. It also optionally commits to the source branch.
Configuration help: https://getnikola.com/handbook.html#deploying-to-github"""
)
cmd_options = [
{
'name': 'commit_message',
'short': 'm',
'long': 'message',
'default': 'Nikola auto commit.',
'type': str,
'help': 'Commit message (default: Nikola auto commit.)',
},
]
def _execute(self, options, args):
"""Run the deployment."""
# Check if ghp-import is installed
check_ghp_import_installed()
# Build before deploying
build = main(['build'])
if build != 0:
self.logger.error('Build failed, not deploying to GitHub')
return build
# Clean non-target files
only_on_output, _ = real_scan_files(self.site)
for f in only_on_output:
os.unlink(f)
# Remove drafts and future posts if requested (Issue #2406)
undeployed_posts = clean_before_deployment(self.site)
if undeployed_posts:
self.logger.warning("Deleted {0} posts due to DEPLOY_* settings".format(len(undeployed_posts)))
# Commit and push
self._commit_and_push(options['commit_message'])
return
def _run_command(self, command, xfail=False):
"""Run a command that may or may not fail."""
self.logger.info("==> {0}".format(command))
try:
subprocess.check_call(command)
return 0
except subprocess.CalledProcessError as e:
if xfail:
return e.returncode
self.logger.error(
'Failed GitHub deployment -- command {0} '
'returned {1}'.format(e.cmd, e.returncode)
)
raise SystemError(e.returncode)
def _commit_and_push(self, commit_first_line):
"""Commit all the files and push."""
source = self.site.config['GITHUB_SOURCE_BRANCH']
deploy = self.site.config['GITHUB_DEPLOY_BRANCH']
remote = self.site.config['GITHUB_REMOTE_NAME']
autocommit = self.site.config['GITHUB_COMMIT_SOURCE']
try:
if autocommit:
commit_message = (
'{0}\n\n'
'Nikola version: {1}'.format(commit_first_line, __version__)
)
e = self._run_command(['git', 'checkout', source], True)
if e != 0:
self._run_command(['git', 'checkout', '-b', source])
self._run_command(['git', 'add', '.'])
# Figure out if there is anything to commit
e = self._run_command(['git', 'diff-index', '--quiet', 'HEAD'], True)
if e != 0:
self._run_command(['git', 'commit', '-am', commit_message])
else:
self.logger.info('Nothing to commit to source branch.')
try:
source_commit = uni_check_output(['git', 'rev-parse', source])
except subprocess.CalledProcessError:
try:
source_commit = uni_check_output(['git', 'rev-parse', 'HEAD'])
except subprocess.CalledProcessError:
source_commit = '?'
commit_message = (
'{0}\n\n'
'Source commit: {1}\n'
'Nikola version: {2}'.format(commit_first_line, source_commit.strip(), __version__)
)
output_folder = self.site.config['OUTPUT_FOLDER']
command = ['ghp-import', '-n', '-m', commit_message, '-p', '-r', remote, '-b', deploy, output_folder]
self._run_command(command)
if autocommit:
self._run_command(['git', 'push', '-u', remote, source])
except SystemError as e:
return e.args[0]
self.logger.info("Successful deployment")
|
artofhuman/django-geoip | refs/heads/dev | tests/test_utils.py | 3 | # -*- coding: utf-8 -*-
from django.test import TestCase
from mock import patch
from django_geoip.utils import get_mod_func, get_class
class UtilsTest(TestCase):
def test_get_mod_func(self):
test_hash = {
'django.views.news.stories.story_detail': ('django.views.news.stories', 'story_detail'),
'django': ('django', ''),
}
for klass, expected_result in test_hash.items():
self.assertEqual(get_mod_func(klass), expected_result)
@patch('django.contrib.sessions.backends.base.SessionBase')
def test_get_class(self, SessionBase):
""" FIXME: change to fake class"""
test_hash = {
'django.contrib.sessions.backends.base.SessionBase': SessionBase,
}
for class_string, expected_class_instance in test_hash.items():
self.assertEqual(get_class(class_string), expected_class_instance)
self.assertRaises(ImportError, get_class, 'django_geoip.fake') |
bootandy/sqlalchemy | refs/heads/master | lib/sqlalchemy/pool.py | 49 | # sqlalchemy/pool.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import time
import traceback
import weakref
from . import exc, log, event, interfaces, util
from .util import queue as sqla_queue
from .util import threading, memoized_property, \
chop_traceback
from collections import deque
proxies = {}
def manage(module, **params):
"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \*\*params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
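# Usage sketch (added; sqlite3 stands in for any DB-API 2.0 module): the
# proxy pools connections per distinct set of connect() arguments:
#
#     import sqlite3
#     from sqlalchemy import pool
#     sqlite = pool.manage(sqlite3, poolclass=pool.QueuePool)
#     conn = sqlite.connect(':memory:')   # checked out from a QueuePool
#     conn.close()                        # returned to the pool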
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.values():
manager.close()
proxies.clear()
reset_rollback = util.symbol('reset_rollback')
reset_commit = util.symbol('reset_commit')
reset_none = util.symbol('reset_none')
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`.Pool` is combined with an :class:`.Engine`,
the :class:`.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
def __init__(self,
creator, recycle=-1, echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
_dispatch=None,
_dialect=None):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to non -1, number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`.Pool.unique_connection` method is provided to return
a consistently unique connection to bypass this behavior
when the flag is set.
.. warning:: The :paramref:`.Pool.use_threadlocal` flag
**does not affect the behavior** of :meth:`.Engine.connect`.
:meth:`.Engine.connect` makes use of the
:meth:`.Pool.unique_connection` method which **does not use thread
local context**. To produce a :class:`.Connection` which refers
to the :meth:`.Pool.connect` method, use
:meth:`.Engine.contextual_connect`.
Note that other SQLAlchemy connectivity systems such as
:meth:`.Engine.execute` as well as the orm
:class:`.Session` make use of
:meth:`.Engine.contextual_connect` internally, so these functions
are compatible with the :paramref:`.Pool.use_threadlocal` setting.
.. seealso::
:ref:`threadlocal_strategy` - contains detail on the
"threadlocal" engine strategy, which provides a more comprehensive
approach to "threadlocal" connectivity for the specific
use case of using :class:`.Engine` and :class:`.Connection` objects
directly.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting should only be made on a database
that has no transaction support at all,
namely MySQL MyISAM. By not doing anything,
performance can be improved. This
setting should **never be selected** for a
database that supports transactions,
as it will lead to deadlocks and stale
state.
* ``"none"`` - same as ``None``
.. versionadded:: 0.9.10
* ``False`` - same as None, this is here for
backwards compatibility.
.. versionchanged:: 0.7.6
:paramref:`.Pool.reset_on_return` accepts ``"rollback"``
and ``"commit"`` arguments.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`.create_engine` before dialect-level
listeners are applied.
:param listeners: Deprecated. A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool. This has been superseded by
:func:`~sqlalchemy.event.listen`.
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._use_threadlocal = use_threadlocal
if reset_on_return in ('rollback', True, reset_rollback):
self._reset_on_return = reset_rollback
elif reset_on_return in ('none', None, False, reset_none):
self._reset_on_return = reset_none
elif reset_on_return in ('commit', reset_commit):
self._reset_on_return = reset_commit
else:
raise exc.ArgumentError(
"Invalid value for 'reset_on_return': %r"
% reset_on_return)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if _dialect:
self._dialect = _dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
util.warn_deprecated(
"The 'listeners' argument to Pool (and "
"create_engine()) is deprecated. Use event.listen().")
for l in listeners:
self.add_listener(l)
@property
def _creator(self):
return self.__dict__['_creator']
@_creator.setter
def _creator(self, creator):
self.__dict__['_creator'] = creator
self._invoke_creator = self._should_wrap_creator(creator)
def _should_wrap_creator(self, creator):
"""Detect if creator accepts a single argument, or is sent
as a legacy style no-arg function.
"""
try:
argspec = util.get_callable_argspec(self._creator, no_self=True)
except TypeError:
return lambda crec: creator()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
# look for the exact arg signature that DefaultStrategy
# sends us
if (argspec[0], argspec[3]) == (['connection_record'], (None,)):
return creator
# or just a single positional
elif positionals == 1:
return creator
# all other cases, just wrap and assume legacy "creator" callable
# thing
else:
return lambda crec: creator()
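# Both creator call styles are therefore accepted (added illustration;
# `dbapi` stands in for any DB-API module, and QueuePool is the concrete
# pool defined later in this module):
#
#     QueuePool(lambda: dbapi.connect())       # legacy no-arg creator
#     QueuePool(lambda rec: dbapi.connect())   # record-aware creator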
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except Exception:
self.logger.error("Exception closing connection %r",
connection, exc_info=True)
@util.deprecated(
2.7, "Pool.add_listener is deprecated. Use event.listen()")
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is equivalent to :meth:`.Pool.connect` when the
:paramref:`.Pool.use_threadlocal` flag is not set to True.
When :paramref:`.Pool.use_threadlocal` is True, the
:meth:`.Pool.unique_connection` method provides a means of bypassing
the threadlocal context.
"""
return _ConnectionFairy._checkout(self)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp to now. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if getattr(connection, 'is_valid', False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
See also the :meth:`Pool.recreate` method.
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy._checkout(self)
try:
rec = self._threadconns.current()
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._threadconns)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal:
try:
del self._threadconns.current
except AttributeError:
pass
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the :class:`.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`.PoolEvents.connect` and
:meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool):
self.__pool = pool
self.connection = self.__connect()
self.finalize_callback = deque()
pool.dispatch.first_connect.\
for_modify(pool.dispatch).\
exec_once(self.connection, self)
pool.dispatch.connect(self.connection, self)
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
_soft_invalidate_time = 0
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`.Connection.info` accessors.
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except:
with util.safe_reraise():
rec.checkin()
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy and
_finalize_fairy(
dbapi_connection,
rec, pool, ref, echo)
)
_refs.add(rec)
if echo:
pool.logger.debug("Connection %r checked out from pool",
dbapi_connection)
return fairy
def checkin(self):
self.fairy_ref = None
connection = self.connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
def close(self):
if self.connection is not None:
self.__close()
def invalidate(self, e=None, soft=False):
"""Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or
:meth:`.Connection.invalidate` methods are called, as well as when any
so-called "automatic invalidation" condition occurs.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
# already invalidated
if self.connection is None:
return
if soft:
self.__pool.dispatch.soft_invalidate(self.connection, self, e)
else:
self.__pool.dispatch.invalidate(self.connection, self, e)
if e is not None:
self.__pool.logger.info(
"%sInvalidate connection %r (reason: %s:%s)",
"Soft " if soft else "",
self.connection, e.__class__.__name__, e)
else:
self.__pool.logger.info(
"%sInvalidate connection %r",
"Soft " if soft else "",
self.connection)
if soft:
self._soft_invalidate_time = time.time()
else:
self.__close()
self.connection = None
def get_connection(self):
recycle = False
if self.connection is None:
self.info.clear()
self.connection = self.__connect()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
elif self.__pool._recycle > -1 and \
time.time() - self.starttime > self.__pool._recycle:
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling",
self.connection)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; " +
"recycling",
self.connection
)
recycle = True
elif self._soft_invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to local soft invalidation; " +
"recycling",
self.connection
)
recycle = True
if recycle:
self.__close()
self.info.clear()
# ensure that if self.__connect() fails,
# we are not referring to the previous stale connection here
self.connection = None
self.connection = self.__connect()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
return self.connection
def __close(self):
self.finalize_callback.clear()
self.__pool._close_connection(self.connection)
def __connect(self):
try:
self.starttime = time.time()
connection = self.__pool._invoke_creator(self)
self.__pool.logger.debug("Created new connection %r", connection)
return connection
except Exception as e:
self.__pool.logger.debug("Error on connect(): %s", e)
raise
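# A minimal sketch of the two invalidation modes documented on
# _ConnectionRecord.invalidate() above; `record` is assumed to be an
# existing _ConnectionRecord (hypothetical caller, illustration only).
def _invalidate_sketch(record, hard=True):
    if hard:
        # closes the DBAPI connection now; get_connection() reconnects later
        record.invalidate()
    else:
        # leaves the connection open; _soft_invalidate_time marks it so
        # get_connection() recycles it on the next checkout
        record.invalidate(soft=True)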
def _finalize_fairy(connection, connection_record,
pool, ref, echo, fairy=None):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
"""
_refs.discard(connection_record)
if ref is not None and \
connection_record.fairy_ref is not ref:
return
if connection is not None:
if connection_record and echo:
pool.logger.debug("Connection %r being returned to pool",
connection)
try:
fairy = fairy or _ConnectionFairy(
connection, connection_record, echo)
assert fairy.connection is connection
fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
pool._close_connection(connection)
except BaseException as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True)
if connection_record:
connection_record.invalidate(e=e)
if not isinstance(e, Exception):
raise
if connection_record:
connection_record.checkin()
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and
do_commit() methods.
In practice, a :class:`.Connection` assigns a :class:`.Transaction` object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if not pool.dispatch.checkout or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout
attempts = 2
while attempts > 0:
try:
pool.dispatch.checkout(fairy.connection,
fairy._connection_record,
fairy)
return fairy
except exc.DisconnectionError as e:
pool.logger.info(
"Disconnection detected on checkout: %s", e)
fairy._connection_record.invalidate(e)
try:
fairy.connection = \
fairy._connection_record.get_connection()
except:
with util.safe_reraise():
fairy._connection_record.checkin()
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(self.connection, self._connection_record,
self._pool, None, self._echo, fairy=self)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug("Connection %s rollback-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug("Connection %s commit-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info`
accessors.
"""
return self._connection_record.info
def invalidate(self, e=None, soft=False):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e, soft=soft)
if not soft:
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
_refs.remove(self._connection_record)
self._connection_record.fairy_ref = None
self._connection_record.connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
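# A minimal sketch of the proxying behavior described in the class docstring;
# `pool` is assumed to be any Pool from this module (illustration only).
def _fairy_sketch(pool):
    fairy = pool.connect()   # a _ConnectionFairy wrapping a DBAPI connection
    cursor = fairy.cursor()  # proxied straight through to the DBAPI object
    cursor.close()
    fairy.detach()           # the pool forgets this connection entirely
    fairy.close()            # so close() now really closes it rather than
                             # checking it back in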
class SingletonThreadPool(Pool):
"""A Pool that maintains one connection per thread.
Maintains one connection per thread, never moving a connection to a
thread other than the one in which it was created.
.. warning:: the :class:`.SingletonThreadPool` will call ``.close()``
on arbitrary connections that exist beyond the size setting of
``pool_size``, e.g. if more unique **thread identities**
than what ``pool_size`` states are used. This cleanup is
non-deterministic and not sensitive to whether or not the connections
linked to those thread identities are currently in use.
:class:`.SingletonThreadPool` may be improved in a future release,
however in its current status it is generally used only for test
scenarios using a SQLite ``:memory:`` database and is not recommended
for production use.
Options are the same as those of :class:`.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
def __init__(self, creator, pool_size=5, **kw):
kw['use_threadlocal'] = True
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._all_conns = set()
self.size = pool_size
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except Exception:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self):
while len(self._all_conns) >= self.size:
c = self._all_conns.pop()
c.close()
def status(self):
return "SingletonThreadPool id:%d size: %d" % \
(id(self), len(self._all_conns))
def _do_return_conn(self, conn):
pass
def _do_get(self):
try:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
if len(self._all_conns) >= self.size:
self._cleanup()
self._all_conns.add(c)
return c
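# A minimal construction sketch matching the docstring's recommended use
# (SQLite ":memory:" test databases); sqlite3 here is an assumption, any
# DBAPI creator callable would do.
def _singleton_pool_sketch():
    import sqlite3
    pool = SingletonThreadPool(lambda: sqlite3.connect(':memory:'),
                               pool_size=5)
    conn = pool.connect()    # the same thread always gets this connection
    conn.close()
    return pool.status()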
class QueuePool(Pool):
"""A :class:`.Pool` that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`.Engine` objects, unless the SQLite dialect is in use.
"""
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
**kw):
"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object, same as that of :paramref:`.Pool.creator`.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is pool_size +
`max_overflow`, and the total number of "sleeping"
connections the pool will allow is pool_size. `max_overflow`
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.
:param \**kw: Other keyword arguments including
:paramref:`.Pool.recycle`, :paramref:`.Pool.echo`,
:paramref:`.Pool.reset_on_return` and others are passed to the
:class:`.Pool` constructor.
"""
Pool.__init__(self, creator, **kw)
self._pool = sqla_queue.Queue(pool_size)
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
self._overflow_lock = threading.Lock()
def _do_return_conn(self, conn):
try:
self._pool.put(conn, False)
except sqla_queue.Full:
try:
conn.close()
finally:
self._dec_overflow()
def _do_get(self):
use_overflow = self._max_overflow > -1
try:
wait = use_overflow and self._overflow >= self._max_overflow
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
if use_overflow and self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %d" %
(self.size(), self.overflow(), self._timeout))
if self._inc_overflow():
try:
return self._create_connection()
except:
with util.safe_reraise():
self._dec_overflow()
else:
return self._do_get()
def _inc_overflow(self):
if self._max_overflow == -1:
self._overflow += 1
return True
with self._overflow_lock:
if self._overflow < self._max_overflow:
self._overflow += 1
return True
else:
return False
def _dec_overflow(self):
if self._max_overflow == -1:
self._overflow -= 1
return True
with self._overflow_lock:
self._overflow -= 1
return True
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator, pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
timeout=self._timeout,
recycle=self._recycle, echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return "Pool size: %d Connections in pool: %d "\
"Current Overflow: %d Current Checked out "\
"connections: %d" % (self.size(),
self.checkedin(),
self.overflow(),
self.checkedout())
def size(self):
return self._pool.maxsize
def checkedin(self):
return self._pool.qsize()
def overflow(self):
return self._overflow
def checkedout(self):
return self._pool.maxsize - self._pool.qsize() + self._overflow
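# A minimal sizing sketch based on the constructor docstring above: at most
# pool_size + max_overflow (here 5 + 10) concurrent connections, with a
# 10-second wait before TimeoutError; `creator` is an assumed DBAPI callable.
def _queue_pool_sketch(creator):
    pool = QueuePool(creator, pool_size=5, max_overflow=10, timeout=10)
    conn = pool.connect()
    conn.close()            # checked back into the queue, not closed
    return pool.status()    # reports size, overflow and checked-out counts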
class NullPool(Pool):
"""A Pool which does not pool connections.
Instead it literally opens and closes the underlying DB-API connection
on each connection open/close.
Reconnect-related functions such as ``recycle`` and connection
invalidation are not supported by this Pool implementation, since
no connections are held persistently.
.. versionchanged:: 0.7
:class:`.NullPool` is used by the SQLite dialect automatically
when a file-based database is used. See :ref:`sqlite_toplevel`.
"""
def status(self):
return "NullPool"
def _do_return_conn(self, conn):
conn.close()
def _do_get(self):
return self._create_connection()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
pass
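# A minimal sketch of the no-pooling behavior described above; each of the
# two checkouts opens, and each close() really closes, a DBAPI connection.
# `creator` is an assumed DBAPI callable.
def _null_pool_sketch(creator):
    pool = NullPool(creator)
    pool.connect().close()   # connection opened and closed
    pool.connect().close()   # a brand-new connection, nothing was retained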
class StaticPool(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are not
currently supported by this Pool implementation but may be implemented
in a future release.
"""
@memoized_property
def _conn(self):
return self._creator()
@memoized_property
def connection(self):
return _ConnectionRecord(self)
def status(self):
return "StaticPool"
def dispose(self):
if '_conn' in self.__dict__:
self._conn.close()
self._conn = None
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
_dialect=self._dialect)
def _create_connection(self):
return self._conn
def _do_return_conn(self, conn):
pass
def _do_get(self):
return self.connection
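# A minimal sketch of the single shared connection; both checkouts below
# wrap the same memoized _ConnectionRecord. `creator` is an assumed
# DBAPI callable.
def _static_pool_sketch(creator):
    pool = StaticPool(creator)
    first = pool.connect()
    second = pool.connect()
    assert first.connection is second.connection  # one DBAPI connection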
class AssertionPool(Pool):
"""A :class:`.Pool` that allows at most one checked out connection at
any given time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
.. versionchanged:: 0.7
:class:`.AssertionPool` also logs a traceback of where
the original connection was checked out, and reports
this in the assertion error raised.
"""
def __init__(self, *args, **kw):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop('store_traceback', True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self):
return "AssertionPool"
def _do_return_conn(self, conn):
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert conn is self._conn
def dispose(self):
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator, echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
_dialect=self._dialect)
def _do_get(self):
if self._checked_out:
if self._checkout_traceback:
suffix = ' at:\n%s' % ''.join(
chop_traceback(self._checkout_traceback))
else:
suffix = ''
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
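# A minimal sketch of the single-checkout assertion described above;
# `creator` is an assumed DBAPI callable.
def _assertion_pool_sketch(creator):
    pool = AssertionPool(creator)
    held = pool.connect()
    try:
        pool.connect()       # second concurrent checkout
    except AssertionError:
        pass                 # message includes the first checkout traceback
    held.close()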
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in list(self.pools):
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
self._create_pool_mutex.acquire()
try:
if key not in self.pools:
kw.pop('sa_pool_key', None)
pool = self.poolclass(
lambda: self.module.connect(*args, **kw), **self.kw)
self.pools[key] = pool
return pool
else:
return self.pools[key]
finally:
self._create_pool_mutex.release()
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
if "sa_pool_key" in kw:
return kw['sa_pool_key']
return tuple(
list(args) +
[(k, kw[k]) for k in sorted(kw)]
)
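# A minimal sketch of keyed pooling over a DBAPI module; sqlite3 is an
# assumption here, any DB-API 2.0 module exposing connect() works.
def _db_proxy_sketch():
    import sqlite3
    proxy = _DBProxy(sqlite3, poolclass=QueuePool, pool_size=2)
    conn = proxy.connect(':memory:')  # one pool per distinct connect args
    conn.close()
    proxy.dispose(':memory:')         # drop the pool for these arguments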
|
mrshu/PyTox | refs/heads/master | tests/tests.py | 1 | #
# @file tests.py
# @author Wei-Ning Huang (AZ) <aitjcize@gmail.com>
#
# Copyright (C) 2013 - 2014 Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import hashlib
import os
import re
import sys
import unittest
from pytox import Tox, OperationFailedError
from time import sleep
ADDR_SIZE = 76
CLIENT_ID_SIZE = 64
def unittest_skip(reason):
def _wrap1(func):
def _wrap2(self, *args, **kwargs):
pass
return _wrap2
return _wrap1
def patch_unittest():
major, minor, micro, release, serial = sys.version_info
if major == 2 and minor <= 6:
unittest.skip = unittest_skip
# Patch unittest for Python version <= 2.6
patch_unittest()
class AliceTox(Tox):
pass
class BobTox(Tox):
pass
class ToxTest(unittest.TestCase):
def setUp(self):
self.alice = AliceTox()
self.bob = BobTox()
self.loop_until_connected()
def tearDown(self):
"""
t:kill
"""
self.alice.kill()
self.bob.kill()
def loop(self, n):
"""
t:do
t:do_interval
"""
interval = self.bob.do_interval()
for i in range(n):
self.alice.do()
self.bob.do()
sleep(interval / 2000.0)
def loop_until_connected(self):
"""
t:isconnected
"""
while not self.alice.isconnected() or not self.bob.isconnected():
self.loop(50)
def wait_callback(self, obj, attr):
count = 0
THRESHOLD = 200
while not getattr(obj, attr):
self.loop(50)
if count >= THRESHOLD:
return False
count += 1
return True
def wait_callbacks(self, obj, attrs):
count = 0
THRESHOLD = 400
while not all([getattr(obj, attr) for attr in attrs]):
self.loop(50)
if count >= THRESHOLD:
return False
count += 1
return True
def ensure_exec(self, method, args):
count = 0
THRESHOLD = 200
while True:
try:
ret = method(*args)
break
except:
self.loop(50)
assert count < THRESHOLD
count += 1
return ret
def bob_add_alice_as_friend(self):
"""
t:add_friend
t:add_friend_norequest
t:on_friend_request
t:get_friend_id
"""
MSG = 'Hi, this is Bob.'
bob_addr = self.bob.get_address()
def on_friend_request(self, pk, message):
assert pk == bob_addr[:CLIENT_ID_SIZE]
assert message == MSG
self.add_friend_norequest(pk)
self.fr = True
AliceTox.on_friend_request = on_friend_request
alice_addr = self.alice.get_address()
self.alice.fr = False
self.bob.add_friend(alice_addr, MSG)
assert self.wait_callback(self.alice, 'fr')
AliceTox.on_friend_request = Tox.on_friend_request
self.bid = self.alice.get_friend_id(bob_addr)
self.aid = self.bob.get_friend_id(alice_addr)
#: Wait until both are online
def on_connection_status(self, friend_id, status):
assert status is True
self.cs = True
def on_user_status(self, friend_id, new_status):
self.us = True
AliceTox.on_connection_status = on_connection_status
BobTox.on_connection_status = on_connection_status
AliceTox.on_user_status = on_user_status
BobTox.on_user_status = on_user_status
self.alice.cs = False
self.bob.cs = False
self.alice.us = False
self.bob.us = False
assert self.wait_callbacks(self.alice, ['cs', 'us'])
assert self.wait_callbacks(self.bob, ['cs', 'us'])
AliceTox.on_connection_status = Tox.on_connection_status
BobTox.on_connection_status = Tox.on_connection_status
AliceTox.on_user_status = Tox.on_user_status
BobTox.on_user_status = Tox.on_user_status
def test_bootstrap(self):
"""
t:bootstrap_from_address
"""
assert self.alice.isconnected()
assert self.bob.isconnected()
def test_address(self):
"""
t:get_address
t:get_nospam
t:set_nospam
t:get_keys
"""
assert len(self.alice.get_address()) == ADDR_SIZE
assert len(self.bob.get_address()) == ADDR_SIZE
self.alice.set_nospam(0x12345678)
assert self.alice.get_nospam() == 0x12345678
pk, sk = self.alice.get_keys()
assert pk == self.alice.get_address()[:CLIENT_ID_SIZE]
def test_self_name(self):
"""
t:set_name
t:get_self_name
t:get_self_name_size
"""
self.alice.set_name('Alice')
self.loop(10)
assert self.alice.get_self_name() == 'Alice'
assert self.alice.get_self_name_size() == len('Alice')
def test_status_message(self):
"""
t:get_self_status_message
t:get_self_status_message_size
t:get_status_message
t:get_status_message_size
t:on_status_message
t:set_status_message
"""
self.bob_add_alice_as_friend()
MSG = 'Happy'
AID = self.aid
def on_status_message(self, friend_id, new_message):
assert friend_id == AID
assert new_message == MSG
self.sm = True
BobTox.on_status_message = on_status_message
self.bob.sm = False
self.alice.set_status_message(MSG)
assert self.wait_callback(self.bob, 'sm')
BobTox.on_status_message = Tox.on_status_message
assert self.alice.get_self_status_message() == MSG
assert self.alice.get_self_status_message_size() == len(MSG)
assert self.bob.get_status_message(self.aid) == MSG
assert self.bob.get_status_message_size(self.aid) == len(MSG)
def test_user_status(self):
"""
t:get_self_user_status
t:get_user_status
t:on_user_status
t:set_user_status
"""
self.bob_add_alice_as_friend()
AID = self.aid
def on_user_status(self, friend_id, new_status):
assert friend_id == AID
assert new_status == Tox.USERSTATUS_BUSY
self.us = True
self.alice.set_user_status(Tox.USERSTATUS_BUSY)
BobTox.on_user_status = on_user_status
self.bob.us = False
assert self.wait_callback(self.bob, 'us')
BobTox.on_user_status = Tox.on_user_status
assert self.alice.get_self_user_status() == Tox.USERSTATUS_BUSY
assert self.bob.get_user_status(self.aid) == Tox.USERSTATUS_BUSY
def test_connection_status(self):
"""
t:get_friend_connection_status
t:on_connection_status
"""
self.bob_add_alice_as_friend()
AID = self.aid
def on_connection_status(self, friend_id, status):
assert friend_id == AID
assert status is False
self.cs = True
BobTox.on_connection_status = on_connection_status
self.bob.cs = False
self.alice.kill()
self.alice = Tox()
assert self.wait_callback(self.bob, 'cs')
BobTox.on_connection_status = Tox.on_connection_status
assert self.bob.get_friend_connection_status(self.aid) is False
def test_tox(self):
"""
t:size
t:save
t:load
"""
assert self.alice.size() > 0
data = self.alice.save()
assert data is not None
addr = self.alice.get_address()
self.alice.kill()
self.alice = Tox()
self.alice.load(data)
assert addr == self.alice.get_address()
def test_tox_from_file(self):
"""
t:save_to_file
t:load_from_file
"""
self.alice.save_to_file('data')
addr = self.alice.get_address()
self.alice.kill()
self.alice = Tox()
#: Test invalid file
try:
self.alice.load_from_file('not_exists')
except OperationFailedError:
pass
else:
assert False
self.alice.load_from_file('data')
assert addr == self.alice.get_address()
def test_friend(self):
"""
t:count_friendlist
t:del_friend
t:friend_exists
t:get_client_id
t:get_friendlist
t:get_name
t:get_name_size
t:get_num_online_friends
t:on_name_change
"""
#: Test friend request
self.bob_add_alice_as_friend()
assert self.alice.friend_exists(self.bid)
assert self.bob.friend_exists(self.aid)
#: Test friend exists
assert not self.alice.friend_exists(self.bid + 1)
assert not self.bob.friend_exists(self.aid + 1)
#: Test get_client_id
assert self.alice.get_client_id(self.bid) == \
self.bob.get_address()[:CLIENT_ID_SIZE]
assert self.bob.get_client_id(self.aid) == \
self.alice.get_address()[:CLIENT_ID_SIZE]
#: Test friendlist
assert self.alice.get_friendlist() == [self.bid]
assert self.bob.get_friendlist() == [self.aid]
assert self.alice.count_friendlist() == 1
assert self.bob.count_friendlist() == 1
assert self.alice.get_num_online_friends() == 1
assert self.bob.get_num_online_friends() == 1
#: Test friend name
NEWNAME = 'Jenny'
AID = self.aid
def on_name_change(self, fid, newname):
assert fid == AID
assert newname == NEWNAME
self.nc = True
BobTox.on_name_change = on_name_change
self.bob.nc = False
self.alice.set_name(NEWNAME)
assert self.wait_callback(self.bob, 'nc')
assert self.bob.get_name(self.aid) == NEWNAME
assert self.bob.get_name_size(self.aid) == len(NEWNAME)
BobTox.on_name_change = Tox.on_name_change
def test_friend_message_and_action(self):
"""
t:on_friend_action
t:on_friend_message
t:send_action
t:send_message
"""
self.bob_add_alice_as_friend()
#: Test message
MSG = 'Hi, Bob!'
BID = self.bid
def on_friend_message(self, fid, message):
assert fid == BID
assert message == MSG
self.fm = True
AliceTox.on_friend_message = on_friend_message
self.ensure_exec(self.bob.send_message, (self.aid, MSG))
self.alice.fm = False
assert self.wait_callback(self.alice, 'fm')
AliceTox.on_friend_message = Tox.on_friend_message
#: Test action
ACTION = 'Kick'
BID = self.bid
def on_friend_action(self, fid, action):
assert fid == BID
assert action == ACTION
self.fa = True
AliceTox.on_friend_action = on_friend_action
self.ensure_exec(self.bob.send_action, (self.aid, ACTION))
self.alice.fa = False
assert self.wait_callback(self.alice, 'fa')
AliceTox.on_friend_action = Tox.on_friend_action
#: Test delete friend
self.alice.del_friend(self.bid)
self.loop(10)
assert not self.alice.friend_exists(self.bid)
def test_meta_status(self):
"""
t:on_read_receipt
t:on_typing_change
t:set_user_is_typing
t:get_is_typing
t:get_last_online
"""
self.bob_add_alice_as_friend()
AID = self.aid
#: Test typing status
def on_typing_change(self, fid, is_typing):
assert fid == AID
assert is_typing is True
assert self.get_is_typing(fid) is True
self.ut = True
BobTox.on_typing_change = on_typing_change
self.bob.ut = False
self.alice.set_user_is_typing(self.bid, True)
assert self.wait_callback(self.bob, 'ut')
BobTox.on_typing_change = Tox.on_typing_change
#: Test last online
assert self.alice.get_last_online(self.bid) is not None
assert self.bob.get_last_online(self.aid) is not None
def test_group(self):
"""
t:add_groupchat
t:count_chatlist
t:del_groupchat
t:get_chatlist
t:group_action_send
t:group_get_names
t:group_get_title
t:group_get_type
t:group_message_send
t:group_number_peers
t:group_peername
t:group_set_title
t:invite_friend
t:join_groupchat
t:on_group_action
t:on_group_invite
t:on_group_message
t:on_group_namelist_change
"""
self.bob_add_alice_as_friend()
#: Test group add
group_id = self.bob.add_groupchat()
assert group_id >= 0
self.loop(50)
BID = self.bid
def on_group_invite(self, fid, type_, data):
assert fid == BID
assert type_ == 0
gn = self.join_groupchat(fid, data)
assert type_ == self.group_get_type(gn)
self.gi = True
AliceTox.on_group_invite = on_group_invite
def on_group_namelist_change(self, gid, peer_number, change):
assert gid == group_id
assert change == Tox.CHAT_CHANGE_PEER_ADD
self.gn = True
AliceTox.on_group_namelist_change = on_group_namelist_change
self.alice.gi = False
self.alice.gn = False
self.ensure_exec(self.bob.invite_friend, (self.aid, group_id))
assert self.wait_callbacks(self.alice, ['gi', 'gn'])
AliceTox.on_group_invite = Tox.on_group_invite
AliceTox.on_group_namelist_change = Tox.on_group_namelist_change
#: Test group number of peers
self.loop(50)
assert self.bob.group_number_peers(group_id) == 2
#: Test group peername
self.alice.set_name('Alice')
self.bob.set_name('Bob')
def on_group_namelist_change(self, gid, peer_number, change):
if change == Tox.CHAT_CHANGE_PEER_NAME:
self.gn = True
AliceTox.on_group_namelist_change = on_group_namelist_change
self.alice.gn = False
assert self.wait_callback(self.alice, 'gn')
AliceTox.on_group_namelist_change = Tox.on_group_namelist_change
peernames = [self.bob.group_peername(group_id, i) for i in
range(self.bob.group_number_peers(group_id))]
assert 'Alice' in peernames
assert 'Bob' in peernames
assert sorted(self.bob.group_get_names(group_id)) == ['Alice', 'Bob']
#: Test title change
self.bob.group_set_title(group_id, 'My special title')
assert self.bob.group_get_title(group_id) == 'My special title'
#: Test group message
AID = self.aid
BID = self.bid
MSG = 'Group message test'
def on_group_message(self, gid, fgid, message):
if fgid == AID:
assert gid == group_id
assert message == MSG
self.gm = True
AliceTox.on_group_message = on_group_message
self.alice.gm = False
self.ensure_exec(self.bob.group_message_send, (group_id, MSG))
assert self.wait_callback(self.alice, 'gm')
AliceTox.on_group_message = Tox.on_group_message
#: Test group action
AID = self.aid
BID = self.bid
MSG = 'Group action test'
def on_group_action(self, gid, fgid, action):
if fgid == AID:
assert gid == group_id
assert action == MSG
self.ga = True
AliceTox.on_group_action = on_group_action
self.alice.ga = False
self.ensure_exec(self.bob.group_action_send, (group_id, MSG))
assert self.wait_callback(self.alice, 'ga')
AliceTox.on_group_action = Tox.on_group_action
#: Test chatlist
assert len(self.bob.get_chatlist()) == self.bob.count_chatlist()
assert len(self.alice.get_chatlist()) == self.bob.count_chatlist()
assert self.bob.count_chatlist() == 1
self.bob.del_groupchat(group_id)
assert self.bob.count_chatlist() == 0
def test_file_transfer(self):
"""
t:file_data_remaining
t:file_data_size
t:file_send_control
t:file_send_data
t:new_file_sender
t:on_file_control
t:on_file_data
t:on_file_send_request
"""
self.bob_add_alice_as_friend()
FILE = os.urandom(1024 * 1024)
FILE_NAME = "test.bin"
FILE_SIZE = len(FILE)
m = hashlib.md5()
m.update(FILE)
FILE_DIGEST = m.hexdigest()
BID = self.bid
CONTEXT = {'FILE': bytes(), 'RECEIVED': 0, 'START': False, 'SENT': 0}
def on_file_send_request(self, fid, filenumber, size, filename):
assert fid == BID
assert size == FILE_SIZE
assert filename == FILE_NAME
self.file_send_control(fid, 1, filenumber, Tox.FILECONTROL_ACCEPT)
def on_file_control(self, fid, receive_send, file_number, ct, data):
assert fid == BID
if receive_send == 0 and ct == Tox.FILECONTROL_FINISHED:
assert CONTEXT['RECEIVED'] == FILE_SIZE
m = hashlib.md5()
m.update(CONTEXT['FILE'])
assert m.hexdigest() == FILE_DIGEST
self.completed = True
def on_file_data(self, fid, file_number, data):
assert fid == BID
CONTEXT['FILE'] += data
CONTEXT['RECEIVED'] += len(data)
if CONTEXT['RECEIVED'] < FILE_SIZE:
assert self.file_data_remaining(
fid, file_number, 1) == FILE_SIZE - CONTEXT['RECEIVED']
AliceTox.on_file_send_request = on_file_send_request
AliceTox.on_file_control = on_file_control
AliceTox.on_file_data = on_file_data
def on_file_control2(self, fid, receive_send, file_number, ct, data):
if receive_send == 1 and ct == Tox.FILECONTROL_ACCEPT:
CONTEXT['START'] = True
BobTox.on_file_control = on_file_control2
self.alice.completed = False
BLK = self.bob.file_data_size(self.aid)
FN = self.bob.new_file_sender(self.aid, FILE_SIZE, FILE_NAME)
while not self.alice.completed:
if CONTEXT['START']:
try:
while True:
if CONTEXT['SENT'] == FILE_SIZE:
self.bob.file_send_control(
self.aid, 0, FN,
Tox.FILECONTROL_FINISHED
)
CONTEXT['START'] = False
break
else:
ed = CONTEXT['SENT'] + BLK
if ed > FILE_SIZE:
ed = FILE_SIZE
self.bob.file_send_data(
self.aid, FN,
FILE[CONTEXT['SENT']:ed]
)
CONTEXT['SENT'] = ed
except:
pass
self.alice.do()
self.bob.do()
sleep(0.02)
AliceTox.on_file_send_request = Tox.on_file_send_request
AliceTox.on_file_control = Tox.on_file_control
AliceTox.on_file_data = Tox.on_file_data
BobTox.on_file_control = Tox.on_file_control
if __name__ == '__main__':
methods = set([x for x in dir(Tox)
if not x[0].isupper() and not x[0] == '_'])
docs = "".join([getattr(ToxTest, x).__doc__ for x in dir(ToxTest)
if getattr(ToxTest, x).__doc__ is not None])
tested = set(re.findall(r't:(.*?)\n', docs))
not_tested = methods.difference(tested)
print('Test Coverage: %.2f%%' % (len(tested) * 100.0 / len(methods)))
if len(not_tested):
print('Not tested:\n %s' % "\n ".join(sorted(list(not_tested))))
unittest.main()
|
bfarkas/django-publications | refs/heads/develop | publications/bibtex.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
__version__ = '1.2.0'
import re
import publications.six as six
# special character mapping
special_chars = (
(r'\"{a}', 'ä'), (r'{\"a}', 'ä'), (r'\"a', 'ä'), (r'H{a}', 'ä'),
(r'\"{A}', 'Ä'), (r'{\"A}', 'Ä'), (r'\"A', 'Ä'), (r'H{A}', 'Ä'),
(r'\"{o}', 'ö'), (r'{\"o}', 'ö'), (r'\"o', 'ö'), (r'H{o}', 'ö'),
(r'\"{O}', 'Ö'), (r'{\"O}', 'Ö'), (r'\"O', 'Ö'), (r'H{O}', 'Ö'),
(r'\"{u}', 'ü'), (r'{\"u}', 'ü'), (r'\"u', 'ü'), (r'H{u}', 'ü'),
(r'\"{U}', 'Ü'), (r'{\"U}', 'Ü'), (r'\"U', 'Ü'), (r'H{U}', 'Ü'),
(r'{\`a}', 'à'), (r'\`A', 'À'),
(r'{\`e}', 'è'), (r'\`E', 'È'),
(r'{\`o}', 'ò'), (r'\`O', 'Ò'),
(r'{\`u}', 'ù'), (r'\`U', 'Ù'),
(r"{\'a}", 'á'), (r"\'A", 'Á'),
(r"{\'e}", 'é'), (r"\'E", 'É'),
(r"{\'o}", 'ó'), (r"\'O", 'Ó'),
(r"{\'u}", 'ú'), (r"\'U", 'Ú'),
(r'\`a', 'à'), (r'\`A', 'À'),
(r'\`e', 'è'), (r'\`E', 'È'),
(r'\`u', 'ù'), (r'\`U', 'Ù'),
(r'\`o', 'ò'), (r'\`O', 'Ò'),
(r'\^o', 'ô'), (r'\^O', 'Ô'),
(r'\ss', 'ß'),
(r'\ae', 'æ'), (r'\AE', 'Æ'))
def parse(string):
"""
Takes a string in BibTex format and returns a list of BibTex entries, where
each entry is a dictionary containing the entry's key-value pairs.
@type string: string
@param string: bibliography in BibTex format
@rtype: list
@return: a list of dictionaries representing a bibliography
"""
# bibliography
bib = []
# make sure we are dealing with unicode strings
if not isinstance(string, six.text_type):
string = string.decode('utf-8')
# replace special characters
for key, value in special_chars:
string = string.replace(key, value)
# split into BibTex entries
entries = re.findall(r'(?u)@(\w+)[ \t]?{[ \t\n\r]*([^,\s]*)[ \t]*,?\s*((?:[^=,\s]+\s*\=\s*(?:"[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,}]*),?\s*?)+)\s*}', string)
for entry in entries:
# parse entry
pairs = re.findall(r'(?u)([^=,\s]+)\s*\=\s*("[^"]*"|{(?:[^{}]*|{[^{}]*})*}|[^,]*)', entry[2])
# add to bibliography
bib.append({'type': entry[0].lower(), 'key': entry[1]})
for key, value in pairs:
# post-process key and value
key = key.lower()
if value and value[0] == '"' and value[-1] == '"':
value = value[1:-1]
if value and value[0] == '{' and value[-1] == '}':
value = value[1:-1]
if key not in ['booktitle', 'title']:
value = value.replace('}', '').replace('{', '')
else:
if value.startswith('{') and value.endswith('}'):
value = value[1:]
value = value[:-1]
value = value.strip()
value = re.sub(r'\s+', ' ', value)
# store pair in bibliography
bib[-1][key] = value
return bib
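# A minimal round trip through parse(); the entry below is made up for
# illustration only.
def _parse_sketch():
    bib = parse('@article{demo2020, title={A Demo}, year=2020}')
    # -> [{'type': 'article', 'key': 'demo2020',
    #      'title': 'A Demo', 'year': '2020'}]
    return bib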
|
MillerDix/NEChromeX | refs/heads/master | flaskTest/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py | 335 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import pkgutil
import shutil
import sys
import types
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir, Cache
logger = logging.getLogger(__name__)
cache = None # created when needed
class ResourceCache(Cache):
def __init__(self, base=None):
if base is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('resource-cache'))
super(ResourceCache, self).__init__(base)
def is_stale(self, resource, path):
"""
Is the cache stale for the given resource?
:param resource: The :class:`Resource` being cached.
:param path: The path of the resource in the cache.
:return: True if the cache is stale.
"""
# Cache invalidation is a hard problem :-)
return True
def get(self, resource):
"""
Get a resource into the cache.
:param resource: A :class:`Resource` instance.
:return: The pathname of the resource in the cache.
"""
prefix, path = resource.finder.get_cache_info(resource)
if prefix is None:
result = path
else:
result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
dirname = os.path.dirname(result)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if not os.path.exists(result):
stale = True
else:
stale = self.is_stale(resource, path)
if stale:
# write the bytes of the resource to the cache location
with open(result, 'wb') as f:
f.write(resource.bytes)
return result
class ResourceBase(object):
def __init__(self, finder, name):
self.finder = finder
self.name = name
class Resource(ResourceBase):
"""
A class representing an in-package resource, such as a data file. This is
not normally instantiated by user code, but rather by a
:class:`ResourceFinder` which manages the resource.
"""
is_container = False # Backwards compatibility
def as_stream(self):
"""
Get the resource as a stream.
This is not a property to make it obvious that it returns a new stream
each time.
"""
return self.finder.get_stream(self)
@cached_property
def file_path(self):
global cache
if cache is None:
cache = ResourceCache()
return cache.get(self)
@cached_property
def bytes(self):
return self.finder.get_bytes(self)
@cached_property
def size(self):
return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
is_container = True # Backwards compatibility
@cached_property
def resources(self):
return self.finder.get_resources(self)
class ResourceFinder(object):
"""
Resource finder for file system resources.
"""
if sys.platform.startswith('java'):
skipped_extensions = ('.pyc', '.pyo', '.class')
else:
skipped_extensions = ('.pyc', '.pyo')
def __init__(self, module):
self.module = module
self.loader = getattr(module, '__loader__', None)
self.base = os.path.dirname(getattr(module, '__file__', ''))
def _adjust_path(self, path):
return os.path.realpath(path)
def _make_path(self, resource_name):
# Issue #50: need to preserve type of path on Python 2.x
# like os.path._get_sep
if isinstance(resource_name, bytes): # should only happen on 2.x
sep = b'/'
else:
sep = '/'
parts = resource_name.split(sep)
parts.insert(0, self.base)
result = os.path.join(*parts)
return self._adjust_path(result)
def _find(self, path):
return os.path.exists(path)
def get_cache_info(self, resource):
return None, resource.path
def find(self, resource_name):
path = self._make_path(resource_name)
if not self._find(path):
result = None
else:
if self._is_directory(path):
result = ResourceContainer(self, resource_name)
else:
result = Resource(self, resource_name)
result.path = path
return result
def get_stream(self, resource):
return open(resource.path, 'rb')
def get_bytes(self, resource):
with open(resource.path, 'rb') as f:
return f.read()
def get_size(self, resource):
return os.path.getsize(resource.path)
def get_resources(self, resource):
def allowed(f):
return (f != '__pycache__' and not
f.endswith(self.skipped_extensions))
return set([f for f in os.listdir(resource.path) if allowed(f)])
def is_container(self, resource):
return self._is_directory(resource.path)
_is_directory = staticmethod(os.path.isdir)
def iterator(self, resource_name):
resource = self.find(resource_name)
if resource is not None:
todo = [resource]
while todo:
resource = todo.pop(0)
yield resource
if resource.is_container:
rname = resource.name
for name in resource.resources:
if not rname:
new_name = name
else:
new_name = '/'.join([rname, name])
child = self.find(new_name)
if child.is_container:
todo.append(child)
else:
yield child
class ZipResourceFinder(ResourceFinder):
"""
Resource finder for resources in .zip files.
"""
def __init__(self, module):
super(ZipResourceFinder, self).__init__(module)
archive = self.loader.archive
self.prefix_len = 1 + len(archive)
# PyPy doesn't have a _files attr on zipimporter, and you can't set one
if hasattr(self.loader, '_files'):
self._files = self.loader._files
else:
self._files = zipimport._zip_directory_cache[archive]
self.index = sorted(self._files)
def _adjust_path(self, path):
return path
def _find(self, path):
path = path[self.prefix_len:]
if path in self._files:
result = True
else:
if path and path[-1] != os.sep:
path = path + os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
if not result:
logger.debug('_find failed: %r %r', path, self.loader.prefix)
else:
logger.debug('_find worked: %r %r', path, self.loader.prefix)
return result
def get_cache_info(self, resource):
prefix = self.loader.archive
path = resource.path[1 + len(prefix):]
return prefix, path
def get_bytes(self, resource):
return self.loader.get_data(resource.path)
def get_stream(self, resource):
return io.BytesIO(self.get_bytes(resource))
def get_size(self, resource):
path = resource.path[self.prefix_len:]
return self._files[path][3]
def get_resources(self, resource):
path = resource.path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
plen = len(path)
result = set()
i = bisect.bisect(self.index, path)
while i < len(self.index):
if not self.index[i].startswith(path):
break
s = self.index[i][plen:]
result.add(s.split(os.sep, 1)[0]) # only immediate children
i += 1
return result
def _is_directory(self, path):
path = path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
return result
_finder_registry = {
type(None): ResourceFinder,
zipimport.zipimporter: ZipResourceFinder
}
try:
# In Python 3.6, _frozen_importlib -> _frozen_importlib_external
try:
import _frozen_importlib_external as _fi
except ImportError:
import _frozen_importlib as _fi
_finder_registry[_fi.SourceFileLoader] = ResourceFinder
_finder_registry[_fi.FileFinder] = ResourceFinder
del _fi
except (ImportError, AttributeError):
pass
def register_finder(loader, finder_maker):
_finder_registry[type(loader)] = finder_maker
_finder_cache = {}
def finder(package):
"""
Return a resource finder for a package.
:param package: The name of the package.
:return: A :class:`ResourceFinder` instance for the package.
"""
if package in _finder_cache:
result = _finder_cache[package]
else:
if package not in sys.modules:
__import__(package)
module = sys.modules[package]
path = getattr(module, '__path__', None)
if path is None:
raise DistlibException('You cannot get a finder for a module, '
'only for a package')
loader = getattr(module, '__loader__', None)
finder_maker = _finder_registry.get(type(loader))
if finder_maker is None:
raise DistlibException('Unable to locate finder for %r' % package)
result = finder_maker(module)
_finder_cache[package] = result
return result
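# A minimal usage sketch for finder(); the package and resource names are
# assumptions for illustration only.
def _finder_sketch():
    f = finder('mypackage')            # any importable package
    res = f.find('data/config.json')   # resource path relative to it
    if res is not None and not res.is_container:
        return res.bytes               # raw bytes of the resource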
_dummy_module = types.ModuleType(str('__dummy__'))
def finder_for_path(path):
"""
Return a resource finder for a path, which should represent a container.
:param path: The path.
:return: A :class:`ResourceFinder` instance for the path.
"""
result = None
# calls any path hooks, gets importer into cache
pkgutil.get_importer(path)
loader = sys.path_importer_cache.get(path)
finder = _finder_registry.get(type(loader))
if finder:
module = _dummy_module
module.__file__ = os.path.join(path, '')
module.__loader__ = loader
result = finder(module)
return result
|
shinsterneck/pdns | refs/heads/feature-geosql-backend | regression-tests.recursor-dnssec/test_Simple.py | 6 | import dns
import os
from recursortests import RecursorTest
class testSimple(RecursorTest):
_confdir = 'Simple'
_config_template = """dnssec=validate
auth-zones=authzone.example=configs/%s/authzone.zone""" % _confdir
@classmethod
def generateRecursorConfig(cls, confdir):
authzonepath = os.path.join(confdir, 'authzone.zone')
with open(authzonepath, 'w') as authzone:
authzone.write("""$ORIGIN authzone.example.
@ 3600 IN SOA {soa}
@ 3600 IN A 192.0.2.88
""".format(soa=cls._SOA))
super(testSimple, cls).generateRecursorConfig(confdir)
def testSOAs(self):
for zone in ['.', 'example.', 'secure.example.']:
expected = dns.rrset.from_text(zone, 0, dns.rdataclass.IN, 'SOA', self._SOA)
query = dns.message.make_query(zone, 'SOA', want_dnssec=True)
query.flags |= dns.flags.AD
res = self.sendUDPQuery(query)
self.assertMessageIsAuthenticated(res)
self.assertRRsetInAnswer(res, expected)
self.assertMatchingRRSIGInAnswer(res, expected)
def testA(self):
expected = dns.rrset.from_text('ns.secure.example.', 0, dns.rdataclass.IN, 'A', '{prefix}.9'.format(prefix=self._PREFIX))
query = dns.message.make_query('ns.secure.example', 'A', want_dnssec=True)
query.flags |= dns.flags.AD
res = self.sendUDPQuery(query)
self.assertMessageIsAuthenticated(res)
self.assertRRsetInAnswer(res, expected)
self.assertMatchingRRSIGInAnswer(res, expected)
def testDelegation(self):
query = dns.message.make_query('example', 'NS', want_dnssec=True)
query.flags |= dns.flags.AD
expectedNS = dns.rrset.from_text('example.', 0, 'IN', 'NS', 'ns1.example.', 'ns2.example.')
res = self.sendUDPQuery(query)
self.assertMessageIsAuthenticated(res)
self.assertRRsetInAnswer(res, expectedNS)
def testBogus(self):
query = dns.message.make_query('ted.bogus.example', 'A', want_dnssec=True)
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.SERVFAIL)
def testAuthZone(self):
query = dns.message.make_query('authzone.example', 'A', want_dnssec=True)
expectedA = dns.rrset.from_text('authzone.example.', 0, 'IN', 'A', '192.0.2.88')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NOERROR)
self.assertRRsetInAnswer(res, expectedA)
def testLocalhost(self):
queryA = dns.message.make_query('localhost', 'A', want_dnssec=True)
expectedA = dns.rrset.from_text('localhost.', 0, 'IN', 'A', '127.0.0.1')
queryPTR = dns.message.make_query('1.0.0.127.in-addr.arpa', 'PTR', want_dnssec=True)
expectedPTR = dns.rrset.from_text('1.0.0.127.in-addr.arpa.', 0, 'IN', 'PTR', 'localhost.')
resA = self.sendUDPQuery(queryA)
resPTR = self.sendUDPQuery(queryPTR)
self.assertRcodeEqual(resA, dns.rcode.NOERROR)
self.assertRRsetInAnswer(resA, expectedA)
self.assertRcodeEqual(resPTR, dns.rcode.NOERROR)
self.assertRRsetInAnswer(resPTR, expectedPTR)
def testLocalhostSubdomain(self):
queryA = dns.message.make_query('foo.localhost', 'A', want_dnssec=True)
expectedA = dns.rrset.from_text('foo.localhost.', 0, 'IN', 'A', '127.0.0.1')
resA = self.sendUDPQuery(queryA)
self.assertRcodeEqual(resA, dns.rcode.NOERROR)
self.assertRRsetInAnswer(resA, expectedA)
def testIslandOfSecurity(self):
query = dns.message.make_query('cname-to-islandofsecurity.secure.example.', 'A', want_dnssec=True)
expectedCNAME = dns.rrset.from_text('cname-to-islandofsecurity.secure.example.', 0, 'IN', 'CNAME', 'node1.islandofsecurity.example.')
expectedA = dns.rrset.from_text('node1.islandofsecurity.example.', 0, 'IN', 'A', '192.0.2.20')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NOERROR)
self.assertRRsetInAnswer(res, expectedA)
|
adekoder/flask_dbseeder | refs/heads/master | test/__init__.py | 1 | from flask import Flask
from flask_dbseeder import Seeder, SeederCommand, SeedManager, NotASubClassException
from flask_sqlalchemy import SQLAlchemy
import os
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(basedir, 'test-db.db')
db = SQLAlchemy(app)
seeder = Seeder(app, db)
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64))
username = db.Column(db.String(64))
class UserSeeder():
def run(self):
user = User()
user.username = "john"
user.email = 'abc@gmail.com'
self.save(user)
class UserSeeder2(SeedManager):
def run(self):
user = User()
user.username = "john"
user.email = 'abc@gmail.com'
self.save(user)
class UserSeeder3(SeedManager):
pass |
kobejean/tensorflow | refs/heads/master | tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py | 24 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.layers.sparse_feature_cross."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseCrossOpTest(test.TestCase):
def test_simple(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_dense(self):
"""Tests only dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_sparse(self):
"""Tests mixed type."""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([[11], [333, 55555]]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',
'55555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_dense(self):
"""Tests mixed dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
'999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_sparse_cross_dense(self):
"""Tests sparse and dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_sparse_input(self):
"""Tests mixed type sparse and dense inputs."""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([[11], [333, 5555]]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
'5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x3x3(self):
"""Tests 3x3x3 permutation.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor(
[['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
self._sparse_tensor(
[['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x1x2(self):
"""Tests 3x1x2 permutation.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_large_batch(self):
"""Tests with large batch size to force multithreading.
"""
batch_size = 5000
col1 = []
col2 = []
col3 = []
for b in range(batch_size):
col1.append(
['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
col2.append(['batch%d-FC2-F1' % b])
col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(col1), self._sparse_tensor(col2),
self._sparse_tensor(col3)
])
col_out = []
for b in range(batch_size):
col_out.append([
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
])
expected_out = self._sparse_tensor(col_out)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_one_column_empty(self):
"""Tests when one column is empty.
The crossed tensor should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
self._sparse_tensor([], 1),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_some_columns_empty(self):
"""Tests when more than one columns are empty.
Cross for the corresponding batch should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
]], 2)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_all_columns_empty(self):
"""Tests when all columns are empty.
The crossed tensor should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([]), self._sparse_tensor([]),
self._sparse_tensor([])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_hashed_output_zero_bucket(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[3735511728867393167]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_zero_bucket_v2(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[1971693436396284976]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
# TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.
def test_hashed_output(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
num_buckets=100)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[74]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_v2(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
num_buckets=100,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[83]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_v1_has_collision(self):
"""Tests the old version of the fingerprint concatenation has collisions.
"""
# The last 10 bits of 359 and 1024+359 are identical.
# As a result, all the crosses collide.
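    # Illustratively, with num_buckets=1024 only the low 10 bits of each
    # fingerprint survive the modulo, and (359 % 1024) == ((359 + 1024) % 1024).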
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_feature_cross_op.sparse_feature_cross(
[t2, t1], hashed_output=True, num_buckets=1024)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.equal(values[0], values[1]).all())
def test_hashed_output_v2_has_no_collision(self):
"""Tests the new version of the fingerprint concatenation has no collisions.
"""
# Although the last 10 bits of 359 and 1024+359 are identical.
# As a result, all the crosses shouldn't collide.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_feature_cross_op.sparse_feature_cross(
[t2, t1],
hashed_output=True,
num_buckets=1024,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
def test_hashed_3x1x2(self):
"""Tests 3x1x2 permutation with hashed output.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
],
hashed_output=True,
num_buckets=1000)
with self.cached_session() as sess:
out = sess.run(op)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
      self.assertTrue(all(0 <= x < 1000 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def _assert_sparse_tensor_empty(self, sp):
    self.assertEqual(0, sp.indices.size)
    self.assertEqual(0, sp.values.size)
    # TODO(zakaria): check if we can ignore the first dim of the shape.
    self.assertEqual(0, sp.dense_shape[1])
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices.eval(), sp2.indices)
self.assertAllEqual(sp1.values.eval(), sp2.values)
self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
def _sparse_tensor(self, data, batch_size=-1):
"""Generates a SparseTensor.
Args:
data: Should be a list of list of strings or int64. Each item of the outer
list represents a batch. Each item of the batch is a feature of a
specific feature column.
batch_size: optional batch size, especially for cases when data has no
entry for some batches.
Returns:
A SparseTensor.
"""
indices = []
values = []
max_col_count = 0
    for batch_ix, batch in enumerate(data):
      for column_ix, column in enumerate(batch):
        indices.append([batch_ix, column_ix])
        values.append(column)
        max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (dtypes.string if not values or isinstance(values[0], str) else
dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
if __name__ == '__main__':
test.main()
|
kool79/intellij-community | refs/heads/master | python/lib/Lib/encodings/cp932.py | 817 | #
# cp932.py: Python Unicode Codec for CP932
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('cp932')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='cp932',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
MichaelNedzelsky/intellij-community | refs/heads/master | python/testData/refactoring/introduceVariable/dontSuggestBuiltinTypeNames.py | 166 | "foo <caret>bar" |
fredericlepied/ansible | refs/heads/devel | lib/ansible/modules/network/cloudengine/ce_snmp_contact.py | 39 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_contact
version_added: "2.4"
short_description: Manages SNMP contact configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP contact configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
contact:
description:
- Contact information.
required: true
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp contact test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP contact"
ce_snmp_contact:
state: present
contact: call Operator at 010-99999999
provider: "{{ cli }}"
- name: "Undo SNMP contact"
ce_snmp_contact:
state: absent
contact: call Operator at 010-99999999
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"contact": "call Operator at 010-99999999",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"contact": "call Operator at 010-99999999"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent sys-info contact call Operator at 010-99999999"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec
class SnmpContact(object):
""" Manages SNMP contact configuration """
def __init__(self, **kwargs):
""" Class init """
# module
argument_spec = kwargs["argument_spec"]
self.spec = argument_spec
self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
# config
self.cur_cfg = dict()
# module args
self.state = self.module.params['state']
self.contact = self.module.params['contact']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def check_args(self):
""" Check invalid args """
if self.contact:
if len(self.contact) > 255 or len(self.contact) < 1:
self.module.fail_json(
                msg='Error: The length of contact %s is out of [1 - 255].' % self.contact)
else:
self.module.fail_json(
                msg='Error: The contact is empty.')
def get_proposed(self):
""" Get proposed state """
self.proposed["state"] = self.state
if self.contact:
self.proposed["contact"] = self.contact
def get_existing(self):
""" Get existing state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"contact ")
self.cur_cfg["contact"] = temp_data[1]
self.existing["contact"] = temp_data[1]
def get_end_state(self):
""" Get end state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"contact ")
self.end_state["contact"] = temp_data[1]
def cli_load_config(self, commands):
""" Load configure by cli """
if not self.module.check_mode:
load_config(self.module, commands)
def cli_get_config(self):
""" Get configure by cli """
regular = "| include snmp | include contact"
flags = list()
flags.append(regular)
tmp_cfg = get_config(self.module, flags)
return tmp_cfg
def set_config(self):
""" Set configure by cli """
cmd = "snmp-agent sys-info contact %s" % self.contact
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def undo_config(self):
""" Undo configure by cli """
cmd = "undo snmp-agent sys-info contact"
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def work(self):
""" Main work function """
self.check_args()
self.get_proposed()
self.get_existing()
if self.state == "present":
if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]:
pass
else:
self.set_config()
else:
if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]:
self.undo_config()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
self.results['updates'] = self.updates_cmd
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
contact=dict(type='str', required=True)
)
argument_spec.update(ce_argument_spec)
module = SnmpContact(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
|
AlphaSmartDog/DeepLearningNotes | refs/heads/master | Note-6 A3CNet/Note-6.2.1 代码阅读顺序/sonnet/python/modules/gated_rnn.py | 5 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LSTM based modules for TensorFlow snt.
This python module contains LSTM-like cores that fall under the broader group
of RNN cores. In general, initializers for the gate weights and other
model parameters may be passed to the constructor.
Typical usage example of the standard LSTM without peephole connections:
```
import sonnet as snt
hidden_size = 10
batch_size = 2
# Simple LSTM op on some input
rnn = snt.LSTM(hidden_size)
input = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
out, next_state = rnn(input, rnn.initial_state())
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import batch_norm
from sonnet.python.modules import conv
from sonnet.python.modules import layer_norm
from sonnet.python.modules import rnn_core
from sonnet.python.modules import util
import tensorflow as tf
from tensorflow.python.ops import array_ops
class LSTM(rnn_core.RNNCore):
"""LSTM recurrent network cell with optional peepholes & layer normalization.
The implementation is based on: http://arxiv.org/abs/1409.2329. We add
forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
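  Ignoring the optional peephole and layer-norm terms, the update computed by
  this core is the standard one:
      i, j, f, o = split(w_gates [x_t, h_{t-1}] + b_gates)
      c_t = sigmoid(f + forget_bias) * c_{t-1} + sigmoid(i) * tanh(j)
      h_t = tanh(c_t) * sigmoid(o)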
#### Layer normalization
This is described in https://arxiv.org/pdf/1607.06450.pdf
#### Peep-hole connections
Peep-hole connections may optionally be used by specifying a flag in the
  constructor. These connections can help increase the precision of output
  timing; for more details see:
https://research.google.com/pubs/archive/43905.pdf
Attributes:
state_size: Tuple of `tf.TensorShape`s indicating the size of state tensors.
output_size: `tf.TensorShape` indicating the size of the core output.
use_peepholes: Boolean indicating whether peephole connections are used.
"""
# Keys that may be provided for parameter initializers.
W_GATES = "w_gates" # weight for gates
B_GATES = "b_gates" # bias of gates
W_F_DIAG = "w_f_diag" # weight for prev_cell -> forget gate peephole
W_I_DIAG = "w_i_diag" # weight for prev_cell -> input gate peephole
W_O_DIAG = "w_o_diag" # weight for prev_cell -> output gate peephole
POSSIBLE_INITIALIZER_KEYS = {W_GATES, B_GATES, W_F_DIAG, W_I_DIAG, W_O_DIAG}
def __init__(self,
hidden_size,
forget_bias=1.0,
initializers=None,
partitioners=None,
regularizers=None,
use_peepholes=False,
use_layer_norm=False,
hidden_clip_value=None,
cell_clip_value=None,
custom_getter=None,
name="lstm"):
"""Construct LSTM.
Args:
hidden_size: (int) Hidden size dimensionality.
forget_bias: (float) Bias for the forget activation.
initializers: Dict containing ops to initialize the weights.
This dictionary may contain any of the keys returned by
`LSTM.get_possible_initializer_keys`.
partitioners: Optional dict containing partitioners to partition
the weights and biases. As a default, no partitioners are used. This
dict may contain any of the keys returned by
`LSTM.get_possible_initializer_keys`.
regularizers: Optional dict containing regularizers for the weights and
biases. As a default, no regularizers are used. This dict may contain
any of the keys returned by
`LSTM.get_possible_initializer_keys`.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_layer_norm: Boolean that indicates whether to apply layer
normalization.
hidden_clip_value: Optional number; if set, then the LSTM hidden state
vector is clipped by this value.
cell_clip_value: Optional number; if set, then the LSTM cell vector is
clipped by this value.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
KeyError: if `initializers` contains any keys not returned by
`LSTM.get_possible_initializer_keys`.
KeyError: if `partitioners` contains any keys not returned by
`LSTM.get_possible_initializer_keys`.
KeyError: if `regularizers` contains any keys not returned by
`LSTM.get_possible_initializer_keys`.
ValueError: if a peephole initializer is passed in the initializer list,
but `use_peepholes` is False.
"""
super(LSTM, self).__init__(custom_getter=custom_getter, name=name)
self._hidden_size = hidden_size
self._forget_bias = forget_bias
self._use_peepholes = use_peepholes
self._use_layer_norm = use_layer_norm
self._hidden_clip_value = hidden_clip_value
self._cell_clip_value = cell_clip_value
self.possible_keys = self.get_possible_initializer_keys(
use_peepholes=use_peepholes)
self._initializers = util.check_initializers(initializers,
self.possible_keys)
self._partitioners = util.check_initializers(partitioners,
self.possible_keys)
self._regularizers = util.check_initializers(regularizers,
self.possible_keys)
if hidden_clip_value is not None and hidden_clip_value < 0:
raise ValueError("The value of hidden_clip_value should be nonnegative.")
if cell_clip_value is not None and cell_clip_value < 0:
raise ValueError("The value of cell_clip_value should be nonnegative.")
@classmethod
def get_possible_initializer_keys(cls, use_peepholes=False):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
Args:
      cls: The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if not use_peepholes:
possible_keys.difference_update(
{cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
return possible_keys
def _build(self, inputs, prev_state):
"""Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tuple (prev_hidden, prev_cell).
Returns:
A tuple (output, next_state) where 'output' is a Tensor of size
`[batch_size, hidden_size]` and 'next_state' is a tuple
(next_hidden, next_cell) where next_hidden and next_cell have size
`[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
prev_hidden, prev_cell = prev_state
# pylint: disable=invalid-unary-operand-type
if self._hidden_clip_value is not None:
prev_hidden = tf.clip_by_value(
prev_hidden, -self._hidden_clip_value, self._hidden_clip_value)
if self._cell_clip_value is not None:
prev_cell = tf.clip_by_value(
prev_cell, -self._cell_clip_value, self._cell_clip_value)
# pylint: enable=invalid-unary-operand-type
self._create_gate_variables(inputs.get_shape(), inputs.dtype)
# pylint false positive: calling module of same file;
# pylint: disable=not-callable
# Parameters of gates are concatenated into one multiply for efficiency.
inputs_and_hidden = tf.concat([inputs, prev_hidden], 1)
gates = tf.matmul(inputs_and_hidden, self._w_xh)
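    # inputs_and_hidden has shape [batch_size, input_size + hidden_size]; the
    # single matmul yields all four gates, shape [batch_size, 4 * hidden_size].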
if self._use_layer_norm:
gates = layer_norm.LayerNorm()(gates)
gates += self._b
# i = input_gate, j = next_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=gates, num_or_size_splits=4, axis=1)
if self._use_peepholes: # diagonal connections
self._create_peephole_variables(inputs.dtype)
f += self._w_f_diag * prev_cell
i += self._w_i_diag * prev_cell
forget_mask = tf.sigmoid(f + self._forget_bias)
next_cell = forget_mask * prev_cell + tf.sigmoid(i) * tf.tanh(j)
cell_output = next_cell
if self._use_peepholes:
cell_output += self._w_o_diag * cell_output
next_hidden = tf.tanh(cell_output) * tf.sigmoid(o)
return next_hidden, (next_hidden, next_cell)
def _create_gate_variables(self, input_shape, dtype):
"""Initialize the variables used for the gates."""
if len(input_shape) != 2:
raise ValueError(
"Rank of shape must be {} not: {}".format(2, len(input_shape)))
input_size = input_shape.dims[1].value
b_shape = [4 * self._hidden_size]
equiv_input_size = self._hidden_size + input_size
initializer = basic.create_linear_initializer(equiv_input_size)
self._w_xh = tf.get_variable(
self.W_GATES,
shape=[self._hidden_size + input_size, 4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_GATES, initializer),
partitioner=self._partitioners.get(self.W_GATES),
regularizer=self._regularizers.get(self.W_GATES))
self._b = tf.get_variable(
self.B_GATES,
shape=b_shape,
dtype=dtype,
initializer=self._initializers.get(self.B_GATES, initializer),
partitioner=self._partitioners.get(self.B_GATES),
regularizer=self._regularizers.get(self.B_GATES))
def _create_peephole_variables(self, dtype):
"""Initialize the variables used for the peephole connections."""
self._w_f_diag = tf.get_variable(
self.W_F_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_F_DIAG),
partitioner=self._partitioners.get(self.W_F_DIAG),
regularizer=self._regularizers.get(self.W_F_DIAG))
self._w_i_diag = tf.get_variable(
self.W_I_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_I_DIAG),
partitioner=self._partitioners.get(self.W_I_DIAG),
regularizer=self._regularizers.get(self.W_I_DIAG))
self._w_o_diag = tf.get_variable(
self.W_O_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_O_DIAG),
partitioner=self._partitioners.get(self.W_O_DIAG),
regularizer=self._regularizers.get(self.W_O_DIAG))
@property
def state_size(self):
"""Tuple of `tf.TensorShape`s indicating the size of state tensors."""
return (tf.TensorShape([self._hidden_size]),
tf.TensorShape([self._hidden_size]))
@property
def output_size(self):
"""`tf.TensorShape` indicating the size of the core output."""
return tf.TensorShape([self._hidden_size])
@property
def use_peepholes(self):
"""Boolean indicating whether peephole connections are used."""
return self._use_peepholes
@property
def use_layer_norm(self):
"""Boolean indicating whether layer norm is enabled."""
return self._use_layer_norm
class BatchNormLSTM(rnn_core.RNNCore):
"""LSTM recurrent network cell with optional peepholes, batch normalization.
The base implementation is based on: http://arxiv.org/abs/1409.2329. We add
forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
#### Peep-hole connections
Peep-hole connections may optionally be used by specifying a flag in the
  constructor. These connections can help increase the precision of output
  timing; for more details see:
https://research.google.com/pubs/archive/43905.pdf
#### Batch normalization
The batch norm transformation (in training mode) is
batchnorm(x) = gamma * (x - mean(x)) / stddev(x) + beta,
where gamma is a learnt scaling factor and beta is a learnt offset.
Batch normalization may optionally be used at different places in the LSTM by
specifying flag(s) in the constructor. These are applied when calculating
the gate activations and cell-to-hidden transformation. The set-up is based on
https://arxiv.org/pdf/1603.09025.pdf
##### Batch normalization: where to apply?
Batch norm can be applied in three different places in the LSTM:
(h) To the W_h h_{t-1} contribution to the gates from the previous hiddens.
(x) To the W_x x_t contribution to the gates from the current input.
(c) To the cell value c_t when calculating the output h_t from the cell.
(The notation here is consistent with the Recurrent Batch Normalization
paper). Each of these can be controlled individually, because batch norm is
expensive, and not all are necessary. The paper doesn't mention the relative
effects of these different batch norms; however, experimentation with a
shallow LSTM for the `permuted_mnist` sequence task suggests that (h) is the
most important and the other two can be left off. For other tasks or deeper
(stacked) LSTMs, other batch norm combinations may be more effective.
##### Batch normalization: collecting stats (training vs test)
When switching to testing (see `LSTM.with_batch_norm_control`), we can use a
mean and stddev learnt from the training data instead of using the statistics
from the test data. (This both increases test accuracy because the statistics
have less variance, and if the test data does not have the same distribution
as the training data then we must use the training statistics to ensure the
effective network does not change when switching to testing anyhow.)
  This does, however, introduce a slight subtlety. The first few time steps of
the RNN tend to have varying statistics (mean and variance) before settling
down to a steady value. Therefore in general, better performance is obtained
by using separate statistics for the first few time steps, and then using the
final set of statistics for all subsequent time steps. This is controlled by
the parameter `max_unique_stats`. (We can't have an unbounded number of
  distinct statistics, both for technical reasons and for the case where
test sequences are longer than anything seen in training.)
You may be fine leaving it at its default value of 1. Small values (like 10)
may achieve better performance on some tasks when testing with cached
statistics.
Attributes:
state_size: Tuple of `tf.TensorShape`s indicating the size of state tensors.
output_size: `tf.TensorShape` indicating the size of the core output.
use_peepholes: Boolean indicating whether peephole connections are used.
use_batch_norm_h: Boolean indicating whether batch norm (h) is enabled.
use_batch_norm_x: Boolean indicating whether batch norm (x) is enabled.
use_batch_norm_c: Boolean indicating whether batch norm (c) is enabled.
"""
# Keys that may be provided for parameter initializers.
W_GATES = "w_gates" # weight for gates
B_GATES = "b_gates" # bias of gates
W_F_DIAG = "w_f_diag" # weight for prev_cell -> forget gate peephole
W_I_DIAG = "w_i_diag" # weight for prev_cell -> input gate peephole
W_O_DIAG = "w_o_diag" # weight for prev_cell -> output gate peephole
GAMMA_H = "gamma_h" # batch norm scaling for previous_hidden -> gates
GAMMA_X = "gamma_x" # batch norm scaling for input -> gates
GAMMA_C = "gamma_c" # batch norm scaling for cell -> output
BETA_C = "beta_c" # (batch norm) bias for cell -> output
POSSIBLE_INITIALIZER_KEYS = {W_GATES, B_GATES, W_F_DIAG, W_I_DIAG, W_O_DIAG,
GAMMA_H, GAMMA_X, GAMMA_C, BETA_C}
# Keep old name for backwards compatibility
POSSIBLE_KEYS = POSSIBLE_INITIALIZER_KEYS
def __init__(self,
hidden_size,
forget_bias=1.0,
initializers=None,
partitioners=None,
regularizers=None,
use_peepholes=False,
use_batch_norm_h=True,
use_batch_norm_x=False,
use_batch_norm_c=False,
max_unique_stats=1,
hidden_clip_value=None,
cell_clip_value=None,
custom_getter=None,
name="batch_norm_lstm"):
"""Construct `BatchNormLSTM`.
Args:
hidden_size: (int) Hidden size dimensionality.
forget_bias: (float) Bias for the forget activation.
initializers: Dict containing ops to initialize the weights.
This dictionary may contain any of the keys returned by
`BatchNormLSTM.get_possible_initializer_keys`.
The gamma and beta variables control batch normalization values for
different batch norm transformations inside the cell; see the paper for
details.
partitioners: Optional dict containing partitioners to partition
the weights and biases. As a default, no partitioners are used. This
dict may contain any of the keys returned by
`BatchNormLSTM.get_possible_initializer_keys`.
regularizers: Optional dict containing regularizers for the weights and
biases. As a default, no regularizers are used. This dict may contain
any of the keys returned by
`BatchNormLSTM.get_possible_initializer_keys`.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_batch_norm_h: Boolean that indicates whether to apply batch
normalization at the previous_hidden -> gates contribution. If you are
experimenting with batch norm then this may be the most effective to
use, and is enabled by default.
use_batch_norm_x: Boolean that indicates whether to apply batch
normalization at the input -> gates contribution.
use_batch_norm_c: Boolean that indicates whether to apply batch
normalization at the cell -> output contribution.
max_unique_stats: The maximum number of steps to use unique batch norm
statistics for. (See module description above for more details.)
hidden_clip_value: Optional number; if set, then the LSTM hidden state
vector is clipped by this value.
cell_clip_value: Optional number; if set, then the LSTM cell vector is
clipped by this value.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
KeyError: if `initializers` contains any keys not returned by
`BatchNormLSTM.get_possible_initializer_keys`.
KeyError: if `partitioners` contains any keys not returned by
`BatchNormLSTM.get_possible_initializer_keys`.
KeyError: if `regularizers` contains any keys not returned by
`BatchNormLSTM.get_possible_initializer_keys`.
ValueError: if a peephole initializer is passed in the initializer list,
but `use_peepholes` is False.
ValueError: if a batch norm initializer is passed in the initializer list,
but batch norm is disabled.
ValueError: if none of the `use_batch_norm_*` options are True.
ValueError: if `max_unique_stats` is < 1.
"""
if not any([use_batch_norm_h, use_batch_norm_x, use_batch_norm_c]):
raise ValueError("At least one use_batch_norm_* option is required for "
"BatchNormLSTM")
super(BatchNormLSTM, self).__init__(custom_getter=custom_getter, name=name)
self._hidden_size = hidden_size
self._forget_bias = forget_bias
self._use_peepholes = use_peepholes
self._max_unique_stats = max_unique_stats
self._use_batch_norm_h = use_batch_norm_h
self._use_batch_norm_x = use_batch_norm_x
self._use_batch_norm_c = use_batch_norm_c
self._hidden_clip_value = hidden_clip_value
self._cell_clip_value = cell_clip_value
self.possible_keys = self.get_possible_initializer_keys(
use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h,
use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c)
self._initializers = util.check_initializers(initializers,
self.possible_keys)
self._partitioners = util.check_initializers(partitioners,
self.possible_keys)
self._regularizers = util.check_initializers(regularizers,
self.possible_keys)
if max_unique_stats < 1:
raise ValueError("max_unique_stats must be >= 1")
if max_unique_stats != 1 and not (
use_batch_norm_h or use_batch_norm_x or use_batch_norm_c):
raise ValueError("max_unique_stats specified but batch norm disabled")
if hidden_clip_value is not None and hidden_clip_value < 0:
raise ValueError("The value of hidden_clip_value should be nonnegative.")
if cell_clip_value is not None and cell_clip_value < 0:
raise ValueError("The value of cell_clip_value should be nonnegative.")
if use_batch_norm_h:
self._batch_norm_h = BatchNormLSTM.IndexedStatsBatchNorm(max_unique_stats,
"batch_norm_h")
if use_batch_norm_x:
self._batch_norm_x = BatchNormLSTM.IndexedStatsBatchNorm(max_unique_stats,
"batch_norm_x")
if use_batch_norm_c:
self._batch_norm_c = BatchNormLSTM.IndexedStatsBatchNorm(max_unique_stats,
"batch_norm_c")
def with_batch_norm_control(self, is_training, test_local_stats=True):
"""Wraps this RNNCore with the additional control input to the `BatchNorm`s.
Example usage:
lstm = snt.BatchNormLSTM(4)
is_training = tf.placeholder(tf.bool)
rnn_input = ...
my_rnn = rnn.rnn(lstm.with_batch_norm_control(is_training), rnn_input)
Args:
is_training: Boolean that indicates whether we are in
training mode or testing mode. When in training mode, the batch norm
statistics are taken from the given batch, and moving statistics are
updated. When in testing mode, the moving statistics are not updated,
and in addition if `test_local_stats` is False then the moving
statistics are used for the batch statistics. See the `BatchNorm` module
for more details.
      test_local_stats: Boolean scalar indicating whether to use local
batch statistics in test mode.
Returns:
snt.RNNCore wrapping this class with the extra input(s) added.
"""
return BatchNormLSTM.CoreWithExtraBuildArgs(
self, is_training=is_training, test_local_stats=test_local_stats)
@classmethod
def get_possible_initializer_keys(
cls, use_peepholes=False, use_batch_norm_h=True, use_batch_norm_x=False,
use_batch_norm_c=False):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
gamma_h: batch norm scaling for previous_hidden -> gates
gamma_x: batch norm scaling for input -> gates
gamma_c: batch norm scaling for cell -> output
beta_c: batch norm bias for cell -> output
Args:
      cls: The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_batch_norm_h: Boolean that indicates whether to apply batch
normalization at the previous_hidden -> gates contribution. If you are
experimenting with batch norm then this may be the most effective to
turn on.
use_batch_norm_x: Boolean that indicates whether to apply batch
normalization at the input -> gates contribution.
use_batch_norm_c: Boolean that indicates whether to apply batch
normalization at the cell -> output contribution.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if not use_peepholes:
possible_keys.difference_update(
{cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
if not use_batch_norm_h:
possible_keys.remove(cls.GAMMA_H)
if not use_batch_norm_x:
possible_keys.remove(cls.GAMMA_X)
if not use_batch_norm_c:
possible_keys.difference_update({cls.GAMMA_C, cls.BETA_C})
return possible_keys
def _build(self, inputs, prev_state, is_training=None, test_local_stats=True):
"""Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tuple (prev_hidden, prev_cell), or if batch norm is enabled
and `max_unique_stats > 1`, then (prev_hidden, prev_cell, time_step).
Here, prev_hidden and prev_cell are tensors of size
`[batch_size, hidden_size]`, and time_step is used to indicate the
current RNN step.
is_training: Boolean indicating whether we are in training mode (as
opposed to testing mode), passed to the batch norm
modules. Note to use this you must wrap the cell via the
`with_batch_norm_control` function.
test_local_stats: Boolean indicating whether to use local batch statistics
in test mode. See the `BatchNorm` documentation for more on this.
Returns:
A tuple (output, next_state) where 'output' is a Tensor of size
`[batch_size, hidden_size]` and 'next_state' is a tuple
(next_hidden, next_cell) or (next_hidden, next_cell, time_step + 1),
where next_hidden and next_cell have size `[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
if is_training is None:
raise ValueError("Boolean is_training flag must be explicitly specified "
"when using batch normalization.")
if self._max_unique_stats == 1:
prev_hidden, prev_cell = prev_state
time_step = None
else:
prev_hidden, prev_cell, time_step = prev_state
# pylint: disable=invalid-unary-operand-type
if self._hidden_clip_value is not None:
prev_hidden = tf.clip_by_value(
prev_hidden, -self._hidden_clip_value, self._hidden_clip_value)
if self._cell_clip_value is not None:
prev_cell = tf.clip_by_value(
prev_cell, -self._cell_clip_value, self._cell_clip_value)
# pylint: enable=invalid-unary-operand-type
self._create_gate_variables(inputs.get_shape(), inputs.dtype)
self._create_batch_norm_variables(inputs.dtype)
# pylint false positive: calling module of same file;
# pylint: disable=not-callable
if self._use_batch_norm_h or self._use_batch_norm_x:
gates_h = tf.matmul(prev_hidden, self._w_h)
gates_x = tf.matmul(inputs, self._w_x)
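      # Keeping the hidden->gates and input->gates matmuls separate allows
      # batch norm to be applied to each contribution before they are summed.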
if self._use_batch_norm_h:
gates_h = self._gamma_h * self._batch_norm_h(gates_h,
time_step,
is_training,
test_local_stats)
if self._use_batch_norm_x:
gates_x = self._gamma_x * self._batch_norm_x(gates_x,
time_step,
is_training,
test_local_stats)
gates = gates_h + gates_x
else:
# Parameters of gates are concatenated into one multiply for efficiency.
inputs_and_hidden = tf.concat([inputs, prev_hidden], 1)
gates = tf.matmul(inputs_and_hidden, self._w_xh)
gates += self._b
# i = input_gate, j = next_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=gates, num_or_size_splits=4, axis=1)
if self._use_peepholes: # diagonal connections
self._create_peephole_variables(inputs.dtype)
f += self._w_f_diag * prev_cell
i += self._w_i_diag * prev_cell
forget_mask = tf.sigmoid(f + self._forget_bias)
next_cell = forget_mask * prev_cell + tf.sigmoid(i) * tf.tanh(j)
cell_output = next_cell
if self._use_batch_norm_c:
cell_output = (self._beta_c
+ self._gamma_c * self._batch_norm_c(cell_output,
time_step,
is_training,
test_local_stats))
if self._use_peepholes:
cell_output += self._w_o_diag * cell_output
next_hidden = tf.tanh(cell_output) * tf.sigmoid(o)
if self._max_unique_stats == 1:
return next_hidden, (next_hidden, next_cell)
else:
return next_hidden, (next_hidden, next_cell, time_step + 1)
def _create_batch_norm_variables(self, dtype):
"""Initialize the variables used for the `BatchNorm`s (if any)."""
# The paper recommends a value of 0.1 for good gradient flow through the
# tanh nonlinearity (although doesn't say whether this is for all gammas,
# or just some).
gamma_initializer = tf.constant_initializer(0.1)
if self._use_batch_norm_h:
self._gamma_h = tf.get_variable(
self.GAMMA_H,
shape=[4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.GAMMA_H, gamma_initializer),
partitioner=self._partitioners.get(self.GAMMA_H),
regularizer=self._regularizers.get(self.GAMMA_H))
if self._use_batch_norm_x:
self._gamma_x = tf.get_variable(
self.GAMMA_X,
shape=[4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.GAMMA_X, gamma_initializer),
partitioner=self._partitioners.get(self.GAMMA_X),
regularizer=self._regularizers.get(self.GAMMA_X))
if self._use_batch_norm_c:
self._gamma_c = tf.get_variable(
self.GAMMA_C,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.GAMMA_C, gamma_initializer),
partitioner=self._partitioners.get(self.GAMMA_C),
regularizer=self._regularizers.get(self.GAMMA_C))
self._beta_c = tf.get_variable(
self.BETA_C,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.BETA_C),
partitioner=self._partitioners.get(self.BETA_C),
regularizer=self._regularizers.get(self.BETA_C))
def _create_gate_variables(self, input_shape, dtype):
"""Initialize the variables used for the gates."""
if len(input_shape) != 2:
raise ValueError(
"Rank of shape must be {} not: {}".format(2, len(input_shape)))
input_size = input_shape.dims[1].value
b_shape = [4 * self._hidden_size]
equiv_input_size = self._hidden_size + input_size
initializer = basic.create_linear_initializer(equiv_input_size)
if self._use_batch_norm_h or self._use_batch_norm_x:
self._w_h = tf.get_variable(
self.W_GATES + "_H",
shape=[self._hidden_size, 4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_GATES, initializer),
partitioner=self._partitioners.get(self.W_GATES),
regularizer=self._regularizers.get(self.W_GATES))
self._w_x = tf.get_variable(
self.W_GATES + "_X",
shape=[input_size, 4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_GATES, initializer),
partitioner=self._partitioners.get(self.W_GATES),
regularizer=self._regularizers.get(self.W_GATES))
else:
self._w_xh = tf.get_variable(
self.W_GATES,
shape=[self._hidden_size + input_size, 4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_GATES, initializer),
partitioner=self._partitioners.get(self.W_GATES),
regularizer=self._regularizers.get(self.W_GATES))
self._b = tf.get_variable(
self.B_GATES,
shape=b_shape,
dtype=dtype,
initializer=self._initializers.get(self.B_GATES, initializer),
partitioner=self._partitioners.get(self.B_GATES),
regularizer=self._regularizers.get(self.B_GATES))
def _create_peephole_variables(self, dtype):
"""Initialize the variables used for the peephole connections."""
self._w_f_diag = tf.get_variable(
self.W_F_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_F_DIAG),
partitioner=self._partitioners.get(self.W_F_DIAG),
regularizer=self._regularizers.get(self.W_F_DIAG))
self._w_i_diag = tf.get_variable(
self.W_I_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_I_DIAG),
partitioner=self._partitioners.get(self.W_I_DIAG),
regularizer=self._regularizers.get(self.W_I_DIAG))
self._w_o_diag = tf.get_variable(
self.W_O_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_O_DIAG),
partitioner=self._partitioners.get(self.W_O_DIAG),
regularizer=self._regularizers.get(self.W_O_DIAG))
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
trainable_initializers=None, trainable_regularizers=None,
name=None):
"""Builds the default start state tensor of zeros.
Args:
batch_size: An int, float or scalar Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
trainable_initializers: An optional pair of initializers for the
initial hidden state and cell state.
trainable_regularizers: Optional regularizer function or nested structure
of functions with the same structure as the `state_size` property of the
core, to be used as regularizers of the initial state variable. A
regularizer should be a function that takes a single `Tensor` as an
input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
name: Optional string used to prefix the initial state variable names, in
the case of a trainable initial state. If not provided, defaults to
the name of the module.
Returns:
A tensor tuple `([batch_size, state_size], [batch_size, state_size], ?)`
filled with zeros, with the third entry present when batch norm is enabled
      with `max_unique_stats > 1`, with value `0` (representing the time step).
"""
if self._max_unique_stats == 1:
return super(BatchNormLSTM, self).initial_state(
batch_size, dtype=dtype, trainable=trainable,
trainable_initializers=trainable_initializers,
trainable_regularizers=trainable_regularizers, name=name)
else:
with tf.name_scope(self._initial_state_scope(name)):
if not trainable:
state = self.zero_state(batch_size, dtype)
else:
# We have to manually create the state ourselves so we don't create a
# variable that never gets used for the third entry.
state = rnn_core.trainable_initial_state(
batch_size,
(tf.TensorShape([self._hidden_size]),
tf.TensorShape([self._hidden_size])),
dtype=dtype,
initializers=trainable_initializers,
regularizers=trainable_regularizers,
name=self._initial_state_scope(name))
return (state[0], state[1], tf.constant(0, dtype=tf.int32))
@property
def state_size(self):
"""Tuple of `tf.TensorShape`s indicating the size of state tensors."""
if self._max_unique_stats == 1:
return (tf.TensorShape([self._hidden_size]),
tf.TensorShape([self._hidden_size]))
else:
return (tf.TensorShape([self._hidden_size]),
tf.TensorShape([self._hidden_size]),
tf.TensorShape(1))
@property
def output_size(self):
"""`tf.TensorShape` indicating the size of the core output."""
return tf.TensorShape([self._hidden_size])
@property
def use_peepholes(self):
"""Boolean indicating whether peephole connections are used."""
return self._use_peepholes
@property
def use_batch_norm_h(self):
"""Boolean indicating whether batch norm for hidden -> gates is enabled."""
return self._use_batch_norm_h
@property
def use_batch_norm_x(self):
"""Boolean indicating whether batch norm for input -> gates is enabled."""
return self._use_batch_norm_x
@property
def use_batch_norm_c(self):
"""Boolean indicating whether batch norm for cell -> output is enabled."""
return self._use_batch_norm_c
class IndexedStatsBatchNorm(base.AbstractModule):
"""BatchNorm module where batch statistics are selected by an input index.
This is used by LSTM+batchnorm, where we have distinct batch norm statistics
for the first `max_unique_stats` time steps, and then use the final set of
statistics for subsequent time steps.
The module has as input (x, index, is_training, test_local_stats). During
training or when test_local_stats=True, the output is simply batchnorm(x)
(where mean(x) and stddev(x) are used), and during training the
`BatchNorm` module accumulates statistics in mean_i, etc, where
i = min(index, max_unique_stats - 1).
During testing with test_local_stats=False, the output is batchnorm(x),
where mean_i and stddev_i are used instead of mean(x) and stddev(x).
See the `BatchNorm` module for more on is_training and test_local_stats.
No offset `beta` or scaling `gamma` are learnt.
"""
def __init__(self, max_unique_stats, name=None):
"""Create an IndexedStatsBatchNorm.
Args:
max_unique_stats: number of different indices to have statistics for;
indices beyond this will use the final statistics.
name: Name of the module.
"""
super(BatchNormLSTM.IndexedStatsBatchNorm, self).__init__(name=name)
self._max_unique_stats = max_unique_stats
def _build(self, inputs, index, is_training, test_local_stats):
"""Add the IndexedStatsBatchNorm module to the graph.
Args:
inputs: Tensor to apply batch norm to.
index: Scalar TensorFlow int32 value to select the batch norm index.
is_training: Boolean to indicate to `snt.BatchNorm` if we are
currently training.
test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
normalization should use local batch statistics at test time.
Returns:
Output of batch norm operation.
"""
def create_batch_norm():
return batch_norm.BatchNorm(offset=False, scale=False)(
inputs, is_training, test_local_stats)
if self._max_unique_stats > 1:
pred_fn_pairs = [(tf.equal(i, index), create_batch_norm)
for i in xrange(self._max_unique_stats - 1)]
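        # Each tf.case branch constructs its own BatchNorm module, so separate
        # moving statistics accumulate per time-step index; indices beyond
        # max_unique_stats - 1 fall through to the default (final) branch.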
out = tf.case(pred_fn_pairs, create_batch_norm)
out.set_shape(inputs.get_shape()) # needed for tf.case shape inference
return out
else:
return create_batch_norm()
class CoreWithExtraBuildArgs(rnn_core.RNNCore):
"""Wraps an RNNCore so that the build method receives extra args and kwargs.
This will pass the additional input `args` and `kwargs` to the _build
function of the snt.RNNCore after the input and prev_state inputs.
"""
def __init__(self, core, *args, **kwargs):
"""Construct the CoreWithExtraBuildArgs.
Args:
core: The snt.RNNCore to wrap.
*args: Extra arguments to pass to _build.
**kwargs: Extra keyword arguments to pass to _build.
"""
super(BatchNormLSTM.CoreWithExtraBuildArgs, self).__init__(
name=core.module_name + "_extra_args")
self._core = core
self._args = args
self._kwargs = kwargs
def _build(self, inputs, state):
return self._core(inputs, state, *self._args, **self._kwargs)
@property
def state_size(self):
"""Tuple indicating the size of nested state tensors."""
return self._core.state_size
@property
def output_size(self):
"""`tf.TensorShape` indicating the size of the core output."""
return self._core.output_size
class ConvLSTM(rnn_core.RNNCore):
"""Convolutional LSTM."""
@classmethod
def get_possible_initializer_keys(cls, conv_ndims, use_bias=True):
conv_class = cls._get_conv_class(conv_ndims)
return conv_class.get_possible_initializer_keys(use_bias)
@classmethod
def _get_conv_class(cls, conv_ndims):
if conv_ndims == 1:
return conv.Conv1D
elif conv_ndims == 2:
return conv.Conv2D
elif conv_ndims == 3:
return conv.Conv3D
else:
raise ValueError("Invalid convolution dimensionality.")
def __init__(self,
conv_ndims,
input_shape,
output_channels,
kernel_shape,
stride=1,
rate=1,
padding=conv.SAME,
use_bias=True,
skip_connection=False,
forget_bias=1.0,
initializers=None,
partitioners=None,
regularizers=None,
custom_getter=None,
name="conv_lstm"):
"""Construct ConvLSTM.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as tuple, excluding the batch size.
output_channels: Number of output channels of the conv LSTM.
      kernel_shape: Sequence of kernel sizes (of size conv_ndims), or integer
        that is used to define kernel size in all dimensions.
      stride: Sequence of kernel strides (of size conv_ndims), or integer that
        is used to define stride in all dimensions.
      rate: Sequence of dilation rates (of size conv_ndims), or integer that is
        used to define dilation rate in all dimensions. 1 corresponds to a
        standard convolution, while rate > 1 corresponds to a dilated
        convolution. Cannot be > 1 if any stride is also > 1.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
use_bias: Use bias in convolutions.
skip_connection: If set to `True`, concatenate the input to the output
of the conv LSTM. Default: `False`.
forget_bias: Forget bias.
initializers: Dict containing ops to initialize the convolutional weights.
partitioners: Optional dict containing partitioners to partition
the convolutional weights and biases. As a default, no partitioners are
used.
regularizers: Optional dict containing regularizers for the convolutional
weights and biases. As a default, no regularizers are used.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
ValueError: If `skip_connection` is `True` and stride is different from 1
or if `input_shape` is incompatible with `conv_ndims`.
"""
super(ConvLSTM, self).__init__(custom_getter=custom_getter, name=name)
self._conv_class = self._get_conv_class(conv_ndims)
if skip_connection and stride != 1:
raise ValueError("`stride` needs to be 1 when using skip connection")
if conv_ndims != len(input_shape)-1:
raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
input_shape, conv_ndims))
self._conv_ndims = conv_ndims
self._input_shape = input_shape
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._rate = rate
self._padding = padding
self._use_bias = use_bias
self._forget_bias = forget_bias
self._skip_connection = skip_connection
self._initializers = initializers
self._partitioners = partitioners
self._regularizers = regularizers
self._total_output_channels = output_channels
if self._stride != 1:
self._total_output_channels //= self._stride * self._stride
if self._skip_connection:
self._total_output_channels += self._input_shape[-1]
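    # Lazily create one convolution per key; `_build` uses the keys "input"
    # and "hidden", so the two transforms get separate weights that are then
    # shared across every time step.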
self._convolutions = collections.defaultdict(self._new_convolution)
def _new_convolution(self):
return self._conv_class(
output_channels=4*self._output_channels,
kernel_shape=self._kernel_shape,
stride=self._stride,
rate=self._rate,
padding=self._padding,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
name="conv")
@property
def convolutions(self):
return self._convolutions
@property
def state_size(self):
"""Tuple of `tf.TensorShape`s indicating the size of state tensors."""
hidden_size = tf.TensorShape(self._input_shape[:-1] +
(self._output_channels,))
return (hidden_size, hidden_size)
@property
def output_size(self):
"""`tf.TensorShape` indicating the size of the core output."""
return tf.TensorShape(self._input_shape[:-1] +
(self._total_output_channels,))
def _build(self, inputs, state):
hidden, cell = state
input_conv = self._convolutions["input"]
hidden_conv = self._convolutions["hidden"]
next_hidden = input_conv(inputs) + hidden_conv(hidden)
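    # The combined convolutions produce 4 * output_channels feature maps,
    # which are split into the four LSTM gates along the channel axis.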
gates = tf.split(value=next_hidden, num_or_size_splits=4,
axis=self._conv_ndims+1)
input_gate, next_input, forget_gate, output_gate = gates
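    # Standard LSTM update:
    #   c_t = sigmoid(f + forget_bias) * c_{t-1} + sigmoid(i) * tanh(g)
    #   h_t = tanh(c_t) * sigmoid(o)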
next_cell = tf.sigmoid(forget_gate + self._forget_bias) * cell
next_cell += tf.sigmoid(input_gate) * tf.tanh(next_input)
output = tf.tanh(next_cell) * tf.sigmoid(output_gate)
if self._skip_connection:
output = tf.concat([output, inputs], axis=-1)
return output, (output, next_cell)
class Conv1DLSTM(ConvLSTM):
"""1D convolutional LSTM."""
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return super(Conv1DLSTM, cls).get_possible_initializer_keys(1, use_bias)
def __init__(self, name="conv_1d_lstm", **kwargs):
"""Construct Conv1DLSTM. See `snt.ConvLSTM` for more details."""
super(Conv1DLSTM, self).__init__(conv_ndims=1, name=name, **kwargs)
class Conv2DLSTM(ConvLSTM):
"""2D convolutional LSTM."""
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return super(Conv2DLSTM, cls).get_possible_initializer_keys(2, use_bias)
def __init__(self, name="conv_2d_lstm", **kwargs):
"""Construct Conv2DLSTM. See `snt.ConvLSTM` for more details."""
super(Conv2DLSTM, self).__init__(conv_ndims=2, name=name, **kwargs)
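# --- Editor's usage sketch (not part of the original Sonnet module) ---
# A hedged example of unrolling a `Conv2DLSTM` over a sequence with
# `tf.nn.dynamic_rnn`. All shapes and hyperparameters are illustrative
# assumptions, not values taken from this file.
def _conv_2d_lstm_usage_sketch():
  """Builds a toy graph that unrolls a Conv2DLSTM over 10 time steps."""
  batch_size, time_steps = 4, 10
  cell = Conv2DLSTM(input_shape=(8, 8, 3), output_channels=16,
                    kernel_shape=(3, 3))
  # Inputs are [batch, time, height, width, channels].
  inputs = tf.placeholder(tf.float32, [batch_size, time_steps, 8, 8, 3])
  initial_state = cell.initial_state(batch_size, tf.float32)
  # `output_size` is (8, 8, 16), so `outputs` is [batch, time, 8, 8, 16].
  outputs, final_state = tf.nn.dynamic_rnn(
      cell, inputs, initial_state=initial_state, dtype=tf.float32)
  return outputs, final_state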
class GRU(rnn_core.RNNCore):
"""GRU recurrent network cell.
The implementation is based on: https://arxiv.org/pdf/1412.3555v1.pdf.
Attributes:
state_size: Integer indicating the size of state tensor.
output_size: Integer indicating the size of the core output.
"""
# Keys that may be provided for parameter initializers.
WZ = "wz" # weight for input -> update cell
UZ = "uz" # weight for prev_state -> update cell
BZ = "bz" # bias for update_cell
WR = "wr" # weight for input -> reset cell
UR = "ur" # weight for prev_state -> reset cell
BR = "br" # bias for reset cell
WH = "wh" # weight for input -> candidate activation
UH = "uh" # weight for prev_state -> candidate activation
BH = "bh" # bias for candidate activation
POSSIBLE_INITIALIZER_KEYS = {WZ, UZ, BZ, WR, UR, BR, WH, UH, BH}
# Keep old name for backwards compatibility
POSSIBLE_KEYS = POSSIBLE_INITIALIZER_KEYS
def __init__(self, hidden_size, initializers=None, partitioners=None,
regularizers=None, custom_getter=None, name="gru"):
"""Construct GRU.
Args:
hidden_size: (int) Hidden size dimensionality.
initializers: Dict containing ops to initialize the weights. This
dict may contain any of the keys returned by
`GRU.get_possible_initializer_keys`.
partitioners: Optional dict containing partitioners to partition
the weights and biases. As a default, no partitioners are used. This
dict may contain any of the keys returned by
`GRU.get_possible_initializer_keys`
regularizers: Optional dict containing regularizers for the weights and
biases. As a default, no regularizers are used. This
dict may contain any of the keys returned by
`GRU.get_possible_initializer_keys`
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
KeyError: if `initializers` contains any keys not returned by
`GRU.get_possible_initializer_keys`.
KeyError: if `partitioners` contains any keys not returned by
`GRU.get_possible_initializer_keys`.
KeyError: if `regularizers` contains any keys not returned by
`GRU.get_possible_initializer_keys`.
"""
super(GRU, self).__init__(custom_getter=custom_getter, name=name)
self._hidden_size = hidden_size
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
@classmethod
def get_possible_initializer_keys(cls):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wz: weight for input -> update cell
uz: weight for prev_state -> update cell
bz: bias for update_cell
wr: weight for input -> reset cell
ur: weight for prev_state -> reset cell
br: bias for reset cell
wh: weight for input -> candidate activation
uh: weight for prev_state -> candidate activation
bh: bias for candidate activation
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
return super(GRU, cls).get_possible_initializer_keys(cls)
def _build(self, inputs, prev_state):
"""Connects the GRU module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tensor of size `[batch_size, hidden_size]`.
Returns:
A tuple (output, next_state) where `output` is a Tensor of size
`[batch_size, hidden_size]` and `next_state` is a Tensor of size
`[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
input_size = inputs.get_shape()[1]
weight_shape = (input_size, self._hidden_size)
u_shape = (self._hidden_size, self._hidden_size)
bias_shape = (self._hidden_size,)
self._wz = tf.get_variable(GRU.WZ, weight_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.WZ),
partitioner=self._partitioners.get(GRU.WZ),
regularizer=self._regularizers.get(GRU.WZ))
self._uz = tf.get_variable(GRU.UZ, u_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.UZ),
partitioner=self._partitioners.get(GRU.UZ),
regularizer=self._regularizers.get(GRU.UZ))
self._bz = tf.get_variable(GRU.BZ, bias_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.BZ),
partitioner=self._partitioners.get(GRU.BZ),
regularizer=self._regularizers.get(GRU.BZ))
z = tf.sigmoid(tf.matmul(inputs, self._wz) +
tf.matmul(prev_state, self._uz) + self._bz)
self._wr = tf.get_variable(GRU.WR, weight_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.WR),
partitioner=self._partitioners.get(GRU.WR),
regularizer=self._regularizers.get(GRU.WR))
self._ur = tf.get_variable(GRU.UR, u_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.UR),
partitioner=self._partitioners.get(GRU.UR),
regularizer=self._regularizers.get(GRU.UR))
self._br = tf.get_variable(GRU.BR, bias_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.BR),
partitioner=self._partitioners.get(GRU.BR),
regularizer=self._regularizers.get(GRU.BR))
r = tf.sigmoid(tf.matmul(inputs, self._wr) +
tf.matmul(prev_state, self._ur) + self._br)
self._wh = tf.get_variable(GRU.WH, weight_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.WH),
partitioner=self._partitioners.get(GRU.WH),
regularizer=self._regularizers.get(GRU.WH))
self._uh = tf.get_variable(GRU.UH, u_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.UH),
partitioner=self._partitioners.get(GRU.UH),
regularizer=self._regularizers.get(GRU.UH))
self._bh = tf.get_variable(GRU.BH, bias_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.BH),
partitioner=self._partitioners.get(GRU.BH),
regularizer=self._regularizers.get(GRU.BH))
h_twiddle = tf.tanh(tf.matmul(inputs, self._wh) +
tf.matmul(r * prev_state, self._uh) + self._bh)
state = (1 - z) * prev_state + z * h_twiddle
return state, state
@property
def state_size(self):
return tf.TensorShape([self._hidden_size])
@property
def output_size(self):
return tf.TensorShape([self._hidden_size])
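# --- Editor's usage sketch (not part of the original Sonnet module) ---
# A hedged example showing that connecting the GRU core twice reuses the
# same variables; the batch and input sizes are illustrative assumptions.
def _gru_usage_sketch():
  """Builds a toy graph that steps a GRU twice with shared weights."""
  gru = GRU(hidden_size=32)
  x = tf.random_normal([8, 100])           # [batch_size, input_size]
  state = gru.initial_state(8, tf.float32)
  out0, state = gru(x, state)              # first connection creates variables
  out1, state = gru(x, state)              # second connection reuses them
  return out1, state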
|
gfyoung/pandas | refs/heads/master | pandas/tests/tslibs/test_libfrequencies.py | 7 | import pytest
from pandas._libs.tslibs.parsing import get_rule_month
from pandas.tseries import offsets
@pytest.mark.parametrize(
"obj,expected",
[
("W", "DEC"),
(offsets.Week().freqstr, "DEC"),
("D", "DEC"),
(offsets.Day().freqstr, "DEC"),
("Q", "DEC"),
(offsets.QuarterEnd(startingMonth=12).freqstr, "DEC"),
("Q-JAN", "JAN"),
(offsets.QuarterEnd(startingMonth=1).freqstr, "JAN"),
("A-DEC", "DEC"),
("Y-DEC", "DEC"),
(offsets.YearEnd().freqstr, "DEC"),
("A-MAY", "MAY"),
("Y-MAY", "MAY"),
(offsets.YearEnd(month=5).freqstr, "MAY"),
],
)
def test_get_rule_month(obj, expected):
result = get_rule_month(obj)
assert result == expected
|
ville-k/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/datasets/base.py | 125 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
from os import path
import random
import tempfile
import time
import numpy as np
from six.moves import urllib
from tensorflow.contrib.framework import deprecated
from tensorflow.python.platform import gfile
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def load_csv_with_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file with a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
header = next(data_file)
n_samples = int(header[0])
n_features = int(header[1])
data = np.zeros((n_samples, n_features), dtype=features_dtype)
target = np.zeros((n_samples,), dtype=target_dtype)
for i, row in enumerate(data_file):
target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
data[i] = np.asarray(row, dtype=features_dtype)
return Dataset(data=data, target=target)
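# Note (editor's addition): the header row is expected to begin with the
# dataset dimensions; for example, the bundled iris.csv header starts with
# "150,4" (n_samples, n_features), followed by the data rows.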
def load_csv_without_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file without a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
data, target = [], []
for row in data_file:
target.append(row.pop(target_column))
data.append(np.asarray(row, dtype=features_dtype))
target = np.array(target, dtype=target_dtype)
data = np.array(data)
return Dataset(data=data, target=target)
def shrink_csv(filename, ratio):
"""Create a smaller dataset of only 1/ratio of original data."""
filename_small = filename.replace('.', '_small.')
with gfile.Open(filename_small, 'w') as csv_file_small:
writer = csv.writer(csv_file_small)
with gfile.Open(filename) as csv_file:
reader = csv.reader(csv_file)
i = 0
for row in reader:
if i % ratio == 0:
writer.writerow(row)
i += 1
def load_iris(data_path=None):
"""Load Iris dataset.
Args:
data_path: string, path to iris dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'iris.csv')
return load_csv_with_header(
data_path,
target_dtype=np.int,
features_dtype=np.float)
def load_boston(data_path=None):
"""Load Boston housing dataset.
Args:
data_path: string, path to boston dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'boston_house_prices.csv')
return load_csv_with_header(
data_path,
target_dtype=np.float,
features_dtype=np.float)
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
  Args:
    initial_delay: the initial delay.
    max_delay: the maximum delay allowed (the actual maximum is
        max_delay * (1 + jitter)).
    factor: on each subsequent retry, the delay is multiplied by this value
        (must be >= 1).
    jitter: to avoid lockstep, the returned delay is multiplied by a random
        number between (1 - jitter) and (1 + jitter). To add a 20% jitter, set
        jitter = 0.2. Must be < 1.
    is_retriable: (optional) a function that takes an Exception as an argument
        and returns true if retry should be applied.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
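  # For example (assumed values): initial_delay=1.0, factor=2.0,
  # max_delay=16.0 yields delays of roughly 1, 2, 4, 8, 16 seconds,
  # each scaled by the random jitter.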
def delays():
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
        except Exception as e:  # pylint: disable=broad-except
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
return fn(*args, **kwargs)
return wrapped_fn
return wrap
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(work_directory):
gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not gfile.Exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
gfile.Copy(temp_file_name, filepath)
with gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
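# --- Editor's usage sketch (not part of the original module) ---
# Hedged example of `maybe_download`; the URL and paths below are
# hypothetical, chosen only for illustration.
def _maybe_download_usage_sketch():
  """Fetches a file once, then reuses the cached copy on later calls."""
  return maybe_download('iris.csv', '/tmp/tf_datasets',
                        'https://example.com/iris.csv')  # hypothetical URL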
|
asedunov/intellij-community | refs/heads/master | python/testData/paramInfo/TupleAndNamedArg1.py | 83 | def f(a, b, c):
pass
f(<arg_c>c=1, *(10, <arg_star>20))
|
nickmilon/python-hashes | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='python-hashes',
version='0.1',
description='Library of interesting (non-cryptographic) hashes in pure Python.',
author='sangelone',
author_email='angelone@gmail.com',
url='http://github.com/sangelone/python-hashes',
download_url='http://github.com/sangelone/python-hashes/downloads',
packages=find_packages(),
)
|