| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀) |
|---|---|---|---|---|
puuu/micropython
|
refs/heads/master
|
tests/pyb/servo.py
|
117
|
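# Exercises the pyb.Servo API on a pyboard: position moves via angle()
# (optionally over a given time in ms), continuous rotation via speed(),
# raw pulse widths in microseconds, and calibration get/set.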
from pyb import Servo
servo = Servo(1)
print(servo)
servo.angle(0)
servo.angle(10, 100)
servo.speed(-10)
servo.speed(10, 100)
servo.pulse_width(1500)
print(servo.pulse_width())
servo.calibration(630, 2410, 1490, 2460, 2190)
print(servo.calibration())
|
hroncok/freeipa
|
refs/heads/master
|
ipaplatform/rhel/services.py
|
5
|
# Authors:
# Jan Cholasta <jcholast@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Contains RHEL-specific service class implementations.
"""
from ipaplatform.redhat import services as redhat_services
# Maps the service names used by FreeIPA code to their actual
# systemd unit names
rhel_system_units = redhat_services.redhat_system_units
# Service that sets domainname on RHEL is called rhel-domainname.service
rhel_system_units['domainname'] = 'rhel-domainname.service'
# Service classes that implement RHEL-specific behaviour
class RHELService(redhat_services.RedHatService):
system_units = rhel_system_units
# Function that constructs the proper RHEL-specific service class for a
# service of the specified name
def rhel_service_class_factory(name):
if name == 'domainname':
return RHELService(name)
return redhat_services.redhat_service_class_factory(name)
# Magicdict containing RHELService instances.
class RHELServices(redhat_services.RedHatServices):
def service_class_factory(self, name):
return rhel_service_class_factory(name)
# Objects below are expected to be exported by platform module
from ipaplatform.redhat.services import timedate_services
service = rhel_service_class_factory
knownservices = RHELServices()
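# Illustrative usage (hypothetical caller, not part of this module):
#
#     from ipaplatform import services
#     svc = services.service('domainname')   # -> RHELService for
#                                            #    rhel-domainname.service
#     svc = services.service('httpd')        # any other name falls through
#                                            #    to the Red Hat factory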
|
redapple/scrapy
|
refs/heads/master
|
scrapy/exporters.py
|
18
|
"""
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import io
import sys
import pprint
import marshal
import six
from six.moves import cPickle as pickle
from xml.sax.saxutils import XMLGenerator
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.utils.python import to_bytes, to_unicode, to_native_str, is_listlike
from scrapy.item import BaseItem
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter',
'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter',
'JsonItemExporter', 'MarshalItemExporter']
class BaseItemExporter(object):
def __init__(self, **kwargs):
self._configure(kwargs)
def _configure(self, options, dont_fail=False):
"""Configure the exporter by poping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful for using with keyword arguments in subclasses constructors)
"""
self.encoding = options.pop('encoding', None)
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', lambda x: x)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples
(name, serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty and not isinstance(item, dict):
field_iter = six.iterkeys(item.fields)
else:
field_iter = six.iterkeys(item)
else:
if include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if field_name in item:
field = {} if isinstance(item, dict) else item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
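# Illustrative only (not part of scrapy): a minimal concrete exporter just
# wires up _configure() and implements export_item() on top of
# _get_serialized_fields(). The TSV layout and class name are assumptions.
class _ExampleTsvItemExporter(BaseItemExporter):

    def __init__(self, file, **kwargs):
        self._configure(kwargs, dont_fail=True)
        self.file = file

    def export_item(self, item):
        # '' stands in for fields missing from the item
        row = '\t'.join(to_native_str(v) for _, v in
                        self._get_serialized_fields(item, default_value=''))
        self.file.write(to_bytes(row + '\n', self.encoding))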
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
kwargs.setdefault('ensure_ascii', not self.encoding)
self.encoder = ScrapyJSONEncoder(**kwargs)
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict) + '\n'
self.file.write(to_bytes(data, self.encoding))
class JsonItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
kwargs.setdefault('ensure_ascii', not self.encoding)
self.encoder = ScrapyJSONEncoder(**kwargs)
self.first_item = True
def start_exporting(self):
self.file.write(b"[\n")
def finish_exporting(self):
self.file.write(b"\n]")
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(b',\n')
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict)
self.file.write(to_bytes(data, self.encoding))
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
self._configure(kwargs)
if not self.encoding:
self.encoding = 'utf-8'
self.xg = XMLGenerator(file, encoding=self.encoding)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
def export_item(self, item):
self.xg.startElement(self.item_element, {})
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value)
self.xg.endElement(self.item_element)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value):
self.xg.startElement(name, {})
if hasattr(serialized_value, 'items'):
for subname, value in serialized_value.items():
self._export_xml_field(subname, value)
elif is_listlike(serialized_value):
for value in serialized_value:
self._export_xml_field('value', value)
elif isinstance(serialized_value, six.text_type):
self._xg_characters(serialized_value)
else:
self._xg_characters(str(serialized_value))
self.xg.endElement(name)
# Workaround for http://bugs.python.org/issue17606
# Before Python 2.7.4 xml.sax.saxutils required bytes;
# since 2.7.4 it requires unicode. The bug is likely to be
# fixed in 2.7.6, but 2.7.6 will still support unicode,
# and Python 3.x will require unicode, so ">= 2.7.4" should be fine.
if sys.version_info[:3] >= (2, 7, 4):
def _xg_characters(self, serialized_value):
if not isinstance(serialized_value, six.text_type):
serialized_value = serialized_value.decode(self.encoding)
return self.xg.characters(serialized_value)
else: # pragma: no cover
def _xg_characters(self, serialized_value):
return self.xg.characters(serialized_value)
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs):
self._configure(kwargs, dont_fail=True)
if not self.encoding:
self.encoding = 'utf-8'
self.include_headers_line = include_headers_line
self.stream = io.TextIOWrapper(
file,
line_buffering=False,
write_through=True,
encoding=self.encoding
) if six.PY3 else file
self.csv_writer = csv.writer(self.stream, **kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value):
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item):
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='',
include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def _build_row(self, values):
for s in values:
try:
yield to_native_str(s, self.encoding)
except TypeError:
yield s
def _write_headers_and_set_fields_to_export(self, item):
if self.include_headers_line:
if not self.fields_to_export:
if isinstance(item, dict):
# for dicts try using fields of the first item
self.fields_to_export = list(item.keys())
else:
# use fields declared in Item
self.fields_to_export = list(item.fields.keys())
row = list(self._build_row(self.fields_to_export))
self.csv_writer.writerow(row)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=2, **kwargs):
self._configure(kwargs)
self.file = file
self.protocol = protocol
def export_item(self, item):
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + '\n'))
class PythonItemExporter(BaseItemExporter):
"""The idea behind this exporter is to have a mechanism to serialize items
to built-in python types so any serialization library (like
json, msgpack, binc, etc) can be used on top of it. Its main goal is to
    seamlessly support what BaseItemExporter does, plus nested items.
"""
def _configure(self, options, dont_fail=False):
self.binary = options.pop('binary', True)
super(PythonItemExporter, self)._configure(options, dont_fail)
if self.binary:
warnings.warn(
"PythonItemExporter will drop support for binary export in the future",
ScrapyDeprecationWarning)
if not self.encoding:
self.encoding = 'utf-8'
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._serialize_value)
return serializer(value)
def _serialize_value(self, value):
if isinstance(value, BaseItem):
return self.export_item(value)
if isinstance(value, dict):
return dict(self._serialize_dict(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
encode_func = to_bytes if self.binary else to_unicode
if isinstance(value, (six.text_type, bytes)):
return encode_func(value, encoding=self.encoding)
return value
def _serialize_dict(self, value):
for key, val in six.iteritems(value):
key = to_bytes(key) if self.binary else key
yield key, self._serialize_value(val)
def export_item(self, item):
result = dict(self._get_serialized_fields(item))
if self.binary:
result = dict(self._serialize_dict(result))
return result
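# Illustrative round trip (assumed caller code): with binary=False,
# export_item() yields plain dicts/lists/strings, so e.g.
# json.dumps(PythonItemExporter(binary=False).export_item(item)) works
# without a custom encoder.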
|
zblz/naima
|
refs/heads/master
|
src/naima/extern/interruptible_pool.py
|
1
|
# -*- coding: utf-8 -*-
"""
Python's multiprocessing.Pool class doesn't interact well with
``KeyboardInterrupt`` signals, as documented in places such as:
* `<http://stackoverflow.com/questions/1408356/>`_
* `<http://stackoverflow.com/questions/11312525/>`_
* `<http://noswap.com/blog/python-multiprocessing-keyboardinterrupt>`_
Various workarounds have been shared. Here, we adapt the one proposed in the
last link above, by John Reese, and shared as
* `<https://github.com/jreese/multiprocessing-keyboardinterrupt/>`_
Our version is a drop-in replacement for multiprocessing.Pool ... as long as
the map() method is the only one that needs to be interrupt-friendly.
Contributed by Peter K. G. Williams <peter@newton.cx>.
*Added in version 2.1.0*
"""
__all__ = ["InterruptiblePool"]
import functools
import signal
from multiprocessing import TimeoutError
from multiprocessing.pool import Pool
def _initializer_wrapper(actual_initializer, *rest):
"""
We ignore SIGINT. It's up to our parent to kill us in the typical
condition of this arising from ``^C`` on a terminal. If someone is
manually killing us with that signal, well... nothing will happen.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if actual_initializer is not None:
actual_initializer(*rest)
class InterruptiblePool(Pool):
"""
A modified version of :class:`multiprocessing.pool.Pool` that has better
behavior with regard to ``KeyboardInterrupts`` in the :func:`map` method.
:param processes: (optional)
The number of worker processes to use; defaults to the number of CPUs.
:param initializer: (optional)
Either ``None``, or a callable that will be invoked by each worker
process when it starts.
:param initargs: (optional)
Arguments for *initializer*; it will be called as
``initializer(*initargs)``.
:param kwargs: (optional)
Extra arguments. Python 2.7 supports a ``maxtasksperchild`` parameter.
"""
wait_timeout = 3600
def __init__(
self, processes=None, initializer=None, initargs=(), **kwargs
):
new_initializer = functools.partial(_initializer_wrapper, initializer)
super().__init__(processes, new_initializer, initargs, **kwargs)
def map(self, func, iterable, chunksize=None):
"""
Equivalent of ``map()`` built-in, without swallowing
``KeyboardInterrupt``.
:param func:
The function to apply to the items.
:param iterable:
An iterable of items that will have `func` applied to them.
"""
# The key magic is that we must call r.get() with a timeout, because
# a Condition.wait() without a timeout swallows KeyboardInterrupts.
r = self.map_async(func, iterable, chunksize)
while True:
try:
return r.get(self.wait_timeout)
except TimeoutError:
pass
except KeyboardInterrupt:
self.terminate()
self.join()
raise
# Other exceptions propagate up.
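# Illustrative usage (not part of this module):
#
#     if __name__ == "__main__":
#         pool = InterruptiblePool(processes=2)
#         try:
#             print(pool.map(abs, range(-4, 4)))  # Ctrl-C now propagates
#         finally:
#             pool.close()
#             pool.join()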
|
noirbizarre/django.js
|
refs/heads/master
|
djangojs/__init__.py
|
3
|
# -*- coding: utf-8 -*-
'''
Django.js provide better integration of javascript into Django.
'''
__version__ = '0.8.2.dev'
__description__ = "Django JS Tools"
#: Packaged jQuery version
JQUERY_DEFAULT_VERSION = '2.0.3'
JQUERY_MIGRATE_VERSION = '1.2.1'
|
ChromiumWebApps/chromium
|
refs/heads/master
|
native_client_sdk/src/tools/tests/fix_deps_test.py
|
104
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import tempfile
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(SCRIPT_DIR)
DATA_DIR = os.path.join(SCRIPT_DIR, 'data')
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(PARENT_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, "third_party", "pymock")
# For the mock library
sys.path.append(MOCK_DIR)
sys.path.append(PARENT_DIR)
import fix_deps
import mock
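# These tests exercise fix_deps.FixupDepFile, which rewrites a makefile-style
# .d file so that every prerequisite also appears as an empty target (the
# same trick as gcc's -MP flag, so make doesn't fail on deleted headers);
# fixing up a file a second time raises fix_deps.Error.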
class TestFixDeps(unittest.TestCase):
def setUp(self):
self.tempfile = None
def tearDown(self):
if self.tempfile:
os.remove(self.tempfile)
def testRequiresFile(self):
with mock.patch('sys.stderr'):
self.assertRaises(SystemExit, fix_deps.main, [])
def testInvalidOption(self):
with mock.patch('sys.stderr'):
self.assertRaises(SystemExit, fix_deps.main, ['--foo', 'bar'])
def testMissingFile(self):
with mock.patch('sys.stderr'):
self.assertRaises(fix_deps.Error, fix_deps.main, ['nonexistent.file'])
def testAddsDeps(self):
self.tempfile = tempfile.mktemp("_sdktest")
with open(self.tempfile, 'w') as out:
out.write('foo.o: foo.c foo.h bar.h\n')
fix_deps.FixupDepFile(self.tempfile)
with open(self.tempfile) as infile:
contents = infile.read()
lines = contents.splitlines()
self.assertEqual(len(lines), 5)
self.assertTrue('foo.c:' in lines)
self.assertTrue('foo.h:' in lines)
self.assertTrue('bar.h:' in lines)
def testSpacesInFilenames(self):
self.tempfile = tempfile.mktemp("_sdktest")
with open(self.tempfile, 'w') as out:
out.write('foo.o: foo\\ bar.h\n')
fix_deps.FixupDepFile(self.tempfile)
with open(self.tempfile) as infile:
contents = infile.read()
lines = contents.splitlines()
self.assertEqual(len(lines), 3)
self.assertEqual(lines[2], 'foo\\ bar.h:')
def testColonInFilename(self):
self.tempfile = tempfile.mktemp("_sdktest")
with open(self.tempfile, 'w') as out:
out.write('foo.o: c:foo.c\\\n c:bar.h\n')
fix_deps.FixupDepFile(self.tempfile)
with open(self.tempfile) as infile:
contents = infile.read()
lines = contents.splitlines()
self.assertEqual(len(lines), 5)
self.assertEqual(lines[3], 'c:foo.c:')
self.assertEqual(lines[4], 'c:bar.h:')
def testDoubleInvoke(self):
self.tempfile = tempfile.mktemp("_sdktest")
with open(self.tempfile, 'w') as out:
out.write('foo.o: foo\\ bar.h\n')
fix_deps.FixupDepFile(self.tempfile)
self.assertRaises(fix_deps.Error, fix_deps.FixupDepFile, self.tempfile)
if __name__ == '__main__':
unittest.main()
|
HesselTjeerdsma/Cyber-Physical-Pacman-Game
|
refs/heads/master
|
Algor/flask/lib/python2.7/site-packages/pip/exceptions.py
|
344
|
"""Exceptions used throughout package"""
from __future__ import absolute_import
from itertools import chain, groupby, repeat
from pip._vendor.six import iteritems
class PipError(Exception):
"""Base pip exception"""
class InstallationError(PipError):
"""General exception during installation"""
class UninstallationError(PipError):
"""General exception during uninstallation"""
class DistributionNotFound(InstallationError):
"""Raised when a distribution cannot be found to satisfy a requirement"""
class RequirementsFileParseError(InstallationError):
"""Raised when a general error occurs parsing a requirements file line."""
class BestVersionAlreadyInstalled(PipError):
"""Raised when the most up-to-date version of a package is already
installed."""
class BadCommand(PipError):
"""Raised when virtualenv or a command is not found"""
class CommandError(PipError):
"""Raised when there is an error in command-line arguments"""
class PreviousBuildDirError(PipError):
"""Raised when there's a previous conflicting build directory"""
class InvalidWheelFilename(InstallationError):
"""Invalid wheel filename."""
class UnsupportedWheel(InstallationError):
"""Unsupported wheel."""
class HashErrors(InstallationError):
"""Multiple HashError instances rolled into one for reporting"""
def __init__(self):
self.errors = []
def append(self, error):
self.errors.append(error)
def __str__(self):
lines = []
self.errors.sort(key=lambda e: e.order)
for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
lines.append(cls.head)
lines.extend(e.body() for e in errors_of_cls)
if lines:
return '\n'.join(lines)
def __nonzero__(self):
return bool(self.errors)
def __bool__(self):
return self.__nonzero__()
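# Typical aggregation pattern (illustrative; check_hashes is hypothetical):
#
#     errors = HashErrors()
#     for req in reqs:
#         try:
#             check_hashes(req)
#         except HashError as exc:
#             exc.req = req
#             errors.append(exc)
#     if errors:
#         raise errors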
class HashError(InstallationError):
"""
A failure to verify a package against known-good hashes
:cvar order: An int sorting hash exception classes by difficulty of
recovery (lower being harder), so the user doesn't bother fretting
about unpinned packages when he has deeper issues, like VCS
dependencies, to deal with. Also keeps error reports in a
deterministic order.
:cvar head: A section heading for display above potentially many
exceptions of this kind
:ivar req: The InstallRequirement that triggered this error. This is
pasted on after the exception is instantiated, because it's not
typically available earlier.
"""
req = None
head = ''
def body(self):
"""Return a summary of me for display under the heading.
        This default implementation simply returns a description of the
        triggering requirement.
:param req: The InstallRequirement that provoked this error, with
populate_link() having already been called
"""
return ' %s' % self._requirement_name()
def __str__(self):
return '%s\n%s' % (self.head, self.body())
def _requirement_name(self):
"""Return a description of the requirement that triggered me.
        This default implementation returns the long description of the req,
        with line numbers
"""
return str(self.req) if self.req else 'unknown package'
class VcsHashUnsupported(HashError):
"""A hash was provided for a version-control-system-based requirement, but
we don't have a method for hashing those."""
order = 0
head = ("Can't verify hashes for these requirements because we don't "
"have a way to hash version control repositories:")
class DirectoryUrlHashUnsupported(HashError):
"""A hash was provided for a version-control-system-based requirement, but
we don't have a method for hashing those."""
order = 1
head = ("Can't verify hashes for these file:// requirements because they "
"point to directories:")
class HashMissing(HashError):
"""A hash was needed for a requirement but is absent."""
order = 2
head = ('Hashes are required in --require-hashes mode, but they are '
'missing from some requirements. Here is a list of those '
'requirements along with the hashes their downloaded archives '
'actually had. Add lines like these to your requirements files to '
'prevent tampering. (If you did not enable --require-hashes '
'manually, note that it turns on automatically when any package '
'has a hash.)')
def __init__(self, gotten_hash):
"""
:param gotten_hash: The hash of the (possibly malicious) archive we
just downloaded
"""
self.gotten_hash = gotten_hash
def body(self):
from pip.utils.hashes import FAVORITE_HASH # Dodge circular import.
package = None
if self.req:
# In the case of URL-based requirements, display the original URL
# seen in the requirements file rather than the package name,
# so the output can be directly copied into the requirements file.
package = (self.req.original_link if self.req.original_link
# In case someone feeds something downright stupid
# to InstallRequirement's constructor.
else getattr(self.req, 'req', None))
return ' %s --hash=%s:%s' % (package or 'unknown package',
FAVORITE_HASH,
self.gotten_hash)
class HashUnpinned(HashError):
"""A requirement had a hash specified but was not pinned to a specific
version."""
order = 3
head = ('In --require-hashes mode, all requirements must have their '
'versions pinned with ==. These do not:')
class HashMismatch(HashError):
"""
Distribution file hash values don't match.
:ivar package_name: The name of the package that triggered the hash
    mismatch. Feel free to write to this after the exception is raised to
improve its error message.
"""
order = 4
head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS '
'FILE. If you have updated the package versions, please update '
'the hashes. Otherwise, examine the package contents carefully; '
'someone may have tampered with them.')
def __init__(self, allowed, gots):
"""
:param allowed: A dict of algorithm names pointing to lists of allowed
hex digests
:param gots: A dict of algorithm names pointing to hashes we
actually got from the files under suspicion
"""
self.allowed = allowed
self.gots = gots
def body(self):
return ' %s:\n%s' % (self._requirement_name(),
self._hash_comparison())
def _hash_comparison(self):
"""
Return a comparison of actual and expected hash values.
Example::
Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
or 123451234512345123451234512345123451234512345
Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
"""
def hash_then_or(hash_name):
# For now, all the decent hashes have 6-char names, so we can get
# away with hard-coding space literals.
return chain([hash_name], repeat(' or'))
lines = []
for hash_name, expecteds in iteritems(self.allowed):
prefix = hash_then_or(hash_name)
lines.extend((' Expected %s %s' % (next(prefix), e))
for e in expecteds)
lines.append(' Got %s\n' %
self.gots[hash_name].hexdigest())
prefix = ' or'
return '\n'.join(lines)
class UnsupportedPythonVersion(InstallationError):
"""Unsupported python version according to Requires-Python package
metadata."""
|
jrjhealey/bioinfo-tools
|
refs/heads/master
|
fastafetcher.py
|
1
|
#!/usr/bin/env python
# Extract fasta records by their descriptors, stored in a separate file.
# Requires biopython
# TODO:
# - Create more sophisticated logic for matching IDs/Descriptions/Partial matches etc.
# - Create a mode variable to encapsulate invert/partial/description/id etc?
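# Example invocations (illustrative file names):
#   python fastafetcher.py -f seqs.fasta -k ids.txt -o subset.fasta
#   python fastafetcher.py -f seqs.fasta -s "seq1,seq2" -m partial --invert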
from Bio import SeqIO
import sys
import argparse
def get_keys(args):
"""Turns the input key file into a list. May be memory intensive."""
with open(args.keyfile, "r") as kfh:
keys = [line.rstrip("\n").lstrip(">") for line in kfh]
return keys
def get_args():
try:
parser = argparse.ArgumentParser(
description="Retrieve one or more fastas from a given multifasta."
)
parser.add_argument(
"-f",
"--fasta",
action="store",
required=True,
help="The multifasta to search.",
)
parser.add_argument(
"-k",
"--keyfile",
action="store",
help="A file of header strings to search the multifasta for. Must be one per line.",
)
parser.add_argument(
"-s",
"--string",
action="store",
help="Provide a string to look for directly, instead of a file (can accept a comma separated list of strings).",
)
parser.add_argument(
"-o",
"--outfile",
action="store",
default=None,
help="Output file to store the new fasta sequences in. Just prints to screen by default.",
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Set whether to print the key list out before the fasta sequences. Useful for debugging.",
)
parser.add_argument(
"-i",
"--invert",
action="store_true",
help="Invert the search, and retrieve all sequences NOT specified in the keyfile.",
)
parser.add_argument(
"-m",
"--method",
action="store",
choices=["exact", "partial"],
default="exact",
help="Search the headers as exact matches, or as partial substring matches. "
"The latter is dangerous, as headers may be matched twice, so be sure "
"your headers/keys are unique to their respective sequences."
)
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
except NameError:
sys.stderr.write(
"An exception occured with argument parsing. Check your provided options."
)
sys.exit(1)
return parser.parse_args()
def main():
"""Takes a string or list of strings in a text file (one per line) and retreives them and their sequences from a provided multifasta."""
args = get_args()
    # Call get_keys() to create the list of keys from the provided file:
if not (args.keyfile or args.string):
sys.stderr.write("No key source provided. Exiting.")
sys.exit(1)
if args.keyfile:
keys = get_keys(args)
else:
keys = args.string.split(",")
if args.verbose:
if args.invert is False:
sys.stderr.write("Fetching the following keys:\n")
for key in keys:
sys.stderr.write(key + "\n")
elif args.invert is True:
sys.stderr.write(
"Ignoring the following keys, and retreiving everything else from: {}\n".format(
args.fasta
)
)
for key in keys:
sys.stderr.write(key + "\n")
sys.stderr.write(
"-" * 80 + "\n"
)
# Parse in the multifasta and assign an iterable variable:
to_write = []
for rec in SeqIO.parse(args.fasta, "fasta"):
if args.invert is False:
if args.method == "exact":
if rec.id in keys:
print(rec.format("fasta"))
to_write.append(rec)
elif args.method == "partial":
if any(key in rec.description for key in keys):
print(rec.format("fasta"))
to_write.append(rec)
elif args.invert is True:
if args.method == "exact":
if rec.id not in keys:
print(rec.format("fasta"))
to_write.append(rec)
elif args.method == "partial":
if all(key not in rec.description for key in keys):
print(rec.format("fasta"))
to_write.append(rec)
if args.outfile:
SeqIO.write(to_write, args.outfile, "fasta")
if __name__ == "__main__":
main()
|
bpshetty/erpnext
|
refs/heads/master
|
erpnext/crm/report/minutes_to_first_response_for_opportunity/minutes_to_first_response_for_opportunity.py
|
54
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
columns = [
{
'fieldname': 'creation_date',
'label': 'Date',
'fieldtype': 'Date'
},
{
'fieldname': 'mins',
'fieldtype': 'Float',
'label': 'Mins to First Response'
},
]
data = frappe.db.sql('''select date(creation) as creation_date,
avg(mins_to_first_response) as mins
from tabOpportunity
where date(creation) between %s and %s
and mins_to_first_response > 0
group by creation_date order by creation_date desc''', (filters.from_date, filters.to_date))
return columns, data
|
kalahbrown/HueBigSQL
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/m2m_through/__init__.py
|
45382
| |
jacob2588m/twitter_with_python_search_api_basics
|
refs/heads/master
|
twitter_retweeters_of_last_retweeted_tweet.py
|
1
|
# Import the necessary package to process data in JSON format
try:
import json
except ImportError:
import simplejson as json
# Import the necessary methods from "twitter" library
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
# Variables that contain the user credentials to access the Twitter API
ACCESS_TOKEN = '3184260913-uydgdf878yfb'
ACCESS_SECRET = '1Epl9RJO3e9zkhSfyhud9d8hs7fggbs8gxPxg4JJpfxpSMAe'
CONSUMER_KEY = 'fJiUTHwN98342uedbhje5vJKiADc2'
CONSUMER_SECRET = 'Wj6Gu293udbjhv90udn97x13WUc872i84ybwu6vdZ15zWmx5ByGts0oP6kTW'
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter = Twitter(auth=oauth)
user = "sid_shenoy"
results = twitter.statuses.user_timeline(screen_name=user)
for status in results:
    retweets = twitter.statuses.retweets._id(_id=status["id"])
for retweet in retweets:
print " - retweeted by %s" % (retweet["user"]["screen_name"])
|
crosswalk-project/chromium-crosswalk-efl
|
refs/heads/efl/crosswalk-10/39.0.2171.19
|
build/android/asan_symbolize.py
|
96
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import optparse
import os
import re
import sys
from pylib import constants
# Uses symbol.py from third_party/android_platform, not python's.
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT,
'third_party/android_platform/development/scripts'))
import symbol
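# _RE_ASAN matches ASan stack-trace frames such as (illustrative):
#     #2 0x4cd814 (/system/lib/libfoo.so+0x8814)
# capturing prefix, frame number, address, library path and relative offset.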
_RE_ASAN = re.compile(r'(.*?)(#\S*?) (\S*?) \((.*?)\+(.*?)\)')
def _ParseAsanLogLine(line):
m = re.match(_RE_ASAN, line)
if not m:
return None
return {
'prefix': m.group(1),
'library': m.group(4),
'pos': m.group(2),
'rel_address': '%08x' % int(m.group(5), 16),
}
def _FindASanLibraries():
asan_lib_dir = os.path.join(constants.DIR_SOURCE_ROOT,
'third_party', 'llvm-build',
'Release+Asserts', 'lib')
asan_libs = []
for src_dir, _, files in os.walk(asan_lib_dir):
asan_libs += [os.path.relpath(os.path.join(src_dir, f))
for f in files
if f.endswith('.so')]
return asan_libs
def _TranslateLibPath(library, asan_libs):
for asan_lib in asan_libs:
if os.path.basename(library) == os.path.basename(asan_lib):
return '/' + asan_lib
return symbol.TranslateLibPath(library)
def _Symbolize(asan_input):
asan_libs = _FindASanLibraries()
libraries = collections.defaultdict(list)
asan_lines = []
for asan_log_line in [a.rstrip() for a in asan_input]:
m = _ParseAsanLogLine(asan_log_line)
if m:
libraries[m['library']].append(m)
asan_lines.append({'raw_log': asan_log_line, 'parsed': m})
all_symbols = collections.defaultdict(dict)
for library, items in libraries.iteritems():
libname = _TranslateLibPath(library, asan_libs)
lib_relative_addrs = set([i['rel_address'] for i in items])
info_dict = symbol.SymbolInformationForSet(libname,
lib_relative_addrs,
True)
if info_dict:
all_symbols[library]['symbols'] = info_dict
for asan_log_line in asan_lines:
m = asan_log_line['parsed']
if not m:
print asan_log_line['raw_log']
continue
if (m['library'] in all_symbols and
m['rel_address'] in all_symbols[m['library']]['symbols']):
s = all_symbols[m['library']]['symbols'][m['rel_address']][0]
print '%s%s %s %s' % (m['prefix'], m['pos'], s[0], s[1])
else:
print asan_log_line['raw_log']
def main():
parser = optparse.OptionParser()
parser.add_option('-l', '--logcat',
help='File containing adb logcat output with ASan stacks. '
'Use stdin if not specified.')
options, _ = parser.parse_args()
if options.logcat:
    asan_input = open(options.logcat, 'r')
else:
asan_input = sys.stdin
_Symbolize(asan_input.readlines())
if __name__ == "__main__":
sys.exit(main())
|
vvv1559/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/utils/dateformat.py
|
234
|
"""
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print df.format('jS F Y H:i')
7th October 2003 11:39
>>>
"""
import re
import time
import calendar
from django.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDEfFgGhHiIjlLmMnNOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_unicode(formatstr))):
if i % 2:
pieces.append(force_unicode(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return u''.join(pieces)
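# re_formatchars.split() alternates literal text (even indices) with single
# format characters (odd indices), so each format character dispatches to the
# method of the same name, e.g. (using the module docstring's example date):
#     DateFormat(datetime.datetime(2003, 10, 7, 11, 39)).format('jS F Y')
#     -> u'7th October 2003'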
class TimeFormat(Formatter):
def __init__(self, t):
self.data = t
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return u'%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return u'%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return u'%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return u'%02d' % self.data.minute
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return u'%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return u'%02d' % self.data.second
def u(self):
"Microseconds"
return self.data.microsecond
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def __init__(self, dt):
# Accepts either a datetime or date object.
self.data = dt
self.timezone = getattr(dt, 'tzinfo', None)
if hasattr(self.data, 'hour') and not self.timezone:
self.timezone = LocalTimezone(dt)
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return u'%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
if self.timezone and self.timezone.dst(self.data):
return u'1'
else:
return u'0'
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return u'%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def O(self):
"Difference to Greenwich time in hours; e.g. '+0200'"
seconds = self.Z()
return u"%+03d%02d" % (seconds // 3600, (seconds // 60) % 60)
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return u'th'
last = self.data.day % 10
if last == 1:
return u'st'
if last == 2:
return u'nd'
if last == 3:
return u'rd'
return u'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def T(self):
"Time zone of this machine; e.g. 'EST' or 'MDT'"
name = self.timezone and self.timezone.tzname(self.data) or None
if name is None:
name = self.format('O')
return unicode(name)
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if getattr(self.data, 'tzinfo', None):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return unicode(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
"""
if not self.timezone:
return 0
offset = self.timezone.utcoffset(self.data)
# Only days can be negative, so negative offsets have days=-1 and
# seconds positive. Positive offsets have days=0
return offset.days * 86400 + offset.seconds
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
|
Tehsmash/nova
|
refs/heads/master
|
nova/version.py
|
61
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from nova.i18n import _LE
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
NOVA_PACKAGE = None # OS distro package version suffix
loaded = False
version_info = pbr.version.VersionInfo('nova')
version_string = version_info.version_string
def _load_config():
# Don't load in global context, since we can't assume
# these modules are accessible when distutils uses
# this module
from six.moves import configparser
from oslo_config import cfg
import logging
global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
if loaded:
return
loaded = True
cfgfile = cfg.CONF.find_file("release")
if cfgfile is None:
return
try:
cfg = configparser.RawConfigParser()
cfg.read(cfgfile)
if cfg.has_option("Nova", "vendor"):
NOVA_VENDOR = cfg.get("Nova", "vendor")
if cfg.has_option("Nova", "product"):
NOVA_PRODUCT = cfg.get("Nova", "product")
if cfg.has_option("Nova", "package"):
NOVA_PACKAGE = cfg.get("Nova", "package")
except Exception as ex:
LOG = logging.getLogger(__name__)
LOG.error(_LE("Failed to load %(cfgfile)s: %(ex)s"),
{'cfgfile': cfgfile, 'ex': ex})
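# An example "release" file this function would pick up (illustrative values):
#
#     [Nova]
#     vendor = Acme Cloud
#     product = Acme Compute
#     package = 1.el7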
def vendor_string():
_load_config()
return NOVA_VENDOR
def product_string():
_load_config()
return NOVA_PRODUCT
def package_string():
_load_config()
return NOVA_PACKAGE
def version_string_with_package():
if package_string() is None:
return version_info.version_string()
else:
return "%s-%s" % (version_info.version_string(), package_string())
|
firerszd/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/_test_multiprocessing.py
|
67
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import test.support
import test.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # setting this True makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
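# BaseTestCase is a mixin: concrete test classes elsewhere in this file pair
# it with a processes/manager/threads harness that supplies self.TYPE and
# helpers such as self.Process, self.Queue and self.Event.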
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(100)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
p.terminate()
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
sys.stderr = open(testfn, 'w')
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
sys.stderr = open(testfn, 'w')
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, code)
with open(testfn, 'r') as f:
self.assertEqual(f.read().rstrip(), str(reason))
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
        # the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.2)
delta = time.time() - start
self.assertGreaterEqual(delta, 0.18)
#
#
#
class _TestLock(BaseTestCase):
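    """
    Tests for Lock and RLock objects.
    """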
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
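    """
    Tests for Semaphore and BoundedSemaphore objects.
    """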
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
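    """
    Tests for Condition objects (notify, notify_all, wait, wait_for).
    """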
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
for i in range(10):
try:
if get_value(woken) == 6:
break
except NotImplementedError:
break
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=10))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(10))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 10)
p.join()
class _TestEvent(BaseTestCase):
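    """
    Tests for Event objects.
    """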
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear: this does not
        # work with threading._Event objects (is_set == isSet).
        self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() will return the value of __flag
        # instead of None (API shear with the semaphore-backed mp.Event).
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
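# Unlike a plain list, _DummyList keeps its count in shared memory
# guarded by a lock, so it also works when the workers are processes.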
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
f(*args)
b.wait_for_finished()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                             args=(self.barrier, passes, child_conn, lock))
            p.start()
            # make sure the children are reaped even if an assertion fails
            self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
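    """
    Tests for Value and RawValue shared ctypes objects.
    """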
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
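    """
    Tests for Array and RawArray shared ctypes objects.
    """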
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
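    """
    Tests for the proxied list, dict and Namespace types provided by managers.
    """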
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
class _TestPool(BaseTestCase):
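    """
    Tests for Pool.apply(), the map()/imap() family and pool lifecycle.
    """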
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_make_pool(self):
self.assertRaises(ValueError, multiprocessing.Pool, -1)
self.assertRaises(ValueError, multiprocessing.Pool, 0)
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertLess(join.elapsed, 0.5)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with multiprocessing.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
raise AssertionError('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')   # empty message telling the _echo child to exit
class _TestConnection(BaseTestCase):
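    """
    Tests for Connection objects created by Pipe().
    """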
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned. On Windows
        # this sometimes failed on old versions because child_conn
        # would be closed before the child got a chance to duplicate
        # it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
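    """
    Tests for Connection.poll() and message boundary behavior.
    """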
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=5)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen(1)
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
#
#
#
class _TestHeap(BaseTestCase):
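    """
    Tests for the block allocator in multiprocessing.heap.
    """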
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
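    """
    Tests for multiprocessing.sharedctypes.
    """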
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
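    """
    Tests for util.Finalize callbacks and exit priorities.
    """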
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
        self.assertIsNotNone(logger)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test that invalid connection handles are detected, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen(4)
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
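#
# Illustrative sketch (not part of the original test suite): the typical
# wait() usage pattern exercised above -- multiplexing several Connection
# objects until every sender has hung up. The helper name is hypothetical.
#
def _example_wait_usage(connections):
    from multiprocessing.connection import wait
    messages = []
    while connections:
        for conn in wait(connections):
            try:
                messages.append(conn.recv())
            except EOFError:
                # The peer closed its end; stop watching this connection.
                connections.remove(conn)
                conn.close()
    return messages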
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.script_helper.assert_python_failure(name, sm)
self.assertEqual('', out.decode('ascii'))
self.assertIn('RuntimeError', err.decode('ascii'))
else:
rc, out, err = test.script_helper.assert_python_ok(name, sm)
self.assertEqual('123', out.decode('ascii').rstrip())
self.assertEqual('', err.decode('ascii'))
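#
# Illustrative sketch (not part of the original test suite): the guard whose
# absence mp_fork_bomb.py exercises. Under the 'spawn' and 'forkserver' start
# methods the child re-imports the main module, so unguarded process creation
# at module level would spawn children endlessly. A correctly guarded script
# looks like this:
#
#     if __name__ == '__main__':
#         p = multiprocessing.Process(target=print, args=(123,))
#         p.start()
#         p.join()
#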
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
p.join(timeout=5)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
p.join(timeout=5)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
p.join(timeout=5)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
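#
# Illustrative sketch (not part of the original test suite): how the context
# API exercised above is typically used in application code. The helper names
# are hypothetical.
#
def _example_context_worker(q):
    q.put('hello')
def _example_context_usage():
    # A context object exposes the same API as the multiprocessing module,
    # pinned to a single start method.
    ctx = multiprocessing.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(target=_example_context_worker, args=(q,))
    p.start()
    try:
        return q.get(timeout=10)
    finally:
        p.join()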
#
# Check that killing process does not leak named semaphores
#
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
        self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
#
# Mixins
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
t = 0.01
while len(multiprocessing.active_children()) > 1 and t < 5:
time.sleep(t)
t *= 2
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
print('Shared objects which still exist at manager shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
time.sleep(0.5)
multiprocessing.process._cleanup()
gc.collect()
tmp = set(multiprocessing.process._dangling) - set(dangling[0])
if tmp:
print('Dangling processes:', tmp, file=sys.stderr)
del tmp
tmp = set(threading._dangling) - set(dangling[1])
if tmp:
print('Dangling threads:', tmp, file=sys.stderr)
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
mcornella/gnank
|
refs/heads/main
|
src/domini.py
|
1
|
# -*- coding: utf-8 -*-
# Gnank - timetable finder for the FIB
# Copyright (C) 2006 - 2007 Albert Gasset Romo
# 2011 - 2019 Marc Cornellà
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dades
from dades import ErrorDades, ErrorOpcions
from functools import total_ordering
from itertools import chain
_carrera = ""
_assigs = {}
_horaris = set()
def obre(fitxer):
global _carrera
_carrera, classes, horaris = dades.obre(fitxer)
_assigs.clear()
_afegeix_classes(classes)
_horaris.clear()
for horari in horaris:
afegeix_horari_preferit(horari)
def desa(fitxer):
iters = [assig.tuples_classes() for assig in list(_assigs.values())]
tuples = chain(*iters)
dades.desa(fitxer, _carrera, sorted(tuples), sorted(_horaris))
def actualitza():
global _horaris, _carrera
classes = dades.obre_http(_carrera)
_assigs.clear()
_afegeix_classes(classes)
horaris_valids = set()
for horari in _horaris:
horaris_valids.add(_horari_valid(horari))
horaris_valids.discard(tuple())
_horaris = horaris_valids
def grups_disponibles():
grups_a = lambda a: [(g.nom) for g in sorted(a.grups_disponibles())]
return [(a.nom, grups_a(a)) for a in sorted(_assigs.values())]
def grups_disponibles_mati():
grups_a = lambda a: [(g.nom) for g in sorted(a.grups_disponibles_mati())]
return [(a.nom, grups_a(a)) for a in sorted(_assigs.values())]
def grups_disponibles_tarda():
grups_a = lambda a: [(g.nom) for g in sorted(a.grups_disponibles_tarda())]
return [(a.nom, grups_a(a)) for a in sorted(_assigs.values())]
def horaris_preferits():
return [Horari(g) for g in _horaris]
def es_horari_preferit(horari):
return tuple(sorted(horari)) in _horaris
def afegeix_horari_preferit(horari):
_horaris.add(tuple(sorted(horari)))
def elimina_horari_preferit(horari):
_horaris.discard(tuple(sorted(horari)))
def _afegeix_classes(classes):
for assig, grup, dia, hora, tipus, aula in classes:
try:
classe = Classe(assig, grup, dia, hora, tipus, aula)
except ValueError:
pass
else:
_assigs.setdefault(assig, Assig(assig)).afegeix_classe(classe)
def _hores_classes(assig, grup):
if assig not in _assigs:
return set()
return _assigs[assig].grup(grup).hores_classes()
def _classes_dia_hora(assig, grup, dia, hora):
return _assigs[assig].grup(grup).classes(dia, hora)
def _horari_valid(horari):
horari_valid = []
for nom_assig, grup in horari:
assig = _assigs.get(nom_assig)
if assig and assig.te_grup(grup):
horari_valid.append((nom_assig, grup))
return tuple(horari_valid)
def actualitza_carrera(carrera):
global _carrera
_carrera = carrera
def obte_carrera():
return _carrera
@total_ordering
class Classe(object):
valors_dia = list(range(1, 6))
valors_hora = list(range(8, 21))
valors_tipus = ['T', 'P', 'L']
hora_inici_tarda = 14
def __init__(self, assig, grup, dia, hora, tipus, aula):
if int(dia) not in self.valors_dia: raise ValueError
if int(hora) not in self.valors_hora: raise ValueError
if tipus not in self.valors_tipus: raise ValueError
self.assig = assig
self.grup = grup
self.dia = int(dia)
self.hora = int(hora)
self.tipus = tipus
self.aula = aula
def tupla(self):
return (self.assig, self.grup, str(self.dia), str(self.hora),
self.tipus, self.aula)
def _tipus(self):
return Classe.valors_tipus.index(self.tipus)
def __eq__(self, other):
return (self.assig, self.grup, self.dia, self.hora, self._tipus(), self.aula) \
== (other.assig, other.grup, other.dia, other.hora, other._tipus(), other.aula)
def __lt__(self, other):
return (self.assig, self.grup, self.dia, self.hora, self._tipus(), self.aula) \
< (other.assig, other.grup, other.dia, other.hora, other._tipus(), other.aula)
def __hash__(self):
return hash(self.assig + self.grup + self.tipus + self.aula)
@total_ordering
class Grup(object):
def __init__(self, nom, supergrup=None):
self.nom = nom
self._supergrup = supergrup
self._classes = {} # { (dia, hora): classes }
self._mati = True
self._tarda = True
def afegeix_classe(self, classe):
self._classes.setdefault((classe.dia, classe.hora), set()).add(classe)
if classe.hora < Classe.hora_inici_tarda:
self._tarda = False
else:
self._mati = False
def nomes_mati(self):
return self._mati and (not self._supergrup or self._supergrup._mati)
def nomes_tarda(self):
return self._tarda and (not self._supergrup or self._supergrup._tarda)
def __eq__(self, other):
return self.nom == other.nom
def __lt__(self, other):
return self.nom < other.nom
def __hash__(self):
return hash(self.nom)
def tuples_classes(self):
for classe in chain(*iter(list(self._classes.values()))):
yield classe.tupla()
def hores_classes(self):
hores = set(self._classes.keys())
if self._supergrup:
hores.update(self._supergrup.hores_classes())
return hores
def classes(self, dia, hora):
classes_grup = self._classes.get((dia, hora), [])
classes_supergrup = self._supergrup and \
self._supergrup.classes(dia, hora) or []
return chain(classes_grup, classes_supergrup)
@total_ordering
class Assig(object):
def __init__(self, nom):
self.nom = nom
self._grups = {}
self._supergrups = {}
def afegeix_classe(self, classe):
grup = self._grups.get(classe.grup) \
or self._supergrups.get(classe.grup) \
or self._crea_grup(classe.grup)
grup.afegeix_classe(classe)
def _crea_grup(self, nom):
if nom.isdigit() and int(nom) % 10 != 0:
nom_s = str(int(nom) - int(nom) % 10)
if nom_s in self._grups:
sgrup = self._grups.pop(nom_s)
self._supergrups[nom_s] = sgrup
else:
sgrup = self._supergrups.setdefault(nom_s, Grup(nom_s))
grup = Grup(nom, sgrup)
else:
grup = Grup(nom)
self._grups[nom] = grup
return grup
def grups_disponibles(self):
return iter(list(self._grups.values()))
def grups_disponibles_mati(self):
return [g for g in list(self._grups.values()) if g.nomes_mati()]
def grups_disponibles_tarda(self):
return [g for g in list(self._grups.values()) if g.nomes_tarda()]
def tuples_classes(self):
grups = chain(iter(list(self._grups.values())), iter(list(self._supergrups.values())))
iters_classes = [grup.tuples_classes() for grup in grups]
return chain(*iters_classes)
def grup(self, nom):
return self._grups[nom]
def __eq__(self, other):
return self.nom == other.nom
def __lt__(self, other):
return self.nom < other.nom
def te_grup(self, nom_grup):
return nom_grup in self._grups
@total_ordering
class Horari(object):
    def __init__(self, grups=()):
self._tupla = tuple(sorted(grups))
self._grups = {} # { assig: grups }
for assig, grup in grups:
self._grups.setdefault(assig, set()).add(grup)
self._calcula_estadistiques()
def grups(self):
return self._tupla
def classes(self, dia, hora):
classes = set()
for assig, grups_assig in list(self._grups.items()):
for grup in grups_assig:
for c in _classes_dia_hora(assig, grup, dia, hora):
classes.add(c)
return sorted(classes)
def assignatures(self):
return sorted(self._grups.keys())
def __eq__(self, other):
for (assig1, grup1), (assig2, grup2) in zip(self._tupla, other._tupla):
c = (assig1, grup1) == (assig2, grup2)
if not c: return False
        return len(self._tupla) == len(other._tupla)
def __lt__(self, other):
for (assig1, grup1), (assig2, grup2) in zip(self._tupla, other._tupla):
c = (assig1, grup1) < (assig2, grup2)
if not c: return False
        return len(self._tupla) < len(other._tupla)
def __hash__(self):
return hash(self._tupla)
def __iter__(self):
return iter(self._tupla)
def _calcula_estadistiques(self):
assigs_dh = {} # { (dia, hora) : assig }
for assig, grup in self._tupla:
for dh in _hores_classes(assig, grup):
assigs_dh.setdefault(dh, set()).add(assig)
self.hores = len(assigs_dh)
self.hores_mati = 0
self.hores_tarda = 0
self.solapaments = 0
self.fragments = 0
self.primera_hora = Classe.valors_hora[-1]
self.ultima_hora = Classe.valors_hora[0]
for (dia, hora), assigs in list(assigs_dh.items()):
if hora < Classe.hora_inici_tarda:
self.hores_mati += 1
else:
self.hores_tarda += 1
self.solapaments += len(assigs) - 1
if (dia, hora - 1) not in assigs_dh:
self.fragments += 1
if hora < self.primera_hora:
self.primera_hora = hora
if hora > self.ultima_hora:
self.ultima_hora = hora
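# Illustrative note (not in the original source): with classes at (day 1,
# hour 9), (day 1, hour 10) and (day 1, hour 12), _calcula_estadistiques
# yields hores=3 and fragments=2 -- hour 12 opens a second block because
# hour 11 is free -- with primera_hora=9 and ultima_hora=12.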
class Cerca(object):
"""Realitza cerques de combinacions d'horaris.
Emmagatzema els grups candidats, el mínim nombre d'assignatures i el
nombre màxim de solapaments."""
def __init__(self, grups, min_assig, max_solap):
"""Inicializa els paràmetres de cerca.
'grups' és una seqüència de tuples (assignatura, grup), 'min_assig' és
el nombre mínim d'assignatures i 'max_solap' el nombre màxim de
solapaments de les solucions."""
        # List of subject names.
        self._assigs = []
        # Vector of groups per subject. Each element at position 'i' is a
        # vector of the group numbers of the subject self._assigs[i].
self._grups = []
grups_per_assig = {}
for assig, grup in grups:
grups_assig = grups_per_assig.setdefault(assig, [])
grups_assig.append(grup)
for assig, grups in list(grups_per_assig.items()):
self._assigs.append(assig)
self._grups.append(grups)
self._min_assig = min_assig
self._max_solap = max_solap
def n_combinacions(self):
n = 1
for grups in self._grups:
n *= len(grups) + 1
return n
def horaris(self):
"""Iterador sobre els horaris solució.
Generador que cerca tots els horaris solució."""
# Vector d'enters de la mateixa mida que self._grups. Cada element de
# posició i indica la posició del grup seleccionat de self._grups[i].
# Si l'element de posició 'i' és igual a len(self._grups[i]) vol dir
# que no se selecciona cap grup per aquella assignatura.
# Cada element representa una decisió, un nivell en l'arbre de cerca.
self._grup = [0] * len(self._grups)
        # Number of classes for each day and hour. Keys are (dia, hora)
        # tuples and values are integers.
self._classes = {}
        # Number of overlaps in the current combination.
self._n_solap = 0
        # Number of subjects in the current combination.
self._n_assig = 0
        # Current level of the search tree. Level 0 means no decision has
        # been taken yet. Level len(self._grup) means the combination is
        # complete and a candidate solution. A negative value means the
        # search has finished.
self._nivell = 0
        # The search tree is traversed iteratively, skipping the nodes
        # that lead to combinations that cannot produce solutions.
while self._nivell >= 0:
            # If the node is a leaf, just check whether the combination
            # is a solution, then go back to the parent.
if self._es_fulla():
if self._es_solucio():
yield Horari(self._solucio()), self._combinacio()
self._torna_cap_al_pare()
            # Check whether the node has already been explored, that is,
            # whether it was reached by returning from a child instead of
            # coming down from the parent.
elif self._ja_explorat():
if self._queden_fills():
self._explora_el_fill_seguent()
else:
self._torna_cap_al_pare()
            # If no new solutions are possible with the groups selected
            # so far, go back to the parent.
elif self._no_solucionable():
self._torna_cap_al_pare()
            # New solutions are still possible, so the node's children
            # are explored.
else:
self._explora_el_fill_seguent()
def _es_fulla(self):
"""Indica si el node actual és fulla de l'arbre."""
return self._nivell == len(self._grup)
def _es_solucio(self):
"""Indica si la combinació actual és solució."""
return self._n_solap <= self._max_solap \
and self._n_assig >= self._min_assig
def _solucio(self):
"""Llista de grups de la combinació solució."""
grup = self._grup
solucio = []
for n in range(len(grup)):
if grup[n] < len(self._grups[n]):
solucio.append((self._assigs[n], self._grups[n][grup[n]]))
return solucio
def _combinacio(self):
c = 0
n = 1
for i in range(len(self._grup) - 1, -1, -1):
c += self._grup[i] * n
n *= len(self._grups[i]) + 1
return c
def _ja_explorat(self):
"""Cert si el node actual ja s'ha explorat anteriorment.
Retorna un booleà indicant si el node actual s'hi ha arribat des del
pare o des d'un fill."""
return self._grup[self._nivell] > 0
def _queden_fills(self):
"""Cert si quden fills del node actual per explorar."""
nivell = self._nivell
return self._grup[nivell] <= len(self._grups[nivell])
def _no_solucionable(self):
"""Indica si és impossible trobar noves solucions des del node."""
return self._n_solap > self._max_solap \
or self._min_assig - self._n_assig > len(self._grup) - self._nivell
def _explora_el_fill_seguent(self):
"""Explora el següent fill del node."""
nivell = self._nivell
grup = self._grup
grups = self._grups
        # Check whether a group is being selected
if grup[nivell] < len(grups[nivell]):
self._afegeix_grup()
self._n_assig += 1
self._nivell += 1
def _torna_cap_al_pare(self):
"""Situa la cerca al node pare."""
nivell = self._nivell
grup = self._grup
grups = self._grups
        # If this is not a leaf, reset the current level to 0 so that
        # exploration later restarts with the first child.
if nivell < len(grup):
grup[nivell] = 0
nivell -= 1
self._nivell = nivell
if nivell < 0: return
        # Check whether a group is being deselected
if grup[nivell] < len(grups[nivell]):
self._treu_grup()
self._n_assig -= 1
grup[nivell] += 1
def _afegeix_grup(self):
"""S'afegeixen les classes del grup del nivell actual."""
nivell = self._nivell
assig = self._assigs[nivell]
index_grup = self._grup[nivell]
grup = self._grups[nivell][index_grup]
classes = self._classes
n_solap = self._n_solap
for c in _hores_classes(assig, grup):
s = classes.get(c, 0)
if (s >= 1):
n_solap += 1
classes[c] = s + 1
self._n_solap = n_solap
def _treu_grup(self):
"""Es treuen les classes del grup del nivell actual."""
nivell = self._nivell
assig = self._assigs[nivell]
index_grup = self._grup[nivell]
grup = self._grups[nivell][index_grup]
classes = self._classes
n_solap = self._n_solap
for c in _hores_classes(assig, grup):
s = classes[c] - 1
if (s >= 1):
n_solap -= 1
classes[c] = s
self._n_solap = n_solap
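# Illustrative sketch (not part of the original module): driving a search by
# hand. The subject and group names are made up; in the real application the
# candidate groups come from grups_disponibles(). The helper name is
# hypothetical.
def _exemple_cerca():
    candidats = [("AC", "10"), ("AC", "20"), ("PROP", "11")]
    cerca = Cerca(candidats, min_assig=1, max_solap=0)
    # Each subject contributes (number of groups + 1) choices, the extra one
    # meaning "take no group", so n_combinacions() returns 3 * 2 = 6 here.
    total = cerca.n_combinacions()
    solucions = [horari.grups() for horari, _ in cerca.horaris()]
    return total, solucions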
|
chdecultot/erpnext
|
refs/heads/develop
|
erpnext/agriculture/doctype/detected_disease/detected_disease.py
|
23
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DetectedDisease(Document):
pass
|
cpburnz/python-path-specification
|
refs/heads/master
|
pathspec/tests/test_gitwildmatch.py
|
1
|
# encoding: utf-8
"""
This script tests ``GitWildMatchPattern``.
"""
from __future__ import unicode_literals
import re
import sys
import unittest
import pathspec.patterns.gitwildmatch
import pathspec.util
from pathspec.patterns.gitwildmatch import GitWildMatchPattern
if sys.version_info[0] >= 3:
unichr = chr
class GitWildMatchTest(unittest.TestCase):
"""
The ``GitWildMatchTest`` class tests the ``GitWildMatchPattern``
implementation.
"""
def test_00_empty(self):
"""
Tests an empty pattern.
"""
regex, include = GitWildMatchPattern.pattern_to_regex('')
self.assertIsNone(include)
self.assertIsNone(regex)
def test_01_absolute(self):
"""
Tests an absolute path pattern.
This should match:
an/absolute/file/path
an/absolute/file/path/foo
This should NOT match:
foo/an/absolute/file/path
"""
regex, include = GitWildMatchPattern.pattern_to_regex('/an/absolute/file/path')
self.assertTrue(include)
self.assertEqual(regex, '^an/absolute/file/path(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'an/absolute/file/path',
'an/absolute/file/path/foo',
'foo/an/absolute/file/path',
]))
self.assertEqual(results, {
'an/absolute/file/path',
'an/absolute/file/path/foo',
})
def test_01_absolute_root(self):
"""
Tests a single root absolute path pattern.
This should NOT match any file (according to git check-ignore
(v2.4.1)).
"""
regex, include = GitWildMatchPattern.pattern_to_regex('/')
self.assertIsNone(include)
self.assertIsNone(regex)
def test_01_relative(self):
"""
Tests a relative path pattern.
This should match:
spam
spam/
foo/spam
spam/foo
foo/spam/bar
"""
regex, include = GitWildMatchPattern.pattern_to_regex('spam')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?spam(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'spam',
'spam/',
'foo/spam',
'spam/foo',
'foo/spam/bar',
]))
self.assertEqual(results, {
'spam',
'spam/',
'foo/spam',
'spam/foo',
'foo/spam/bar',
})
def test_01_relative_nested(self):
"""
Tests a relative nested path pattern.
This should match:
foo/spam
foo/spam/bar
This should **not** match (according to git check-ignore (v2.4.1)):
bar/foo/spam
"""
regex, include = GitWildMatchPattern.pattern_to_regex('foo/spam')
self.assertTrue(include)
self.assertEqual(regex, '^foo/spam(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'foo/spam',
'foo/spam/bar',
'bar/foo/spam',
]))
self.assertEqual(results, {
'foo/spam',
'foo/spam/bar',
})
def test_02_comment(self):
"""
Tests a comment pattern.
"""
regex, include = GitWildMatchPattern.pattern_to_regex('# Cork soakers.')
self.assertIsNone(include)
self.assertIsNone(regex)
def test_02_ignore(self):
"""
Tests an exclude pattern.
This should NOT match (according to git check-ignore (v2.4.1)):
temp/foo
"""
regex, include = GitWildMatchPattern.pattern_to_regex('!temp')
self.assertIsNotNone(include)
self.assertFalse(include)
self.assertEqual(regex, '^(?:.+/)?temp$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match(['temp/foo']))
self.assertEqual(results, set())
def test_03_child_double_asterisk(self):
"""
Tests a directory name with a double-asterisk child
directory.
This should match:
spam/bar
This should **not** match (according to git check-ignore (v2.4.1)):
foo/spam/bar
"""
regex, include = GitWildMatchPattern.pattern_to_regex('spam/**')
self.assertTrue(include)
self.assertEqual(regex, '^spam/.*$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'spam/bar',
'foo/spam/bar',
]))
self.assertEqual(results, {'spam/bar'})
def test_03_inner_double_asterisk(self):
"""
Tests a path with an inner double-asterisk directory.
This should match:
left/right
left/bar/right
left/foo/bar/right
left/bar/right/foo
This should **not** match (according to git check-ignore (v2.4.1)):
foo/left/bar/right
"""
regex, include = GitWildMatchPattern.pattern_to_regex('left/**/right')
self.assertTrue(include)
self.assertEqual(regex, '^left(?:/.+)?/right(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'left/right',
'left/bar/right',
'left/foo/bar/right',
'left/bar/right/foo',
'foo/left/bar/right',
]))
self.assertEqual(results, {
'left/right',
'left/bar/right',
'left/foo/bar/right',
'left/bar/right/foo',
})
def test_03_only_double_asterisk(self):
"""
Tests a double-asterisk pattern which matches everything.
"""
regex, include = GitWildMatchPattern.pattern_to_regex('**')
self.assertTrue(include)
self.assertEqual(regex, '^.+$')
def test_03_parent_double_asterisk(self):
"""
Tests a file name with a double-asterisk parent directory.
This should match:
spam
foo/spam
foo/spam/bar
"""
regex, include = GitWildMatchPattern.pattern_to_regex('**/spam')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?spam(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'spam',
'foo/spam',
'foo/spam/bar',
]))
self.assertEqual(results, {
'spam',
'foo/spam',
'foo/spam/bar',
})
def test_03_duplicate_leading_double_asterisk_edge_case(self):
"""
Regression test for duplicate leading **/ bug.
"""
regex, include = GitWildMatchPattern.pattern_to_regex('**')
self.assertTrue(include)
self.assertEqual(regex, '^.+$')
equivalent_regex, include = GitWildMatchPattern.pattern_to_regex('**/**')
self.assertTrue(include)
self.assertEqual(equivalent_regex, regex)
equivalent_regex, include = GitWildMatchPattern.pattern_to_regex('**/**/**')
self.assertTrue(include)
self.assertEqual(equivalent_regex, regex)
regex, include = GitWildMatchPattern.pattern_to_regex('**/api')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?api(?:/.*)?$')
equivalent_regex, include = GitWildMatchPattern.pattern_to_regex('**/**/api')
self.assertTrue(include)
self.assertEqual(equivalent_regex, regex)
regex, include = GitWildMatchPattern.pattern_to_regex('**/api/')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?api/.*$')
equivalent_regex, include = GitWildMatchPattern.pattern_to_regex('**/api/**')
self.assertTrue(include)
self.assertEqual(equivalent_regex, regex)
equivalent_regex, include = GitWildMatchPattern.pattern_to_regex('**/**/api/**/**')
self.assertTrue(include)
self.assertEqual(equivalent_regex, regex)
def test_03_double_asterisk_trailing_slash_edge_case(self):
"""
Tests the edge-case **/ pattern.
This should match everything except individual files in the root directory.
"""
regex, include = GitWildMatchPattern.pattern_to_regex('**/')
self.assertTrue(include)
self.assertEqual(regex, '^.+/.*$')
equivalent_regex, include = GitWildMatchPattern.pattern_to_regex('**/**/')
self.assertTrue(include)
self.assertEqual(equivalent_regex, regex)
def test_04_infix_wildcard(self):
"""
Tests a pattern with an infix wildcard.
This should match:
foo--bar
foo-hello-bar
a/foo-hello-bar
foo-hello-bar/b
a/foo-hello-bar/b
"""
regex, include = GitWildMatchPattern.pattern_to_regex('foo-*-bar')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?foo\\-[^/]*\\-bar(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'foo--bar',
'foo-hello-bar',
'a/foo-hello-bar',
'foo-hello-bar/b',
'a/foo-hello-bar/b',
]))
self.assertEqual(results, {
'foo--bar',
'foo-hello-bar',
'a/foo-hello-bar',
'foo-hello-bar/b',
'a/foo-hello-bar/b',
})
def test_04_postfix_wildcard(self):
"""
Tests a pattern with a postfix wildcard.
This should match:
~temp-
~temp-foo
~temp-foo/bar
foo/~temp-bar
foo/~temp-bar/baz
"""
regex, include = GitWildMatchPattern.pattern_to_regex('~temp-*')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?\\~temp\\-[^/]*(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'~temp-',
'~temp-foo',
'~temp-foo/bar',
'foo/~temp-bar',
'foo/~temp-bar/baz',
]))
self.assertEqual(results, {
'~temp-',
'~temp-foo',
'~temp-foo/bar',
'foo/~temp-bar',
'foo/~temp-bar/baz',
})
def test_04_prefix_wildcard(self):
"""
Tests a pattern with a prefix wildcard.
This should match:
bar.py
bar.py/
foo/bar.py
foo/bar.py/baz
"""
regex, include = GitWildMatchPattern.pattern_to_regex('*.py')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?[^/]*\\.py(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'bar.py',
'bar.py/',
'foo/bar.py',
'foo/bar.py/baz',
]))
self.assertEqual(results, {
'bar.py',
'bar.py/',
'foo/bar.py',
'foo/bar.py/baz',
})
def test_05_directory(self):
"""
Tests a directory pattern.
This should match:
dir/
foo/dir/
foo/dir/bar
This should **not** match:
dir
"""
regex, include = GitWildMatchPattern.pattern_to_regex('dir/')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?dir/.*$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'dir/',
'foo/dir/',
'foo/dir/bar',
'dir',
]))
self.assertEqual(results, {
'dir/',
'foo/dir/',
'foo/dir/bar',
})
def test_06_registered(self):
"""
Tests that the pattern is registered.
"""
self.assertIs(pathspec.util.lookup_pattern('gitwildmatch'), GitWildMatchPattern)
def test_06_access_deprecated(self):
"""
Tests that the pattern is accessible from the root module using the
deprecated alias.
"""
self.assertTrue(hasattr(pathspec, 'GitIgnorePattern'))
self.assertTrue(issubclass(pathspec.GitIgnorePattern, GitWildMatchPattern))
def test_06_registered_deprecated(self):
"""
Tests that the pattern is registered under the deprecated alias.
"""
self.assertIs(pathspec.util.lookup_pattern('gitignore'), pathspec.GitIgnorePattern)
def test_07_encode_bytes(self):
"""
Test encoding bytes.
"""
encoded = "".join(map(unichr, range(0,256))).encode(pathspec.patterns.gitwildmatch._BYTES_ENCODING)
expected = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
self.assertEqual(encoded, expected)
def test_07_decode_bytes(self):
"""
Test decoding bytes.
"""
decoded = bytes(bytearray(range(0,256))).decode(pathspec.patterns.gitwildmatch._BYTES_ENCODING)
expected = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
self.assertEqual(decoded, expected)
def test_07_match_bytes_and_bytes(self):
"""
Test byte string patterns matching byte string paths.
"""
pattern = GitWildMatchPattern(b'*.py')
results = set(pattern.match([b'a.py']))
self.assertEqual(results, {b'a.py'})
def test_07_match_bytes_and_bytes_complete(self):
"""
Test byte string patterns matching byte string paths.
"""
encoded = bytes(bytearray(range(0,256)))
escaped = b"".join(b"\\" + encoded[i:i+1] for i in range(len(encoded)))
pattern = GitWildMatchPattern(escaped)
results = set(pattern.match([encoded]))
self.assertEqual(results, {encoded})
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is strict")
def test_07_match_bytes_and_unicode(self):
"""
Test byte string patterns matching byte string paths.
"""
pattern = GitWildMatchPattern(b'*.py')
results = set(pattern.match(['a.py']))
self.assertEqual(results, {'a.py'})
@unittest.skipIf(sys.version_info[0] == 2, "Python 2 is lenient")
def test_07_match_bytes_and_unicode_fail(self):
"""
Test byte string patterns matching byte string paths.
"""
pattern = GitWildMatchPattern(b'*.py')
with self.assertRaises(TypeError):
for _ in pattern.match(['a.py']):
pass
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is strict")
def test_07_match_unicode_and_bytes(self):
"""
Test unicode patterns with byte paths.
"""
pattern = GitWildMatchPattern('*.py')
results = set(pattern.match([b'a.py']))
self.assertEqual(results, {b'a.py'})
@unittest.skipIf(sys.version_info[0] == 2, "Python 2 is lenient")
def test_07_match_unicode_and_bytes_fail(self):
"""
Test unicode patterns with byte paths.
"""
pattern = GitWildMatchPattern('*.py')
with self.assertRaises(TypeError):
for _ in pattern.match([b'a.py']):
pass
def test_07_match_unicode_and_unicode(self):
"""
Test unicode patterns with unicode paths.
"""
pattern = GitWildMatchPattern('*.py')
results = set(pattern.match(['a.py']))
self.assertEqual(results, {'a.py'})
def test_08_escape(self):
"""
Test escaping a string with meta-characters
"""
fname = "file!with*weird#naming_[1].t?t"
escaped = r"file\!with\*weird\#naming_\[1\].t\?t"
result = GitWildMatchPattern.escape(fname)
self.assertEqual(result, escaped)
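# Illustrative sketch (not part of the original test suite): the patterns
# exercised above are normally consumed through pathspec.PathSpec, e.g. to
# filter a file listing with gitignore-style rules. The helper name is
# hypothetical.
def _example_pathspec_usage():
    spec = pathspec.PathSpec.from_lines('gitwildmatch', ['*.py', '!keep.py'])
    files = ['a.py', 'keep.py', 'docs/readme.txt']
    # Only 'a.py' survives: '*.py' includes it and '!keep.py' re-excludes
    # the other match.
    return set(spec.match_files(files))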
|
motion2015/a3
|
refs/heads/a3
|
common/lib/xmodule/xmodule/tests/test_bulk_assertions.py
|
173
|
import ddt
import itertools
from xmodule.tests import BulkAssertionTest, BulkAssertionError
STATIC_PASSING_ASSERTIONS = (
('assertTrue', True),
('assertFalse', False),
('assertIs', 1, 1),
('assertEqual', 1, 1),
('assertEquals', 1, 1),
('assertIsNot', 1, 2),
('assertIsNone', None),
('assertIsNotNone', 1),
('assertIn', 1, (1, 2, 3)),
('assertNotIn', 5, (1, 2, 3)),
('assertIsInstance', 1, int),
('assertNotIsInstance', '1', int),
('assertItemsEqual', [1, 2, 3], [3, 2, 1])
)
STATIC_FAILING_ASSERTIONS = (
('assertTrue', False),
('assertFalse', True),
('assertIs', 1, 2),
('assertEqual', 1, 2),
('assertEquals', 1, 2),
('assertIsNot', 1, 1),
('assertIsNone', 1),
('assertIsNotNone', None),
('assertIn', 5, (1, 2, 3)),
('assertNotIn', 1, (1, 2, 3)),
('assertIsInstance', '1', int),
('assertNotIsInstance', 1, int),
('assertItemsEqual', [1, 1, 1], [1, 1])
)
CONTEXT_PASSING_ASSERTIONS = (
('assertRaises', KeyError, {}.__getitem__, '1'),
('assertRaisesRegexp', KeyError, "1", {}.__getitem__, '1'),
)
CONTEXT_FAILING_ASSERTIONS = (
('assertRaises', ValueError, lambda: None),
('assertRaisesRegexp', KeyError, "2", {}.__getitem__, '1'),
)
@ddt.ddt
class TestBulkAssertionTestCase(BulkAssertionTest):
# We have to use assertion methods from the base UnitTest class,
# so we make a number of super calls that skip BulkAssertionTest.
# pylint: disable=bad-super-call
def _run_assertion(self, assertion_tuple):
"""
Run the supplied tuple of (assertion, *args) as a method on this class.
"""
assertion, args = assertion_tuple[0], assertion_tuple[1:]
getattr(self, assertion)(*args)
def _raw_assert(self, assertion_name, *args, **kwargs):
"""
Run an un-modified assertion.
"""
        # Use super(BulkAssertionTest) to make sure we get unadulterated assertions
return getattr(super(BulkAssertionTest, self), 'assert' + assertion_name)(*args, **kwargs)
@ddt.data(*(STATIC_PASSING_ASSERTIONS + CONTEXT_PASSING_ASSERTIONS))
def test_passing_asserts_passthrough(self, assertion_tuple):
self._run_assertion(assertion_tuple)
@ddt.data(*(STATIC_FAILING_ASSERTIONS + CONTEXT_FAILING_ASSERTIONS))
def test_failing_asserts_passthrough(self, assertion_tuple):
with self._raw_assert('Raises', AssertionError) as context:
self._run_assertion(assertion_tuple)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
@ddt.data(*CONTEXT_PASSING_ASSERTIONS)
@ddt.unpack
def test_passing_context_assertion_passthrough(self, assertion, *args):
assertion_args = []
args = list(args)
exception = args.pop(0)
while not callable(args[0]):
assertion_args.append(args.pop(0))
function = args.pop(0)
with getattr(self, assertion)(exception, *assertion_args):
function(*args)
@ddt.data(*CONTEXT_FAILING_ASSERTIONS)
@ddt.unpack
def test_failing_context_assertion_passthrough(self, assertion, *args):
assertion_args = []
args = list(args)
exception = args.pop(0)
while not callable(args[0]):
assertion_args.append(args.pop(0))
function = args.pop(0)
with self._raw_assert('Raises', AssertionError) as context:
with getattr(self, assertion)(exception, *assertion_args):
function(*args)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
@ddt.data(*list(itertools.product(
CONTEXT_PASSING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_bulk_assert(self, passing_assertion, failing_assertion1, failing_assertion2):
contextmanager = self.bulk_assertions()
contextmanager.__enter__()
self._run_assertion(passing_assertion)
self._run_assertion(failing_assertion1)
self._run_assertion(failing_assertion2)
with self._raw_assert('Raises', BulkAssertionError) as context:
contextmanager.__exit__(None, None, None)
self._raw_assert('Equals', len(context.exception.errors), 2)
@ddt.data(*list(itertools.product(
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_nested_bulk_asserts(self, failing_assertion):
with self._raw_assert('Raises', BulkAssertionError) as context:
with self.bulk_assertions():
self._run_assertion(failing_assertion)
with self.bulk_assertions():
self._run_assertion(failing_assertion)
self._run_assertion(failing_assertion)
self._raw_assert('Equal', len(context.exception.errors), 3)
@ddt.data(*list(itertools.product(
CONTEXT_PASSING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_bulk_assert_closed(self, passing_assertion, failing_assertion1, failing_assertion2):
with self._raw_assert('Raises', BulkAssertionError) as context:
with self.bulk_assertions():
self._run_assertion(passing_assertion)
self._run_assertion(failing_assertion1)
self._raw_assert('Equals', len(context.exception.errors), 1)
with self._raw_assert('Raises', AssertionError) as context:
self._run_assertion(failing_assertion2)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
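# Illustrative sketch (not part of the original tests): the bulk-assertion
# pattern verified above boils down to collecting assertion failures and
# raising them together at the end. BulkAssertionTest wraps unittest's
# assert* methods to do this; the standalone class below only shows the
# collecting mechanics and is not the actual implementation.
class _BulkErrorCollector(object):
    def __init__(self):
        self.errors = []
    def check(self, condition, message):
        # Record the failure instead of raising immediately.
        if not condition:
            self.errors.append(AssertionError(message))
    def raise_if_any(self):
        if self.errors:
            raise AssertionError('%d failed checks: %r'
                                 % (len(self.errors), self.errors))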
|
hantonov/ansible-modules-core
|
refs/heads/devel
|
system/seboolean.py
|
15
|
#!/usr/bin/python
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans.
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure
required: true
default: null
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot
required: false
default: no
choices: [ "yes", "no" ]
state:
description:
- Desired boolean value
required: true
default: null
choices: [ 'yes', 'no' ]
notes:
   - Not tested on any Debian-based system
requirements: [ ]
author: "Stephen Fromm (@sfromm)"
'''
EXAMPLES = '''
# Set (httpd_can_network_connect) flag on and keep it persistent across reboots
- seboolean: name=httpd_can_network_connect state=yes persistent=yes
'''
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import semanage
HAVE_SEMANAGE=True
except ImportError:
HAVE_SEMANAGE=False
def has_boolean_value(module, name):
bools = []
try:
rc, bools = selinux.security_get_boolean_names()
except OSError:
module.fail_json(msg="Failed to get list of boolean names")
if name in bools:
return True
else:
return False
def get_boolean_value(module, name):
state = 0
try:
state = selinux.security_get_boolean_active(name)
except OSError:
module.fail_json(msg="Failed to determine current state for boolean %s" % name)
if state == 1:
return True
else:
return False
# The following method implements what setsebool.c does to change
# a boolean and make it persist after reboot..
def semanage_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
handle = semanage.semanage_handle_create()
if handle is None:
module.fail_json(msg="Failed to create semanage library handle")
try:
managed = semanage.semanage_is_managed(handle)
if managed < 0:
            module.fail_json(msg="Failed to determine whether policy is managed")
if managed == 0:
if os.getuid() == 0:
module.fail_json(msg="Cannot set persistent booleans without managed policy")
else:
module.fail_json(msg="Cannot set persistent booleans; please try as root")
if semanage.semanage_connect(handle) < 0:
module.fail_json(msg="Failed to connect to semanage")
if semanage.semanage_begin_transaction(handle) < 0:
module.fail_json(msg="Failed to begin semanage transaction")
rc, sebool = semanage.semanage_bool_create(handle)
if rc < 0:
module.fail_json(msg="Failed to create seboolean with semanage")
if semanage.semanage_bool_set_name(handle, sebool, name) < 0:
module.fail_json(msg="Failed to set seboolean name with semanage")
semanage.semanage_bool_set_value(sebool, value)
rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool)
if rc < 0:
module.fail_json(msg="Failed to extract boolean key with semanage")
if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
module.fail_json(msg="Failed to modify boolean key with semanage")
if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
module.fail_json(msg="Failed to set boolean key active with semanage")
semanage.semanage_bool_key_free(boolkey)
semanage.semanage_bool_free(sebool)
semanage.semanage_set_reload(handle, 0)
if semanage.semanage_commit(handle) < 0:
module.fail_json(msg="Failed to commit changes to semanage")
semanage.semanage_disconnect(handle)
semanage.semanage_handle_destroy(handle)
except Exception:
e = get_exception()
module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e)))
return True
def set_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
try:
rc = selinux.security_set_boolean(name, value)
except OSError:
module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
if rc == 0:
return True
else:
return False
def main():
module = AnsibleModule(
argument_spec = dict(
name=dict(required=True),
persistent=dict(default='no', type='bool'),
state=dict(required=True, type='bool')
),
supports_check_mode=True
)
if not HAVE_SELINUX:
module.fail_json(msg="This module requires libselinux-python support")
if not HAVE_SEMANAGE:
module.fail_json(msg="This module requires libsemanage-python support")
if not selinux.is_selinux_enabled():
module.fail_json(msg="SELinux is disabled on this host.")
name = module.params['name']
persistent = module.params['persistent']
state = module.params['state']
result = {}
result['name'] = name
if hasattr(selinux, 'selinux_boolean_sub'):
# selinux_boolean_sub allows sites to rename a boolean and alias the old name
# Feature only available in selinux library since 2012.
name = selinux.selinux_boolean_sub(name)
if not has_boolean_value(module, name):
module.fail_json(msg="SELinux boolean %s does not exist." % name)
cur_value = get_boolean_value(module, name)
if cur_value == state:
result['state'] = cur_value
result['changed'] = False
module.exit_json(**result)
if module.check_mode:
module.exit_json(changed=True)
if persistent:
r = semanage_boolean_value(module, name, state)
else:
r = set_boolean_value(module, name, state)
result['changed'] = r
if not r:
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, state))
try:
selinux.security_commit_booleans()
    except Exception:
module.fail_json(msg="Failed to commit pending boolean %s value" % name)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
Byron/pgit-distro
|
refs/heads/master
|
bin/.pgit.py
|
1
|
../lib/bcore/src/python/bprocess/bootstrap.py
|
shadyueh/pyranking
|
refs/heads/master
|
env/lib/python2.7/site-packages/wheel/signatures/ed25519py.py
|
565
|
# -*- coding: utf-8 -*-
import warnings
import os
from collections import namedtuple
from . import djbec
__all__ = ['crypto_sign', 'crypto_sign_open', 'crypto_sign_keypair', 'Keypair',
'PUBLICKEYBYTES', 'SECRETKEYBYTES', 'SIGNATUREBYTES']
PUBLICKEYBYTES=32
SECRETKEYBYTES=64
SIGNATUREBYTES=64
Keypair = namedtuple('Keypair', ('vk', 'sk')) # verifying key, secret key
def crypto_sign_keypair(seed=None):
"""Return (verifying, secret) key from a given seed, or os.urandom(32)"""
if seed is None:
seed = os.urandom(PUBLICKEYBYTES)
else:
warnings.warn("ed25519ll should choose random seed.",
RuntimeWarning)
if len(seed) != 32:
raise ValueError("seed must be 32 random bytes or None.")
skbytes = seed
vkbytes = djbec.publickey(skbytes)
return Keypair(vkbytes, skbytes+vkbytes)
def crypto_sign(msg, sk):
"""Return signature+message given message and secret key.
The signature is the first SIGNATUREBYTES bytes of the return value.
A copy of msg is in the remainder."""
if len(sk) != SECRETKEYBYTES:
raise ValueError("Bad signing key length %d" % len(sk))
vkbytes = sk[PUBLICKEYBYTES:]
skbytes = sk[:PUBLICKEYBYTES]
sig = djbec.signature(msg, skbytes, vkbytes)
return sig + msg
def crypto_sign_open(signed, vk):
"""Return message given signature+message and the verifying key."""
if len(vk) != PUBLICKEYBYTES:
raise ValueError("Bad verifying key length %d" % len(vk))
rc = djbec.checkvalid(signed[:SIGNATUREBYTES], signed[SIGNATUREBYTES:], vk)
if not rc:
raise ValueError("rc != True", rc)
return signed[SIGNATUREBYTES:]
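# Hedged usage sketch (added; assumes the djbec backend imported above works
# as expected): a sign/verify round trip through this module's own API.
#
#     kp = crypto_sign_keypair()              # Keypair(vk, sk); sk is seed + vk
#     signed = crypto_sign(b'hello', kp.sk)   # 64-byte signature prepended to msg
#     assert crypto_sign_open(signed, kp.vk) == b'hello'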
|
apple/llvm-project
|
refs/heads/llvm.org/main
|
lldb/test/API/commands/expression/call-function/TestCallUserDefinedFunction.py
|
4
|
"""
Test calling user-defined functions using expression evaluation.
Note:
LLDB's current first choice for evaluating functions is the IR interpreter,
which is only supported on Hexagon. Otherwise JIT is used for the evaluation.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ExprCommandCallUserDefinedFunction(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test(self):
"""Test return values of user defined function calls."""
self.build()
lldbutil.run_to_source_breakpoint(self, "// break here", lldb.SBFileSpec("main.cpp"))
# Test recursive function call.
self.expect_expr("fib(5)", result_type="unsigned int", result_value="5")
# Test function with more than one parameter
self.expect_expr("add(4, 8)", result_type="int", result_value="12")
# Test nesting function calls in function parameters
self.expect_expr("add(add(5,2),add(3,4))", result_type="int", result_value="14")
self.expect_expr("add(add(5,2),fib(5))", result_type="int", result_value="12")
# Test function with pointer parameter
self.expect_expr('stringCompare((const char*) \"Hello world\")', result_type="bool", result_value="true")
self.expect_expr('stringCompare((const char*) \"Hellworld\")', result_type="bool", result_value="false")
|
bryceweiner/amkoin
|
refs/heads/master
|
share/qt/clean_mac_info_plist.py
|
2
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Bitcoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./"
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "AmKoin-Qt.app/Contents/Info.plist"
version = "unknown"
fileForGrabbingVersion = bitcoinDir+"amkoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=")
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "")
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version, YEAR=date.today().year)
fOut = open(outFile, "w")
fOut.write(newFileContent)
print "Info.plist freshly created"
|
cruzegoodin/TSC-ShippingDetails
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.py
|
317
|
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
try:
import cPickle as pickle
except ImportError:
import pickle
from pip._vendor.requests.packages.urllib3.response import HTTPResponse
from pip._vendor.requests.packages.urllib3.util import is_fp_closed
|
FrankBian/kuma
|
refs/heads/master
|
vendor/packages/Babel/babel/messages/catalog.py
|
9
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Data structures for message catalogs."""
from cgi import parse_header
from datetime import datetime
from difflib import get_close_matches
from email import message_from_string
from copy import copy
import re
try:
set
except NameError:
from sets import Set as set
import time
from babel import __version__ as VERSION
from babel.core import Locale
from babel.dates import format_datetime
from babel.messages.plurals import get_plural
from babel.util import odict, distinct, LOCALTZ, UTC, FixedOffsetTimezone
__all__ = ['Message', 'Catalog', 'TranslationError']
__docformat__ = 'restructuredtext en'
PYTHON_FORMAT = re.compile(r'''(?x)
\%
(?:\(([\w]*)\))?
(
[-#0\ +]?(?:\*|[\d]+)?
(?:\.(?:\*|[\d]+))?
[hlL]?
)
([diouxXeEfFgGcrs%])
''')
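# Illustrative note (added; not part of the original module): PYTHON_FORMAT
# matches printf-style placeholders, capturing the mapping key, the conversion
# modifiers, and the conversion type, e.g.:
#     PYTHON_FORMAT.search('foo %(name)s bar').groups() == ('name', '', 's')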
class Message(object):
"""Representation of a single message in a catalog."""
def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None):
"""Create the message object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
        :param locations: a sequence of ``(filename, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments for the message
:param user_comments: a sequence of user comments for the message
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
"""
self.id = id #: The message ID
if not string and self.pluralizable:
string = (u'', u'')
self.string = string #: The message translation
self.locations = list(distinct(locations))
self.flags = set(flags)
if id and self.python_format:
self.flags.add('python-format')
else:
self.flags.discard('python-format')
self.auto_comments = list(distinct(auto_comments))
self.user_comments = list(distinct(user_comments))
if isinstance(previous_id, basestring):
self.previous_id = [previous_id]
else:
self.previous_id = list(previous_id)
self.lineno = lineno
def __repr__(self):
return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
list(self.flags))
def __cmp__(self, obj):
"""Compare Messages, taking into account plural ids"""
if isinstance(obj, Message):
plural = self.pluralizable
obj_plural = obj.pluralizable
if plural and obj_plural:
return cmp(self.id[0], obj.id[0])
elif plural:
return cmp(self.id[0], obj.id)
elif obj_plural:
return cmp(self.id, obj.id[0])
return cmp(self.id, obj.id)
def clone(self):
return Message(*map(copy, (self.id, self.string, self.locations,
self.flags, self.auto_comments,
self.user_comments, self.previous_id,
self.lineno)))
def check(self, catalog=None):
"""Run various validation checks on the message. Some validations
are only performed if the catalog is provided. This method returns
a sequence of `TranslationError` objects.
:rtype: ``iterator``
:param catalog: A catalog instance that is passed to the checkers
:see: `Catalog.check` for a way to perform checks for all messages
in a catalog.
"""
from babel.messages.checkers import checkers
errors = []
for checker in checkers:
try:
checker(catalog, self)
except TranslationError, e:
errors.append(e)
return errors
def fuzzy(self):
return 'fuzzy' in self.flags
fuzzy = property(fuzzy, doc="""\
Whether the translation is fuzzy.
>>> Message('foo').fuzzy
False
>>> msg = Message('foo', 'foo', flags=['fuzzy'])
>>> msg.fuzzy
True
>>> msg
<Message 'foo' (flags: ['fuzzy'])>
:type: `bool`
""")
def pluralizable(self):
return isinstance(self.id, (list, tuple))
pluralizable = property(pluralizable, doc="""\
    Whether the message is pluralizable.
>>> Message('foo').pluralizable
False
>>> Message(('foo', 'bar')).pluralizable
True
:type: `bool`
""")
def python_format(self):
ids = self.id
if not isinstance(ids, (list, tuple)):
ids = [ids]
return bool(filter(None, [PYTHON_FORMAT.search(id) for id in ids]))
python_format = property(python_format, doc="""\
Whether the message contains Python-style parameters.
>>> Message('foo %(name)s bar').python_format
True
>>> Message(('foo %(name)s', 'foo %(name)s')).python_format
True
:type: `bool`
""")
class TranslationError(Exception):
"""Exception thrown by translation checkers when invalid message
translations are encountered."""
DEFAULT_HEADER = u"""\
# Translations template for PROJECT.
# Copyright (C) YEAR ORGANIZATION
# This file is distributed under the same license as the PROJECT project.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#"""
class Catalog(object):
"""Representation of a message catalog."""
def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
project=None, version=None, copyright_holder=None,
msgid_bugs_address=None, creation_date=None,
revision_date=None, last_translator=None, language_team=None,
charset='utf-8', fuzzy=True):
"""Initialize the catalog object.
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param header_comment: the header comment as string, or `None` for the
default header
:param project: the project's name
:param version: the project's version
:param copyright_holder: the copyright holder of the catalog
:param msgid_bugs_address: the email address or URL to submit bug
reports to
:param creation_date: the date the catalog was created
:param revision_date: the date the catalog was revised
:param last_translator: the name and email of the last translator
:param language_team: the name and email of the language team
:param charset: the encoding to use in the output
:param fuzzy: the fuzzy bit on the catalog header
"""
self.domain = domain #: The message domain
if locale:
locale = Locale.parse(locale)
self.locale = locale #: The locale or `None`
self._header_comment = header_comment
self._messages = odict()
self.project = project or 'PROJECT' #: The project name
self.version = version or 'VERSION' #: The project version
self.copyright_holder = copyright_holder or 'ORGANIZATION'
self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
"""Name and email address of the last translator."""
self.language_team = language_team or 'LANGUAGE <LL@li.org>'
"""Name and email address of the language team."""
self.charset = charset or 'utf-8'
if creation_date is None:
creation_date = datetime.now(LOCALTZ)
elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
creation_date = creation_date.replace(tzinfo=LOCALTZ)
self.creation_date = creation_date #: Creation date of the template
if revision_date is None:
revision_date = datetime.now(LOCALTZ)
elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
revision_date = revision_date.replace(tzinfo=LOCALTZ)
self.revision_date = revision_date #: Last revision date of the catalog
self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`)
self.obsolete = odict() #: Dictionary of obsolete messages
self._num_plurals = None
self._plural_expr = None
def _get_header_comment(self):
comment = self._header_comment
comment = comment.replace('PROJECT', self.project) \
.replace('VERSION', self.version) \
.replace('YEAR', self.revision_date.strftime('%Y')) \
.replace('ORGANIZATION', self.copyright_holder)
if self.locale:
comment = comment.replace('Translations template', '%s translations'
% self.locale.english_name)
return comment
def _set_header_comment(self, string):
self._header_comment = string
header_comment = property(_get_header_comment, _set_header_comment, doc="""\
The header comment for the catalog.
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> print catalog.header_comment #doctest: +ELLIPSIS
# Translations template for Foobar.
# Copyright (C) ... Foo Company
# This file is distributed under the same license as the Foobar project.
# FIRST AUTHOR <EMAIL@ADDRESS>, ....
#
The header can also be set from a string. Any known upper-case variables
will be replaced when the header is retrieved again:
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> catalog.header_comment = '''\\
... # The POT for my really cool PROJECT project.
... # Copyright (C) 1990-2003 ORGANIZATION
... # This file is distributed under the same license as the PROJECT
... # project.
... #'''
>>> print catalog.header_comment
# The POT for my really cool Foobar project.
# Copyright (C) 1990-2003 Foo Company
# This file is distributed under the same license as the Foobar
# project.
#
:type: `unicode`
""")
def _get_mime_headers(self):
headers = []
headers.append(('Project-Id-Version',
'%s %s' % (self.project, self.version)))
headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
headers.append(('POT-Creation-Date',
format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
locale='en')))
if self.locale is None:
headers.append(('PO-Revision-Date', 'YEAR-MO-DA HO:MI+ZONE'))
headers.append(('Last-Translator', 'FULL NAME <EMAIL@ADDRESS>'))
headers.append(('Language-Team', 'LANGUAGE <LL@li.org>'))
else:
headers.append(('PO-Revision-Date',
format_datetime(self.revision_date,
'yyyy-MM-dd HH:mmZ', locale='en')))
headers.append(('Last-Translator', self.last_translator))
headers.append(('Language-Team',
self.language_team.replace('LANGUAGE',
str(self.locale))))
headers.append(('Plural-Forms', self.plural_forms))
headers.append(('MIME-Version', '1.0'))
headers.append(('Content-Type',
'text/plain; charset=%s' % self.charset))
headers.append(('Content-Transfer-Encoding', '8bit'))
headers.append(('Generated-By', 'Babel %s\n' % VERSION))
return headers
def _set_mime_headers(self, headers):
for name, value in headers:
if name.lower() == 'content-type':
mimetype, params = parse_header(value)
if 'charset' in params:
self.charset = params['charset'].lower()
break
for name, value in headers:
name = name.lower().decode(self.charset)
value = value.decode(self.charset)
if name == 'project-id-version':
parts = value.split(' ')
self.project = u' '.join(parts[:-1])
self.version = parts[-1]
elif name == 'report-msgid-bugs-to':
self.msgid_bugs_address = value
elif name == 'last-translator':
self.last_translator = value
elif name == 'language-team':
self.language_team = value
elif name == 'plural-forms':
_, params = parse_header(' ;' + value)
self._num_plurals = int(params.get('nplurals', 2))
self._plural_expr = params.get('plural', '(n != 1)')
elif name == 'pot-creation-date':
# FIXME: this should use dates.parse_datetime as soon as that
# is ready
value, tzoffset, _ = re.split('[+-](\d{4})$', value, 1)
tt = time.strptime(value, '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
tzoffset = FixedOffsetTimezone(int(tzoffset[:2]) * 60 +
int(tzoffset[2:]))
dt = datetime.fromtimestamp(ts)
self.creation_date = dt.replace(tzinfo=tzoffset)
mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
The MIME headers of the catalog, used for the special ``msgid ""`` entry.
The behavior of this property changes slightly depending on whether a locale
is set or not, the latter indicating that the catalog is actually a template
for actual translations.
Here's an example of the output for such a catalog template:
>>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
>>> catalog = Catalog(project='Foobar', version='1.0',
... creation_date=created)
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
Last-Translator: FULL NAME <EMAIL@ADDRESS>
Language-Team: LANGUAGE <LL@li.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
And here's an example of the output when the locale is set:
>>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
>>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
... creation_date=created, revision_date=revised,
... last_translator='John Doe <jd@example.com>',
... language_team='de_DE <de@example.com>')
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: 1990-08-03 12:00+0000
Last-Translator: John Doe <jd@example.com>
Language-Team: de_DE <de@example.com>
Plural-Forms: nplurals=2; plural=(n != 1)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
:type: `list`
""")
def num_plurals(self):
if self._num_plurals is None:
num = 2
if self.locale:
num = get_plural(self.locale)[0]
self._num_plurals = num
return self._num_plurals
num_plurals = property(num_plurals, doc="""\
The number of plurals used by the catalog or locale.
>>> Catalog(locale='en').num_plurals
2
>>> Catalog(locale='ga').num_plurals
3
:type: `int`
""")
def plural_expr(self):
if self._plural_expr is None:
expr = '(n != 1)'
if self.locale:
expr = get_plural(self.locale)[1]
self._plural_expr = expr
return self._plural_expr
plural_expr = property(plural_expr, doc="""\
The plural expression used by the catalog or locale.
>>> Catalog(locale='en').plural_expr
'(n != 1)'
>>> Catalog(locale='ga').plural_expr
'(n==1 ? 0 : n==2 ? 1 : 2)'
:type: `basestring`
""")
def plural_forms(self):
return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr)
plural_forms = property(plural_forms, doc="""\
Return the plural forms declaration for the locale.
>>> Catalog(locale='en').plural_forms
'nplurals=2; plural=(n != 1)'
>>> Catalog(locale='pt_BR').plural_forms
'nplurals=2; plural=(n > 1)'
:type: `str`
""")
def __contains__(self, id):
"""Return whether the catalog has a message with the specified ID."""
return self._key_for(id) in self._messages
def __len__(self):
"""The number of messages in the catalog.
This does not include the special ``msgid ""`` entry.
"""
return len(self._messages)
def __iter__(self):
"""Iterates through all the entries in the catalog, in the order they
were added, yielding a `Message` object for every entry.
:rtype: ``iterator``
"""
buf = []
for name, value in self.mime_headers:
buf.append('%s: %s' % (name, value))
flags = set()
if self.fuzzy:
flags |= set(['fuzzy'])
yield Message(u'', '\n'.join(buf), flags=flags)
for key in self._messages:
yield self._messages[key]
def __repr__(self):
locale = ''
if self.locale:
locale = ' %s' % self.locale
return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
def __delitem__(self, id):
"""Delete the message with the specified ID."""
key = self._key_for(id)
if key in self._messages:
del self._messages[key]
def __getitem__(self, id):
"""Return the message with the specified ID.
:param id: the message ID
:return: the message with the specified ID, or `None` if no such message
is in the catalog
:rtype: `Message`
"""
return self._messages.get(self._key_for(id))
def __setitem__(self, id, message):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo')
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
If a message with that ID is already in the catalog, it is updated
to include the locations and flags of the new message.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
>>> catalog[u'foo'].locations
[('main.py', 1)]
>>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
>>> catalog[u'foo'].locations
[('main.py', 1), ('utils.py', 5)]
:param id: the message ID
:param message: the `Message` object
"""
assert isinstance(message, Message), 'expected a Message object'
key = self._key_for(id)
current = self._messages.get(key)
if current:
if message.pluralizable and not current.pluralizable:
# The new message adds pluralization
current.id = message.id
current.string = message.string
current.locations = list(distinct(current.locations +
message.locations))
current.auto_comments = list(distinct(current.auto_comments +
message.auto_comments))
current.user_comments = list(distinct(current.user_comments +
message.user_comments))
current.flags |= message.flags
message = current
elif id == '':
# special treatment for the header message
headers = message_from_string(message.string.encode(self.charset))
self.mime_headers = headers.items()
self.header_comment = '\n'.join(['# %s' % comment for comment
in message.user_comments])
self.fuzzy = message.fuzzy
else:
if isinstance(id, (list, tuple)):
assert isinstance(message.string, (list, tuple)), \
'Expected sequence but got %s' % type(message.string)
self._messages[key] = message
def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog.add(u'foo')
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
This method simply constructs a `Message` object with the given
arguments and invokes `__setitem__` with that object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
        :param locations: a sequence of ``(filename, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments
:param user_comments: a sequence of user comments
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
"""
self[id] = Message(id, string, list(locations), flags, auto_comments,
user_comments, previous_id, lineno=lineno)
def check(self):
"""Run various validation checks on the translations in the catalog.
        For every message which fails validation, this method yields a
``(message, errors)`` tuple, where ``message`` is the `Message` object
and ``errors`` is a sequence of `TranslationError` objects.
:rtype: ``iterator``
"""
for message in self._messages.values():
errors = message.check(catalog=self)
if errors:
yield message, errors
def update(self, template, no_fuzzy_matching=False):
"""Update the catalog based on the given template catalog.
>>> from babel.messages import Catalog
>>> template = Catalog()
>>> template.add('green', locations=[('main.py', 99)])
>>> template.add('blue', locations=[('main.py', 100)])
>>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
>>> catalog = Catalog(locale='de_DE')
>>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
>>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
>>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
... locations=[('util.py', 38)])
>>> catalog.update(template)
>>> len(catalog)
3
>>> msg1 = catalog['green']
>>> msg1.string
>>> msg1.locations
[('main.py', 99)]
>>> msg2 = catalog['blue']
>>> msg2.string
u'blau'
>>> msg2.locations
[('main.py', 100)]
>>> msg3 = catalog['salad']
>>> msg3.string
(u'Salat', u'Salate')
>>> msg3.locations
[('util.py', 42)]
Messages that are in the catalog but not in the template are removed
from the main collection, but can still be accessed via the `obsolete`
member:
>>> 'head' in catalog
False
>>> catalog.obsolete.values()
[<Message 'head' (flags: [])>]
:param template: the reference catalog, usually read from a POT file
:param no_fuzzy_matching: whether to use fuzzy matching of message IDs
"""
messages = self._messages
remaining = messages.copy()
self._messages = odict()
# Prepare for fuzzy matching
fuzzy_candidates = []
if not no_fuzzy_matching:
fuzzy_candidates = [
self._key_for(msgid) for msgid in messages
if msgid and messages[msgid].string
]
fuzzy_matches = set()
def _merge(message, oldkey, newkey):
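            # (added comment) Merge the old translation into the new template
            # entry: when oldkey != newkey the match came from fuzzy matching,
            # so the old id is recorded as previous_id and the result is
            # flagged 'fuzzy'; plural strings are padded or truncated to fit.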
message = message.clone()
fuzzy = False
if oldkey != newkey:
fuzzy = True
fuzzy_matches.add(oldkey)
oldmsg = messages.get(oldkey)
if isinstance(oldmsg.id, basestring):
message.previous_id = [oldmsg.id]
else:
message.previous_id = list(oldmsg.id)
else:
oldmsg = remaining.pop(oldkey, None)
message.string = oldmsg.string
if isinstance(message.id, (list, tuple)):
if not isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = tuple(
[message.string] + ([u''] * (len(message.id) - 1))
)
elif len(message.string) != self.num_plurals:
fuzzy = True
message.string = tuple(message.string[:len(oldmsg.string)])
elif isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = message.string[0]
message.flags |= oldmsg.flags
if fuzzy:
message.flags |= set([u'fuzzy'])
self[message.id] = message
for message in template:
if message.id:
key = self._key_for(message.id)
if key in messages:
_merge(message, key, key)
else:
if no_fuzzy_matching is False:
# do some fuzzy matching with difflib
matches = get_close_matches(key.lower().strip(),
fuzzy_candidates, 1)
if matches:
_merge(message, matches[0], key)
continue
self[message.id] = message
self.obsolete = odict()
for msgid in remaining:
if no_fuzzy_matching or msgid not in fuzzy_matches:
self.obsolete[msgid] = remaining[msgid]
def _key_for(self, id):
"""The key for a message is just the singular ID even for pluralizable
messages.
"""
key = id
if isinstance(key, (list, tuple)):
key = id[0]
return key
|
slackhq/python-slackclient
|
refs/heads/main
|
tests/web/classes/test_objects.py
|
1
|
import copy
import unittest
from typing import Optional, List, Union
from slack.errors import SlackObjectFormationError
from slack.web.classes import JsonObject, JsonValidator
from slack.web.classes.objects import (
ChannelLink,
ConfirmObject,
DateLink,
EveryoneLink,
HereLink,
Link,
MarkdownTextObject,
ObjectLink,
Option,
OptionGroup,
PlainTextObject,
)
from . import STRING_301_CHARS, STRING_51_CHARS
class SimpleJsonObject(JsonObject):
attributes = {"some", "test", "keys"}
def __init__(self):
self.some = "this is"
self.test = "a test"
self.keys = "object"
@JsonValidator("some validation message")
def test_valid(self):
return len(self.test) <= 10
@JsonValidator("this should never fail")
def always_valid_test(self):
return True
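# (added note, inferred from the tests below) JsonValidator marks a method as a
# validation rule: the wrapped function gains a .validator attribute, and
# JsonObject's validate_json()/to_dict() raise SlackObjectFormationError with
# the decorator's message when any such rule returns False.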
class KeyValueObject(JsonObject):
attributes = {"name", "value"}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
):
self.name = name
self.value = value
class NestedObject(JsonObject):
attributes = {"initial", "options"}
def __init__(
self,
*,
initial: Union[dict, KeyValueObject],
options: List[Union[dict, KeyValueObject]],
):
self.initial = (
KeyValueObject(**initial) if isinstance(initial, dict) else initial
)
self.options = [
KeyValueObject(**o) if isinstance(o, dict) else o for o in options
]
class JsonObjectTests(unittest.TestCase):
def setUp(self) -> None:
self.good_test_object = SimpleJsonObject()
obj = SimpleJsonObject()
obj.test = STRING_51_CHARS
self.bad_test_object = obj
def test_json_formation(self):
self.assertDictEqual(
self.good_test_object.to_dict(),
{"some": "this is", "test": "a test", "keys": "object"},
)
def test_validate_json_fails(self):
with self.assertRaises(SlackObjectFormationError):
self.bad_test_object.validate_json()
def test_to_dict_performs_validation(self):
with self.assertRaises(SlackObjectFormationError):
self.bad_test_object.to_dict()
def test_get_non_null_attributes(self):
expected = {"name": "something"}
obj = KeyValueObject(name="something", value=None)
obj2 = copy.deepcopy(obj)
self.assertDictEqual(expected, obj.get_non_null_attributes())
self.assertEqual(str(obj2), str(obj))
def test_get_non_null_attributes_nested(self):
expected = {
"initial": {"name": "something"},
"options": [
{"name": "something"},
{"name": "message", "value": "That's great!"},
],
}
obj1 = KeyValueObject(name="something", value=None)
obj2 = KeyValueObject(name="message", value="That's great!")
options = [obj1, obj2]
nested = NestedObject(initial=obj1, options=options)
self.assertEqual(type(obj1), KeyValueObject)
self.assertTrue(hasattr(obj1, "value"))
self.assertEqual(type(nested.initial), KeyValueObject)
self.assertEqual(type(options[0]), KeyValueObject)
self.assertTrue(hasattr(options[0], "value"))
self.assertEqual(type(nested.options[0]), KeyValueObject)
self.assertTrue(hasattr(nested.options[0], "value"))
dict_value = nested.get_non_null_attributes()
self.assertDictEqual(expected, dict_value)
self.assertEqual(type(obj1), KeyValueObject)
self.assertTrue(hasattr(obj1, "value"))
self.assertEqual(type(nested.initial), KeyValueObject)
self.assertEqual(type(options[0]), KeyValueObject)
self.assertTrue(hasattr(options[0], "value"))
self.assertEqual(type(nested.options[0]), KeyValueObject)
self.assertTrue(hasattr(nested.options[0], "value"))
def test_get_non_null_attributes_nested_2(self):
expected = {
"initial": {"name": "something"},
"options": [
{"name": "something"},
{"name": "message", "value": "That's great!"},
],
}
nested = NestedObject(
initial={"name": "something"},
options=[
{"name": "something"},
{"name": "message", "value": "That's great!"},
],
)
self.assertDictEqual(expected, nested.get_non_null_attributes())
class JsonValidatorTests(unittest.TestCase):
def setUp(self) -> None:
self.validator_instance = JsonValidator("message")
self.class_instance = SimpleJsonObject()
def test_isolated_class(self):
def does_nothing():
return False
wrapped = self.validator_instance(does_nothing)
# noinspection PyUnresolvedReferences
self.assertTrue(wrapped.validator)
def test_wrapped_class(self):
for attribute in dir(self.class_instance):
attr = getattr(self.class_instance, attribute, None)
if attribute in ("test_valid", "always_valid_test"):
self.assertTrue(attr.validator)
else:
with self.assertRaises(AttributeError):
# noinspection PyStatementEffect
attr.validator
class LinkTests(unittest.TestCase):
def test_without_text(self):
link = Link(url="http://google.com", text="")
self.assertEqual(f"{link}", "<http://google.com>")
def test_with_text(self):
link = Link(url="http://google.com", text="google")
self.assertEqual(f"{link}", "<http://google.com|google>")
class DateLinkTests(unittest.TestCase):
def setUp(self) -> None:
self.epoch = 1234567890
def test_simple_formation(self):
datelink = DateLink(
date=self.epoch, date_format="{date_long}", fallback=f"{self.epoch}"
)
self.assertEqual(
f"{datelink}", f"<!date^{self.epoch}^{{date_long}}|{self.epoch}>"
)
def test_with_url(self):
datelink = DateLink(
date=self.epoch,
date_format="{date_long}",
link="http://google.com",
fallback=f"{self.epoch}",
)
self.assertEqual(
f"{datelink}",
f"<!date^{self.epoch}^{{date_long}}^http://google.com|{self.epoch}>",
)
class ObjectLinkTests(unittest.TestCase):
def test_channel(self):
objlink = ObjectLink(object_id="C12345")
self.assertEqual(f"{objlink}", "<#C12345>")
def test_group_message(self):
objlink = ObjectLink(object_id="G12345")
self.assertEqual(f"{objlink}", "<#G12345>")
def test_subteam_message(self):
objlink = ObjectLink(object_id="S12345")
self.assertEqual(f"{objlink}", "<!subteam^S12345>")
def test_with_label(self):
objlink = ObjectLink(object_id="C12345", text="abc")
self.assertEqual(f"{objlink}", "<#C12345|abc>")
def test_unknown_prefix(self):
objlink = ObjectLink(object_id="Z12345")
self.assertEqual(f"{objlink}", "<@Z12345>")
class SpecialLinkTests(unittest.TestCase):
def test_channel_link(self):
self.assertEqual(f"{ChannelLink()}", "<!channel|channel>")
def test_here_link(self):
self.assertEqual(f"{HereLink()}", "<!here|here>")
def test_everyone_link(self):
self.assertEqual(f"{EveryoneLink()}", "<!everyone|everyone>")
class PlainTextObjectTests(unittest.TestCase):
def test_basic_json(self):
self.assertDictEqual(
{"text": "some text", "type": "plain_text"},
PlainTextObject(text="some text").to_dict(),
)
self.assertDictEqual(
{"text": "some text", "emoji": False, "type": "plain_text"},
PlainTextObject(text="some text", emoji=False).to_dict(),
)
def test_from_string(self):
plaintext = PlainTextObject(text="some text", emoji=True)
self.assertDictEqual(
plaintext.to_dict(), PlainTextObject.direct_from_string("some text")
)
class MarkdownTextObjectTests(unittest.TestCase):
def test_basic_json(self):
self.assertDictEqual(
{"text": "some text", "type": "mrkdwn"},
MarkdownTextObject(text="some text").to_dict(),
)
self.assertDictEqual(
{"text": "some text", "verbatim": True, "type": "mrkdwn"},
MarkdownTextObject(text="some text", verbatim=True).to_dict(),
)
def test_from_string(self):
markdown = MarkdownTextObject(text="some text")
self.assertDictEqual(
markdown.to_dict(), MarkdownTextObject.direct_from_string("some text")
)
class ConfirmObjectTests(unittest.TestCase):
def test_basic_json(self):
expected = {
"confirm": {"emoji": True, "text": "Yes", "type": "plain_text"},
"deny": {"emoji": True, "text": "No", "type": "plain_text"},
"text": {"text": "are you sure?", "type": "mrkdwn"},
"title": {"emoji": True, "text": "some title", "type": "plain_text"},
}
simple_object = ConfirmObject(title="some title", text="are you sure?")
self.assertDictEqual(expected, simple_object.to_dict())
self.assertDictEqual(expected, simple_object.to_dict("block"))
self.assertDictEqual(
{
"text": "are you sure?",
"title": "some title",
"ok_text": "Okay",
"dismiss_text": "Cancel",
},
simple_object.to_dict("action"),
)
def test_confirm_overrides(self):
confirm = ConfirmObject(
title="some title",
text="are you sure?",
confirm="I'm really sure",
deny="Nevermind",
)
expected = {
"confirm": {"text": "I'm really sure", "type": "plain_text", "emoji": True},
"deny": {"text": "Nevermind", "type": "plain_text", "emoji": True},
"text": {"text": "are you sure?", "type": "mrkdwn"},
"title": {"text": "some title", "type": "plain_text", "emoji": True},
}
self.assertDictEqual(expected, confirm.to_dict())
self.assertDictEqual(expected, confirm.to_dict("block"))
self.assertDictEqual(
{
"text": "are you sure?",
"title": "some title",
"ok_text": "I'm really sure",
"dismiss_text": "Nevermind",
},
confirm.to_dict("action"),
)
def test_passing_text_objects(self):
direct_construction = ConfirmObject(title="title", text="Are you sure?")
mrkdwn = MarkdownTextObject(text="Are you sure?")
preconstructed = ConfirmObject(title="title", text=mrkdwn)
self.assertDictEqual(direct_construction.to_dict(), preconstructed.to_dict())
plaintext = PlainTextObject(text="Are you sure?", emoji=False)
passed_plaintext = ConfirmObject(title="title", text=plaintext)
self.assertDictEqual(
{
"confirm": {"emoji": True, "text": "Yes", "type": "plain_text"},
"deny": {"emoji": True, "text": "No", "type": "plain_text"},
"text": {"emoji": False, "text": "Are you sure?", "type": "plain_text"},
"title": {"emoji": True, "text": "title", "type": "plain_text"},
},
passed_plaintext.to_dict(),
)
def test_title_length(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject(title=STRING_301_CHARS, text="Are you sure?").to_dict()
def test_text_length(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject(title="title", text=STRING_301_CHARS).to_dict()
def test_text_length_with_object(self):
with self.assertRaises(SlackObjectFormationError):
plaintext = PlainTextObject(text=STRING_301_CHARS)
ConfirmObject(title="title", text=plaintext).to_dict()
with self.assertRaises(SlackObjectFormationError):
markdown = MarkdownTextObject(text=STRING_301_CHARS)
ConfirmObject(title="title", text=markdown).to_dict()
def test_confirm_length(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject(
title="title", text="Are you sure?", confirm=STRING_51_CHARS
).to_dict()
def test_deny_length(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject(
title="title", text="Are you sure?", deny=STRING_51_CHARS
).to_dict()
class OptionTests(unittest.TestCase):
def setUp(self) -> None:
self.common = Option(label="an option", value="option_1")
def test_block_style_json(self):
expected = {
"text": {"type": "plain_text", "text": "an option", "emoji": True},
"value": "option_1",
}
self.assertDictEqual(expected, self.common.to_dict("block"))
self.assertDictEqual(expected, self.common.to_dict())
def test_dialog_style_json(self):
expected = {"label": "an option", "value": "option_1"}
self.assertDictEqual(expected, self.common.to_dict("dialog"))
def test_action_style_json(self):
expected = {"text": "an option", "value": "option_1"}
self.assertDictEqual(expected, self.common.to_dict("action"))
def test_from_single_value(self):
option = Option(label="option_1", value="option_1")
self.assertDictEqual(
option.to_dict("text"),
option.from_single_value("option_1").to_dict("text"),
)
def test_label_length(self):
with self.assertRaises(SlackObjectFormationError):
Option(label=STRING_301_CHARS, value="option_1").to_dict("text")
def test_value_length(self):
with self.assertRaises(SlackObjectFormationError):
Option(label="option_1", value=STRING_301_CHARS).to_dict("text")
class OptionGroupTests(unittest.TestCase):
maxDiff = None
def setUp(self) -> None:
self.common_options = [
Option.from_single_value("one"),
Option.from_single_value("two"),
Option.from_single_value("three"),
]
self.common = OptionGroup(label="an option", options=self.common_options)
def test_block_style_json(self):
expected = {
"label": {"emoji": True, "text": "an option", "type": "plain_text"},
"options": [
{
"text": {"emoji": True, "text": "one", "type": "plain_text"},
"value": "one",
},
{
"text": {"emoji": True, "text": "two", "type": "plain_text"},
"value": "two",
},
{
"text": {"emoji": True, "text": "three", "type": "plain_text"},
"value": "three",
},
],
}
self.assertDictEqual(expected, self.common.to_dict("block"))
self.assertDictEqual(expected, self.common.to_dict())
def test_dialog_style_json(self):
self.assertDictEqual(
{
"label": "an option",
"options": [
{"label": "one", "value": "one"},
{"label": "two", "value": "two"},
{"label": "three", "value": "three"},
],
},
self.common.to_dict("dialog"),
)
def test_action_style_json(self):
self.assertDictEqual(
{
"text": "an option",
"options": [
{"text": "one", "value": "one"},
{"text": "two", "value": "two"},
{"text": "three", "value": "three"},
],
},
self.common.to_dict("action"),
)
def test_label_length(self):
with self.assertRaises(SlackObjectFormationError):
OptionGroup(label=STRING_301_CHARS, options=self.common_options).to_dict(
"text"
)
def test_options_length(self):
with self.assertRaises(SlackObjectFormationError):
OptionGroup(label="option_group", options=self.common_options * 34).to_dict(
"text"
)
def test_confirm_style(self):
obj = ConfirmObject.parse(
{
"title": {"type": "plain_text", "text": "Are you sure?"},
"text": {
"type": "mrkdwn",
"text": "Wouldn't you prefer a good game of _chess_?",
},
"confirm": {"type": "plain_text", "text": "Do it"},
"deny": {"type": "plain_text", "text": "Stop, I've changed my mind!"},
"style": "primary",
}
)
obj.validate_json()
self.assertEqual("primary", obj.style)
def test_confirm_style_validation(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject.parse(
{
"title": {"type": "plain_text", "text": "Are you sure?"},
"text": {
"type": "mrkdwn",
"text": "Wouldn't you prefer a good game of _chess_?",
},
"confirm": {"type": "plain_text", "text": "Do it"},
"deny": {
"type": "plain_text",
"text": "Stop, I've changed my mind!",
},
"style": "something-wrong",
}
).validate_json()
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
cpython/276_test_itertools.py
|
46
|
import unittest
from test import test_support
from itertools import *
from weakref import proxy
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def next(self):
raise StopIteration
def take(n, seq):
    'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
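# Illustrative usage (added; not in the original test module):
#     take(3, count()) == [0, 1, 2]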
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
class TestBasicOps(unittest.TestCase):
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
self.assertEqual(list(combinations('abc', 32)), []) # r > n
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) // fact(r) // fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
                self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
self.assertEqual(list(cwr('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) // fact(r) // fact(n-1)
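        # Sanity note (added): numcombs(3, 2) == fact(4)//fact(2)//fact(2) == 6,
        # matching the six 2-combinations with replacement of 'ABC' asserted above.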
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) // fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
    @test_support.impl_detail("tuple reuse is CPython specific")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) // fact(r) // fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) // fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) // fact(r) // fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, filter(set(cwr).__contains__, perm)) # comb: perm that is a cwr
self.assertEqual(comb, filter(set(perm).__contains__, cwr)) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
        self.assertRaises(TypeError, compress, range(6), None, None) # too many args
def test_count(self):
self.assertEqual(zip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(zip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, zip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)), range(maxsize-5, maxsize+5))
self.assertEqual(list(islice(count(-maxsize-5), 10)), range(-maxsize-5, -maxsize+5))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
c.next()
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
c.next()
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(c.next(), -8)
for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i)).replace('L', '')
r2 = 'count(%r)'.__mod__(i).replace('L', '')
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, sys.maxint-5, sys.maxint+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
self.assertEqual(next(pickle.loads(pickle.dumps(c))), value)
def test_count_with_stride(self):
self.assertEqual(zip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(zip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(zip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
c.next()
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
c.next()
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
c.next()
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
for j in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 1, 10, sys.maxint-5, sys.maxint+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i, j)).replace('L', '')
if j == 1:
r2 = ('count(%r)' % i).replace('L', '')
else:
r2 = ('count(%r, %r)' % (i, j)).replace('L', '')
self.assertEqual(r1, r2)
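# Illustrative note (added): a negative step counts down, e.g.
# take(4, count(10, -2)) -> [10, 8, 6, 4], while the count(2, 0) case
# above shows a zero step repeating the start value forever.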
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, lambda r:r[0]):
for ik, ig in groupby(g, lambda r:r[2]):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, lambda r:r[0])]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
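# Illustrative note (added): sorted('abracadabra') is 'aaaaabbcdrr', so
# grouping the sorted string reproduces the uniq / uniq -c behaviour
# exercised above.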
# iter.next failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.next failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.next failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __cmp__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __cmp__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __cmp__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_ifilter(self):
self.assertEqual(list(ifilter(isEven, range(6))), [0,2,4])
self.assertEqual(list(ifilter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(ifilter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, ifilter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, ifilter)
self.assertRaises(TypeError, ifilter, lambda x:x)
self.assertRaises(TypeError, ifilter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilter, isEven, 3)
self.assertRaises(TypeError, ifilter(range(6), range(6)).next)
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(ifilterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(ifilterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, ifilterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, ifilterfalse)
self.assertRaises(TypeError, ifilterfalse, lambda x:x)
self.assertRaises(TypeError, ifilterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilterfalse, isEven, 3)
self.assertRaises(TypeError, ifilterfalse(range(6), range(6)).next)
def test_izip(self):
ans = [(x,y) for x, y in izip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(izip('abc', range(6))), zip('abc', range(6)))
self.assertEqual(list(izip('abcdef', range(3))), zip('abcdef', range(3)))
self.assertEqual(take(3,izip('abcdef', count())), zip('abcdef', range(3)))
self.assertEqual(list(izip('abcdef')), zip('abcdef'))
self.assertEqual(list(izip()), zip())
self.assertRaises(TypeError, izip, 3)
self.assertRaises(TypeError, izip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip('abc', 'def')],
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_tuple_reuse(self):
ids = map(id, izip('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_iziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
# target = map(None, *args) <- this raises a py3k warning
# this is the replacement:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(izip_longest(*args)), target)
self.assertEqual(list(izip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(izip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,izip_longest('abcdef', count())), zip('abcdef', range(3))) # take 3 from infinite input
self.assertEqual(list(izip_longest()), zip())
self.assertEqual(list(izip_longest([])), zip([]))
self.assertEqual(list(izip_longest('abcdef')), zip('abcdef'))
self.assertEqual(list(izip_longest('abc', 'defg', **{})),
zip(list('abc') + [None], 'defg')) # empty keyword dict
self.assertRaises(TypeError, izip_longest, 3)
self.assertRaises(TypeError, izip_longest, range(3), 3)
for stmt in [
"izip_longest('abc', fv=1)",
"izip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_longest_tuple_reuse(self):
ids = map(id, izip_longest('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip_longest('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_bug_7244(self):
class Repeater(object):
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def next(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
# Formerly this code would fail in debug mode with an
# undetected error and a premature StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in izip_longest(r1, r2, fillvalue=0):
with test_support.captured_output('stdout'):
print (i, j)
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost and iteration
# would simply stop early as if the input had ended normally
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = izip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = map(tuple, args) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
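# Sanity check (illustrative, mirrors the equivalence asserted below):
# list(product2('ab', range(2))) == [('a', 0), ('a', 1), ('b', 0), ('b', 1)]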
argtypes = ['', 'abc', '', xrange(0), xrange(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(zip(xrange(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
def test_imap(self):
self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(imap(None, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(imap(None, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,imap(None, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(imap(operator.pow, [])), [])
self.assertRaises(TypeError, imap)
self.assertRaises(TypeError, imap, operator.neg)
self.assertRaises(TypeError, imap(10, range(5)).next)
self.assertRaises(ValueError, imap(errfunc, [4], [5]).next)
self.assertRaises(TypeError, imap(onearg, [4], [5]).next)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, izip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, starmap(10, [(4,5)]).next)
self.assertRaises(ValueError, starmap(errfunc, [(4,5)]).next)
self.assertRaises(TypeError, starmap(onearg, [(4,5)]).next)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*args))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), (10, 100, 3)),
((10, 110), (10, 100)),
((110,), (100,))
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*tgtargs))
# Test stop=None
self.assertEqual(list(islice(xrange(10), None)), range(10))
self.assertEqual(list(islice(xrange(10), None, None)), range(10))
self.assertEqual(list(islice(xrange(10), None, None, None)), range(10))
self.assertEqual(list(islice(xrange(10), 2, None)), range(2, 10))
self.assertEqual(list(islice(xrange(10), 1, None, 2)), range(1, 10, 2))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), range(3))
self.assertEqual(list(it), range(3, 10))
# Test invalid arguments
self.assertRaises(TypeError, islice, xrange(10))
self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
self.assertRaises(ValueError, islice, xrange(10), -5, 10, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0)
self.assertRaises(ValueError, islice, xrange(10), 'a')
self.assertRaises(ValueError, islice, xrange(10), 'a', 1)
self.assertRaises(ValueError, islice, xrange(10), 1, 'a')
self.assertRaises(ValueError, islice, xrange(10), 'a', 1, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
# Issue #10323: leave the underlying iterator in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
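# Illustrative note (added): islice(c, 1, 3, 50) skips item 0, yields
# item 1, then consumes item 2 on reaching the stop boundary, leaving
# the underlying count() positioned to yield 3.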
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, takewhile(10, [(4,5)]).next)
self.assertRaises(ValueError, takewhile(errfunc, [(4,5)]).next)
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, t.next)
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, dropwhile(10, [(4,5)]).next)
self.assertRaises(ValueError, dropwhile(errfunc, [(4,5)]).next)
def test_tee(self):
n = 200
def irange(n):
for i in xrange(n):
yield i
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(zip(a,b), zip(range(n),range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), range(n))
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del a
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del b
self.assertEqual(list(a), range(100, n))
for j in xrange(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = its[i].next()
lists[i].append(value)
self.assertEqual(lists[0], range(n))
self.assertEqual(lists[1], range(n))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(xrange(2000), 3)
for i in xrange(100):
self.assertEqual(a.next(), i)
self.assertEqual(list(b), range(2000))
self.assertEqual([c.next(), c.next()], range(2))
self.assertEqual(list(a), range(100,2000))
self.assertEqual(list(c), range(2,2000))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in xrange(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual(map(list, result), [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(xrange(10))
p = proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
forward, backward = tee(repeat(None, 20000000))
any(forward) # exhaust the iterator
del backward
def test_StopIteration(self):
self.assertRaises(StopIteration, izip().next)
for f in (chain, cycle, izip, groupby):
self.assertRaises(StopIteration, f([]).next)
self.assertRaises(StopIteration, f(StopNow()).next)
self.assertRaises(StopIteration, islice([], None).next)
self.assertRaises(StopIteration, islice(StopNow(), None).next)
p, q = tee([])
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
p, q = tee(StopNow())
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
self.assertRaises(StopIteration, repeat(None, 0).next)
for f in (ifilter, ifilterfalse, imap, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, f(lambda x:x, []).next)
self.assertRaises(StopIteration, f(lambda x:x, StopNow()).next)
class TestExamples(unittest.TestCase):
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_ifilter(self):
self.assertEqual(list(ifilter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_imap(self):
self.assertEqual(list(imap(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_izip(self):
self.assertEqual(list(izip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_izip_longest(self):
self.assertEqual(list(izip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
iterator.next()
del container, iterator
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(xrange(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_ifilter(self):
a = []
self.makecycle(ifilter(lambda x:True, [a]*2), a)
def test_ifilterfalse(self):
a = []
self.makecycle(ifilterfalse(lambda x:False, a), a)
def test_izip(self):
a = []
self.makecycle(izip([a]*2, [a]*3), a)
def test_izip_longest(self):
a = []
self.makecycle(izip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(izip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_imap(self):
a = []
self.makecycle(imap(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, list, compress(N(s), repeat(1)))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, list, cycle(N(s)))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, list, groupby(N(s)))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_ifilter(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilter(isEven, g(s))), filter(isEven, g(s)))
self.assertRaises(TypeError, ifilter, isEven, X(s))
self.assertRaises(TypeError, list, ifilter(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilter(isEven, E(s)))
def test_ifilterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilterfalse(isEven, g(s))), filter(isOdd, g(s)))
self.assertRaises(TypeError, ifilterfalse, isEven, X(s))
self.assertRaises(TypeError, list, ifilterfalse(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilterfalse(isEven, E(s)))
def test_izip(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip(g(s))), zip(g(s)))
self.assertEqual(list(izip(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip, X(s))
self.assertRaises(TypeError, list, izip(N(s)))
self.assertRaises(ZeroDivisionError, list, izip(E(s)))
def test_iziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip_longest(g(s))), zip(g(s)))
self.assertEqual(list(izip_longest(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip_longest, X(s))
self.assertRaises(TypeError, list, izip_longest(N(s)))
self.assertRaises(ZeroDivisionError, list, izip_longest(E(s)))
def test_imap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(imap(onearg, g(s))), map(onearg, g(s)))
self.assertEqual(list(imap(operator.pow, g(s), g(s))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, imap, onearg, X(s))
self.assertRaises(TypeError, list, imap(onearg, N(s)))
self.assertRaises(ZeroDivisionError, list, imap(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, list, islice(N(s), 10))
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = zip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, list, starmap(operator.pow, N(ss)))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, list, takewhile(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, list, dropwhile(isOdd, N(s)))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, list, tee(N(s))[0])
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
from test.test_iterlen import len
self.assertEqual(len(repeat(None, 50)), 50)
self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(z.next())
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = imap(g, items)
z = izip(*[gen]*len(tuple1))
z.next()
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, izip, ifilter, ifilterfalse, chain, imap,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError, err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in izip(count(1200), amounts):
... print 'Check %d is for $%.2f' % (checknum, amount)
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in imap(operator.pow, xrange(1,4), repeat(3)):
... print cube
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print name.title()
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.iteritems()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print k, map(itemgetter(0), g)
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print map(operator.itemgetter(1), g)
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return izip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return imap(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(imap(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(imap(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... for elem in b:
... break
... return izip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return izip_longest(fillvalue=fillvalue, *args)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).next for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return imap(next, imap(itemgetter(1), groupby(iterable, key)))
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> quantify(xrange(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, imap(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
# doctest the examples in the library reference
test_support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
vovojh/gem5
|
refs/heads/master
|
tests/configs/realview64-switcheroo-o3.py
|
33
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
import switcheroo
root = LinuxArmFSSwitcheroo(
machine_type='VExpress_EMM64',
mem_class=DDR3_1600_x64,
cpu_classes=(DerivO3CPU, DerivO3CPU)
).create_root()
# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
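# Note (added, assuming the switcheroo helper in tests/configs): run_test
# periodically switches the running simulation between the two DerivO3CPU
# instances configured above while the Linux workload executes.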
|
samthor/intellij-community
|
refs/heads/master
|
python/testData/refactoring/unwrap/ifInWhileUnwrap_before.py
|
80
|
while False:
if True:
# comment
x = 1<caret>
y = 2
|
stephenjoe1/gaap-roadmaps
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py
|
1509
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
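# Illustrative example (added): on a 64-bit OS, a non-Express 2013 install
# makes SetupScript('x86') return the x64->x86 cross form,
# [<path>/VC/vcvarsall.bat, 'amd64_x86'], per the branch above.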
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
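# Illustrative note (added): under 32-bit Python on a 64-bit OS the
# Sysnative attempt avoids WoW64 redirection; under 64-bit Python that
# virtual directory does not exist, ENOENT is raised, and the System32
# copy is queried instead.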
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
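# Illustrative usage (added; the key mirrors lookups performed below):
# install_dir = _RegistryGetValue(
#     r'HKLM\Software\Microsoft\VisualStudio\9.0', 'InstallDir')
# yields the InstallDir string, or None if the key or value is absent.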
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Detection is based on the registry and a quick check whether devenv.exe exists.
Only versions 8-14 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
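# Illustrative sketch (not part of gyp; the helper name is hypothetical) of
# how the call above turns a requested version string into the registry
# versions to probe plus an express-edition flag:
def _versions_to_probe(version):
  version_map = {
      'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
      '2013': ('12.0',),
      '2013e': ('12.0',),
  }
  return version_map[str(version)], 'e' in str(version)
# _versions_to_probe('2013e') == (('12.0',), True): probe the 12.0 registry
# keys and force express-edition detection, mirroring
# _DetectVisualStudioVersions(version_map[version], 'e' in version).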
|
josircg/raizcidadanista
|
refs/heads/master
|
raizcidadanista/cadastro/migrations/0011_auto__add_field_membro_contrib_tipo__add_field_membro_contrib_valor.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Membro.contrib_tipo'
db.add_column('cadastro_membro', 'contrib_tipo',
self.gf('django.db.models.fields.CharField')(default='N', max_length=1),
keep_default=False)
# Adding field 'Membro.contrib_valor'
db.add_column('cadastro_membro', 'contrib_valor',
self.gf('utils.fields.BRDecimalField')(default=0, max_digits=7, decimal_places=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Membro.contrib_tipo'
db.delete_column('cadastro_membro', 'contrib_tipo')
# Deleting field 'Membro.contrib_valor'
db.delete_column('cadastro_membro', 'contrib_valor')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cadastro.campanha': {
'Meta': {'ordering': "('dtenvio',)", 'object_name': 'Campanha'},
'assunto': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dtenvio': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lista': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Lista']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'qtde_envio': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'qtde_erros': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'qtde_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'template': ('django.db.models.fields.TextField', [], {})
},
'cadastro.circulo': {
'Meta': {'object_name': 'Circulo'},
'descricao': ('django.db.models.fields.TextField', [], {}),
'dtcadastro': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'grupo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Grupo']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagem': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'municipio': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'oficial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site_externo': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '1'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'uf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['municipios.UF']", 'null': 'True', 'blank': 'True'})
},
'cadastro.circuloevento': {
'Meta': {'object_name': 'CirculoEvento'},
'circulo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Circulo']"}),
'dt_evento': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local': ('django.db.models.fields.TextField', [], {}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'privado': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'cadastro.circulomembro': {
'Meta': {'object_name': 'CirculoMembro'},
'administrador': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'circulo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Circulo']"}),
'grupousuario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.GrupoUsuario']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'membro': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Membro']"})
},
'cadastro.lista': {
'Meta': {'object_name': 'Lista'},
'analytics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'seo': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'A'", 'max_length': '1'}),
'validade': ('django.db.models.fields.DateField', [], {})
},
'cadastro.listacadastro': {
'Meta': {'ordering': "('lista', 'pessoa__nome')", 'object_name': 'ListaCadastro'},
'dtinclusao': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lista': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Lista']"}),
'pessoa': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Pessoa']"})
},
'cadastro.membro': {
'Meta': {'ordering': "['nome']", 'object_name': 'Membro', '_ormbases': ['cadastro.Pessoa']},
'aprovador': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'membro_aprovador'", 'null': 'True', 'to': "orm['auth.User']"}),
'atividade_profissional': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'contrib_tipo': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'contrib_valor': ('utils.fields.BRDecimalField', [], {'default': '0', 'max_digits': '7', 'decimal_places': '2'}),
'cpf': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'dtnascimento': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'facebook_access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'filiacao_partidaria': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'filiado': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'municipio_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'nome_da_mae': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'pessoa_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cadastro.Pessoa']", 'unique': 'True', 'primary_key': 'True'}),
'rg': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'secao_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'titulo_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'uf_eleitoral': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['municipios.UF']", 'null': 'True', 'blank': 'True'}),
'usuario': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'membro'", 'null': 'True', 'to': "orm['auth.User']"}),
'zona_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
'cadastro.pessoa': {
'Meta': {'ordering': "['nome']", 'object_name': 'Pessoa'},
'celular': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'dtcadastro': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'residencial': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'sexo': ('django.db.models.fields.CharField', [], {'default': "'O'", 'max_length': '1'}),
'status_email': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'uf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['municipios.UF']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forum.grupo': {
'Meta': {'object_name': 'Grupo'},
'descricao': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'forum.grupousuario': {
'Meta': {'unique_together': "(('grupo', 'usuario'),)", 'object_name': 'GrupoUsuario'},
'grupo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Grupo']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'municipios.uf': {
'Meta': {'ordering': "(u'nome',)", 'object_name': 'UF'},
'id_ibge': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regiao': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'uf': ('django.db.models.fields.CharField', [], {'max_length': '2'})
}
}
complete_apps = ['cadastro']
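# Usage sketch (assuming a standard South setup): this migration is applied
# or rolled back from the project root with manage.py:
#
#   python manage.py migrate cadastro 0011   # runs forwards()
#   python manage.py migrate cadastro 0010   # runs backwards()
#
# The model-side equivalent of forwards(), with the defaults above, would be
# roughly:
#
#   contrib_tipo = models.CharField(max_length=1, default='N')
#   contrib_valor = BRDecimalField(max_digits=7, decimal_places=2, default=0)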
|
BoltzmannBrain/nupic
|
refs/heads/master
|
tests/unit/nupic/regions/regions_spec_test.py
|
35
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest2 as unittest
from nupic.regions.Spec import (Spec,
InputSpec,
OutputSpec,
ParameterSpec,
CommandSpec)
class RegionSpecTest(unittest.TestCase):
def testInvalidInputSpec(self):
with self.assertRaises(Exception):
_x = InputSpec()
with self.assertRaises(Exception):
_x = InputSpec(dataType="int", count=-4)
with self.assertRaises(Exception):
_x = InputSpec(description=555, dataType="int", count=4)
def testValidInputSpec(self):
try:
x = InputSpec(dataType="int", count=4)
x.invariant()
x = InputSpec(description="description",
dataType="int",
count=3,
required=True,
regionLevel=True,
isDefaultInput=True,
requireSplitterMap=True)
x.invariant()
except:
self.fail("Got unexpected exception")
def testInvalidOutputSpec(self):
with self.assertRaises(Exception):
_x = OutputSpec()
with self.assertRaises(Exception):
_x = OutputSpec(dataType="int", count=4, isDefaultOutput="Sure")
with self.assertRaises(Exception):
_x = OutputSpec(description=555, dataType="int", count=4)
def testValidOutputSpec(self):
try:
x = OutputSpec(dataType="int", count=4)
x.invariant()
x = OutputSpec(description="description",
dataType="int",
count=3,
regionLevel=True,
isDefaultOutput=True)
x.invariant()
except:
self.fail("Got unexpected exception")
def testInvalidParameterSpec(self):
with self.assertRaises(Exception):
_x = ParameterSpec()
with self.assertRaises(Exception):
_x = ParameterSpec(dataType="int", count=4, defaultValue="not an int")
with self.assertRaises(Exception):
_x = ParameterSpec(description=555, dataType="int")
with self.assertRaises(Exception):
_x = ParameterSpec(dataType="int",
accessMode="no such mode")
with self.assertRaises(Exception):
_x = ParameterSpec(dataType="int",
defaultValue=5,
accessMode="Read")
def testValidParameterSpec(self):
try:
x = ParameterSpec(dataType="int", accessMode="Read")
x.invariant()
x = ParameterSpec(description="description",
dataType="int",
count=3,
defaultValue=-6,
accessMode="Create")
x.invariant()
except:
self.fail("Got unexpected exception")
@unittest.skip("(#616) Disabled for now,"
"to add error checking in commandSpec later.")
def testInvalidCommandSpec(self):
with self.assertRaises(Exception):
_x = CommandSpec()
with self.assertRaises(Exception):
_x = CommandSpec(description=None)
with self.assertRaises(Exception):
_x = CommandSpec(description=3)
def testValidCommandSpec(self):
try:
x = CommandSpec("")
x.invariant()
x = CommandSpec(description="")
x.invariant()
x = CommandSpec(description="this is a command")
x.invariant()
except:
self.fail("Got unexpected exception")
@unittest.skip("(#617) Disabled for now,"
"to add error checking in Spec initializer later.")
def testInvalidSpec(self):
with self.assertRaises(Exception):
_x = Spec()
with self.assertRaises(Exception):
_x = Spec(description=3)
with self.assertRaises(Exception):
_x = Spec(description="123", singleNodeOnly=3)
def testValidSpec(self):
try:
x = Spec(description="123", singleNodeOnly=True)
x.invariant()
x = Spec(description="123", singleNodeOnly=True)
x.commands = dict(command1=CommandSpec("A command"),
command2=CommandSpec("Another command"))
x.invariant()
except:
self.fail("Got unexpected exception")
def testSpec_toDict(self):
x = Spec(description="123", singleNodeOnly=True)
d = x.toDict()
self.assertEqual(d["description"], "123")
self.assertTrue(d["singleNodeOnly"])
self.assertTrue(d["inputs"] == d["outputs"]
== d["parameters"] == d["commands"] == {})
x.inputs = dict(i1=InputSpec(dataType="int"),
i2=InputSpec(dataType="str", isDefaultInput=True))
x.outputs = dict(o=OutputSpec(dataType="float", count=8))
x.parameters = dict(p=ParameterSpec(description="param",
dataType="float",
defaultValue=3.14,
accessMode="Create"))
d = x.toDict()
inputs = d["inputs"]
self.assertEqual(len(inputs), 2)
i1 = inputs["i1"]
self.assertEqual(i1["count"], 1)
self.assertFalse(i1["isDefaultInput"])
self.assertEqual(i1["description"], "")
self.assertEqual(i1["dataType"], "int")
self.assertFalse(i1["required"])
self.assertTrue(i1["requireSplitterMap"])
self.assertFalse(i1["regionLevel"])
i2 = inputs["i2"]
self.assertEqual(i2["count"], 1)
self.assertTrue(i2["isDefaultInput"])
self.assertEqual(i2["description"], "")
self.assertEqual(i2["dataType"], "str")
self.assertFalse(i2["required"])
self.assertTrue(i2["requireSplitterMap"])
self.assertFalse(i2["regionLevel"])
outputs = d["outputs"]
self.assertEqual(len(outputs), 1)
o = outputs["o"]
self.assertEqual(o["count"], 8)
self.assertFalse(o["isDefaultOutput"])
self.assertEqual(o["description"], "")
self.assertEqual(o["dataType"], "float")
self.assertFalse(o["regionLevel"])
parameters = d["parameters"]
self.assertEqual(len(parameters), 1)
p = parameters["p"]
self.assertEqual(p["description"], "param")
self.assertEqual(p["dataType"], "float")
self.assertEqual(p["accessMode"], "Create")
self.assertEqual(p["defaultValue"], 3.14)
self.assertEqual(p["count"], 1)
self.assertEqual(p["constraints"], "")
self.assertEqual(d["commands"], {})
if __name__ == "__main__":
unittest.main()
|
justinlulejian/fah-gae
|
refs/heads/master
|
libs/flask/_compat.py
|
783
|
# -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
implements_to_string = _identity
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
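# Usage sketch (illustrative names, not part of flask): the temporary class
# returned above never shows up in the final MRO.
if __name__ == '__main__':
    class _UpperMeta(type):
        def __new__(mcs, name, bases, d):
            d['tag'] = name.upper()
            return type.__new__(mcs, name, bases, d)
    class Demo(with_metaclass(_UpperMeta, object)):
        pass
    assert Demo.tag == 'DEMO'
    assert Demo.__mro__ == (Demo, object)  # no 'temporary_class' in the MRO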
|
thebarbershopper/Empire
|
refs/heads/master
|
lib/modules/situational_awareness/network/sharefinder.py
|
10
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-ShareFinder',
'Author': ['@harmj0y'],
'Description': ('Finds shares on machines in the domain.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/PowerShellEmpire/PowerTools/tree/master/PowerView'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Hosts' : {
'Description' : 'Hosts to enumerate.',
'Required' : False,
'Value' : ''
},
'HostList' : {
'Description' : 'Hostlist to enumerate.',
'Required' : False,
'Value' : ''
},
'HostFilter' : {
'Description' : 'Host filter name to query AD for, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'NoPing' : {
'Description' : 'Don\'t ping each host to ensure it\'s up before enumerating.',
'Required' : False,
'Value' : ''
},
'CheckShareAccess' : {
'Description' : 'Switch. Only display found shares that the local user has access to.',
'Required' : False,
'Value' : ''
},
'Delay' : {
'Description' : 'Delay between enumerating hosts, defaults to 0.',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'Domain to enumerate for hosts.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-ShareFinder.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += "Invoke-ShareFinder "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += '| Out-String | %{$_ + \"`n\"};"`nInvoke-ShareFinder completed"'
return script
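# Illustrative sketch (hypothetical helper, not part of Empire) of the
# option-to-argument loop above: "true" values become bare PowerShell
# switches, everything else becomes "-Name Value" pairs.
def _build_args(options):
    script = ""
    for option, values in options.iteritems():
        if option.lower() != "agent" and values['Value'] != '':
            if values['Value'].lower() == "true":
                script += " -" + str(option)
            else:
                script += " -" + str(option) + " " + str(values['Value'])
    return script
# _build_args({'NoPing': {'Value': 'true'}, 'Delay': {'Value': '5'}})
# could yield " -NoPing -Delay 5" (ordering depends on dict iteration).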
|
cybercarnage/mongo-web-shell
|
refs/heads/master
|
mongows/mws/util.py
|
7
|
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymongo.collection import Collection
from pymongo.database import Database
import mongows
from mongows.mws.db import get_db
from flask import current_app
from mongows.mws.MWSServerError import MWSServerError
def get_internal_coll_name(res_id, collection_name):
return '%s%s' % (res_id, collection_name)
def get_collection_names(res_id):
"""
Get the collection names associated with a given resource id. Should not be
called from within a 'with UseResId(res_id)' block.
"""
return get_db()[mongows.mws.views.CLIENTS_COLLECTION].find(
{'res_id': res_id}, {'collections': 1, '_id': 0}
)[0]['collections']
class UseResId:
def __init__(self, res_id):
self.res_id = str(res_id)
self.id_length = len(self.res_id)
self.client_collection = get_db()[mongows.mws.views.CLIENTS_COLLECTION]
def __enter__(self):
self.old_get_attr = Database.__getattr__
self.old_drop_collection = Database.drop_collection
def __getattr__(db, name):
if not (name.startswith("oplog.$main") or name.startswith("$cmd")):
name = '%s%s' % (self.res_id, name)
return self.old_get_attr(db, name)
def drop_collection(db, name):
if isinstance(name, Collection):
name = name.name
name = '%s%s' % (self.res_id, name)
self.remove_client_collection(name)
self.old_drop_collection(db, name)
Database.__getattr__ = __getattr__
Database.drop_collection = drop_collection
self.old_insert = Collection.insert
self.old_update = Collection.update
self.old_drop = Collection.drop
def insert(coll, *args, **kwargs):
self.insert_client_collection(coll.name)
self.old_insert(coll, *args, **kwargs)
def update(coll, *args, **kwargs):
if kwargs.get('upsert', False):
self.insert_client_collection(coll.name)
self.old_update(coll, *args, **kwargs)
def drop(coll):
self.remove_client_collection(coll.name)
# Call through to db.drop, making sure it doesn't re-mangle
self.old_drop_collection(coll.database, coll.name)
Collection.insert = insert
Collection.update = update
Collection.drop = drop
def __exit__(self, exc_type, exc_val, exc_tb):
Database.__getattr__ = self.old_get_attr
Database.drop_collection = self.old_drop_collection
Collection.insert = self.old_insert
Collection.update = self.old_update
Collection.drop = self.old_drop
def insert_client_collection(self, name):
if name.startswith(self.res_id):
name = name[self.id_length:]
limit = current_app.config.get('QUOTA_NUM_COLLECTIONS')
if limit is not None:
data = self.client_collection.find_one(
{'res_id': self.res_id},
{'collections': 1}
)
            if len(set(data['collections']).union([name])) > limit:
raise MWSServerError(429, 'Max number of collections exceeded')
self.client_collection.update(
{'res_id': self.res_id},
{'$addToSet': {'collections': name}},
multi=True
)
def remove_client_collection(self, name):
if name.startswith(self.res_id):
name = name[self.id_length:]
self.client_collection.update(
{'res_id': self.res_id},
{'$pull': {'collections': name}},
multi=True
)
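# Usage sketch ('myresid' is a placeholder value): inside the block,
# collection access is transparently prefixed with the resource id, so a
# sandboxed client only ever touches its own namespaced collections.
#
#   with UseResId('myresid'):
#       get_db().foo.insert({'x': 1})   # actually writes to 'myresidfoo'
#   get_collection_names('myresid')     # -> [u'foo']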
|
murphy-wang/aws-ec2
|
refs/heads/master
|
deploy_templates.py
|
35
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
# Deploy the configuration file templates in the spark-ec2/templates directory
# to the root filesystem, substituting variables such as the master hostname,
# ZooKeeper URL, etc as read from the environment.
# Find system memory in KB and compute Spark's default limit from that
mem_command = "cat /proc/meminfo | grep MemTotal | awk '{print $2}'"
cpu_command = "nproc"
master_ram_kb = int(
os.popen(mem_command).read().strip())
# This is the master's memory. Try to find slave's memory as well
first_slave = os.popen("cat /root/spark-ec2/slaves | head -1").read().strip()
slave_mem_command = "ssh -t -o StrictHostKeyChecking=no %s %s" %\
(first_slave, mem_command)
slave_cpu_command = "ssh -t -o StrictHostKeyChecking=no %s %s" %\
(first_slave, cpu_command)
slave_ram_kb = int(os.popen(slave_mem_command).read().strip())
slave_cpus = int(os.popen(slave_cpu_command).read().strip())
system_ram_kb = min(slave_ram_kb, master_ram_kb)
system_ram_mb = system_ram_kb / 1024
slave_ram_mb = slave_ram_kb / 1024
# Leave some RAM for the OS, Hadoop daemons, and system caches
if slave_ram_mb > 100*1024:
slave_ram_mb = slave_ram_mb - 15 * 1024 # Leave 15 GB RAM
elif slave_ram_mb > 60*1024:
slave_ram_mb = slave_ram_mb - 10 * 1024 # Leave 10 GB RAM
elif slave_ram_mb > 40*1024:
slave_ram_mb = slave_ram_mb - 6 * 1024 # Leave 6 GB RAM
elif slave_ram_mb > 20*1024:
slave_ram_mb = slave_ram_mb - 3 * 1024 # Leave 3 GB RAM
elif slave_ram_mb > 10*1024:
slave_ram_mb = slave_ram_mb - 2 * 1024 # Leave 2 GB RAM
else:
slave_ram_mb = max(512, slave_ram_mb - 1300) # Leave 1.3 GB RAM
# Make tachyon_mb as slave_ram_mb for now.
tachyon_mb = slave_ram_mb
worker_instances_str = ""
worker_cores = slave_cpus
if os.getenv("SPARK_WORKER_INSTANCES") != "":
worker_instances = int(os.getenv("SPARK_WORKER_INSTANCES", 1))
worker_instances_str = "%d" % worker_instances
# Distribute equally cpu cores among worker instances
worker_cores = max(slave_cpus / worker_instances, 1)
template_vars = {
"master_list": os.getenv("MASTERS"),
"active_master": os.getenv("MASTERS").split("\n")[0],
"slave_list": os.getenv("SLAVES"),
"hdfs_data_dirs": os.getenv("HDFS_DATA_DIRS"),
"mapred_local_dirs": os.getenv("MAPRED_LOCAL_DIRS"),
"spark_local_dirs": os.getenv("SPARK_LOCAL_DIRS"),
"spark_worker_mem": "%dm" % slave_ram_mb,
"spark_worker_instances": worker_instances_str,
"spark_worker_cores": "%d" % worker_cores,
"spark_master_opts": os.getenv("SPARK_MASTER_OPTS", ""),
"spark_version": os.getenv("SPARK_VERSION"),
"tachyon_version": os.getenv("TACHYON_VERSION"),
"hadoop_major_version": os.getenv("HADOOP_MAJOR_VERSION"),
"java_home": os.getenv("JAVA_HOME"),
"default_tachyon_mem": "%dMB" % tachyon_mb,
"system_ram_mb": "%d" % system_ram_mb,
"aws_access_key_id": os.getenv("AWS_ACCESS_KEY_ID"),
"aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"),
}
template_dir="/root/spark-ec2/templates"
for path, dirs, files in os.walk(template_dir):
if path.find(".svn") == -1:
dest_dir = os.path.join('/', path[len(template_dir):])
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
with open(os.path.join(path, filename)) as src:
with open(dest_file, "w") as dest:
print("Configuring " + dest_file)
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key] or '')
dest.write(text)
dest.close()
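# For reference, the substitution above is plain string replacement; a
# standalone illustration with made-up values:
#
#   text = "export MASTERS={{active_master}}"
#   text.replace("{{active_master}}", "ec2-1-2-3-4.compute-1.amazonaws.com")
#   # -> 'export MASTERS=ec2-1-2-3-4.compute-1.amazonaws.com'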
|
vatsala/python_koans
|
refs/heads/master
|
python2/runner/sensei.py
|
43
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os
import glob
import helper
from mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
def __init__(self, stream):
unittest.TestResult.__init__(self)
self.stream = stream
self.prevTestClassName = None
self.tests = path_to_enlightenment.koans()
self.pass_count = 0
self.lesson_pass_count = 0
self.all_lessons = None
def startTest(self, test):
MockableTestResult.startTest(self, test)
if helper.cls_name(test) != self.prevTestClassName:
self.prevTestClassName = helper.cls_name(test)
if not self.failures:
self.stream.writeln()
self.stream.writeln("{0}{1}Thinking {2}".format(
Fore.RESET, Style.NORMAL, helper.cls_name(test)))
if helper.cls_name(test) != 'AboutAsserts':
self.lesson_pass_count += 1
def addSuccess(self, test):
if self.passesCount():
MockableTestResult.addSuccess(self, test)
self.stream.writeln( \
" {0}{1}{2} has expanded your awareness.{3}{4}" \
.format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
Fore.RESET, Style.NORMAL))
self.pass_count += 1
def addError(self, test, err):
# Having 1 list for errors and 1 list for failures would mess with
# the error sequence
self.addFailure(test, err)
def passesCount(self):
return not (self.failures and helper.cls_name(self.failures[0][0]) !=
self.prevTestClassName)
def addFailure(self, test, err):
MockableTestResult.addFailure(self, test, err)
def sortFailures(self, testClassName):
table = list()
for test, err in self.failures:
if helper.cls_name(test) == testClassName:
m = re.search("(?<= line )\d+" ,err)
if m:
tup = (int(m.group(0)), test, err)
table.append(tup)
if table:
return sorted(table)
else:
return None
def firstFailure(self):
if not self.failures: return None
table = self.sortFailures(helper.cls_name(self.failures[0][0]))
if table:
return (table[0][1], table[0][2])
else:
return None
def learn(self):
self.errorReport()
self.stream.writeln("")
self.stream.writeln("")
self.stream.writeln(self.report_progress())
if self.failures:
self.stream.writeln(self.report_remaining())
self.stream.writeln("")
self.stream.writeln(self.say_something_zenlike())
if self.failures: sys.exit(-1)
self.stream.writeln(
"\n{0}**************************************************" \
.format(Fore.RESET))
self.stream.writeln("\n{0}That was the last one, well done!" \
.format(Fore.MAGENTA))
self.stream.writeln(
"\nIf you want more, take a look at about_extra_credit_task.py")
def errorReport(self):
problem = self.firstFailure()
if not problem: return
test, err = problem
self.stream.writeln(" {0}{1}{2} has damaged your "
"karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))
self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
Style.BRIGHT, self.scrapeAssertionError(err)))
self.stream.writeln("")
self.stream.writeln("{0}{1}Please meditate on the following code:" \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))
def scrapeAssertionError(self, err):
if not err: return ""
error_text = ""
count = 0
for line in err.splitlines():
m = re.search("^[^^ ].*$",line)
if m and m.group(0):
count+=1
if count>1:
error_text += (" " + line.strip()).rstrip() + '\n'
return error_text.strip('\n')
def scrapeInterestingStackDump(self, err):
if not err:
return ""
lines = err.splitlines()
sep = '@@@@@SEP@@@@@'
stack_text = ""
for line in lines:
m = re.search("^ File .*$",line)
if m and m.group(0):
stack_text += '\n' + line
m = re.search("^ \w(\w)+.*$",line)
if m and m.group(0):
stack_text += sep + line
lines = stack_text.splitlines()
stack_text = ""
for line in lines:
m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
if m and m.group(0):
stack_text += line + '\n'
stack_text = stack_text.replace(sep, '\n').strip('\n')
stack_text = re.sub(r'(about_\w+.py)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
stack_text = re.sub(r'(line \d+)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
return stack_text
def report_progress(self):
return "You have completed {0} koans and " \
"{1} lessons.".format(
self.pass_count,
self.lesson_pass_count)
def report_remaining(self):
koans_remaining = self.total_koans() - self.pass_count
lessons_remaining = self.total_lessons() - self.lesson_pass_count
return "You are now {0} koans and {1} lessons away from " \
"reaching enlightenment.".format(
koans_remaining,
lessons_remaining)
# Hat's tip to Tim Peters for the zen statements from The 'Zen
# of Python' (http://www.python.org/dev/peps/pep-0020/)
#
# Also a hat's tip to Ara T. Howard for the zen statements from his
# metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
# Edgecase's later permutation in the Ruby Koans
def say_something_zenlike(self):
if self.failures:
turn = self.pass_count % 37
zenness = "";
if turn == 0:
zenness = "Beautiful is better than ugly."
elif turn == 1 or turn == 2:
zenness = "Explicit is better than implicit."
elif turn == 3 or turn == 4:
zenness = "Simple is better than complex."
elif turn == 5 or turn == 6:
zenness = "Complex is better than complicated."
elif turn == 7 or turn == 8:
zenness = "Flat is better than nested."
elif turn == 9 or turn == 10:
zenness = "Sparse is better than dense."
elif turn == 11 or turn == 12:
zenness = "Readability counts."
elif turn == 13 or turn == 14:
zenness = "Special cases aren't special enough to " \
"break the rules."
elif turn == 15 or turn == 16:
zenness = "Although practicality beats purity."
elif turn == 17 or turn == 18:
zenness = "Errors should never pass silently."
elif turn == 19 or turn == 20:
zenness = "Unless explicitly silenced."
elif turn == 21 or turn == 22:
zenness = "In the face of ambiguity, refuse the " \
"temptation to guess."
elif turn == 23 or turn == 24:
zenness = "There should be one-- and preferably only " \
"one --obvious way to do it."
elif turn == 25 or turn == 26:
zenness = "Although that way may not be obvious at " \
"first unless you're Dutch."
elif turn == 27 or turn == 28:
zenness = "Now is better than never."
elif turn == 29 or turn == 30:
zenness = "Although never is often better than right " \
"now."
elif turn == 31 or turn == 32:
zenness = "If the implementation is hard to explain, " \
"it's a bad idea."
elif turn == 33 or turn == 34:
zenness = "If the implementation is easy to explain, " \
"it may be a good idea."
else:
zenness = "Namespaces are one honking great idea -- " \
"let's do more of those!"
return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL);
else:
return "{0}Nobody ever expects the Spanish Inquisition." \
.format(Fore.CYAN)
# Hopefully this will never ever happen!
return "The temple is collapsing! Run!!!"
def total_lessons(self):
all_lessons = self.filter_all_lessons()
if all_lessons:
return len(all_lessons)
else:
return 0
def total_koans(self):
return self.tests.countTestCases()
def filter_all_lessons(self):
cur_dir = os.path.split(os.path.realpath(__file__))[0]
if not self.all_lessons:
self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
self.all_lessons = filter(lambda filename:
"about_extra_credit" not in filename,
self.all_lessons)
return self.all_lessons
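# Illustrative note on sortFailures(): the lookbehind regex extracts the
# first line number from a traceback string so failures can be ordered by
# position in the koan file, e.g.:
#
#   re.search(r"(?<= line )\d+",
#             '  File "about_asserts.py", line 12, in test_assert_truth')
#   # -> match.group(0) == '12'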
|
alvaralmstedt/pdf_create
|
refs/heads/master
|
create-pdf.py
|
1
|
from reportlab.lib.styles import ParagraphStyle as PS
from reportlab.platypus import PageBreak
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus.frames import Frame
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
from reportlab.platypus import Image, Frame
import csv
from reportlab.lib.units import cm, inch
from reportlab.lib import utils
from reportlab.lib.colors import pink, black, red, blue, green
import sys
import openpyxl
from openpyxl.cell import get_column_letter, column_index_from_string
ARGUMENTLIST = sys.argv
DATE = ARGUMENTLIST[1]
DATAFILE = ARGUMENTLIST[2]
INVESTIGATOR = ARGUMENTLIST[3]
INSTRUMENT = ARGUMENTLIST[4]
RUNID = ARGUMENTLIST[5]
#PASSWORD = ARGUMENTLIST[6]
EXCELFILE = openpyxl.load_workbook('161017_NB501037_0079_AH3YLKBGXY.xlsx')
SAMPLESSHEET = EXCELFILE.get_sheet_by_name('Samples')
EXCELBOOL = True
EXCELSTART = False
XLLINES = 0
EXCELTABLE = []
while EXCELBOOL:
XLLINES += 1
if SAMPLESSHEET['F%s' % XLLINES].value == 'Raw Reads':
EXCELSTART = True
continue
    if EXCELSTART:
if '=SUM' in str(SAMPLESSHEET['F%s' % XLLINES].value):
EXCELBOOL = False
continue
else:
EXCELTABLE.append(str(SAMPLESSHEET['F%s' % XLLINES].value))
print EXCELTABLE
data = csv.reader(open(DATAFILE,"rb"))
DATALIST = list(data)
mypdf = 'mypdffile.pdf'
c = canvas.Canvas(mypdf, pagesize=A4)
frontimage = 'SAsidelog.png'
genomics = 'genomics_core.png'
def get_image(path, width):
img = utils.ImageReader(path)
iw, ih = img.getSize()
aspect = ih / float(iw)
height = width * aspect
return height
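# Worked example: for a 2000x1000 px image, get_image(path, 20) returns
# 20 * (1000 / 2000.) == 10.0, i.e. the height (in the caller's units) that
# preserves the aspect ratio at the requested width.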
newheight_fac = get_image(frontimage, 20)
newheight_gen = get_image(genomics, 8)
# LOGOS
c.drawImage(frontimage, 8, 7, width=20*cm, height=newheight_fac*cm, mask='auto')
c.drawImage(genomics, 150, 640, width=8*cm, height=newheight_gen*cm, mask='auto')
c.setFont('Helvetica', 18, leading=None)
c.drawString(160, 600, 'Bioinformatics')
c.setFont('Helvetica', 15, leading=None)
c.drawString(160, 580, 'Core Facility')
c.setFont('Helvetica-Bold', 28, leading=None)
# HEADER
c.setFillGray(0.25)
c.drawString(160, 480, 'Sequencing Report')
c.setFillColorRGB(0.2,0.1,0.5)
c.setFont('Helvetica-Bold', 22, leading=None)
c.drawString(160, 455, 'Raw data delivery and Bioinformatics')
# DATE, INVESTIGATOR AND PROJECT NAME
c.setFillColorRGB(0,0,0)
c.setFont('Helvetica', 15, leading=None)
ANALYSISDATE = 'Analysis date: ' + DATE
c.drawString(160, 380, ANALYSISDATE)
INVEST = 'Investigator: ' + INVESTIGATOR
c.setFont('Helvetica', 15, leading=None)
c.drawString(160, 360, INVEST)
c.setFont('Helvetica', 15, leading=None)
PROJ = DATALIST[3][1]
PROJECT = 'Project: ' + PROJ
c.drawString(160, 340, PROJECT)
c.showPage()
c.setFillGray(0.25)
c.setFont('Helvetica-Bold', 22, leading=None)
c.drawString(60, 740, 'Sequencing Report')
c.setStrokeColorRGB(0.2,0.1,0.5)
c.line(1.5*cm,720,19.5*cm,720)
c.line(1.5*cm,435,19.5*cm,435)
c.setFont('Helvetica-Bold', 18, leading=None)
c.drawString(60, 690, 'Contents')
ROW1 = 'Contents ................................................................................................................................. 2'
ROW2 = 'Sequencing run information ................................................................................................... 2'
ROW3 = 'General run information ......................................................................................................... 2'
ROW4 = 'QC plot and statistics ............................................................................................................ 3'
ROW5 = 'Sample information ................................................................................................................ 4'
ROW6 = 'Data information ..................................................................................................................... 5'
ROW7 = 'Data delivery ........................................................................................................................ 5'
ROW8 = 'Data structure ....................................................................................................................... 5'
c.setFont('Helvetica', 12, leading=None)
c.drawString(65, 670, ROW1)
c.drawString(65, 650, ROW2)
c.drawString(70, 630, ROW3)
c.drawString(70, 610, ROW4)
c.drawString(65, 590, ROW5)
c.drawString(65, 570, ROW6)
c.drawString(70, 550, ROW7)
c.drawString(70, 530, ROW8)
RUNINFOLISTc = []
RUNINFOLISTc.append(RUNID)
RUNINFOLISTc.append(RUNID.split("_")[0])
RUNINFOLISTc.append(INSTRUMENT)
READLENGTH = '2x' + DATALIST[12][0]
RUNINFOLISTc.append(READLENGTH)
RUNINFOLISTc.append('placeholder1')
RUNINFOLISTc.append('placeholder2')
RUNINFOLISTc.append('placeholder3')
RUNINFOLISTc.append('placeholder4')
RUNINFOYpos = 380
RUNINFOLIST = ['Run ID', 'Date', 'Instrument', 'Read length', 'Reagent kit version', 'Application', 'Library preparation kit', 'Protocol']
c.setFont('Helvetica-Bold', 12, leading=None)
for i in RUNINFOLIST:
c.drawString(65, RUNINFOYpos, i)
RUNINFOYpos= RUNINFOYpos - 20
if i == RUNINFOLIST[4] or i == RUNINFOLIST[6]:
RUNINFOYpos = RUNINFOYpos - 30
RUNINFOYpos = 380
c.setFont('Helvetica', 12, leading=None)
for i in RUNINFOLISTc:
c.drawString(330, RUNINFOYpos, i)
RUNINFOYpos= RUNINFOYpos - 20
if i == RUNINFOLISTc[4] or i == RUNINFOLISTc[6]:
RUNINFOYpos = RUNINFOYpos - 30
c.showPage()
DATAARRAY = []
DATABOOL = False
for i in DATALIST:
if i[0] == "Sample_ID":
DATABOOL = True
    if DATABOOL:
DATAARRAY.append([i[0], i[4], i[5]])
print DATAARRAY
COUNTAPPEND = 0
for i in DATAARRAY:
    i.append(COUNTAPPEND)  # assumed intent: tag each row with a running index
COUNTAPPEND += 1
c.showPage()
c.showPage()
c.setStrokeColorRGB(0.2,0.1,0.5)
c.line(1*cm,180,20*cm,180)
textobject = c.beginText()
textobject.setTextOrigin(2*cm,160)
textobject.setFont("Helvetica", 10)
textobject.textLines('''Publications are important to us, as are our users. Publications are signs that our work generates interesting
results. This enables us to apply for more funds to keep the centre running. We would appreciate if our
facility is mentioned in the Acknowledgements section when data have been obtained in our lab, for example
by using the sentence: "We would like to thank the Genomics and Bioinformatics Core Facility platforms,
at the Sahlgrenska Academy, University of Gothenburg".''')
c.drawText(textobject)
c.showPage()
c.save()
|
amcgee/pymomo
|
refs/heads/master
|
pymomo/hex8/instructions/retlw.py
|
2
|
from utils import *
class RetlwInstruction:
def __init__(self, args):
self.args = parse_args("i", args)
def __str__(self):
return "retlw 0x%x" % self.args[0]
def encode(self):
return (0b110100 << 8) | self.args[0]
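# Worked example (assuming parse_args yields the literal as an int): for a
# literal of 0x2a, encode() returns (0b110100 << 8) | 0x2a == 0x342a.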
|
kevingu1003/python-pptx
|
refs/heads/master
|
pptx/oxml/chart/shared.py
|
6
|
# encoding: utf-8
"""
Shared oxml objects for charts.
"""
from __future__ import absolute_import, print_function, unicode_literals
from ..simpletypes import (
ST_LayoutMode, XsdBoolean, XsdDouble, XsdString, XsdUnsignedInt
)
from ..xmlchemy import (
BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrOne
)
class CT_Boolean(BaseOxmlElement):
"""
Common complex type used for elements having a True/False value.
"""
val = OptionalAttribute('val', XsdBoolean, default=True)
class CT_Double(BaseOxmlElement):
"""
Used for floating point values.
"""
val = RequiredAttribute('val', XsdDouble)
class CT_Layout(BaseOxmlElement):
"""
``<c:layout>`` custom element class
"""
manualLayout = ZeroOrOne('c:manualLayout', successors=('c:extLst',))
@property
def horz_offset(self):
"""
The float value in ./c:manualLayout/c:x when
c:layout/c:manualLayout/c:xMode@val == "factor". 0.0 if that XPath
expression finds no match.
"""
manualLayout = self.manualLayout
if manualLayout is None:
return 0.0
return manualLayout.horz_offset
@horz_offset.setter
def horz_offset(self, offset):
"""
Set the value of ./c:manualLayout/c:x@val to *offset* and
./c:manualLayout/c:xMode@val to "factor". Remove ./c:manualLayout if
*offset* == 0.
"""
if offset == 0.0:
self._remove_manualLayout()
return
manualLayout = self.get_or_add_manualLayout()
manualLayout.horz_offset = offset
class CT_LayoutMode(BaseOxmlElement):
"""
Used for ``<c:xMode>``, ``<c:yMode>``, ``<c:wMode>``, and ``<c:hMode>``
child elements of CT_ManualLayout.
"""
val = OptionalAttribute(
'val', ST_LayoutMode, default=ST_LayoutMode.FACTOR
)
class CT_ManualLayout(BaseOxmlElement):
"""
``<c:manualLayout>`` custom element class
"""
_tag_seq = (
'c:layoutTarget', 'c:xMode', 'c:yMode', 'c:wMode', 'c:hMode', 'c:x',
'c:y', 'c:w', 'c:h', 'c:extLst'
)
xMode = ZeroOrOne('c:xMode', successors=_tag_seq[2:])
x = ZeroOrOne('c:x', successors=_tag_seq[6:])
del _tag_seq
@property
def horz_offset(self):
"""
The float value in ./c:x@val when ./c:xMode@val == "factor". 0.0 when
./c:x is not present or ./c:xMode@val != "factor".
"""
x, xMode = self.x, self.xMode
if x is None or xMode is None or xMode.val != ST_LayoutMode.FACTOR:
return 0.0
return x.val
@horz_offset.setter
def horz_offset(self, offset):
"""
Set the value of ./c:x@val to *offset* and ./c:xMode@val to "factor".
"""
self.get_or_add_xMode().val = ST_LayoutMode.FACTOR
self.get_or_add_x().val = offset
class CT_NumFmt(BaseOxmlElement):
"""
``<c:numFmt>`` element specifying the formatting for number labels on a
tick mark or data point.
"""
formatCode = RequiredAttribute('formatCode', XsdString)
sourceLinked = OptionalAttribute('sourceLinked', XsdBoolean)
class CT_UnsignedInt(BaseOxmlElement):
"""
``<c:idx>`` element and others.
"""
val = RequiredAttribute('val', XsdUnsignedInt)
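# Usage sketch (helper imports assumed from pptx.oxml): round-tripping the
# manual-layout x-offset through the CT_Layout properties above.
#
#   from pptx.oxml import parse_xml
#   from pptx.oxml.ns import nsdecls
#   layout = parse_xml('<c:layout %s/>' % nsdecls('c'))
#   layout.horz_offset         # -> 0.0 (no c:manualLayout present)
#   layout.horz_offset = 0.25  # adds c:manualLayout with c:xMode val="factor"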
|
Laknot/libvpx
|
refs/heads/master
|
tools/diff.py
|
83
|
#!/usr/bin/env python
## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
"""Classes for representing diff pieces."""
__author__ = "jkoleszar@google.com"
import re
class DiffLines(object):
"""A container for one half of a diff."""
def __init__(self, filename, offset, length):
self.filename = filename
self.offset = offset
self.length = length
self.lines = []
self.delta_line_nums = []
def Append(self, line):
l = len(self.lines)
if line[0] != " ":
self.delta_line_nums.append(self.offset + l)
self.lines.append(line[1:])
assert l+1 <= self.length
def Complete(self):
return len(self.lines) == self.length
def __contains__(self, item):
return item >= self.offset and item <= self.offset + self.length - 1
class DiffHunk(object):
"""A container for one diff hunk, consisting of two DiffLines."""
def __init__(self, header, file_a, file_b, start_a, len_a, start_b, len_b):
self.header = header
self.left = DiffLines(file_a, start_a, len_a)
self.right = DiffLines(file_b, start_b, len_b)
self.lines = []
def Append(self, line):
"""Adds a line to the DiffHunk and its DiffLines children."""
if line[0] == "-":
self.left.Append(line)
elif line[0] == "+":
self.right.Append(line)
elif line[0] == " ":
self.left.Append(line)
self.right.Append(line)
elif line[0] == "\\":
# Ignore newline messages from git diff.
pass
else:
assert False, ("Unrecognized character at start of diff line "
"%r" % line[0])
self.lines.append(line)
def Complete(self):
return self.left.Complete() and self.right.Complete()
def __repr__(self):
return "DiffHunk(%s, %s, len %d)" % (
self.left.filename, self.right.filename,
max(self.left.length, self.right.length))
def ParseDiffHunks(stream):
"""Walk a file-like object, yielding DiffHunks as they're parsed."""
file_regex = re.compile(r"(\+\+\+|---) (\S+)")
range_regex = re.compile(r"@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?")
hunk = None
while True:
line = stream.readline()
if not line:
break
if hunk is None:
# Parse file names
diff_file = file_regex.match(line)
if diff_file:
if line.startswith("---"):
a_line = line
a = diff_file.group(2)
continue
if line.startswith("+++"):
b_line = line
b = diff_file.group(2)
continue
# Parse offset/lengths
diffrange = range_regex.match(line)
if diffrange:
if diffrange.group(2):
start_a = int(diffrange.group(1))
len_a = int(diffrange.group(3))
else:
start_a = 1
len_a = int(diffrange.group(1))
if diffrange.group(5):
start_b = int(diffrange.group(4))
len_b = int(diffrange.group(6))
else:
start_b = 1
len_b = int(diffrange.group(4))
header = [a_line, b_line, line]
hunk = DiffHunk(header, a, b, start_a, len_a, start_b, len_b)
else:
# Add the current line to the hunk
hunk.Append(line)
# See if the whole hunk has been parsed. If so, yield it and prepare
# for the next hunk.
if hunk.Complete():
yield hunk
hunk = None
# Partial hunks are a parse error
assert hunk is None
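if __name__ == "__main__":
  # Minimal self-check (illustrative, not part of the original tool): parse
  # one hunk from an in-memory diff and report the changed line numbers.
  import io
  patch = io.StringIO(u"--- a/f.txt\n"
                      u"+++ b/f.txt\n"
                      u"@@ -1,2 +1,2 @@\n"
                      u" keep\n"
                      u"-old\n"
                      u"+new\n")
  for h in ParseDiffHunks(patch):
    print("%r: left deltas %s, right deltas %s"
          % (h, h.left.delta_line_nums, h.right.delta_line_nums))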
|
spaam/svtplay-dl
|
refs/heads/master
|
lib/svtplay_dl/__main__.py
|
3
|
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import sys
if __package__ is None and not hasattr(sys, "frozen"):
# direct call of __main__.py
import os.path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import svtplay_dl
if __name__ == "__main__":
svtplay_dl.main()
|
gsnbng/erpnext
|
refs/heads/develop
|
erpnext/agriculture/doctype/agriculture_analysis_criteria/agriculture_analysis_criteria.py
|
24
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AgricultureAnalysisCriteria(Document):
pass
|
asimshankar/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/bijectors/reshape.py
|
18
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reshape bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Reshape",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _static_ndims_from_shape(shape):
return tensor_shape.dimension_value(shape.shape.with_rank_at_least(1)[0])
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _ndims_from_shape(shape):
return array_ops.shape(shape)[0]
class Reshape(bijector.Bijector):
"""Reshapes the `event_shape` of a `Tensor`.
The semantics generally follow that of `tf.reshape()`, with
a few differences:
* The user must provide both the input and output shape, so that
the transformation can be inverted. If an input shape is not
specified, the default assumes a vector-shaped input, i.e.,
event_shape_in = (-1,).
* The `Reshape` bijector automatically broadcasts over the leftmost
dimensions of its input (`sample_shape` and `batch_shape`); only
the rightmost `event_ndims_in` dimensions are reshaped. The
number of dimensions to reshape is inferred from the provided
`event_shape_in` (`event_ndims_in = len(event_shape_in)`).
Example usage:
```python
import tensorflow_probability as tfp
tfb = tfp.bijectors
r = tfb.Reshape(event_shape_out=[1, -1])
r.forward([3., 4.]) # shape [2]
# ==> [[3., 4.]] # shape [1, 2]
r.forward([[1., 2.], [3., 4.]]) # shape [2, 2]
# ==> [[[1., 2.]],
# [[3., 4.]]] # shape [2, 1, 2]
r.inverse([[3., 4.]]) # shape [1,2]
# ==> [3., 4.] # shape [2]
r.forward_log_det_jacobian(any_value)
# ==> 0.
r.inverse_log_det_jacobian(any_value)
# ==> 0.
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, event_shape_out, event_shape_in=(-1,),
validate_args=False, name=None):
"""Creates a `Reshape` bijector.
Args:
event_shape_out: An `int`-like vector-shaped `Tensor`
representing the event shape of the transformed output.
event_shape_in: An optional `int`-like vector-shape `Tensor`
representing the event shape of the input. This is required in
order to define inverse operations; the default of (-1,)
assumes a vector-shaped input.
validate_args: Python `bool` indicating whether arguments should
be checked for correctness.
name: Python `str`, name given to ops managed by this object.
Raises:
TypeError: if either `event_shape_in` or `event_shape_out` has
non-integer `dtype`.
ValueError: if either of `event_shape_in` or `event_shape_out`
has non-vector shape (`rank > 1`), or if their sizes do not
match.
"""
with ops.name_scope(name, "reshape",
values=[event_shape_out, event_shape_in]):
event_shape_out = ops.convert_to_tensor(event_shape_out,
name="event_shape_out",
preferred_dtype=dtypes.int32)
event_shape_in = ops.convert_to_tensor(event_shape_in,
name="event_shape_in",
preferred_dtype=dtypes.int32)
assertions = []
assertions.extend(self._maybe_check_valid_shape(
event_shape_out, validate_args))
assertions.extend(self._maybe_check_valid_shape(
event_shape_in, validate_args))
self._assertions = assertions
self._event_shape_in = event_shape_in
self._event_shape_out = event_shape_out
super(Reshape, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
validate_args=validate_args,
name=name or "reshape")
def _maybe_check_valid_shape(self, shape, validate_args):
"""Check that a shape Tensor is int-type and otherwise sane."""
if not shape.dtype.is_integer:
raise TypeError("{} dtype ({}) should be `int`-like.".format(
shape, shape.dtype.name))
assertions = []
ndims = array_ops.rank(shape)
ndims_ = tensor_util.constant_value(ndims)
if ndims_ is not None and ndims_ > 1:
raise ValueError("`{}` rank ({}) should be <= 1.".format(
shape, ndims_))
elif validate_args:
assertions.append(check_ops.assert_less_equal(
ndims, 1, message="`{}` rank should be <= 1.".format(shape)))
shape_ = tensor_util.constant_value_as_shape(shape)
if shape_.is_fully_defined():
es = np.int32(shape_.as_list())
if sum(es == -1) > 1:
raise ValueError(
"`{}` must have at most one `-1` (given {})"
.format(shape, es))
if np.any(es < -1):
raise ValueError(
"`{}` elements must be either positive integers or `-1`"
"(given {})."
.format(shape, es))
elif validate_args:
assertions.extend([
check_ops.assert_less_equal(
math_ops.reduce_sum(
math_ops.cast(math_ops.equal(shape, -1), dtypes.int32)),
1,
message="`{}` elements must have at most one `-1`."
.format(shape)),
check_ops.assert_greater_equal(
shape, -1,
message="`{}` elements must be either positive integers or `-1`."
.format(shape)),
])
return assertions
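  # For illustration (comments only, not executed): per the checks above,
  # shapes like [2, -1] or [6] are accepted; [-1, -1] is rejected (more than
  # one -1), [2, -2] is rejected (element < -1), and a rank-2 shape such as
  # [[2, 3]] is rejected by the rank check.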
def _reshape_helper(self, x, event_shape_in, event_shape_out):
"""Reshape only the event_shape of an input `Tensor`."""
event_ndims_in_ = _static_ndims_from_shape(event_shape_in)
event_ndims_in = _ndims_from_shape(event_shape_in)
x_ndims_, x_ndims = x.shape.ndims, array_ops.rank(x)
assertions = []
# Ensure x.event_shape is compatible with event_shape_in.
if (event_ndims_in_ is not None
and x_ndims_ is not None
and x.shape.with_rank_at_least(event_ndims_in_)[
x_ndims_-event_ndims_in_:].is_fully_defined()):
x_event_shape_, x_event_shape = [ # pylint: disable=unbalanced-tuple-unpacking
np.int32(x.shape[x_ndims_-event_ndims_in_:])]*2
else:
x_event_shape_, x_event_shape = (
None, array_ops.shape(x)[x_ndims-event_ndims_in:])
event_shape_in_ = tensor_util.constant_value(event_shape_in)
if x_event_shape_ is not None and event_shape_in_ is not None:
# Compare the shape dimensions that are fully specified in the
# input (i.e., for which event_shape_in is not -1). If x_event_shape
# matches along all of these dimensions, it is compatible with
# the desired input shape and any further mismatches (i.e.,
      # incompatibility with the desired *output* shape) will be
# caught inside of array_ops.reshape() below.
x_event_shape_specified_ = x_event_shape_[event_shape_in_ >= 0]
event_shape_in_specified_ = event_shape_in_[event_shape_in_ >= 0]
if not np.equal(x_event_shape_specified_,
event_shape_in_specified_).all():
raise ValueError(
"Input `event_shape` does not match `event_shape_in` ({} vs {}).".
format(x_event_shape_, event_shape_in_))
elif self.validate_args:
# Similarly to the static case, we compare the shape dimensions
# that are fully specified in the input. We extract these
# dimensions using boolean_mask(), which requires that the mask
# have known ndims. We can assume that shape Tensors always have
# ndims==1 (this assumption is verified inside of
# _maybe_check_valid_shape), so the reshape operation is just a
# no-op that formally encodes this fact to make boolean_mask()
# happy.
event_shape_mask = array_ops.reshape(event_shape_in >= 0, [-1])
x_event_shape_specified = array_ops.boolean_mask(x_event_shape,
event_shape_mask)
event_shape_in_specified = array_ops.boolean_mask(event_shape_in,
event_shape_mask)
assertions.append(check_ops.assert_equal(
x_event_shape_specified, event_shape_in_specified,
message="Input `event_shape` does not match `event_shape_in`."))
if assertions:
x = control_flow_ops.with_dependencies(assertions, x)
# get the parts of shape(x) that will not change
sample_and_batch_shape = array_ops.shape(x)
ndims = (x.shape.ndims if x.shape.ndims is not None
else array_ops.rank(x))
sample_and_batch_shape = sample_and_batch_shape[
:(ndims - math_ops.abs(event_ndims_in))]
if (event_ndims_in_ is not None
and x_ndims_ is not None
and event_ndims_in_ == x_ndims_):
# Hack to allow forward/inverse_event_shape to do shape
# inference by calling this helper method with a dummy Tensor of
# shape event_shape_in. In this special case,
# sample_and_batch_shape will be empty so we can preserve static
# shape information by avoiding the concat operation below
# (which would be a no-op).
new_shape = event_shape_out
else:
new_shape = array_ops.concat(
[sample_and_batch_shape, event_shape_out], axis=0)
return array_ops.reshape(x, new_shape)
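  # Worked example (a sketch, not executed): for `x` of shape [5, 2, 6] with
  # event_shape_in=[6] and event_shape_out=[2, 3], the preserved
  # sample_and_batch_shape is [5, 2] and the result has shape [5, 2, 2, 3].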
def _forward(self, x):
with ops.control_dependencies(self._assertions):
return self._reshape_helper(x,
self._event_shape_in,
self._event_shape_out)
def _inverse(self, y):
with ops.control_dependencies(self._assertions):
return self._reshape_helper(y,
self._event_shape_out,
self._event_shape_in)
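  # A reshape only rearranges entries without scaling them, so the map is
  # volume-preserving: |det J| == 1 and both log-det-Jacobian terms below
  # are exactly 0.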
def _inverse_log_det_jacobian(self, y):
with ops.control_dependencies(self._assertions):
return constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
with ops.control_dependencies(self._assertions):
return constant_op.constant(0., dtype=x.dtype)
def _forward_event_shape(self, input_shape):
# NOTE: this method and the other *_event_shape* methods
# compute shape by explicit transformation of a dummy
# variable. This approach is not generally recommended because it
# bloats the graph and could in general trigger side effects.
#
# In this particular case of the Reshape bijector, the
# forward and inverse transforms have no side effects, and we
# believe the reduction in code complexity from delegating the
# heavy lifting to tf.reshape() is worth the added graph ops.
# However, you should think hard before implementing this approach
# in other Bijectors; it is strongly preferred to compute
# shapes explicitly whenever it's feasible to do so.
with ops.control_dependencies(self._assertions):
dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape)
dummy_reshaped = self.forward(dummy)
return dummy_reshaped.shape
def _inverse_event_shape(self, output_shape):
with ops.control_dependencies(self._assertions):
dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape)
dummy_reshaped = self.inverse(dummy)
return dummy_reshaped.shape
def _forward_event_shape_tensor(self, input_shape):
with ops.control_dependencies(self._assertions):
dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape)
dummy_reshaped = self.forward(dummy)
return array_ops.shape(dummy_reshaped)
def _inverse_event_shape_tensor(self, output_shape):
with ops.control_dependencies(self._assertions):
dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape)
dummy_reshaped = self.inverse(dummy)
return array_ops.shape(dummy_reshaped)
|
mlk/thefuck
|
refs/heads/master
|
tests/rules/test_javac.py
|
17
|
import pytest
from thefuck.rules.javac import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='javac foo'),
Command(script='javac bar')])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('javac foo'), 'javac foo.java'),
(Command('javac bar'), 'javac bar.java')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
|
becloudready/devopstraining
|
refs/heads/master
|
python/mock.py
|
1
|
# This is a Mock project for DevOps class
# Python-Paramiko (Module) - SSH client
# Python-Requests (Module) - HTTP download
#
import paramiko
import requests
url = 'http://www.google.com'
loc = '/home/index.html'
def google(url, loc):
    """Download url, save the body to loc and return the page content."""
    r = requests.get(url)
    if r.status_code == 200:
        with open(loc, 'wb') as f:
            for chunk in r.iter_content():
                f.write(chunk)
    else:
        print "Code Faulty"
    return r.content
def clt():
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('192.168.0.13', username='ahsan', password='ahsankhan')
    ins, out, err = ssh.exec_command("mkdir /opt/google")
    e = err.read()
    if e:  # read() returns an empty string, not None, when there is no error
        return "Error"
    g = google(url, loc)
    cmd = "cat %s << EOD" % g
    ssh.exec_command(cmd)  # was ssh.exe_command, a typo; exec_command is the paramiko API
    ssh.exec_command("cd /opt/google; wget google.com")
clt()
|
jk1/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/comments/templatetags/comments.py
|
309
|
from django import template
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib import comments
from django.utils.encoding import smart_unicode
register = template.Library()
class BaseCommentNode(template.Node):
"""
Base helper class (abstract) for handling the get_comment_* template tags.
Looks a bit strange, but the subclasses below should make this a bit more
obvious.
"""
#@classmethod
def handle_token(cls, parser, token):
"""Class method to parse get_comment_list/count/form and return a Node."""
tokens = token.contents.split()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% get_whatever for obj as varname %}
if len(tokens) == 5:
if tokens[3] != 'as':
raise template.TemplateSyntaxError("Third argument in %r must be 'as'" % tokens[0])
return cls(
object_expr = parser.compile_filter(tokens[2]),
as_varname = tokens[4],
)
# {% get_whatever for app.model pk as varname %}
elif len(tokens) == 6:
if tokens[4] != 'as':
raise template.TemplateSyntaxError("Fourth argument in %r must be 'as'" % tokens[0])
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3]),
as_varname = tokens[5]
)
else:
raise template.TemplateSyntaxError("%r tag requires 4 or 5 arguments" % tokens[0])
handle_token = classmethod(handle_token)
#@staticmethod
def lookup_content_type(token, tagname):
try:
app, model = token.split('.')
return ContentType.objects.get(app_label=app, model=model)
except ValueError:
raise template.TemplateSyntaxError("Third argument in %r must be in the format 'app.model'" % tagname)
except ContentType.DoesNotExist:
raise template.TemplateSyntaxError("%r tag has non-existant content-type: '%s.%s'" % (tagname, app, model))
lookup_content_type = staticmethod(lookup_content_type)
def __init__(self, ctype=None, object_pk_expr=None, object_expr=None, as_varname=None, comment=None):
if ctype is None and object_expr is None:
raise template.TemplateSyntaxError("Comment nodes must be given either a literal object or a ctype and object pk.")
self.comment_model = comments.get_model()
self.as_varname = as_varname
self.ctype = ctype
self.object_pk_expr = object_pk_expr
self.object_expr = object_expr
self.comment = comment
def render(self, context):
qs = self.get_query_set(context)
context[self.as_varname] = self.get_context_value_from_queryset(context, qs)
return ''
def get_query_set(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if not object_pk:
return self.comment_model.objects.none()
qs = self.comment_model.objects.filter(
content_type = ctype,
object_pk = smart_unicode(object_pk),
site__pk = settings.SITE_ID,
)
# The is_public and is_removed fields are implementation details of the
# built-in comment model's spam filtering system, so they might not
# be present on a custom comment model subclass. If they exist, we
# should filter on them.
field_names = [f.name for f in self.comment_model._meta.fields]
if 'is_public' in field_names:
qs = qs.filter(is_public=True)
if getattr(settings, 'COMMENTS_HIDE_REMOVED', True) and 'is_removed' in field_names:
qs = qs.filter(is_removed=False)
return qs
def get_target_ctype_pk(self, context):
if self.object_expr:
try:
obj = self.object_expr.resolve(context)
except template.VariableDoesNotExist:
return None, None
return ContentType.objects.get_for_model(obj), obj.pk
else:
return self.ctype, self.object_pk_expr.resolve(context, ignore_failures=True)
def get_context_value_from_queryset(self, context, qs):
"""Subclasses should override this."""
raise NotImplementedError
class CommentListNode(BaseCommentNode):
"""Insert a list of comments into the context."""
def get_context_value_from_queryset(self, context, qs):
return list(qs)
class CommentCountNode(BaseCommentNode):
"""Insert a count of comments into the context."""
def get_context_value_from_queryset(self, context, qs):
return qs.count()
class CommentFormNode(BaseCommentNode):
"""Insert a form for the comment model into the context."""
def get_form(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if object_pk:
return comments.get_form()(ctype.get_object_for_this_type(pk=object_pk))
else:
return None
def render(self, context):
context[self.as_varname] = self.get_form(context)
return ''
class RenderCommentFormNode(CommentFormNode):
"""Render the comment form directly"""
#@classmethod
def handle_token(cls, parser, token):
"""Class method to parse render_comment_form and return a Node."""
tokens = token.contents.split()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% render_comment_form for obj %}
if len(tokens) == 3:
return cls(object_expr=parser.compile_filter(tokens[2]))
# {% render_comment_form for app.models pk %}
elif len(tokens) == 4:
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3])
)
handle_token = classmethod(handle_token)
def render(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if object_pk:
template_search_list = [
"comments/%s/%s/form.html" % (ctype.app_label, ctype.model),
"comments/%s/form.html" % ctype.app_label,
"comments/form.html"
]
context.push()
formstr = render_to_string(template_search_list, {"form" : self.get_form(context)}, context)
context.pop()
return formstr
else:
return ''
class RenderCommentListNode(CommentListNode):
"""Render the comment list directly"""
#@classmethod
def handle_token(cls, parser, token):
"""Class method to parse render_comment_list and return a Node."""
tokens = token.contents.split()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% render_comment_list for obj %}
if len(tokens) == 3:
return cls(object_expr=parser.compile_filter(tokens[2]))
# {% render_comment_list for app.models pk %}
elif len(tokens) == 4:
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3])
)
handle_token = classmethod(handle_token)
def render(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if object_pk:
template_search_list = [
"comments/%s/%s/list.html" % (ctype.app_label, ctype.model),
"comments/%s/list.html" % ctype.app_label,
"comments/list.html"
]
qs = self.get_query_set(context)
context.push()
liststr = render_to_string(template_search_list, {
"comment_list" : self.get_context_value_from_queryset(context, qs)
}, context)
context.pop()
return liststr
else:
return ''
# We could just register each classmethod directly, but then we'd lose out on
# the automagic docstrings-into-admin-docs tricks. So each node gets a cute
# wrapper function that just exists to hold the docstring.
#@register.tag
def get_comment_count(parser, token):
"""
Gets the comment count for the given params and populates the template
context with a variable containing that value, whose name is defined by the
'as' clause.
Syntax::
{% get_comment_count for [object] as [varname] %}
{% get_comment_count for [app].[model] [object_id] as [varname] %}
Example usage::
{% get_comment_count for event as comment_count %}
{% get_comment_count for calendar.event event.id as comment_count %}
{% get_comment_count for calendar.event 17 as comment_count %}
"""
return CommentCountNode.handle_token(parser, token)
#@register.tag
def get_comment_list(parser, token):
"""
Gets the list of comments for the given params and populates the template
context with a variable containing that value, whose name is defined by the
'as' clause.
Syntax::
{% get_comment_list for [object] as [varname] %}
{% get_comment_list for [app].[model] [object_id] as [varname] %}
Example usage::
{% get_comment_list for event as comment_list %}
{% for comment in comment_list %}
...
{% endfor %}
"""
return CommentListNode.handle_token(parser, token)
#@register.tag
def render_comment_list(parser, token):
"""
Render the comment list (as returned by ``{% get_comment_list %}``)
through the ``comments/list.html`` template
Syntax::
{% render_comment_list for [object] %}
{% render_comment_list for [app].[model] [object_id] %}
Example usage::
{% render_comment_list for event %}
"""
return RenderCommentListNode.handle_token(parser, token)
#@register.tag
def get_comment_form(parser, token):
"""
Get a (new) form object to post a new comment.
Syntax::
{% get_comment_form for [object] as [varname] %}
{% get_comment_form for [app].[model] [object_id] as [varname] %}
"""
return CommentFormNode.handle_token(parser, token)
#@register.tag
def render_comment_form(parser, token):
"""
Render the comment form (as returned by ``{% render_comment_form %}``) through
the ``comments/form.html`` template.
Syntax::
{% render_comment_form for [object] %}
{% render_comment_form for [app].[model] [object_id] %}
"""
return RenderCommentFormNode.handle_token(parser, token)
#@register.simple_tag
def comment_form_target():
"""
Get the target URL for the comment form.
Example::
<form action="{% comment_form_target %}" method="post">
"""
return comments.get_form_target()
#@register.simple_tag
def get_comment_permalink(comment, anchor_pattern=None):
"""
Get the permalink for a comment, optionally specifying the format of the
named anchor to be appended to the end of the URL.
Example::
{{ get_comment_permalink comment "#c%(id)s-by-%(user_name)s" }}
"""
if anchor_pattern:
return comment.get_absolute_url(anchor_pattern)
return comment.get_absolute_url()
register.tag(get_comment_count)
register.tag(get_comment_list)
register.tag(get_comment_form)
register.tag(render_comment_form)
register.simple_tag(comment_form_target)
register.simple_tag(get_comment_permalink)
register.tag(render_comment_list)
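# A minimal sketch (not part of the original module) of these tags working
# together in a template, assuming an ``entry`` object in the context:
#
#   {% load comments %}
#   {% get_comment_count for entry as comment_count %}
#   <p>{{ comment_count }} comment{{ comment_count|pluralize }}</p>
#   {% render_comment_list for entry %}
#   {% render_comment_form for entry %}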
|
ryfeus/lambda-packs
|
refs/heads/master
|
Keras_tensorflow_nightly/source2.7/absl/flags/_helpers.py
|
2
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal helper functions for Abseil Python flags library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import struct
import sys
import textwrap
try:
import fcntl
except ImportError:
fcntl = None
try:
# Importing termios will fail on non-unix platforms.
import termios
except ImportError:
termios = None
import six
from six.moves import range # pylint: disable=redefined-builtin
_DEFAULT_HELP_WIDTH = 80 # Default width of help output.
_MIN_HELP_WIDTH = 40 # Minimal "sane" width of help output. We assume that any
# value below 40 is unreasonable.
# Define the allowed error rate in an input string to get suggestions.
#
# We lean towards a high threshold because we tend to be matching a phrase,
# and the simple algorithm used here is geared towards correcting word
# spellings.
#
# For manual testing, consider "<command> --list" which produced a large number
# of spurious suggestions when we used "least_errors > 0.5" instead of
# "least_erros >= 0.5".
_SUGGESTION_ERROR_RATE_THRESHOLD = 0.50
# Characters that cannot appear or are highly discouraged in an XML 1.0
# document. (See http://www.w3.org/TR/REC-xml/#charsets or
# https://en.wikipedia.org/wiki/Valid_characters_in_XML#XML_1.0)
_ILLEGAL_XML_CHARS_REGEX = re.compile(
u'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]')
# This is a set of module ids for the modules that disclaim key flags.
# This module is explicitly added to this set so that we never consider it to
# define key flag.
disclaim_module_ids = set([id(sys.modules[__name__])])
# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use SPECIAL_FLAGS from outside flags module.
# Initialized inside flagvalues.py.
SPECIAL_FLAGS = None
# This points to the flags module, initialized in flags/__init__.py.
# This should only be used in adopt_module_key_flags to take SPECIAL_FLAGS into
# account.
FLAGS_MODULE = None
class _ModuleObjectAndName(
collections.namedtuple('_ModuleObjectAndName', 'module module_name')):
"""Module object and name.
Fields:
- module: object, module object.
- module_name: str, module name.
"""
def get_module_object_and_name(globals_dict):
"""Returns the module that defines a global environment, and its name.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
_ModuleObjectAndName - pair of module object & module name.
Returns (None, None) if the module could not be identified.
"""
name = globals_dict.get('__name__', None)
module = sys.modules.get(name, None)
# Pick a more informative name for the main module.
return _ModuleObjectAndName(module,
(sys.argv[0] if name == '__main__' else name))
def get_calling_module_object_and_name():
"""Returns the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
Returns:
The module object that called into this one.
Raises:
AssertionError: Raised when no calling module could be identified.
"""
for depth in range(1, sys.getrecursionlimit()):
# sys._getframe is the right thing to use here, as it's the best
# way to walk up the call stack.
globals_for_frame = sys._getframe(depth).f_globals # pylint: disable=protected-access
module, module_name = get_module_object_and_name(globals_for_frame)
if id(module) not in disclaim_module_ids and module_name is not None:
return _ModuleObjectAndName(module, module_name)
raise AssertionError('No module was found')
def get_calling_module():
"""Returns the name of the module that's calling into this module."""
return get_calling_module_object_and_name().module_name
def str_or_unicode(value):
"""Converts a value to a python string.
Behavior of this function is intentionally different in Python2/3.
In Python2, the given value is attempted to convert to a str (byte string).
If it contains non-ASCII characters, it is converted to a unicode instead.
In Python3, the given value is always converted to a str (unicode string).
This behavior reflects the (bad) practice in Python2 to try to represent
a string as str as long as it contains ASCII characters only.
Args:
value: An object to be converted to a string.
Returns:
A string representation of the given value. See the description above
for its type.
"""
try:
return str(value)
except UnicodeEncodeError:
return unicode(value) # Python3 should never come here
def create_xml_dom_element(doc, name, value):
"""Returns an XML DOM element with name and text value.
Args:
doc: minidom.Document, the DOM document it should create nodes from.
name: str, the tag of XML element.
value: object, whose string representation will be used
as the value of the XML element. Illegal or highly discouraged xml 1.0
characters are stripped.
Returns:
An instance of minidom.Element.
"""
s = str_or_unicode(value)
if six.PY2 and not isinstance(s, unicode):
# Get a valid unicode string.
s = s.decode('utf-8', 'ignore')
if isinstance(value, bool):
# Display boolean values as the C++ flag library does: no caps.
s = s.lower()
# Remove illegal xml characters.
s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s)
e = doc.createElement(name)
e.appendChild(doc.createTextNode(s))
return e
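# For example (a sketch; ``doc`` and the flag name are hypothetical):
#   doc = xml.dom.minidom.Document()
#   create_xml_dom_element(doc, 'allow_py2', True).toxml()
#   # -> '<allow_py2>true</allow_py2>'  (booleans are lower-cased)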
def get_help_width():
"""Returns the integer width of help lines that is used in TextWrap."""
if not sys.stdout.isatty() or termios is None or fcntl is None:
return _DEFAULT_HELP_WIDTH
try:
data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
columns = struct.unpack('hh', data)[1]
# Emacs mode returns 0.
# Here we assume that any value below 40 is unreasonable.
if columns >= _MIN_HELP_WIDTH:
return columns
    # Returning an int as default is fine; int(int) just returns the int.
return int(os.getenv('COLUMNS', _DEFAULT_HELP_WIDTH))
except (TypeError, IOError, struct.error):
return _DEFAULT_HELP_WIDTH
def get_flag_suggestions(attempt, longopt_list):
"""Returns helpful similar matches for an invalid flag."""
# Don't suggest on very short strings, or if no longopts are specified.
if len(attempt) <= 2 or not longopt_list:
return []
option_names = [v.split('=')[0] for v in longopt_list]
# Find close approximations in flag prefixes.
# This also handles the case where the flag is spelled right but ambiguous.
distances = [(_damerau_levenshtein(attempt, option[0:len(attempt)]), option)
for option in option_names]
# t[0] is distance, and sorting by t[1] allows us to have stable output.
distances.sort()
least_errors, _ = distances[0]
# Don't suggest excessively bad matches.
if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt):
return []
suggestions = []
for errors, name in distances:
if errors == least_errors:
suggestions.append(name)
else:
break
return suggestions
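# For example: get_flag_suggestions('vebose', ['verbose=', 'verbosity'])
# compares 'vebose' against the 6-character prefix 'verbos' of both options
# (distance 2, below the 0.5 * 6 = 3 threshold) and returns
# ['verbose', 'verbosity'].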
def _damerau_levenshtein(a, b):
"""Returns Damerau-Levenshtein edit distance from a to b."""
memo = {}
def distance(x, y):
"""Recursively defined string distance with memoization."""
if (x, y) in memo:
return memo[x, y]
if not x:
d = len(y)
elif not y:
d = len(x)
else:
d = min(
distance(x[1:], y) + 1, # correct an insertion error
distance(x, y[1:]) + 1, # correct a deletion error
distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character
if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]:
# Correct a transposition.
t = distance(x[2:], y[2:]) + 1
if d > t:
d = t
memo[x, y] = d
return d
return distance(a, b)
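# For example: _damerau_levenshtein('kitten', 'sitting') == 3 (two
# substitutions plus one insertion), while _damerau_levenshtein('abcd',
# 'acbd') == 1 because the adjacent b/c swap counts as one transposition.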
def text_wrap(text, length=None, indent='', firstline_indent=None):
"""Wraps a given text to a maximum line length and returns it.
It turns lines that only contain whitespace into empty lines, keeps new lines,
and expands tabs using 4 spaces.
Args:
text: str, text to wrap.
length: int, maximum length of a line, includes indentation.
If this is None then use get_help_width()
indent: str, indent for all but first line.
firstline_indent: str, indent for first line; if None, fall back to indent.
Returns:
str, the wrapped text.
Raises:
    ValueError: Raised if indent or firstline_indent is not shorter than length.
"""
# Get defaults where callee used None
if length is None:
length = get_help_width()
if indent is None:
indent = ''
if firstline_indent is None:
firstline_indent = indent
if len(indent) >= length:
raise ValueError('Length of indent exceeds length')
if len(firstline_indent) >= length:
raise ValueError('Length of first line indent exceeds length')
text = text.expandtabs(4)
result = []
# Create one wrapper for the first paragraph and one for subsequent
# paragraphs that does not have the initial wrapping.
wrapper = textwrap.TextWrapper(
width=length, initial_indent=firstline_indent, subsequent_indent=indent)
subsequent_wrapper = textwrap.TextWrapper(
width=length, initial_indent=indent, subsequent_indent=indent)
# textwrap does not have any special treatment for newlines. From the docs:
# "...newlines may appear in the middle of a line and cause strange output.
# For this reason, text should be split into paragraphs (using
# str.splitlines() or similar) which are wrapped separately."
for paragraph in (p.strip() for p in text.splitlines()):
if paragraph:
result.extend(wrapper.wrap(paragraph))
else:
result.append('') # Keep empty lines.
# Replace initial wrapper with wrapper for subsequent paragraphs.
wrapper = subsequent_wrapper
return '\n'.join(result)
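# For example: text_wrap('one two three', length=9, indent='  ') returns
# '  one two\n  three' -- firstline_indent falls back to indent, and each
# wrapped line (indent included) stays within the 9-character limit.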
def flag_dict_to_args(flag_map):
"""Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: dict, a mapping where the keys are flag names (strings).
values are treated according to their type:
* If value is None, then only the name is emitted.
* If value is True, then only the name is emitted.
* If value is False, then only the name prepended with 'no' is emitted.
* If value is a string then --name=value is emitted.
* If value is a collection, this will emit --name=value1,value2,value3.
      * Everything else is converted to string and passed as such.
Yields:
sequence of string suitable for a subprocess execution.
"""
for key, value in six.iteritems(flag_map):
if value is None:
yield '--%s' % key
elif isinstance(value, bool):
if value:
yield '--%s' % key
else:
yield '--no%s' % key
elif isinstance(value, (bytes, type(u''))):
# We don't want strings to be handled like python collections.
yield '--%s=%s' % (key, value)
else:
# Now we attempt to deal with collections.
try:
yield '--%s=%s' % (key, ','.join(str(item) for item in value))
except TypeError:
# Default case.
yield '--%s=%s' % (key, value)
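# For example: list(flag_dict_to_args({'color': True, 'shard': None,
# 'dirs': ['/a', '/b']})) yields '--color', '--shard' and '--dirs=/a,/b'
# (in dict iteration order; the key names here are hypothetical).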
def trim_docstring(docstring):
"""Removes indentation from triple-quoted strings.
This is the function specified in PEP 257 to handle docstrings:
https://www.python.org/dev/peps/pep-0257/.
Args:
docstring: str, a python docstring.
Returns:
str, docstring with indentation removed.
"""
if not docstring:
return ''
# If you've got a line longer than this you have other problems...
max_indent = 1 << 29
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = max_indent
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < max_indent:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
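# For example: trim_docstring('Summary line.\n    Indented detail.\n    ')
# returns 'Summary line.\nIndented detail.' -- the common leading
# indentation and the trailing blank line are removed.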
def doc_to_help(doc):
"""Takes a __doc__ string and reformats it as help."""
# Get rid of starting and ending white space. Using lstrip() or even
# strip() could drop more than maximum of first line and right space
# of last line.
doc = doc.strip()
# Get rid of all empty lines.
whitespace_only_line = re.compile('^[ \t]+$', re.M)
doc = whitespace_only_line.sub('', doc)
# Cut out common space at line beginnings.
doc = trim_docstring(doc)
# Just like this module's comment, comments tend to be aligned somehow.
# In other words they all start with the same amount of white space.
# 1) keep double new lines;
# 2) keep ws after new lines if not empty line;
# 3) all other new lines shall be changed to a space;
# Solution: Match new lines between non white space and replace with space.
doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc, flags=re.M)
return doc
def is_bytes_or_string(maybe_string):
if str is bytes:
return isinstance(maybe_string, basestring)
else:
return isinstance(maybe_string, (str, bytes))
|
rouault/Quantum-GIS
|
refs/heads/master
|
python/plugins/processing/tools/postgis.py
|
1
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
postgis.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Martin Dobias
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Dobias'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Martin Dobias'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import psycopg2
import psycopg2.extensions # For isolation levels
import re
import os
from qgis.core import (QgsProcessingException,
QgsDataSourceUri,
QgsCredentials,
QgsSettings)
from qgis.PyQt.QtCore import QCoreApplication
# Use unicode!
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
def uri_from_name(conn_name):
settings = QgsSettings()
settings.beginGroup(u"/PostgreSQL/connections/%s" % conn_name)
if not settings.contains("database"): # non-existent entry?
raise QgsProcessingException(QCoreApplication.translate("PostGIS", 'There is no defined database connection "{0}".').format(conn_name))
uri = QgsDataSourceUri()
settingsList = ["service", "host", "port", "database", "username", "password", "authcfg"]
service, host, port, database, username, password, authcfg = [settings.value(x, "", type=str) for x in settingsList]
useEstimatedMetadata = settings.value("estimatedMetadata", False, type=bool)
sslmode = settings.value("sslmode", QgsDataSourceUri.SslPrefer, type=int)
settings.endGroup()
if hasattr(authcfg, 'isNull') and authcfg.isNull():
authcfg = ''
if service:
uri.setConnection(service, database, username, password, sslmode, authcfg)
else:
uri.setConnection(host, port, database, username, password, sslmode, authcfg)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
return uri
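# A minimal usage sketch (the connection name 'local_gis' is hypothetical):
#
#   uri = uri_from_name('local_gis')
#   db = GeoDB(uri=uri)   # or, equivalently, GeoDB.from_name('local_gis')
#   print(db.get_info())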
class TableAttribute(object):
def __init__(self, row):
(self.num,
self.name,
self.data_type,
self.char_max_len,
self.modifier,
self.notnull,
self.hasdefault,
self.default,
) = row
class TableConstraint(object):
"""Class that represents a constraint of a table (relation).
"""
(TypeCheck, TypeForeignKey, TypePrimaryKey, TypeUnique) = list(range(4))
types = {
'c': TypeCheck,
'f': TypeForeignKey,
'p': TypePrimaryKey,
'u': TypeUnique,
}
on_action = {
'a': 'NO ACTION',
'r': 'RESTRICT',
'c': 'CASCADE',
'n': 'SET NULL',
'd': 'SET DEFAULT',
}
match_types = {'u': 'UNSPECIFIED', 'f': 'FULL', 'p': 'PARTIAL'}
def __init__(self, row):
(self.name, con_type, self.is_defferable, self.is_deffered, keys) = row[:5]
self.keys = list(map(int, keys.split(' ')))
self.con_type = TableConstraint.types[con_type] # Convert to enum
if self.con_type == TableConstraint.TypeCheck:
self.check_src = row[5]
elif self.con_type == TableConstraint.TypeForeignKey:
self.foreign_table = row[6]
self.foreign_on_update = TableConstraint.on_action[row[7]]
self.foreign_on_delete = TableConstraint.on_action[row[8]]
self.foreign_match_type = TableConstraint.match_types[row[9]]
self.foreign_keys = row[10]
class TableIndex(object):
def __init__(self, row):
(self.name, columns) = row
self.columns = list(map(int, columns.split(' ')))
class TableField(object):
def __init__(self, name, data_type, is_null=None, default=None,
modifier=None):
(self.name, self.data_type, self.is_null, self.default,
self.modifier) = (name, data_type, is_null, default, modifier)
def is_null_txt(self):
if self.is_null:
return 'NULL'
else:
return 'NOT NULL'
def field_def(self):
"""Return field definition as used for CREATE TABLE or
ALTER TABLE command.
"""
data_type = (self.data_type if not self.modifier or self.modifier <
0 else '%s(%d)' % (self.data_type, self.modifier))
txt = '%s %s %s' % (self._quote(self.name), data_type,
self.is_null_txt())
if self.default and len(self.default) > 0:
txt += ' DEFAULT %s' % self.default
return txt
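    # For example (comments only): TableField('id', 'serial').field_def()
    # gives 'id serial NOT NULL', while TableField('name', 'varchar',
    # is_null=True, modifier=32).field_def() gives 'name varchar(32) NULL'.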
def _quote(self, ident):
if re.match(r"^\w+$", ident) is not None:
return ident
else:
return '"%s"' % ident.replace('"', '""')
class GeoDB(object):
@classmethod
def from_name(cls, conn_name):
uri = uri_from_name(conn_name)
return cls(uri=uri)
def __init__(self, host=None, port=None, dbname=None, user=None,
passwd=None, service=None, uri=None):
# Regular expression for identifiers without need to quote them
self.re_ident_ok = re.compile(r"^\w+$")
port = str(port)
if uri:
self.uri = uri
else:
self.uri = QgsDataSourceUri()
if service:
self.uri.setConnection(service, dbname, user, passwd)
else:
self.uri.setConnection(host, port, dbname, user, passwd)
conninfo = self.uri.connectionInfo(False)
err = None
for i in range(4):
expandedConnInfo = self.uri.connectionInfo(True)
try:
self.con = psycopg2.connect(expandedConnInfo)
if err is not None:
QgsCredentials.instance().put(conninfo,
self.uri.username(),
self.uri.password())
break
except psycopg2.OperationalError as e:
if i == 3:
raise QgsProcessingException(str(e))
err = str(e)
user = self.uri.username()
password = self.uri.password()
(ok, user, password) = QgsCredentials.instance().get(conninfo,
user,
password,
err)
if not ok:
raise QgsProcessingException(QCoreApplication.translate("PostGIS", 'Action canceled by user'))
if user:
self.uri.setUsername(user)
if password:
self.uri.setPassword(password)
finally:
# remove certs (if any) of the expanded connectionInfo
expandedUri = QgsDataSourceUri(expandedConnInfo)
sslCertFile = expandedUri.param("sslcert")
if sslCertFile:
sslCertFile = sslCertFile.replace("'", "")
os.remove(sslCertFile)
sslKeyFile = expandedUri.param("sslkey")
if sslKeyFile:
sslKeyFile = sslKeyFile.replace("'", "")
os.remove(sslKeyFile)
sslCAFile = expandedUri.param("sslrootcert")
if sslCAFile:
sslCAFile = sslCAFile.replace("'", "")
os.remove(sslCAFile)
self.has_postgis = self.check_postgis()
def get_info(self):
c = self.con.cursor()
self._exec_sql(c, 'SELECT version()')
return c.fetchone()[0]
def check_postgis(self):
"""Check whether postgis_version is present in catalog.
"""
c = self.con.cursor()
self._exec_sql(c,
"SELECT COUNT(*) FROM pg_proc WHERE proname = 'postgis_version'")
return c.fetchone()[0] > 0
def get_postgis_info(self):
"""Returns tuple about PostGIS support:
- lib version
- installed scripts version
- released scripts version
- geos version
- proj version
- whether uses stats
"""
c = self.con.cursor()
self._exec_sql(c,
'SELECT postgis_lib_version(), postgis_scripts_installed(), \
postgis_scripts_released(), postgis_geos_version(), \
postgis_proj_version(), postgis_uses_stats()')
return c.fetchone()
def list_schemas(self):
"""Get list of schemas in tuples: (oid, name, owner, perms).
"""
c = self.con.cursor()
sql = "SELECT oid, nspname, pg_get_userbyid(nspowner), nspacl \
FROM pg_namespace \
WHERE nspname !~ '^pg_' AND nspname != 'information_schema'"
self._exec_sql(c, sql)
return c.fetchall()
def list_geotables(self, schema=None):
"""Get list of tables with schemas, whether user has privileges,
whether table has geometry column(s) etc.
Geometry_columns:
- f_table_schema
- f_table_name
- f_geometry_column
- coord_dimension
- srid
- type
"""
c = self.con.cursor()
if schema:
schema_where = " AND nspname = '%s' " % self._quote_unicode(schema)
else:
schema_where = \
" AND (nspname != 'information_schema' AND nspname !~ 'pg_') "
        # LEFT OUTER JOIN (same as LEFT JOIN): keep every pg_class row even
        # when it has no geometry column; if a table has several geometry
        # columns, one row is returned per match (not only one).
# First find out whether PostGIS is enabled
if not self.has_postgis:
# Get all tables and views
sql = """SELECT pg_class.relname, pg_namespace.nspname,
pg_class.relkind, pg_get_userbyid(relowner),
reltuples, relpages, NULL, NULL, NULL, NULL
FROM pg_class
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
WHERE pg_class.relkind IN ('v', 'r', 'm', 'p')""" \
+ schema_where + 'ORDER BY nspname, relname'
else:
# Discovery of all tables and whether they contain a
# geometry column
sql = """SELECT pg_class.relname, pg_namespace.nspname,
pg_class.relkind, pg_get_userbyid(relowner),
reltuples, relpages, pg_attribute.attname,
pg_attribute.atttypid::regtype, NULL, NULL
FROM pg_class
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
LEFT OUTER JOIN pg_attribute ON
pg_attribute.attrelid = pg_class.oid AND
(pg_attribute.atttypid = 'geometry'::regtype
OR pg_attribute.atttypid IN
(SELECT oid FROM pg_type
WHERE typbasetype='geometry'::regtype))
WHERE pg_class.relkind IN ('v', 'r', 'm', 'p') """ \
+ schema_where + 'ORDER BY nspname, relname, attname'
self._exec_sql(c, sql)
items = c.fetchall()
# Get geometry info from geometry_columns if exists
if self.has_postgis:
sql = """SELECT relname, nspname, relkind,
pg_get_userbyid(relowner), reltuples, relpages,
geometry_columns.f_geometry_column,
geometry_columns.type,
geometry_columns.coord_dimension,
geometry_columns.srid
FROM pg_class
JOIN pg_namespace ON relnamespace=pg_namespace.oid
LEFT OUTER JOIN geometry_columns ON
relname=f_table_name AND nspname=f_table_schema
WHERE relkind IN ('r','v','m','p') """ \
+ schema_where + 'ORDER BY nspname, relname, \
f_geometry_column'
self._exec_sql(c, sql)
# Merge geometry info to "items"
for (i, geo_item) in enumerate(c.fetchall()):
if geo_item[7]:
items[i] = geo_item
return items
def get_table_rows(self, table, schema=None):
c = self.con.cursor()
self._exec_sql(c, 'SELECT COUNT(*) FROM %s' % self._table_name(schema,
table))
return c.fetchone()[0]
def get_table_fields(self, table, schema=None):
"""Return list of columns in table"""
c = self.con.cursor()
schema_where = (" AND nspname='%s' "
% self._quote_unicode(schema) if schema is not None else ''
)
sql = """SELECT a.attnum AS ordinal_position,
a.attname AS column_name,
t.typname AS data_type,
a.attlen AS char_max_len,
a.atttypmod AS modifier,
a.attnotnull AS notnull,
a.atthasdef AS hasdefault,
adef.adsrc AS default_value
FROM pg_class c
JOIN pg_attribute a ON a.attrelid = c.oid
JOIN pg_type t ON a.atttypid = t.oid
JOIN pg_namespace nsp ON c.relnamespace = nsp.oid
LEFT JOIN pg_attrdef adef ON adef.adrelid = a.attrelid
AND adef.adnum = a.attnum
WHERE
c.relname = '%s' %s AND
a.attnum > 0
ORDER BY a.attnum""" \
% (self._quote_unicode(table), schema_where)
self._exec_sql(c, sql)
attrs = []
for row in c.fetchall():
attrs.append(TableAttribute(row))
return attrs
def get_table_indexes(self, table, schema=None):
"""Get info about table's indexes. ignore primary key and unique
index, they get listed in constraints.
"""
c = self.con.cursor()
schema_where = (" AND nspname='%s' "
% self._quote_unicode(schema) if schema is not None else ''
)
sql = """SELECT relname, indkey
FROM pg_class, pg_index
WHERE pg_class.oid = pg_index.indexrelid AND pg_class.oid IN (
SELECT indexrelid
FROM pg_index, pg_class
JOIN pg_namespace nsp ON pg_class.relnamespace = nsp.oid
WHERE pg_class.relname='%s' %s AND
pg_class.oid=pg_index.indrelid
AND indisunique != 't' AND indisprimary != 't' )""" \
% (self._quote_unicode(table), schema_where)
self._exec_sql(c, sql)
indexes = []
for row in c.fetchall():
indexes.append(TableIndex(row))
return indexes
def get_table_constraints(self, table, schema=None):
c = self.con.cursor()
schema_where = (" AND nspname='%s' "
% self._quote_unicode(schema) if schema is not None else ''
)
sql = """SELECT c.conname, c.contype, c.condeferrable, c.condeferred,
array_to_string(c.conkey, ' '), c.consrc, t2.relname,
c.confupdtype, c.confdeltype, c.confmatchtype,
array_to_string(c.confkey, ' ')
FROM pg_constraint c
LEFT JOIN pg_class t ON c.conrelid = t.oid
LEFT JOIN pg_class t2 ON c.confrelid = t2.oid
JOIN pg_namespace nsp ON t.relnamespace = nsp.oid
WHERE t.relname = '%s' %s """ \
% (self._quote_unicode(table), schema_where)
self._exec_sql(c, sql)
constrs = []
for row in c.fetchall():
constrs.append(TableConstraint(row))
return constrs
def get_view_definition(self, view, schema=None):
"""Returns definition of the view."""
schema_where = (" AND nspname='%s' "
% self._quote_unicode(schema) if schema is not None else ''
)
sql = """SELECT pg_get_viewdef(c.oid)
FROM pg_class c
JOIN pg_namespace nsp ON c.relnamespace = nsp.oid
WHERE relname='%s' %s AND relkind IN ('v','m')""" \
% (self._quote_unicode(view), schema_where)
c = self.con.cursor()
self._exec_sql(c, sql)
return c.fetchone()[0]
def add_geometry_column(self, table, geom_type, schema=None,
geom_column='the_geom', srid=-1, dim=2):
# Use schema if explicitly specified
if schema:
schema_part = "'%s', " % self._quote_unicode(schema)
else:
schema_part = ''
sql = "SELECT AddGeometryColumn(%s'%s', '%s', %d, '%s', %d)" % (
schema_part,
self._quote_unicode(table),
self._quote_unicode(geom_column),
srid,
self._quote_unicode(geom_type),
dim,
)
self._exec_sql_and_commit(sql)
def delete_geometry_column(self, table, geom_column, schema=None):
"""Use PostGIS function to delete geometry column correctly."""
if schema:
schema_part = "'%s', " % self._quote_unicode(schema)
else:
schema_part = ''
sql = "SELECT DropGeometryColumn(%s'%s', '%s')" % (schema_part,
self._quote_unicode(table), self._quote_unicode(geom_column))
self._exec_sql_and_commit(sql)
def delete_geometry_table(self, table, schema=None):
"""Delete table with one or more geometries using PostGIS function."""
if schema:
schema_part = "'%s', " % self._quote_unicode(schema)
else:
schema_part = ''
sql = "SELECT DropGeometryTable(%s'%s')" % (schema_part,
self._quote_unicode(table))
self._exec_sql_and_commit(sql)
def create_table(self, table, fields, pkey=None, schema=None):
"""Create ordinary table.
'fields' is array containing instances of TableField
'pkey' contains name of column to be used as primary key
"""
if len(fields) == 0:
return False
table_name = self._table_name(schema, table)
sql = 'CREATE TABLE %s (%s' % (table_name, fields[0].field_def())
for field in fields[1:]:
sql += ', %s' % field.field_def()
if pkey:
sql += ', PRIMARY KEY (%s)' % self._quote(pkey)
sql += ')'
self._exec_sql_and_commit(sql)
return True
def delete_table(self, table, schema=None):
"""Delete table from the database."""
table_name = self._table_name(schema, table)
sql = 'DROP TABLE %s' % table_name
self._exec_sql_and_commit(sql)
def empty_table(self, table, schema=None):
"""Delete all rows from table."""
table_name = self._table_name(schema, table)
sql = 'DELETE FROM %s' % table_name
self._exec_sql_and_commit(sql)
def rename_table(self, table, new_table, schema=None):
"""Rename a table in database."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s RENAME TO %s' % (table_name,
self._quote(new_table))
self._exec_sql_and_commit(sql)
# Update geometry_columns if PostGIS is enabled
if self.has_postgis:
sql = "UPDATE geometry_columns SET f_table_name='%s' \
WHERE f_table_name='%s'" \
% (self._quote_unicode(new_table), self._quote_unicode(table))
if schema is not None:
sql += " AND f_table_schema='%s'" % self._quote_unicode(schema)
self._exec_sql_and_commit(sql)
def create_view(self, name, query, schema=None):
view_name = self._table_name(schema, name)
sql = 'CREATE VIEW %s AS %s' % (view_name, query)
self._exec_sql_and_commit(sql)
def delete_view(self, name, schema=None):
view_name = self._table_name(schema, name)
sql = 'DROP VIEW %s' % view_name
self._exec_sql_and_commit(sql)
def rename_view(self, name, new_name, schema=None):
"""Rename view in database."""
self.rename_table(name, new_name, schema)
def create_schema(self, schema):
"""Create a new empty schema in database."""
sql = 'CREATE SCHEMA %s' % self._quote(schema)
self._exec_sql_and_commit(sql)
def delete_schema(self, schema):
"""Drop (empty) schema from database."""
sql = 'DROP SCHEMA %s' % self._quote(schema)
self._exec_sql_and_commit(sql)
def rename_schema(self, schema, new_schema):
"""Rename a schema in database."""
sql = 'ALTER SCHEMA %s RENAME TO %s' % (self._quote(schema),
self._quote(new_schema))
self._exec_sql_and_commit(sql)
# Update geometry_columns if PostGIS is enabled
if self.has_postgis:
sql = \
"UPDATE geometry_columns SET f_table_schema='%s' \
WHERE f_table_schema='%s'" \
% (self._quote_unicode(new_schema), self._quote_unicode(schema))
self._exec_sql_and_commit(sql)
def table_add_column(self, table, field, schema=None):
"""Add a column to table (passed as TableField instance)."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s ADD %s' % (table_name, field.field_def())
self._exec_sql_and_commit(sql)
def table_delete_column(self, table, field, schema=None):
"""Delete column from a table."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s DROP %s' % (table_name, self._quote(field))
self._exec_sql_and_commit(sql)
def table_column_rename(self, table, name, new_name, schema=None):
"""Rename column in a table."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s RENAME %s TO %s' % (table_name,
self._quote(name), self._quote(new_name))
self._exec_sql_and_commit(sql)
# Update geometry_columns if PostGIS is enabled
if self.has_postgis:
sql = "UPDATE geometry_columns SET f_geometry_column='%s' \
WHERE f_geometry_column='%s' AND f_table_name='%s'" \
% (self._quote_unicode(new_name), self._quote_unicode(name),
self._quote_unicode(table))
if schema is not None:
sql += " AND f_table_schema='%s'" % self._quote(schema)
self._exec_sql_and_commit(sql)
def table_column_set_type(self, table, column, data_type, schema=None):
"""Change column type."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s ALTER %s TYPE %s' % (table_name,
self._quote(column), data_type)
self._exec_sql_and_commit(sql)
def table_column_set_default(self, table, column, default, schema=None):
"""Change column's default value.
If default=None drop default value.
"""
table_name = self._table_name(schema, table)
if default:
sql = 'ALTER TABLE %s ALTER %s SET DEFAULT %s' % (table_name,
self._quote(column), default)
else:
sql = 'ALTER TABLE %s ALTER %s DROP DEFAULT' % (table_name,
self._quote(column))
self._exec_sql_and_commit(sql)
def table_column_set_null(self, table, column, is_null, schema=None):
"""Change whether column can contain null values."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s ALTER %s ' % (table_name, self._quote(column))
if is_null:
sql += 'DROP NOT NULL'
else:
sql += 'SET NOT NULL'
self._exec_sql_and_commit(sql)
def table_add_primary_key(self, table, column, schema=None):
"""Add a primery key (with one column) to a table."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s ADD PRIMARY KEY (%s)' % (table_name,
self._quote(column))
self._exec_sql_and_commit(sql)
def table_add_unique_constraint(self, table, column, schema=None):
"""Add a unique constraint to a table."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s ADD UNIQUE (%s)' % (table_name,
self._quote(column))
self._exec_sql_and_commit(sql)
def table_delete_constraint(self, table, constraint, schema=None):
"""Delete constraint in a table."""
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s DROP CONSTRAINT %s' % (table_name,
self._quote(constraint))
self._exec_sql_and_commit(sql)
def table_move_to_schema(self, table, new_schema, schema=None):
if new_schema == schema:
return
table_name = self._table_name(schema, table)
sql = 'ALTER TABLE %s SET SCHEMA %s' % (table_name,
self._quote(new_schema))
self._exec_sql_and_commit(sql)
# Update geometry_columns if PostGIS is enabled
if self.has_postgis:
sql = "UPDATE geometry_columns SET f_table_schema='%s' \
WHERE f_table_name='%s'" \
% (self._quote_unicode(new_schema), self._quote_unicode(table))
if schema is not None:
sql += " AND f_table_schema='%s'" % self._quote_unicode(schema)
self._exec_sql_and_commit(sql)
def create_index(self, table, name, column, schema=None):
"""Create index on one column using default options."""
table_name = self._table_name(schema, table)
idx_name = self._quote(name)
sql = 'CREATE INDEX %s ON %s (%s)' % (idx_name, table_name,
self._quote(column))
self._exec_sql_and_commit(sql)
def create_spatial_index(self, table, schema=None, geom_column='the_geom'):
table_name = self._table_name(schema, table)
idx_name = self._quote(u"sidx_%s_%s" % (table, geom_column))
sql = 'CREATE INDEX %s ON %s USING GIST(%s)' % (idx_name, table_name,
self._quote(geom_column))
self._exec_sql_and_commit(sql)
def delete_index(self, name, schema=None):
index_name = self._table_name(schema, name)
sql = 'DROP INDEX %s' % index_name
self._exec_sql_and_commit(sql)
def get_database_privileges(self):
"""DB privileges: (can create schemas, can create temp. tables).
"""
sql = "SELECT has_database_privilege('%(d)s', 'CREATE'), \
has_database_privilege('%(d)s', 'TEMP')" \
% {'d': self._quote_unicode(self.uri.database())}
c = self.con.cursor()
self._exec_sql(c, sql)
return c.fetchone()
def get_schema_privileges(self, schema):
"""Schema privileges: (can create new objects, can access objects
in schema)."""
sql = "SELECT has_schema_privilege('%(s)s', 'CREATE'), \
has_schema_privilege('%(s)s', 'USAGE')" \
% {'s': self._quote_unicode(schema)}
c = self.con.cursor()
self._exec_sql(c, sql)
return c.fetchone()
def get_table_privileges(self, table, schema=None):
"""Table privileges: (select, insert, update, delete).
"""
t = self._table_name(schema, table)
sql = """SELECT has_table_privilege('%(t)s', 'SELECT'),
has_table_privilege('%(t)s', 'INSERT'),
has_table_privilege('%(t)s', 'UPDATE'),
has_table_privilege('%(t)s', 'DELETE')""" \
% {'t': self._quote_unicode(t)}
c = self.con.cursor()
self._exec_sql(c, sql)
return c.fetchone()
def vacuum_analyze(self, table, schema=None):
"""Run VACUUM ANALYZE on a table."""
t = self._table_name(schema, table)
# VACUUM ANALYZE must be run outside transaction block - we
# have to change isolation level
self.con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
c = self.con.cursor()
self._exec_sql(c, 'VACUUM ANALYZE %s' % t)
self.con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
def sr_info_for_srid(self, srid):
if not self.has_postgis:
return 'Unknown'
try:
c = self.con.cursor()
self._exec_sql(c,
"SELECT srtext FROM spatial_ref_sys WHERE srid = '%d'"
% srid)
srtext = c.fetchone()[0]
# Try to extract just SR name (should be quoted in double
# quotes)
x = re.search('"([^"]+)"', srtext)
if x is not None:
srtext = x.group()
return srtext
        except QgsProcessingException:
return 'Unknown'
def insert_table_row(self, table, values, schema=None, cursor=None):
"""Insert a row with specified values to a table.
If a cursor is specified, it doesn't commit (expecting that
there will be more inserts) otherwise it commits immediately.
"""
t = self._table_name(schema, table)
sql = ''
for value in values:
# TODO: quote values?
if sql:
sql += ', '
sql += value
sql = 'INSERT INTO %s VALUES (%s)' % (t, sql)
if cursor:
self._exec_sql(cursor, sql)
else:
self._exec_sql_and_commit(sql)
def _exec_sql(self, cursor, sql):
try:
cursor.execute(sql)
except psycopg2.Error as e:
raise QgsProcessingException(str(e) + ' QUERY: ' +
e.cursor.query.decode(e.cursor.connection.encoding))
def _exec_sql_and_commit(self, sql):
"""Tries to execute and commit some action, on error it rolls
back the change.
"""
try:
c = self.con.cursor()
self._exec_sql(c, sql)
self.con.commit()
        except QgsProcessingException:
self.con.rollback()
raise
def _quote(self, identifier):
"""Quote identifier if needed."""
# Make sure it's python unicode string
identifier = str(identifier)
# Is it needed to quote the identifier?
if self.re_ident_ok.match(identifier) is not None:
return identifier
# It's needed - let's quote it (and double the double-quotes)
return u'"%s"' % identifier.replace('"', '""')
def _quote_unicode(self, txt):
"""Make the string safe - replace ' with ''.
"""
# make sure it's python unicode string
txt = str(txt)
return txt.replace("'", "''")
def _table_name(self, schema, table):
if not schema:
return self._quote(table)
else:
return u'%s.%s' % (self._quote(schema), self._quote(table))
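    # For example: _table_name('public', 'my table') returns
    # 'public."my table"' (the space forces quoting), while
    # _table_name(None, 'roads') returns just 'roads'.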
# For debugging / testing
if __name__ == '__main__':
    db = GeoDB(host='localhost', dbname='gis', user='gisak', passwd='g')
    print(db.list_schemas())
    print('==========')
    for row in db.list_geotables():
        print(row)
    print('==========')
    for row in db.get_table_indexes('trencin'):
        print(row)
    print('==========')
    for row in db.get_table_constraints('trencin'):
        print(row)
    print('==========')
    print(db.get_table_rows('trencin'))
    # for fld in db.get_table_metadata('trencin'):
    #     print fld
    # try:
    #     db.create_table('trrrr', [('id','serial'), ('test','text')])
    # except DbError, e:
    #     print unicode(e), e.query
|
gpagliuca/pyfas
|
refs/heads/master
|
build/lib/pyfas/test/test_tpl.py
|
2
|
import os
import sys
import pytest
import xlrd
import tempfile
from pyfas import Tpl
TEST_FLD = os.getcwd() + os.sep + "test_files" + os.sep
def test_not_a_tpl():
    with pytest.raises(ValueError) as exinfo:
tpl = Tpl(TEST_FLD+"/FC1_rev01.ppl")
assert exinfo.value.message == "not a tpl file"
def test_init_same_folder():
tpl = Tpl("tpl_file.tpl")
assert tpl.path == ''
assert tpl.fname == "tpl_file.tpl"
def test_init():
tpl = Tpl(TEST_FLD+"FC1_rev01.tpl")
assert tpl.fname == "FC1_rev01.tpl"
assert tpl.path == TEST_FLD[:-1]
def test_attributes():
tpl = Tpl(TEST_FLD+"/FC1_rev01.tpl")
assert tpl._attributes['CATALOG'] == 331
assert tpl._attributes['data_idx'] == 421
assert 'VOLGB' in tpl.trends[1]
def test_extraction_preprocessor():
tpl = Tpl(TEST_FLD+"/2016_1_Legacy.tpl")
tpl.extract(4)
assert tpl.data[4][0] == 487.87419999999997
assert 'OILC' in tpl.label[4]
def test_extraction():
tpl = Tpl(TEST_FLD+"/FC1_rev01.tpl")
tpl.extract(3)
assert tpl.data[3][0] == 9.973410e6
assert 'Pressure' in tpl.label[3]
def test_multiple_extraction():
tpl = Tpl(TEST_FLD+"/FC1_rev01.tpl")
tpl.extract(3, 4, 5)
assert tpl.data[3][0] == 9.973410e6
assert 'Pressure' in tpl.label[3]
assert tpl.data[4][0] == 1.291370e1
assert 'temperature' in tpl.label[4]
assert tpl.data[5][0] == 1.00000000
assert 'Holdup' in tpl.label[5]
def test_filter():
tpl = Tpl(TEST_FLD+"/FC1_rev01.tpl")
PTs = tpl.filter_trends('PT')
assert 'PT' in PTs[3]
assert 'POSITION' in PTs[3]
assert 'TIEIN' in PTs[3]
tpl.trends
assert 'VOLGB' in tpl.trends[1]
def test_to_excel():
tpl = Tpl(TEST_FLD+"/FC1_rev01.tpl")
tpl.to_excel()
assert "FC1_rev01_tpl.xlsx" in os.listdir(TEST_FLD)
xl = xlrd.open_workbook(TEST_FLD+"/FC1_rev01_tpl.xlsx")
sh = xl.sheet_by_index(0)
assert sh.cell_value(3, 4) == 9.973300e+06
os.remove(TEST_FLD+"/FC1_rev01_tpl.xlsx")
temp_folder = tempfile.gettempdir()
tpl.to_excel(temp_folder)
assert "FC1_rev01_tpl.xlsx" in os.listdir(temp_folder)
os.remove(temp_folder+"/FC1_rev01_tpl.xlsx")
def test_view_trends():
tpl = Tpl(TEST_FLD+"/FC1_rev01.tpl")
df = tpl.view_trends()
df = tpl.view_trends('HOL')
assert df['Index'][4] == 36
assert df['Position'][4] == 'POSITION - VENT_LINE'
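def test_workflow_sketch():
    # A hedged end-to-end sketch of the API exercised above, assuming
    # filter_trends() returns an index->label mapping as the assertions
    # in test_filter suggest: select trends by tag, extract them, and
    # check that the data is populated.
    tpl = Tpl(TEST_FLD + "/FC1_rev01.tpl")
    targets = tpl.filter_trends('PT')
    tpl.extract(*targets.keys())
    assert all(idx in tpl.data for idx in targets)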
|
liamgh/liamgreenhughes-sl4a-tf101
|
refs/heads/master
|
python/gdata/src/gdata/youtube/service.py
|
141
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YouTubeService extends GDataService to streamline YouTube operations.
YouTubeService: Provides methods to perform CRUD operations on YouTube feeds.
Extends GDataService.
"""
__author__ = ('api.stephaniel@gmail.com (Stephanie Liu), '
'api.jhartmann@gmail.com (Jochen Hartmann)')
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import os
import atom
import gdata
import gdata.service
import gdata.youtube
YOUTUBE_SERVER = 'gdata.youtube.com'
YOUTUBE_SERVICE = 'youtube'
YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin'
YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime',
'flv')
YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month',
'all_time')
YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating',
'relevance')
YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude')
YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6')
YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured',
                         'top_rated', 'most_viewed', 'watch_on_mobile')
YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users'
YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken'
YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos'
YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users'
YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists'
YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds'
YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated')
YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_viewed')
YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'recently_featured')
YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'watch_on_mobile')
YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'top_favorites')
YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_recent')
YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_discussed')
YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_linked')
YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_responded')
YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas'
YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA
YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA,
'complaint-reasons.cat')
YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA,
'subscriptiontypes.cat')
YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS',
'RIGHTS', 'SPAM')
YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected')
YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family')
UNKOWN_ERROR = 1000
YOUTUBE_BAD_REQUEST = 400
YOUTUBE_CONFLICT = 409
YOUTUBE_INTERNAL_SERVER_ERROR = 500
YOUTUBE_INVALID_ARGUMENT = 601
YOUTUBE_INVALID_CONTENT_TYPE = 602
YOUTUBE_NOT_A_VIDEO = 603
YOUTUBE_INVALID_KIND = 604
class Error(Exception):
"""Base class for errors within the YouTube service."""
pass
class RequestError(Error):
"""Error class that is thrown in response to an invalid HTTP Request."""
pass
class YouTubeError(Error):
"""YouTube service specific error class."""
pass
class YouTubeService(gdata.service.GDataService):
"""Client for the YouTube service.
Performs all documented Google Data YouTube API functions, such as inserting,
  updating and deleting videos, comments, playlists, subscriptions, etc.
YouTube Service requires authentication for any write, update or delete
actions.
Attributes:
email: An optional string identifying the user. Required only for
authenticated actions.
password: An optional string identifying the user's password.
source: An optional string identifying the name of your application.
server: An optional address of the YouTube API server. gdata.youtube.com
is provided as the default value.
additional_headers: An optional dictionary containing additional headers
to be passed along with each request. Use to store developer key.
client_id: An optional string identifying your application, required for
authenticated requests, along with a developer key.
developer_key: An optional string value. Register your application at
http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
"""
def __init__(self, email=None, password=None, source=None,
server=YOUTUBE_SERVER, additional_headers=None, client_id=None,
developer_key=None, **kwargs):
"""Creates a client for the YouTube service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'gdata.youtube.com'.
client_id: string (optional) Identifies your application, required for
authenticated requests, along with a developer key.
developer_key: string (optional) Register your application at
http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
if developer_key and not client_id:
raise YouTubeError('You must also specify the clientId')
gdata.service.GDataService.__init__(
self, email=email, password=password, service=YOUTUBE_SERVICE,
source=source, server=server, additional_headers=additional_headers,
**kwargs)
if client_id is not None and developer_key is not None:
self.additional_headers['X-Gdata-Client'] = client_id
self.additional_headers['X-GData-Key'] = 'key=%s' % developer_key
self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL
def GetYouTubeVideoFeed(self, uri):
"""Retrieve a YouTubeVideoFeed.
Args:
uri: A string representing the URI of the feed that is to be retrieved.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString)
def GetYouTubeVideoEntry(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoEntry.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the entry that is to
be retrieved.
video_id: An optional string representing the ID of the video.
Returns:
A YouTubeVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoEntry() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoEntry() method')
elif video_id and not uri:
uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id)
return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString)
def GetYouTubeContactFeed(self, uri=None, username='default'):
"""Retrieve a YouTubeContactFeed.
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the contact feed that
is to be retrieved.
username: An optional string representing the username. Defaults to the
currently authenticated user.
Returns:
A YouTubeContactFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeContactFeed() method.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts')
return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString)
def GetYouTubeContactEntry(self, uri):
"""Retrieve a YouTubeContactEntry.
Args:
uri: A string representing the URI of the contact entry that is to
be retrieved.
Returns:
A YouTubeContactEntry if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString)
def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoCommentFeed.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the comment feed that
is to be retrieved.
video_id: An optional string representing the ID of the video for which
to retrieve the comment feed.
Returns:
A YouTubeVideoCommentFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoCommentFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoCommentFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString)
def GetYouTubeVideoCommentEntry(self, uri):
"""Retrieve a YouTubeVideoCommentEntry.
Args:
uri: A string representing the URI of the comment entry that is to
be retrieved.
Returns:
A YouTubeCommentEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString)
def GetYouTubeUserFeed(self, uri=None, username=None):
"""Retrieve a YouTubeVideoFeed of user uploaded videos
Either a uri or a username must be provided. This will retrieve list
of videos uploaded by specified user. The uri will be of format
"http://gdata.youtube.com/feeds/api/users/{username}/uploads".
Args:
uri: An optional string representing the URI of the user feed that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserFeed() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserFeed() method')
elif username and not uri:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads')
return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString)
def GetYouTubeUserEntry(self, uri=None, username=None):
"""Retrieve a YouTubeUserEntry.
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the user entry that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserEntry if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserEntry() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserEntry() method')
elif username and not uri:
uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username)
return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString)
def GetYouTubePlaylistFeed(self, uri=None, username='default'):
"""Retrieve a YouTubePlaylistFeed (a feed of playlists for a user).
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the playlist feed that
is to be retrieved.
username: An optional string representing the username. Defaults to the
currently authenticated user.
Returns:
A YouTubePlaylistFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubePlaylistFeed() method.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists')
return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString)
def GetYouTubePlaylistEntry(self, uri):
"""Retrieve a YouTubePlaylistEntry.
Args:
uri: A string representing the URI of the playlist feed that is to
be retrieved.
Returns:
A YouTubePlaylistEntry if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString)
def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None):
"""Retrieve a YouTubePlaylistVideoFeed (a feed of videos on a playlist).
Either a uri or a playlist_id must be provided.
Args:
uri: An optional string representing the URI of the playlist video feed
that is to be retrieved.
playlist_id: An optional string representing the Id of the playlist whose
playlist video feed is to be retrieved.
Returns:
A YouTubePlaylistVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a playlist_id to the
GetYouTubePlaylistVideoFeed() method.
"""
if uri is None and playlist_id is None:
raise YouTubeError('You must provide at least a uri or a playlist_id '
'to the GetYouTubePlaylistVideoFeed() method')
elif playlist_id and not uri:
uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id)
return self.Get(
uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString)
def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoResponseFeed.
    Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the video response feed
that is to be retrieved.
video_id: An optional string representing the ID of the video whose
response feed is to be retrieved.
Returns:
A YouTubeVideoResponseFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoResponseFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoResponseFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString)
def GetYouTubeVideoResponseEntry(self, uri):
"""Retrieve a YouTubeVideoResponseEntry.
Args:
uri: A string representing the URI of the video response entry that
is to be retrieved.
Returns:
A YouTubeVideoResponseEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoResponseEntryFromString)
def GetYouTubeSubscriptionFeed(self, uri=None, username='default'):
"""Retrieve a YouTubeSubscriptionFeed.
Either the uri of the feed or a username must be provided.
Args:
uri: An optional string representing the URI of the feed that is to
be retrieved.
username: An optional string representing the username whose subscription
        feed is to be retrieved. Defaults to the currently authenticated user.
Returns:
A YouTubeVideoSubscriptionFeed if successfully retrieved.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'subscriptions')
return self.Get(
uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString)
def GetYouTubeSubscriptionEntry(self, uri):
"""Retrieve a YouTubeSubscriptionEntry.
Args:
uri: A string representing the URI of the entry that is to be retrieved.
Returns:
A YouTubeVideoSubscriptionEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeRelatedVideoFeed.
Either a uri for the feed or a video_id is required.
Args:
uri: An optional string representing the URI of the feed that is to
be retrieved.
video_id: An optional string representing the ID of the video for which
to retrieve the related video feed.
Returns:
A YouTubeRelatedVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeRelatedVideoFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeRelatedVideoFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoFeedFromString)
def GetTopRatedVideoFeed(self):
"""Retrieve the 'top_rated' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_RATED_URI)
def GetMostViewedVideoFeed(self):
"""Retrieve the 'most_viewed' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_VIEWED_URI)
def GetRecentlyFeaturedVideoFeed(self):
"""Retrieve the 'recently_featured' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_RECENTLY_FEATURED_URI)
def GetWatchOnMobileVideoFeed(self):
"""Retrieve the 'watch_on_mobile' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI)
def GetTopFavoritesVideoFeed(self):
"""Retrieve the 'top_favorites' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_FAVORITES_URI)
def GetMostRecentVideoFeed(self):
"""Retrieve the 'most_recent' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RECENT_URI)
def GetMostDiscussedVideoFeed(self):
"""Retrieve the 'most_discussed' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_DISCUSSED_URI)
def GetMostLinkedVideoFeed(self):
"""Retrieve the 'most_linked' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_LINKED_URI)
def GetMostRespondedVideoFeed(self):
"""Retrieve the 'most_responded' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RESPONDED_URI)
def GetUserFavoritesFeed(self, username='default'):
"""Retrieve the favorites feed for a given user.
Args:
username: An optional string representing the username whose favorites
feed is to be retrieved. Defaults to the currently authenticated user.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
favorites_feed_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username,
'favorites')
return self.GetYouTubeVideoFeed(favorites_feed_uri)
def InsertVideoEntry(self, video_entry, filename_or_handle,
youtube_username='default',
content_type='video/quicktime'):
"""Upload a new video to YouTube using the direct upload mechanism.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to upload.
filename_or_handle: A file-like object or file name where the video
will be read from.
youtube_username: An optional string representing the username into whose
        account this video is to be uploaded. Defaults to the currently
authenticated user.
content_type: An optional string representing internet media type
(a.k.a. mime type) of the media object. Currently the YouTube API
supports these types:
o video/mpeg
o video/quicktime
o video/x-msvideo
o video/mp4
o video/x-flv
Returns:
The newly created YouTubeVideoEntry if successful.
Raises:
AssertionError: video_entry must be a gdata.youtube.VideoEntry instance.
YouTubeError: An error occurred trying to read the video file provided.
gdata.service.RequestError: An error occurred trying to upload the video
to the API server.
"""
# We need to perform a series of checks on the video_entry and on the
# file that we plan to upload, such as checking whether we have a valid
# video_entry and that the file is the correct type and readable, prior
# to performing the actual POST request.
try:
assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry))
except AssertionError:
raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT,
'body':'`video_entry` must be a gdata.youtube.VideoEntry instance',
'reason':'Found %s, not VideoEntry' % type(video_entry)
})
majtype, mintype = content_type.split('/')
try:
assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' %
['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]})
if (isinstance(filename_or_handle, (str, unicode))
and os.path.exists(filename_or_handle)):
mediasource = gdata.MediaSource()
mediasource.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):
import StringIO
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0)
file_handle = StringIO.StringIO(filename_or_handle.read())
name = 'video'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
mediasource = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else:
raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body':
'`filename_or_handle` must be a path name or a file-like object',
'reason': ('Found %s, not path name or object '
'with a .read() method' % type(filename_or_handle))})
upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username,
'uploads')
self.additional_headers['Slug'] = mediasource.file_name
# Using a nested try statement to retain Python 2.4 compatibility
try:
try:
return self.Post(video_entry, uri=upload_uri, media_source=mediasource,
converter=gdata.youtube.YouTubeVideoEntryFromString)
except gdata.service.RequestError, e:
raise YouTubeError(e.args[0])
finally:
del(self.additional_headers['Slug'])
def CheckUploadStatus(self, video_entry=None, video_id=None):
"""Check upload status on a recently uploaded video entry.
Needs authentication. Either video_entry or video_id must be provided.
Args:
video_entry: An optional YouTubeVideoEntry whose upload status to check
video_id: An optional string representing the ID of the uploaded video
whose status is to be checked.
Returns:
A tuple containing (video_upload_state, detailed_message) or None if
no status information is found.
Raises:
YouTubeError: You must provide at least a video_entry or a video_id to the
CheckUploadStatus() method.
"""
if video_entry is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the CheckUploadStatus() method')
elif video_id and not video_entry:
video_entry = self.GetYouTubeVideoEntry(video_id=video_id)
control = video_entry.control
if control is not None:
draft = control.draft
if draft is not None:
if draft.text == 'yes':
yt_state = control.extension_elements[0]
if yt_state is not None:
state_value = yt_state.attributes['name']
message = ''
if yt_state.text is not None:
message = yt_state.text
return (state_value, message)
def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI):
"""Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to upload (meta-data only).
uri: An optional string representing the URI from where to fetch the
        token information. Defaults to YOUTUBE_UPLOAD_TOKEN_URI.
Returns:
A tuple containing the URL to which to post your video file, along
with the youtube token that must be included with your upload in the
form of: (post_url, youtube_token).
"""
try:
response = self.Post(video_entry, uri)
except gdata.service.RequestError, e:
raise YouTubeError(e.args[0])
tree = ElementTree.fromstring(response)
for child in tree:
if child.tag == 'url':
post_url = child.text
elif child.tag == 'token':
youtube_token = child.text
return (post_url, youtube_token)
def UpdateVideoEntry(self, video_entry):
"""Updates a video entry's meta-data.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to update, containing updated
meta-data.
Returns:
An updated YouTubeVideoEntry on success or None.
"""
for link in video_entry.link:
if link.rel == 'edit':
edit_uri = link.href
return self.Put(video_entry, uri=edit_uri,
converter=gdata.youtube.YouTubeVideoEntryFromString)
def DeleteVideoEntry(self, video_entry):
"""Deletes a video entry.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to be deleted.
Returns:
True if entry was deleted successfully.
"""
for link in video_entry.link:
if link.rel == 'edit':
edit_uri = link.href
return self.Delete(edit_uri)
def AddRating(self, rating_value, video_entry):
"""Add a rating to a video entry.
Needs authentication.
Args:
rating_value: The integer value for the rating (between 1 and 5).
video_entry: The YouTubeVideoEntry to be rated.
Returns:
True if the rating was added successfully.
Raises:
YouTubeError: rating_value must be between 1 and 5 in AddRating().
"""
if rating_value < 1 or rating_value > 5:
raise YouTubeError('rating_value must be between 1 and 5 in AddRating()')
entry = gdata.GDataEntry()
rating = gdata.youtube.Rating(min='1', max='5')
rating.extension_attributes['name'] = 'value'
rating.extension_attributes['value'] = str(rating_value)
entry.extension_elements.append(rating)
for link in video_entry.link:
if link.rel == YOUTUBE_RATING_LINK_REL:
rating_uri = link.href
return self.Post(entry, uri=rating_uri)
def AddComment(self, comment_text, video_entry):
"""Add a comment to a video entry.
Needs authentication. Note that each comment that is posted must contain
the video entry that it is to be posted to.
Args:
comment_text: A string representing the text of the comment.
video_entry: The YouTubeVideoEntry to be commented on.
Returns:
True if the comment was added successfully.
"""
content = atom.Content(text=comment_text)
comment_entry = gdata.youtube.YouTubeVideoCommentEntry(content=content)
comment_post_uri = video_entry.comments.feed_link[0].href
return self.Post(comment_entry, uri=comment_post_uri)
def AddVideoResponse(self, video_id_to_respond_to, video_response):
"""Add a video response.
Needs authentication.
Args:
video_id_to_respond_to: A string representing the ID of the video to be
responded to.
video_response: YouTubeVideoEntry to be posted as a response.
Returns:
True if video response was posted successfully.
"""
post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to,
'responses')
return self.Post(video_response, uri=post_uri)
def DeleteVideoResponse(self, video_id, response_video_id):
"""Delete a video response.
Needs authentication.
Args:
video_id: A string representing the ID of video that contains the
response.
response_video_id: A string representing the ID of the video that was
posted as a response.
Returns:
      True if video response was deleted successfully.
"""
delete_uri = '%s/%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses',
response_video_id)
return self.Delete(delete_uri)
def AddComplaint(self, complaint_text, complaint_term, video_id):
"""Add a complaint for a particular video entry.
Needs authentication.
Args:
complaint_text: A string representing the complaint text.
complaint_term: A string representing the complaint category term.
video_id: A string representing the ID of YouTubeVideoEntry to
complain about.
Returns:
True if posted successfully.
Raises:
YouTubeError: Your complaint_term is not valid.
"""
if complaint_term not in YOUTUBE_COMPLAINT_CATEGORY_TERMS:
raise YouTubeError('Your complaint_term is not valid')
content = atom.Content(text=complaint_text)
category = atom.Category(term=complaint_term,
scheme=YOUTUBE_COMPLAINT_CATEGORY_SCHEME)
complaint_entry = gdata.GDataEntry(content=content, category=[category])
post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'complaints')
return self.Post(complaint_entry, post_uri)
def AddVideoEntryToFavorites(self, video_entry, username='default'):
"""Add a video entry to a users favorite feed.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to add.
username: An optional string representing the username to whose favorite
feed you wish to add the entry. Defaults to the currently
authenticated user.
Returns:
The posted YouTubeVideoEntry if successfully posted.
"""
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites')
return self.Post(video_entry, post_uri,
converter=gdata.youtube.YouTubeVideoEntryFromString)
def DeleteVideoEntryFromFavorites(self, video_id, username='default'):
"""Delete a video entry from the users favorite feed.
Needs authentication.
Args:
video_id: A string representing the ID of the video that is to be removed
username: An optional string representing the username of the user's
favorite feed. Defaults to the currently authenticated user.
Returns:
True if entry was successfully deleted.
"""
edit_link = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites',
video_id)
return self.Delete(edit_link)
def AddPlaylist(self, playlist_title, playlist_description,
playlist_private=None):
"""Add a new playlist to the currently authenticated users account.
Needs authentication.
Args:
playlist_title: A string representing the title for the new playlist.
playlist_description: A string representing the description of the
playlist.
playlist_private: An optional boolean, set to True if the playlist is
to be private.
Returns:
The YouTubePlaylistEntry if successfully posted.
"""
playlist_entry = gdata.youtube.YouTubePlaylistEntry(
title=atom.Title(text=playlist_title),
description=gdata.youtube.Description(text=playlist_description))
if playlist_private:
playlist_entry.private = gdata.youtube.Private()
playlist_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, 'default',
'playlists')
return self.Post(playlist_entry, playlist_post_uri,
converter=gdata.youtube.YouTubePlaylistEntryFromString)
def UpdatePlaylist(self, playlist_id, new_playlist_title,
new_playlist_description, playlist_private=None,
username='default'):
"""Update a playlist with new meta-data.
Needs authentication.
Args:
playlist_id: A string representing the ID of the playlist to be updated.
new_playlist_title: A string representing a new title for the playlist.
new_playlist_description: A string representing a new description for the
playlist.
playlist_private: An optional boolean, set to True if the playlist is
to be private.
username: An optional string representing the username whose playlist is
to be updated. Defaults to the currently authenticated user.
Returns:
A YouTubePlaylistEntry if the update was successful.
"""
updated_playlist = gdata.youtube.YouTubePlaylistEntry(
title=atom.Title(text=new_playlist_title),
description=gdata.youtube.Description(text=new_playlist_description))
if playlist_private:
updated_playlist.private = gdata.youtube.Private()
playlist_put_uri = '%s/%s/playlists/%s' % (YOUTUBE_USER_FEED_URI, username,
playlist_id)
return self.Put(updated_playlist, playlist_put_uri,
converter=gdata.youtube.YouTubePlaylistEntryFromString)
def DeletePlaylist(self, playlist_uri):
"""Delete a playlist from the currently authenticated users playlists.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist that is
to be deleted.
Returns:
True if successfully deleted.
"""
return self.Delete(playlist_uri)
def AddPlaylistVideoEntryToPlaylist(
self, playlist_uri, video_id, custom_video_title=None,
custom_video_description=None):
"""Add a video entry to a playlist, optionally providing a custom title
and description.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist to which this
video entry is to be added.
video_id: A string representing the ID of the video entry to add.
custom_video_title: An optional string representing a custom title for
the video (only shown on the playlist).
custom_video_description: An optional string representing a custom
description for the video (only shown on the playlist).
Returns:
A YouTubePlaylistVideoEntry if successfully posted.
"""
playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry(
atom_id=atom.Id(text=video_id))
if custom_video_title:
playlist_video_entry.title = atom.Title(text=custom_video_title)
if custom_video_description:
playlist_video_entry.description = gdata.youtube.Description(
text=custom_video_description)
return self.Post(playlist_video_entry, playlist_uri,
converter=gdata.youtube.YouTubePlaylistVideoEntryFromString)
def UpdatePlaylistVideoEntryMetaData(
self, playlist_uri, playlist_entry_id, new_video_title,
new_video_description, new_video_position):
"""Update the meta data for a YouTubePlaylistVideoEntry.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist that contains
the entry to be updated.
playlist_entry_id: A string representing the ID of the entry to be
updated.
new_video_title: A string representing the new title for the video entry.
new_video_description: A string representing the new description for
the video entry.
new_video_position: An integer representing the new position on the
playlist for the video.
Returns:
A YouTubePlaylistVideoEntry if the update was successful.
"""
playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry(
title=atom.Title(text=new_video_title),
description=gdata.youtube.Description(text=new_video_description),
position=gdata.youtube.Position(text=str(new_video_position)))
playlist_put_uri = playlist_uri + '/' + playlist_entry_id
return self.Put(playlist_video_entry, playlist_put_uri,
converter=gdata.youtube.YouTubePlaylistVideoEntryFromString)
def DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id):
"""Delete a playlist video entry from a playlist.
Needs authentication.
Args:
playlist_uri: A URI representing the playlist from which the playlist
        video entry is to be removed.
playlist_video_entry_id: A string representing id of the playlist video
entry that is to be removed.
Returns:
True if entry was successfully deleted.
"""
delete_uri = '%s/%s' % (playlist_uri, playlist_video_entry_id)
return self.Delete(delete_uri)
def AddSubscriptionToChannel(self, username_to_subscribe_to,
my_username = 'default'):
"""Add a new channel subscription to the currently authenticated users
account.
Needs authentication.
Args:
username_to_subscribe_to: A string representing the username of the
        channel to subscribe to.
my_username: An optional string representing the name of the user which
we want to subscribe. Defaults to currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successfully posted.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='channel')
subscription_username = gdata.youtube.Username(
text=username_to_subscribe_to)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
username=subscription_username)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def AddSubscriptionToFavorites(self, username, my_username = 'default'):
"""Add a new subscription to a users favorites to the currently
authenticated user's account.
Needs authentication
Args:
username: A string representing the username of the user's favorite feed
to subscribe to.
my_username: An optional string representing the username of the user
that is to be subscribed. Defaults to currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successful.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='favorites')
subscription_username = gdata.youtube.Username(text=username)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
username=subscription_username)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def AddSubscriptionToQuery(self, query, my_username = 'default'):
"""Add a new subscription to a specific keyword query to the currently
authenticated user's account.
    Needs authentication.
Args:
query: A string representing the keyword query to subscribe to.
my_username: An optional string representing the username of the user
that is to be subscribed. Defaults to currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successful.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='query')
subscription_query_string = gdata.youtube.QueryString(text=query)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
query_string=subscription_query_string)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def DeleteSubscription(self, subscription_uri):
"""Delete a subscription from the currently authenticated user's account.
Needs authentication.
Args:
subscription_uri: A string representing the URI of the subscription that
is to be deleted.
Returns:
True if deleted successfully.
"""
return self.Delete(subscription_uri)
def AddContact(self, contact_username, my_username='default'):
"""Add a new contact to the currently authenticated user's contact feed.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that you wish to add.
my_username: An optional string representing the username to whose
        contact feed the new contact is to be added.
Returns:
A YouTubeContactEntry if added successfully.
"""
contact_category = atom.Category(
scheme = 'http://gdata.youtube.com/schemas/2007/contact.cat',
term = 'Friends')
contact_username = gdata.youtube.Username(text=contact_username)
contact_entry = gdata.youtube.YouTubeContactEntry(
category=contact_category,
username=contact_username)
contact_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts')
return self.Post(contact_entry, contact_post_uri,
converter=gdata.youtube.YouTubeContactEntryFromString)
def UpdateContact(self, contact_username, new_contact_status,
new_contact_category, my_username='default'):
"""Update a contact, providing a new status and a new category.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that is to be updated.
new_contact_status: A string representing the new status of the contact.
This can either be set to 'accepted' or 'rejected'.
new_contact_category: A string representing the new category for the
contact, either 'Friends' or 'Family'.
my_username: An optional string representing the username of the user
whose contact feed we are modifying. Defaults to the currently
authenticated user.
Returns:
      A YouTubeContactEntry if updated successfully.
Raises:
YouTubeError: New contact status must be within the accepted values. Or
new contact category must be within the accepted categories.
"""
if new_contact_status not in YOUTUBE_CONTACT_STATUS:
raise YouTubeError('New contact status must be one of %s' %
(' '.join(YOUTUBE_CONTACT_STATUS)))
if new_contact_category not in YOUTUBE_CONTACT_CATEGORY:
raise YouTubeError('New contact category must be one of %s' %
(' '.join(YOUTUBE_CONTACT_CATEGORY)))
contact_category = atom.Category(
scheme='http://gdata.youtube.com/schemas/2007/contact.cat',
term=new_contact_category)
contact_status = gdata.youtube.Status(text=new_contact_status)
contact_entry = gdata.youtube.YouTubeContactEntry(
category=contact_category,
status=contact_status)
contact_put_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts', contact_username)
return self.Put(contact_entry, contact_put_uri,
converter=gdata.youtube.YouTubeContactEntryFromString)
def DeleteContact(self, contact_username, my_username='default'):
"""Delete a contact from a users contact feed.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that is to be deleted.
my_username: An optional string representing the username of the user's
contact feed from which to delete the contact. Defaults to the
currently authenticated user.
Returns:
      True if the contact was deleted successfully.
"""
contact_edit_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts', contact_username)
return self.Delete(contact_edit_uri)
def _GetDeveloperKey(self):
"""Getter for Developer Key property.
Returns:
If the developer key has been set, a string representing the developer key
is returned or None.
"""
if 'X-GData-Key' in self.additional_headers:
return self.additional_headers['X-GData-Key'][4:]
else:
return None
def _SetDeveloperKey(self, developer_key):
"""Setter for Developer Key property.
Sets the developer key in the 'X-GData-Key' header. The actual value that
is set is 'key=' plus the developer_key that was passed.
"""
self.additional_headers['X-GData-Key'] = 'key=' + developer_key
developer_key = property(_GetDeveloperKey, _SetDeveloperKey,
doc="""The Developer Key property""")
def _GetClientId(self):
"""Getter for Client Id property.
Returns:
If the client_id has been set, a string representing it is returned
or None.
"""
if 'X-Gdata-Client' in self.additional_headers:
return self.additional_headers['X-Gdata-Client']
else:
return None
def _SetClientId(self, client_id):
"""Setter for Client Id property.
Sets the 'X-Gdata-Client' header.
"""
self.additional_headers['X-Gdata-Client'] = client_id
client_id = property(_GetClientId, _SetClientId,
doc="""The ClientId property""")
def Query(self, uri):
"""Performs a query and returns a resulting feed or entry.
Args:
uri: A string representing the URI of the feed that is to be queried.
Returns:
On success, a tuple in the form:
(boolean succeeded=True, ElementTree._Element result)
On failure, a tuple in the form:
(boolean succeeded=False, {'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response})
"""
result = self.Get(uri)
return result
def YouTubeQuery(self, query):
"""Performs a YouTube specific query and returns a resulting feed or entry.
Args:
      query: A Query object or one of its sub-classes (YouTubeVideoQuery,
        YouTubeUserQuery or YouTubePlaylistQuery).
    Returns:
      Depending on the type of Query object submitted, returns either a
      YouTubeVideoFeed, a YouTubeUserFeed, or a YouTubePlaylistFeed. If the
      Query object provided was not YouTube-related, a tuple is returned.
On success the tuple will be in this form:
(boolean succeeded=True, ElementTree._Element result)
On failure, the tuple will be in this form:
(boolean succeeded=False, {'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server response})
"""
result = self.Query(query.ToUri())
if isinstance(query, YouTubeVideoQuery):
return gdata.youtube.YouTubeVideoFeedFromString(result.ToString())
elif isinstance(query, YouTubeUserQuery):
return gdata.youtube.YouTubeUserFeedFromString(result.ToString())
elif isinstance(query, YouTubePlaylistQuery):
return gdata.youtube.YouTubePlaylistFeedFromString(result.ToString())
else:
return result
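# A hedged usage sketch of the client above; the client_id and
# developer_key values are placeholders (a real key comes from the
# dashboard URL in the class docstring), and no network call is made
# until this function is invoked.
def _example_youtube_service_usage():
  yt_service = YouTubeService(client_id='example-client-id',
                              developer_key='EXAMPLE_DEVELOPER_KEY')
  # Unauthenticated read: fetch a standard feed and list entry titles.
  feed = yt_service.GetTopRatedVideoFeed()
  return [entry.title.text for entry in feed.entry]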
class YouTubeVideoQuery(gdata.service.Query):
"""Subclasses gdata.service.Query to represent a YouTube Data API query.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions. Please refer to the API documentation for details.
Attributes:
vq: The vq parameter, which is only supported for video feeds, specifies a
search query term. Refer to API documentation for further details.
orderby: The orderby parameter, which is only supported for video feeds,
specifies the value that will be used to sort videos in the search
result set. Valid values for this parameter are relevance, published,
viewCount and rating.
time: The time parameter, which is only available for the top_rated,
top_favorites, most_viewed, most_discussed, most_linked and
most_responded standard feeds, restricts the search to videos uploaded
within the specified time. Valid values for this parameter are today
(1 day), this_week (7 days), this_month (1 month) and all_time.
The default value for this parameter is all_time.
format: The format parameter specifies that videos must be available in a
particular video format. Refer to the API documentation for details.
racy: The racy parameter allows a search result set to include restricted
content as well as standard content. Valid values for this parameter
are include and exclude. By default, restricted content is excluded.
lr: The lr parameter restricts the search to videos that have a title,
description or keywords in a specific language. Valid values for the lr
parameter are ISO 639-1 two-letter language codes.
restriction: The restriction parameter identifies the IP address that
should be used to filter videos that can only be played in specific
countries.
location: A string of geo coordinates. Note that this is not used when the
search is performed but rather to filter the returned videos for ones
that match to the location entered.
"""
def __init__(self, video_id=None, feed_type=None, text_query=None,
params=None, categories=None):
if feed_type in YOUTUBE_STANDARDFEEDS:
feed = 'http://%s/feeds/standardfeeds/%s' % (YOUTUBE_SERVER, feed_type)
    elif feed_type in ('responses', 'comments') and video_id:
feed = 'http://%s/feeds/videos/%s/%s' % (YOUTUBE_SERVER, video_id,
feed_type)
else:
feed = 'http://%s/feeds/videos' % (YOUTUBE_SERVER)
gdata.service.Query.__init__(self, feed, text_query=text_query,
params=params, categories=categories)
def _GetVideoQuery(self):
if 'vq' in self:
return self['vq']
else:
return None
def _SetVideoQuery(self, val):
self['vq'] = val
vq = property(_GetVideoQuery, _SetVideoQuery,
doc="""The video query (vq) query parameter""")
def _GetOrderBy(self):
if 'orderby' in self:
return self['orderby']
else:
return None
def _SetOrderBy(self, val):
if val not in YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS:
      if not val.startswith('relevance_lang_'):
raise YouTubeError('OrderBy must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS))
self['orderby'] = val
orderby = property(_GetOrderBy, _SetOrderBy,
doc="""The orderby query parameter""")
def _GetTime(self):
if 'time' in self:
return self['time']
else:
return None
def _SetTime(self, val):
if val not in YOUTUBE_QUERY_VALID_TIME_PARAMETERS:
raise YouTubeError('Time must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_TIME_PARAMETERS))
self['time'] = val
time = property(_GetTime, _SetTime,
doc="""The time query parameter""")
def _GetFormat(self):
if 'format' in self:
return self['format']
else:
return None
def _SetFormat(self, val):
if val not in YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS:
raise YouTubeError('Format must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS))
self['format'] = val
format = property(_GetFormat, _SetFormat,
doc="""The format query parameter""")
def _GetRacy(self):
if 'racy' in self:
return self['racy']
else:
return None
def _SetRacy(self, val):
if val not in YOUTUBE_QUERY_VALID_RACY_PARAMETERS:
raise YouTubeError('Racy must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_RACY_PARAMETERS))
self['racy'] = val
racy = property(_GetRacy, _SetRacy,
doc="""The racy query parameter""")
def _GetLanguageRestriction(self):
if 'lr' in self:
return self['lr']
else:
return None
def _SetLanguageRestriction(self, val):
self['lr'] = val
lr = property(_GetLanguageRestriction, _SetLanguageRestriction,
doc="""The lr (language restriction) query parameter""")
def _GetIPRestriction(self):
if 'restriction' in self:
return self['restriction']
else:
return None
def _SetIPRestriction(self, val):
self['restriction'] = val
restriction = property(_GetIPRestriction, _SetIPRestriction,
doc="""The restriction query parameter""")
def _GetLocation(self):
if 'location' in self:
return self['location']
else:
return None
def _SetLocation(self, val):
self['location'] = val
location = property(_GetLocation, _SetLocation,
doc="""The location query parameter""")
class YouTubeUserQuery(YouTubeVideoQuery):
"""Subclasses YouTubeVideoQuery to perform user-specific queries.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions.
"""
def __init__(self, username=None, feed_type=None, subscription_id=None,
text_query=None, params=None, categories=None):
uploads_favorites_playlists = ('uploads', 'favorites', 'playlists')
    if feed_type == 'subscriptions' and subscription_id and username:
feed = "http://%s/feeds/users/%s/%s/%s" % (YOUTUBE_SERVER, username,
feed_type, subscription_id)
    elif feed_type == 'subscriptions' and not subscription_id and username:
feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username,
feed_type)
elif feed_type in uploads_favorites_playlists:
feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username,
feed_type)
else:
feed = "http://%s/feeds/users" % (YOUTUBE_SERVER)
YouTubeVideoQuery.__init__(self, feed, text_query=text_query,
params=params, categories=categories)
class YouTubePlaylistQuery(YouTubeVideoQuery):
"""Subclasses YouTubeVideoQuery to perform playlist-specific queries.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions.
"""
def __init__(self, playlist_id, text_query=None, params=None,
categories=None):
if playlist_id:
feed = "http://%s/feeds/playlists/%s" % (YOUTUBE_SERVER, playlist_id)
else:
feed = "http://%s/feeds/playlists" % (YOUTUBE_SERVER)
YouTubeVideoQuery.__init__(self, feed, text_query=text_query,
params=params, categories=categories)
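# A hedged sketch of the query classes above: build a keyword search,
# constrain it with the validated properties, and execute it via
# YouTubeService.YouTubeQuery(). The search term is illustrative.
def _example_video_query():
  query = YouTubeVideoQuery()
  query.vq = 'surfing'
  query.orderby = 'viewCount'  # must be a valid ORDERBY parameter
  query.racy = 'exclude'       # must be a valid RACY parameter
  yt_service = YouTubeService()
  return yt_service.YouTubeQuery(query)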
|
cosmicAsymmetry/zulip
|
refs/heads/master
|
zerver/lib/test_fixtures.py
|
11
|
# -*- coding: utf-8 -*-
import os
import re
import hashlib
from typing import Any, List, Optional
from importlib import import_module
from typing import Text
from six.moves import cStringIO as StringIO
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.utils import OperationalError
from django.apps import apps
from django.core.management import call_command
from django.utils.module_loading import module_has_submodule
FILENAME_SPLITTER = re.compile(r'[\W\-_]')
TEST_DB_STATUS_DIR = 'var/test_db_status'
def database_exists(database_name, **options):
# type: (Text, **Any) -> bool
db = options.get('database', DEFAULT_DB_ALIAS)
try:
connection = connections[db]
with connection.cursor() as cursor:
cursor.execute("SELECT 1 from pg_database WHERE datname='{}';".format(database_name))
return_value = bool(cursor.fetchone())
connections.close_all()
return return_value
except OperationalError:
return False
def get_migration_status(**options):
# type: (**Any) -> str
verbosity = options.get('verbosity', 1)
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
app_labels = [options['app_label']] if options.get('app_label') else None
db = options.get('database', DEFAULT_DB_ALIAS)
out = StringIO()
call_command(
'showmigrations',
'--list',
app_labels=app_labels,
database=db,
no_color=options.get('no_color', False),
settings=options.get('settings', os.environ['DJANGO_SETTINGS_MODULE']),
stdout=out,
traceback=options.get('traceback', True),
verbosity=verbosity,
)
connections.close_all()
out.seek(0)
output = out.read()
    return re.sub(r'\x1b\[(1|0)m', '', output)
def are_migrations_the_same(migration_file, **options):
# type: (Text, **Any) -> bool
if not os.path.exists(migration_file):
return False
with open(migration_file) as f:
migration_content = f.read()
return migration_content == get_migration_status(**options)
def _get_hash_file_path(source_file_path):
# type: (str) -> str
basename = os.path.basename(source_file_path)
filename = '_'.join(FILENAME_SPLITTER.split(basename)).lower()
return os.path.join(TEST_DB_STATUS_DIR, filename)
def _check_hash(target_hash_file, **options):
# type: (str, **Any) -> bool
"""
This function has a side effect of creating a new hash file or
updating the old hash file.
"""
source_hash_file = _get_hash_file_path(target_hash_file)
with open(target_hash_file) as f:
target_hash_content = hashlib.sha1(f.read().encode('utf8')).hexdigest()
if os.path.exists(source_hash_file):
with open(source_hash_file) as f:
source_hash_content = f.read().strip()
else:
source_hash_content = None
with open(source_hash_file, 'w') as f:
f.write(target_hash_content)
return source_hash_content == target_hash_content
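def _example_check_hash_flow():
    # type: () -> bool
    # A hedged sketch of the caching contract above: the first call on a
    # fresh checkout writes the hash file and returns False; repeat calls
    # return True until the tracked file changes. The path matches the
    # default check_files list used below.
    return _check_hash('zilencer/management/commands/populate_db.py')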
def is_template_database_current(
database_name='zulip_test_template',
migration_status='var/migration-status',
settings='zproject.test_settings',
check_files=None):
# type: (Optional[Text], Optional[Text], Optional[Text], Optional[List[str]]) -> bool
# Using str type for check_files because re.split doesn't accept unicode
if check_files is None:
check_files = [
'zilencer/management/commands/populate_db.py',
'tools/setup/postgres-init-test-db',
'tools/setup/postgres-init-dev-db',
]
if not os.path.exists(TEST_DB_STATUS_DIR):
os.mkdir(TEST_DB_STATUS_DIR)
if database_exists(database_name):
# To ensure Python evaluates all the hash tests (and thus creates the
# hash files about the current state), we evaluate them in a
# list and then process the result
hash_status = all([_check_hash(fn) for fn in check_files])
return are_migrations_the_same(migration_status, settings=settings) and hash_status
return False
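def _example_template_db_check():
    # type: () -> str
    # A hedged sketch of the intended call site (e.g. a test runner):
    # reuse the template database when migrations and seed files are
    # unchanged, otherwise signal that a rebuild is needed.
    if is_template_database_current():
        return 'reuse zulip_test_template'
    return 'rebuild test database'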
|
sorz/isi
|
refs/heads/master
|
store/product/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('category', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('image', models.ImageField(upload_to='photos/%Y/%m')),
('description', models.CharField(null=True, max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('name', models.CharField(max_length=255, verbose_name='Product Name')),
('price', models.DecimalField(max_digits=9, decimal_places=2, validators=[django.core.validators.MinValueValidator(0)])),
('in_stock', models.BooleanField(default=True)),
('off_shelf', models.BooleanField(default=False)),
('description', models.TextField()),
('category', models.ForeignKey(null=True, to='category.Category')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Property',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('value', models.CharField(max_length=255)),
('name', models.ForeignKey(to='category.PropertyName')),
('product', models.ForeignKey(to='product.Product')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('point', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
('product', models.ForeignKey(to='product.Product')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='photo',
name='product',
field=models.ForeignKey(null=True, to='product.Product'),
preserve_default=True,
),
]
|
Storj/dataserv-client
|
refs/heads/master
|
tests/__init__.py
|
1
|
from . test_builder import * # NOQA
from . test_client import * # NOQA
from . test_deserialize import * # NOQA
from . test_bandwidth_test import * # NOQA
|
lecaoquochung/ddnb.django
|
refs/heads/master
|
tests/get_or_create/models.py
|
90
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
birthday = models.DateField()
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
class DefaultPerson(models.Model):
first_name = models.CharField(max_length=100, default="Anonymous")
class ManualPrimaryKeyTest(models.Model):
id = models.IntegerField(primary_key=True)
data = models.CharField(max_length=100)
class Profile(models.Model):
person = models.ForeignKey(Person, primary_key=True)
class Tag(models.Model):
text = models.CharField(max_length=255, unique=True)
class Thing(models.Model):
name = models.CharField(max_length=256)
tags = models.ManyToManyField(Tag)
class Publisher(models.Model):
name = models.CharField(max_length=100)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, related_name='books')
publisher = models.ForeignKey(Publisher, related_name='books', db_column="publisher_id_column")
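# Sketch (added for illustration; not part of the original test models).
# These models back Django's get_or_create() tests, which exercise calls
# such as the following (assumes `import datetime`):
#
#   person, created = Person.objects.get_or_create(
#       first_name='John', last_name='Lennon',
#       defaults={'birthday': datetime.date(1940, 10, 9)},
#   )
#
# `created` is True only when the row did not already exist.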
|
NaturalGIS/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsserver_wms.py
|
4
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer WMS.
From build dir, run: ctest -R PyQgsServerWMS -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '25/05/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import os
import json
# Needed on Qt 5 so that the serialization of XML is consistent among all executions
os.environ['QT_HASH_SEED'] = '1'
import re
import urllib.request
import urllib.parse
import urllib.error
from qgis.testing import unittest
import osgeo.gdal # NOQA
from owslib.wms import WebMapService
from test_qgsserver import QgsServerTestBase
from qgis.core import QgsProject
# Strip path and content length because path may vary
RE_STRIP_UNCHECKABLE = b'MAP=[^"]+|Content-Length: \\d+'
RE_STRIP_EXTENTS = b'<(north|east|south|west)Bound(Lat|Long)itude>.*</(north|east|south|west)Bound(Lat|Long)itude>|<BoundingBox .*/>'
RE_ATTRIBUTES = b'[^>\\s]+=[^>\\s]+'
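# Example (added for clarity): the RE_STRIP_* patterns mask the parts of a
# response that legitimately vary between checkouts and runs, e.g.
#   b'href="?MAP=/tmp/a.qgs&SERVICE=WMS"'  becomes  b'href="?*****"'
#   b'Content-Length: 15066'               becomes  b'*****'
# so that responses can be diffed against stored reference files.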
class TestQgsServerWMSTestBase(QgsServerTestBase):
"""QGIS Server WMS Tests"""
# Set to True to re-generate reference files for this class
regenerate_reference = False
def wms_request(self, request, extra=None, project='test_project.qgs', version='1.3.0'):
if not os.path.exists(project):
project = os.path.join(self.testdata_path, project)
assert os.path.exists(project), "Project file not found: " + project
query_string = 'https://www.qgis.org/?MAP=%s&SERVICE=WMS&VERSION=%s&REQUEST=%s' % (urllib.parse.quote(project), version, request)
if extra is not None:
query_string += extra
header, body = self._execute_request(query_string)
return (header, body, query_string)
def wms_request_compare(self, request, extra=None, reference_file=None, project='test_project.qgs', version='1.3.0', ignoreExtent=False, normalizeJson=False):
response_header, response_body, query_string = self.wms_request(request, extra, project, version)
response = response_header + response_body
reference_path = os.path.join(self.testdata_path, (request.lower() if not reference_file else reference_file) + '.txt')
self.store_reference(reference_path, response)
f = open(reference_path, 'rb')
expected = f.read()
        def _n(r):
            # Split the two header lines from the body and re-serialize the
            # body as canonical JSON; fall back to the raw bytes if the body
            # is not valid JSON.
            lines = r.split(b'\n')
            body = lines[2:]
            headers = lines[:2]
            try:
                return b'\n'.join(headers) + json.dumps(json.loads(b'\n'.join(body))).encode('utf8')
            except (ValueError, TypeError):
                return r
        if normalizeJson:
            response = _n(response)
            expected = _n(expected)
        f.close()
response = re.sub(RE_STRIP_UNCHECKABLE, b'*****', response)
expected = re.sub(RE_STRIP_UNCHECKABLE, b'*****', expected)
if ignoreExtent:
response = re.sub(RE_STRIP_EXTENTS, b'*****', response)
expected = re.sub(RE_STRIP_EXTENTS, b'*****', expected)
        msg = "request %s failed.\nQuery: %s\nExpected file: %s\nResponse:\n%s" % (request, query_string, reference_path, response.decode('utf-8'))
self.assertXMLEqual(response, expected, msg=msg)
class TestQgsServerWMS(TestQgsServerWMSTestBase):
"""QGIS Server WMS Tests"""
def test_getcapabilities(self):
self.wms_request_compare('GetCapabilities')
def test_getcapabilities_case_insensitive(self):
self.wms_request_compare('getcapabilities')
self.wms_request_compare('GETCAPABILITIES')
def test_getprojectsettings(self):
self.wms_request_compare('GetProjectSettings')
def test_getcontext(self):
self.wms_request_compare('GetContext')
def test_operation_not_supported(self):
qs = '?MAP=%s&SERVICE=WMS&VERSION=1.3.0&REQUEST=NotAValidRequest' % urllib.parse.quote(self.projectPath)
self._assert_status_code(501, qs)
def test_describelayer(self):
# Test DescribeLayer
self.wms_request_compare('DescribeLayer',
'&layers=testlayer%20%C3%A8%C3%A9&' +
'SLD_VERSION=1.1.0',
'describelayer')
def test_getstyles(self):
# Test GetStyles
self.wms_request_compare('GetStyles',
'&layers=testlayer%20%C3%A8%C3%A9&',
'getstyles')
# Test GetStyles with labeling
self.wms_request_compare('GetStyles',
'&layers=pointlabel',
'getstyles_pointlabel',
project=self.projectPath)
def test_wms_getschemaextension(self):
self.wms_request_compare('GetSchemaExtension',
'',
'getschemaextension')
def wms_request_compare_project(self, request, extra=None, reference_file=None, project_name="test_project.qgs"):
projectPath = self.testdata_path + project_name
assert os.path.exists(projectPath), "Project file not found: " + projectPath
project = QgsProject()
project.read(projectPath)
query_string = 'https://www.qgis.org/?SERVICE=WMS&VERSION=1.3.0&REQUEST=%s' % (request)
if extra is not None:
query_string += extra
header, body = self._execute_request_project(query_string, project)
response = header + body
reference_path = self.testdata_path + (request.lower() if not reference_file else reference_file) + '.txt'
self.store_reference(reference_path, response)
f = open(reference_path, 'rb')
expected = f.read()
f.close()
response = re.sub(RE_STRIP_UNCHECKABLE, b'*****', response)
expected = re.sub(RE_STRIP_UNCHECKABLE, b'*****', expected)
        self.assertXMLEqual(response, expected, msg="request %s failed.\nQuery: %s\nExpected file: %s\nResponse:\n%s" % (request, query_string, reference_path, response.decode('utf-8')))
def test_wms_getcapabilities_project(self):
"""WMS GetCapabilities without map parameter"""
self.wms_request_compare_project('GetCapabilities')
# reference_file='getcapabilities_without_map_param' could be the right response
def test_wms_getcapabilities_project_empty_layer(self):
"""WMS GetCapabilities with empty layer different CRS: wrong bbox - Regression GH 30264"""
self.wms_request_compare_project('GetCapabilities', reference_file='wms_getcapabilities_empty_layer', project_name='bug_gh30264_empty_layer_wrong_bbox.qgs')
def wms_inspire_request_compare(self, request):
"""WMS INSPIRE tests"""
project = self.testdata_path + "test_project_inspire.qgs"
assert os.path.exists(project), "Project file not found: " + project
query_string = '?MAP=%s&SERVICE=WMS&VERSION=1.3.0&REQUEST=%s' % (urllib.parse.quote(project), request)
header, body = self._execute_request(query_string)
response = header + body
reference_path = self.testdata_path + request.lower() + '_inspire.txt'
self.store_reference(reference_path, response)
f = open(reference_path, 'rb')
expected = f.read()
f.close()
response = re.sub(RE_STRIP_UNCHECKABLE, b'', response)
expected = re.sub(RE_STRIP_UNCHECKABLE, b'', expected)
        self.assertXMLEqual(response, expected, msg="request %s failed.\nQuery: %s\nExpected file: %s\nResponse:\n%s" % (request, query_string, reference_path, response.decode('utf-8')))
def test_project_wms_inspire(self):
"""Test some WMS request"""
for request in ('GetCapabilities',):
self.wms_inspire_request_compare(request)
def test_wms_getcapabilities_without_title(self):
# Empty title in project leads to a Layer element without Name, Title
# and Abstract tags. However, it should still have a CRS and a BBOX
        # according to the OGC specification tests.
self.wms_request_compare('GetCapabilities', reference_file='wms_getcapabilities_without_title', project='test_project_without_title.qgs')
def test_wms_getcapabilities_empty_spatial_layer(self):
# The project contains a spatial layer without feature and the WMS
# extent is not configured in the project.
self.wms_request_compare('GetCapabilities',
reference_file='wms_getcapabilities_empty_spatial_layer',
project='test_project_empty_spatial_layer.qgz',
ignoreExtent=True)
def test_wms_getcapabilities_versions(self):
# default version 1.3.0 when empty VERSION parameter
project = os.path.join(self.testdata_path, "test_project.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"REQUEST": "GetCapabilities",
}.items())])
self.wms_request_compare(qs, reference_file='wms_getcapabilities_1_3_0', version='')
# default version 1.3.0 when VERSION = 1.3.0 parameter
project = os.path.join(self.testdata_path, "test_project.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"REQUEST": "GetCapabilities",
}.items())])
self.wms_request_compare(qs, reference_file='wms_getcapabilities_1_3_0', version='1.3.0')
# version 1.1.1
project = os.path.join(self.testdata_path, "test_project.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"REQUEST": "GetCapabilities",
}.items())])
self.wms_request_compare(qs, reference_file='wms_getcapabilities_1_1_1', version='1.1.1')
# default version 1.3.0 when invalid VERSION parameter
project = os.path.join(self.testdata_path, "test_project.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"REQUEST": "GetCapabilities",
}.items())])
self.wms_request_compare(qs, reference_file='wms_getcapabilities_1_3_0', version='33.33.33')
def test_wms_getcapabilities_url(self):
# empty url in project
project = os.path.join(self.testdata_path, "test_project_without_urls.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"VERSION": "1.3.0",
"REQUEST": "GetCapabilities",
"STYLES": ""
}.items())])
r, h = self._result(self._execute_request(qs))
item_found = False
for item in str(r).split("\\n"):
if "OnlineResource" in item:
self.assertEqual("xlink:href=\"?" in item, True)
item_found = True
self.assertTrue(item_found)
# url passed in query string
# verify that GetCapabilities isn't put into the url for non-uppercase parameter names
project = os.path.join(self.testdata_path, "test_project_without_urls.qgs")
qs = "https://www.qgis-server.org?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SeRvIcE": "WMS",
"VeRsIoN": "1.3.0",
"ReQuEsT": "GetCapabilities",
"STYLES": ""
}.items())])
r, h = self._result(self._execute_request(qs))
item_found = False
for item in str(r).split("\\n"):
if "OnlineResource" in item:
self.assertEqual("xlink:href=\"https://www.qgis-server.org?" in item, True)
self.assertEqual("GetCapabilities" in item, False)
item_found = True
self.assertTrue(item_found)
# url well defined in project
project = os.path.join(self.testdata_path, "test_project_with_urls.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WMS",
"VERSION": "1.3.0",
"REQUEST": "GetCapabilities",
"STYLES": ""
}.items())])
r, h = self._result(self._execute_request(qs))
item_found = False
for item in str(r).split("\\n"):
if "OnlineResource" in item and "xlink:href=\"my_wms_advertised_url?" in item:
item_found = True
self.assertTrue(item_found)
@unittest.skip('Timeout issues')
def test_wms_GetProjectSettings_wms_print_layers(self):
projectPath = self.testdata_path + "test_project_wms_printlayers.qgs"
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": projectPath,
"SERVICE": "WMS",
"VERSION": "1.3.0",
"REQUEST": "GetProjectSettings"
}.items())])
header, body = self._execute_request(qs)
xmlResult = body.decode('utf-8')
self.assertTrue(xmlResult.find("<WMSBackgroundLayer>1</WMSBackgroundLayer>") != -1)
self.assertTrue(xmlResult.find("<WMSDataSource>contextualWMSLegend=0&crs=EPSG:21781&dpiMode=7&featureCount=10&format=image/png&layers=public_geo_gemeinden&styles=&url=https://qgiscloud.com/mhugent/qgis_unittest_wms/wms?</WMSDataSource>") != -1)
self.assertTrue(xmlResult.find("<WMSPrintLayer>contextualWMSLegend=0&amp;crs=EPSG:21781&amp;dpiMode=7&amp;featureCount=10&amp;format=image/png&amp;layers=public_geo_gemeinden&amp;styles=&amp;url=https://qgiscloud.com/mhugent/qgis_unittest_wms_print/wms?</WMSPrintLayer>") != -1)
def test_getcapabilities_owslib(self):
# read getcapabilities document
docPath = self.testdata_path + 'getcapabilities.txt'
f = open(docPath, 'r')
doc = f.read()
f.close()
# clean header in doc
doc = doc.replace('Content-Length: 15066\n', '')
doc = doc.replace('Content-Type: text/xml; charset=utf-8\n\n', '')
doc = doc.replace('<?xml version="1.0" encoding="utf-8"?>\n', '')
# read capabilities document with owslib
w = WebMapService(None, xml=doc, version='1.3.0')
# check content
rootLayerName = 'QGIS Test Project'
self.assertTrue(rootLayerName in w.contents.keys())
if __name__ == '__main__':
unittest.main()
|
mbauskar/omnitech-erpnext
|
refs/heads/master
|
erpnext/patches/v6_0/fix_outstanding_amount.py
|
87
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
def execute():
for dt, party_field, account_field in (("Sales Invoice", "customer", "debit_to"),
("Purchase Invoice", "supplier", "credit_to")):
wrong_invoices = frappe.db.sql("""select name, {0} as account from `tab{1}`
where docstatus=1 and ifnull({2}, '')=''""".format(account_field, dt, party_field))
for invoice, account in wrong_invoices:
update_outstanding_amt(account, party_field.title(), None, dt, invoice)
|
bblfsh/python-driver
|
refs/heads/master
|
fixtures/comments.py
|
2
|
# comment above
# second comment above
a = 1 # line trailing comment
# file trailing comment
# second file trailing comment
|
vigzmv/AB-Split-Tester
|
refs/heads/master
|
Tester/wsgi.py
|
1
|
"""
WSGI config for Tester project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Tester.settings")
application = get_wsgi_application()
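# Usage sketch (added; assumes a WSGI server such as gunicorn is installed):
# the module-level `application` above is what servers import, e.g.
#
#   gunicorn Tester.wsgi:application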
|
javipalanca/Django-facebook
|
refs/heads/master
|
docs/docs_env/Lib/site-packages/pip-1.0-py2.5.egg/pip/commands/install.py
|
34
|
import os, sys
from pip.req import InstallRequirement, RequirementSet
from pip.req import parse_requirements
from pip.log import logger
from pip.locations import build_prefix, src_prefix
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import InstallationError
class InstallCommand(Command):
name = 'install'
usage = '%prog [OPTIONS] PACKAGE_NAMES...'
summary = 'Install packages'
bundle = False
def __init__(self):
super(InstallCommand, self).__init__()
self.parser.add_option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='VCS+REPOS_URL[@REV]#egg=PACKAGE',
help='Install a package directly from a checkout. Source will be checked '
'out into src/PACKAGE (lower-case) and installed in-place (using '
'setup.py develop). You can run this on an existing directory/checkout (like '
'pip install -e src/mycheckout). This option may be provided multiple times. '
'Possible values for VCS are: svn, git, hg and bzr.')
self.parser.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='FILENAME',
help='Install all the packages listed in the given requirements file. '
'This option can be used multiple times.')
self.parser.add_option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='URL',
help='URL to look for packages at')
self.parser.add_option(
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default='http://pypi.python.org/simple/',
help='Base URL of Python Package Index (default %default)')
self.parser.add_option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url')
self.parser.add_option(
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead)')
self.parser.add_option(
'-M', '--use-mirrors',
dest='use_mirrors',
action='store_true',
default=False,
help='Use the PyPI mirrors as a fallback in case the main index is down.')
self.parser.add_option(
'--mirrors',
dest='mirrors',
metavar='URL',
action='append',
default=[],
help='Specific mirror URLs to query when --use-mirrors is used')
self.parser.add_option(
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='DIR',
default=None,
help='Unpack packages into DIR (default %s) and build from there' % build_prefix)
self.parser.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='DIR',
default=None,
help='Download packages into DIR instead of installing them')
self.parser.add_option(
'--download-cache',
dest='download_cache',
metavar='DIR',
default=None,
help='Cache downloaded packages in DIR')
self.parser.add_option(
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='DIR',
default=None,
help='Check out --editable packages into DIR (default %s)' % src_prefix)
self.parser.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all packages to the newest available version')
self.parser.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead)')
self.parser.add_option(
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help='Ignore package dependencies')
self.parser.add_option(
'--no-install',
dest='no_install',
action='store_true',
help="Download and unpack all packages, but don't actually install them")
self.parser.add_option(
'--no-download',
dest='no_download',
action="store_true",
help="Don't download any packages, just install the ones already downloaded "
"(completes an install run with --no-install)")
self.parser.add_option(
'--install-option',
dest='install_options',
action='append',
            help="Extra arguments to be supplied to the setup.py install "
            "command (use like --install-option=\"--install-scripts=/usr/local/bin\"). "
            "Use multiple --install-option options to pass multiple options to setup.py install. "
            "If you are using an option with a directory path, be sure to use an absolute path.")
self.parser.add_option(
'--global-option',
dest='global_options',
action='append',
            help="Extra global options to be supplied to the setup.py "
            "call before the install command")
self.parser.add_option(
'--user',
dest='use_user_site',
action='store_true',
help='Install to user-site')
def _build_package_finder(self, options, index_urls):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(find_links=options.find_links,
index_urls=index_urls,
use_mirrors=options.use_mirrors,
mirrors=options.mirrors)
def run(self, options, args):
if not options.build_dir:
options.build_dir = build_prefix
if not options.src_dir:
options.src_dir = src_prefix
if options.download_dir:
options.no_install = True
options.ignore_installed = True
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
install_options.append('--user')
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
finder = self._build_package_finder(options, index_urls)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
download_cache=options.download_cache,
upgrade=options.upgrade,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(name, default_vcs=options.default_vcs))
for filename in options.requirements:
for req in parse_requirements(filename, finder=finder, options=options):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
if options.find_links:
raise InstallationError('You must give at least one '
'requirement to %s (maybe you meant "pip install %s"?)'
% (self.name, " ".join(options.find_links)))
raise InstallationError('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % dict(name=self.name))
if (options.use_user_site and
sys.version_info < (2, 6)):
raise InstallationError('--user is only supported in Python version 2.6 and newer')
import setuptools
if (options.use_user_site and
requirement_set.has_editables and
not getattr(setuptools, '_distribute', False)):
raise InstallationError('--user --editable not supported with setuptools, use distribute')
if not options.no_download:
requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle)
else:
requirement_set.locate_files()
if not options.no_install and not self.bundle:
requirement_set.install(install_options, global_options)
installed = ' '.join([req.name for req in
requirement_set.successfully_installed])
if installed:
logger.notify('Successfully installed %s' % installed)
elif not self.bundle:
downloaded = ' '.join([req.name for req in
requirement_set.successfully_downloaded])
if downloaded:
logger.notify('Successfully downloaded %s' % downloaded)
elif self.bundle:
requirement_set.create_bundle(self.bundle_filename)
logger.notify('Created bundle in %s' % self.bundle_filename)
# Clean up
if not options.no_install:
requirement_set.cleanup_files(bundle=self.bundle)
return requirement_set
InstallCommand()
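# Note (added): instantiating the command at import time is how this pip
# release registered subcommands -- Command.__init__ in pip.basecommand
# stores the instance in a module-level command registry keyed by the
# command's `name` attribute, so `pip install ...` dispatches to the
# object created here.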
|
indevgr/django
|
refs/heads/master
|
tests/forms_tests/tests/test_widgets.py
|
6
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.forms import (
CheckboxSelectMultiple, ClearableFileInput, RadioSelect, TextInput,
)
from django.forms.widgets import (
ChoiceFieldRenderer, ChoiceInput, RadioFieldRenderer,
)
from django.test import SimpleTestCase, override_settings
from django.urls import reverse
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import SafeData
from ..models import Article
class FormsWidgetTests(SimpleTestCase):
def test_radiofieldrenderer(self):
# RadioSelect uses a RadioFieldRenderer to render the individual radio inputs.
# You can manipulate that object directly to customize the way the RadioSelect
# is rendered.
w = RadioSelect()
r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
inp_set1 = []
inp_set2 = []
inp_set3 = []
inp_set4 = []
for inp in r:
inp_set1.append(str(inp))
inp_set2.append('%s<br />' % inp)
inp_set3.append('<p>%s %s</p>' % (inp.tag(), inp.choice_label))
inp_set4.append(
'%s %s %s %s %s' % (
inp.name,
inp.value,
inp.choice_value,
inp.choice_label,
inp.is_checked(),
)
)
self.assertHTMLEqual('\n'.join(inp_set1), """<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>
<label><input type="radio" name="beatle" value="P" /> Paul</label>
<label><input type="radio" name="beatle" value="G" /> George</label>
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
self.assertHTMLEqual('\n'.join(inp_set2), """<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label><br />""")
self.assertHTMLEqual('\n'.join(inp_set3), """<p><input checked="checked" type="radio" name="beatle" value="J" /> John</p>
<p><input type="radio" name="beatle" value="P" /> Paul</p>
<p><input type="radio" name="beatle" value="G" /> George</p>
<p><input type="radio" name="beatle" value="R" /> Ringo</p>""")
self.assertHTMLEqual('\n'.join(inp_set4), """beatle J J John True
beatle J P Paul False
beatle J G George False
beatle J R Ringo False""")
# A RadioFieldRenderer object also allows index access to individual RadioChoiceInput
w = RadioSelect()
r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
self.assertHTMLEqual(str(r[1]), '<label><input type="radio" name="beatle" value="P" /> Paul</label>')
self.assertHTMLEqual(
str(r[0]),
'<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>'
)
self.assertTrue(r[0].is_checked())
self.assertFalse(r[1].is_checked())
self.assertEqual((r[1].name, r[1].value, r[1].choice_value, r[1].choice_label), ('beatle', 'J', 'P', 'Paul'))
# These individual widgets can accept extra attributes if manually rendered.
self.assertHTMLEqual(
r[1].render(attrs={'extra': 'value'}),
'<label><input type="radio" extra="value" name="beatle" value="P" /> Paul</label>'
)
with self.assertRaises(IndexError):
r[10]
# You can create your own custom renderers for RadioSelect to use.
class MyRenderer(RadioFieldRenderer):
def render(self):
return '<br />\n'.join(six.text_type(choice) for choice in self)
w = RadioSelect(renderer=MyRenderer)
self.assertHTMLEqual(
w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
"""<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>"""
)
# Or you can use custom RadioSelect fields that use your custom renderer.
class CustomRadioSelect(RadioSelect):
renderer = MyRenderer
w = CustomRadioSelect()
self.assertHTMLEqual(
w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
"""<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>"""
)
# You can customize rendering with outer_html/inner_html renderer variables (#22950)
class MyRenderer(RadioFieldRenderer):
# str is just to test some Python 2 issue with bytestrings
outer_html = str('<div{id_attr}>{content}</div>')
inner_html = '<p>{choice_value}{sub_widgets}</p>'
w = RadioSelect(renderer=MyRenderer)
output = w.render('beatle', 'J',
choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')),
attrs={'id': 'bar'})
self.assertIsInstance(output, SafeData)
self.assertHTMLEqual(
output,
"""<div id="bar">
<p><label for="bar_0"><input checked="checked" type="radio" id="bar_0" value="J" name="beatle" /> John</label></p>
<p><label for="bar_1"><input type="radio" id="bar_1" value="P" name="beatle" /> Paul</label></p>
<p><label for="bar_2"><input type="radio" id="bar_2" value="G" name="beatle" /> George</label></p>
<p><label for="bar_3"><input type="radio" id="bar_3" value="R" name="beatle" /> Ringo</label></p>
</div>""")
def test_subwidget(self):
# Each subwidget tag gets a separate ID when the widget has an ID specified
self.assertHTMLEqual("\n".join(c.tag() for c in CheckboxSelectMultiple(attrs={'id': 'abc'}).subwidgets('letters', list('ac'), choices=zip(list('abc'), list('ABC')))), """<input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" />
<input type="checkbox" name="letters" value="b" id="abc_1" />
<input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" />""")
# Each subwidget tag does not get an ID if the widget does not have an ID specified
self.assertHTMLEqual("\n".join(c.tag() for c in CheckboxSelectMultiple().subwidgets('letters', list('ac'), choices=zip(list('abc'), list('ABC')))), """<input checked="checked" type="checkbox" name="letters" value="a" />
<input type="checkbox" name="letters" value="b" />
<input checked="checked" type="checkbox" name="letters" value="c" />""")
# The id_for_label property of the subwidget should return the ID that is used on the subwidget's tag
self.assertHTMLEqual("\n".join('<input type="checkbox" name="letters" value="%s" id="%s" />' % (c.choice_value, c.id_for_label) for c in CheckboxSelectMultiple(attrs={'id': 'abc'}).subwidgets('letters', [], choices=zip(list('abc'), list('ABC')))), """<input type="checkbox" name="letters" value="a" id="abc_0" />
<input type="checkbox" name="letters" value="b" id="abc_1" />
<input type="checkbox" name="letters" value="c" id="abc_2" />""")
def test_sub_widget_html_safe(self):
widget = TextInput()
subwidget = next(widget.subwidgets('username', 'John Doe'))
self.assertTrue(hasattr(subwidget, '__html__'))
self.assertEqual(force_text(subwidget), subwidget.__html__())
def test_choice_input_html_safe(self):
widget = ChoiceInput('choices', 'CHOICE1', {}, ('CHOICE1', 'first choice'), 0)
self.assertTrue(hasattr(ChoiceInput, '__html__'))
self.assertEqual(force_text(widget), widget.__html__())
def test_choice_field_renderer_html_safe(self):
renderer = ChoiceFieldRenderer('choices', 'CHOICE1', {}, [('CHOICE1', 'first_choice')])
renderer.choice_input_class = lambda *args: args
self.assertTrue(hasattr(ChoiceFieldRenderer, '__html__'))
self.assertEqual(force_text(renderer), renderer.__html__())
@override_settings(ROOT_URLCONF='forms_tests.urls')
class LiveWidgetTests(AdminSeleniumTestCase):
available_apps = ['forms_tests'] + AdminSeleniumTestCase.available_apps
def test_textarea_trailing_newlines(self):
"""
Test that a roundtrip on a ModelForm doesn't alter the TextField value
"""
article = Article.objects.create(content="\nTst\n")
self.selenium.get(self.live_server_url + reverse('article_form', args=[article.pk]))
self.selenium.find_element_by_id('submit').submit()
article = Article.objects.get(pk=article.pk)
# Should be "\nTst\n" after #19251 is fixed
self.assertEqual(article.content, "\r\nTst\r\n")
@python_2_unicode_compatible
class FakeFieldFile(object):
"""
Quacks like a FieldFile (has a .url and unicode representation), but
doesn't require us to care about storages etc.
"""
url = 'something'
def __str__(self):
return self.url
class ClearableFileInputTests(SimpleTestCase):
def test_render_custom_template(self):
widget = ClearableFileInput()
widget.template_with_initial = (
'%(initial_text)s: <img src="%(initial_url)s" alt="%(initial)s" /> '
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
self.assertHTMLEqual(
widget.render('myfile', FakeFieldFile()),
'Currently: <img src="something" alt="something" /> '
'<input type="checkbox" name="myfile-clear" id="myfile-clear_id" /> '
'<label for="myfile-clear_id">Clear</label><br />Change: <input type="file" name="myfile" />'
)
|
diox/olympia
|
refs/heads/master
|
src/olympia/git/migrations/0001_initial.py
|
6
|
# Generated by Django 2.2.12 on 2020-04-22 11:50
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.models
class Migration(migrations.Migration):
initial = True
dependencies = [('addons', '0004_auto_20191126_1712')]
operations = [
migrations.CreateModel(
name='GitExtractionEntry',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
models.DateTimeField(
blank=True,
default=django.utils.timezone.now,
editable=False,
),
),
('modified', models.DateTimeField(auto_now=True)),
(
'addon',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='addons.Addon',
),
),
('in_progress', models.NullBooleanField(default=None)),
],
options={
'get_latest_by': 'created',
'abstract': False,
'base_manager_name': 'objects',
'unique_together': {('addon', 'in_progress')},
},
bases=(
olympia.amo.models.SearchMixin,
olympia.amo.models.SaveUpdateMixin,
models.Model,
),
)
]
|
hbrunn/OCB
|
refs/heads/8.0
|
addons/sale/report/sale_report.py
|
71
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class sale_report(osv.osv):
_name = "sale.report"
_description = "Sales Orders Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.datetime('Date Order', readonly=True), # TDE FIXME master: rename into date_order
'date_confirm': fields.date('Date Confirm', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', readonly=True),
'product_uom_qty': fields.float('# of Qty', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Price', readonly=True),
'delay': fields.float('Commitment Delay', digits=(16,2), readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'state': fields.selection([
('cancel', 'Cancelled'),
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('exception', 'Exception'),
('done', 'Done')], 'Order Status', readonly=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_order = 'date desc'
def _select(self):
select_str = """
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
SELECT min(l.id) as id,
l.product_id as product_id,
t.uom_id as product_uom,
sum(l.product_uom_qty / u.factor * u2.factor) as product_uom_qty,
sum(l.product_uom_qty * l.price_unit / cr.rate * (100.0-l.discount) / 100.0) as price_total,
count(*) as nbr,
s.date_order as date,
s.date_confirm as date_confirm,
s.partner_id as partner_id,
s.user_id as user_id,
s.company_id as company_id,
extract(epoch from avg(date_trunc('day',s.date_confirm)-date_trunc('day',s.create_date)))/(24*60*60)::decimal(16,2) as delay,
l.state,
t.categ_id as categ_id,
s.pricelist_id as pricelist_id,
s.project_id as analytic_account_id,
s.section_id as section_id
"""
return select_str
def _from(self):
from_str = """
sale_order_line l
join sale_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join product_pricelist pp on (s.pricelist_id = pp.id)
join currency_rate cr on (cr.currency_id = pp.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY l.product_id,
l.order_id,
t.uom_id,
t.categ_id,
s.date_order,
s.date_confirm,
s.partner_id,
s.user_id,
s.company_id,
l.state,
s.pricelist_id,
s.project_id,
s.section_id
"""
return group_by_str
def init(self, cr):
# self._table = sale_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM ( %s )
%s
)""" % (self._table, self._select(), self._from(), self._group_by()))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nirbheek/cerbero-old
|
refs/heads/master
|
cerbero/utils/__init__.py
|
3
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import shutil
import sys
try:
import sysconfig
except ImportError:
from distutils import sysconfig
try:
import xml.etree.cElementTree as etree
except ImportError:
from lxml import etree
import gettext
import platform as pplatform
import re
from cerbero.enums import Platform, Architecture, Distro, DistroVersion
from cerbero.errors import FatalError
from cerbero.utils import messages as m
_ = gettext.gettext
N_ = lambda x: x
class ArgparseArgument(object):
def __init__(self, *name, **kwargs):
self.name = name
self.args = kwargs
def add_to_parser(self, parser):
parser.add_argument(*self.name, **self.args)
def user_is_root():
''' Check if the user running the process is root '''
return hasattr(os, 'getuid') and os.getuid() == 0
def determine_num_of_cpus():
''' Number of virtual or physical CPUs on this system '''
# Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
return 1
def to_winpath(path):
if path.startswith('/'):
path = '%s:%s' % (path[1], path[2:])
return path.replace('/', '\\')
def to_unixpath(path):
if path[1] == ':':
path = '/%s%s' % (path[0], path[2:])
return path
def to_winepath(path):
path = path.replace('/', '\\\\')
# wine maps the filesystem root '/' to 'z:\'
path = 'z:\\%s' % path
return path
def fix_winpath(path):
return path.replace('\\', '/')
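# Examples (added): the converters above assume MSYS-style drive roots:
#   to_winpath('/c/foo/bar')   returns 'c:\\foo\\bar'
#   to_unixpath('c:/foo/bar')  returns '/c/foo/bar'
# to_winepath() additionally doubles each separator and prefixes 'z:',
# since wine maps the filesystem root '/' to 'z:\'.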
def system_info():
'''
    Get the system information.
Return a tuple with the platform type, the architecture and the
distribution
'''
# Get the platform info
platform = sys.platform
if platform.startswith('win'):
platform = Platform.WINDOWS
elif platform.startswith('darwin'):
platform = Platform.DARWIN
elif platform.startswith('linux'):
platform = Platform.LINUX
else:
raise FatalError(_("Platform %s not supported") % platform)
# Get the architecture info
if platform == Platform.WINDOWS:
platform_str = sysconfig.get_platform()
if platform_str in ['win-amd64', 'win-ia64']:
arch = Architecture.X86_64
else:
arch = Architecture.X86
else:
uname = os.uname()
arch = uname[4]
if arch == 'x86_64':
arch = Architecture.X86_64
elif arch.endswith('86'):
arch = Architecture.X86
else:
raise FatalError(_("Architecture %s not supported") % arch)
# Get the distro info
if platform == Platform.LINUX:
d = pplatform.linux_distribution()
if d[0] in ['Ubuntu', 'debian', 'LinuxMint']:
distro = Distro.DEBIAN
if d[2] in ['maverick', 'isadora']:
distro_version = DistroVersion.UBUNTU_MAVERICK
elif d[2] in ['lucid', 'julia']:
distro_version = DistroVersion.UBUNTU_LUCID
elif d[2] in ['natty', 'katya']:
distro_version = DistroVersion.UBUNTU_NATTY
elif d[2] in ['oneiric', 'lisa']:
distro_version = DistroVersion.UBUNTU_ONEIRIC
elif d[2] in ['precise', 'maya']:
distro_version = DistroVersion.UBUNTU_PRECISE
elif d[2] in ['quantal', 'nadia']:
distro_version = DistroVersion.UBUNTU_QUANTAL
elif d[2] in ['raring', 'olivia']:
distro_version = DistroVersion.UBUNTU_RARING
elif d[2] in ['saucy', 'petra']:
distro_version = DistroVersion.UBUNTU_SAUCY
elif d[2] in ['trusty', 'qiana']:
distro_version = DistroVersion.UBUNTU_TRUSTY
elif d[2] in ['utopic']:
distro_version = DistroVersion.UBUNTU_UTOPIC
elif d[1].startswith('6.'):
distro_version = DistroVersion.DEBIAN_SQUEEZE
elif d[1].startswith('7.') or d[1].startswith('wheezy'):
distro_version = DistroVersion.DEBIAN_WHEEZY
elif d[1].startswith('8.') or d[1].startswith('jessie'):
distro_version = DistroVersion.DEBIAN_JESSIE
else:
raise FatalError("Distribution '%s' not supported" % str(d))
elif d[0] in ['RedHat', 'Fedora', 'CentOS', 'Red Hat Enterprise Linux Server', 'CentOS Linux']:
distro = Distro.REDHAT
if d[1] == '16':
distro_version = DistroVersion.FEDORA_16
elif d[1] == '17':
distro_version = DistroVersion.FEDORA_17
elif d[1] == '18':
distro_version = DistroVersion.FEDORA_18
elif d[1] == '19':
distro_version = DistroVersion.FEDORA_19
elif d[1] == '20':
distro_version = DistroVersion.FEDORA_20
elif d[1] == '21':
distro_version = DistroVersion.FEDORA_21
elif d[1].startswith('6.'):
distro_version = DistroVersion.REDHAT_6
elif d[1].startswith('7.'):
distro_version = DistroVersion.REDHAT_7
else:
# FIXME Fill this
raise FatalError("Distribution '%s' not supported" % str(d))
elif d[0].strip() in ['openSUSE']:
distro = Distro.SUSE
if d[1] == '12.1':
distro_version = DistroVersion.OPENSUSE_12_1
elif d[1] == '12.2':
distro_version = DistroVersion.OPENSUSE_12_2
elif d[1] == '12.3':
distro_version = DistroVersion.OPENSUSE_12_3
else:
# FIXME Fill this
raise FatalError("Distribution OpenSuse '%s' "
"not supported" % str(d))
else:
raise FatalError("Distribution '%s' not supported" % str(d))
elif platform == Platform.WINDOWS:
distro = Distro.WINDOWS
win32_ver = pplatform.win32_ver()[0]
dmap = {'xp': DistroVersion.WINDOWS_XP,
'vista': DistroVersion.WINDOWS_VISTA,
'7': DistroVersion.WINDOWS_7,
'post2008Server': DistroVersion.WINDOWS_8,
'8': DistroVersion.WINDOWS_8}
if win32_ver in dmap:
distro_version = dmap[win32_ver]
else:
raise FatalError("Windows version '%s' not supported" % win32_ver)
elif platform == Platform.DARWIN:
distro = Distro.OS_X
ver = pplatform.mac_ver()[0]
if ver.startswith('10.10'):
distro_version = DistroVersion.OS_X_YOSEMITE
elif ver.startswith('10.9'):
distro_version = DistroVersion.OS_X_MAVERICKS
elif ver.startswith('10.8'):
distro_version = DistroVersion.OS_X_MOUNTAIN_LION
else:
raise FatalError("Mac version %s not supported" % ver)
num_of_cpus = determine_num_of_cpus()
return platform, arch, distro, distro_version, num_of_cpus
def validate_packager(packager):
# match packager in the form 'Name <email>'
    expr = r'(.*\s)*[<]([a-zA-Z0-9+_\-\.]+@' \
           r'[0-9a-zA-Z][-.0-9a-zA-Z]*\.[a-zA-Z]+)[>]$'
return bool(re.match(expr, packager))
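# Examples (added): the expected format is 'Name <email>':
#   >>> validate_packager('Andoni Morales <ylatuya@gmail.com>')
#   True
#   >>> validate_packager('ylatuya@gmail.com')
#   False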
def copy_files(origdir, destdir, files, extensions, target_platform):
for f in files:
f = f % extensions
install_dir = os.path.dirname(os.path.join(destdir, f))
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if destdir[1] == ':':
# windows path
relprefix = to_unixpath(destdir)[2:]
else:
relprefix = destdir[1:]
orig = os.path.join(origdir, relprefix, f)
dest = os.path.join(destdir, f)
m.action("copying %s to %s" % (orig, dest))
try:
shutil.copy(orig, dest)
except IOError:
m.warning("Could not copy %s to %s" % (orig, dest))
def remove_list_duplicates(seq):
''' Remove list duplicates maintaining the order '''
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
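# Example (added): unlike set(), the helper above is order-preserving:
#   >>> remove_list_duplicates(['b', 'a', 'b', 'c', 'a'])
#   ['b', 'a', 'c']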
def parse_file(filename, dict):
try:
execfile(filename, dict)
except Exception, ex:
import traceback
traceback.print_exc()
raise ex
def escape_path(path):
path = path.replace('\\', '/')
path = path.replace('(', '\\\(').replace(')', '\\\)')
path = path.replace(' ', '\\\\ ')
return path
def get_wix_prefix():
if 'WIX' in os.environ:
wix_prefix = os.path.join(os.environ['WIX'], 'bin')
else:
        wix_template = 'C:/Program Files%s/Windows Installer XML v3.5/bin'
        # Try the default location first, then fall back to 'Program Files (x86)'
        wix_prefix = wix_template % ''
        if not os.path.exists(wix_prefix):
            wix_prefix = wix_template % ' (x86)'
if not os.path.exists(wix_prefix):
raise FatalError("The required packaging tool 'WiX' was not found")
return escape_path(to_unixpath(wix_prefix))
def add_system_libs(config, new_env):
'''
    Add /usr/lib/pkgconfig to PKG_CONFIG_PATH so the system's .pc files
    can be found.
'''
arch = config.target_arch
libdir = 'lib'
if arch == Architecture.X86:
arch = 'i386'
else:
if config.distro == Distro.REDHAT:
libdir = 'lib64'
search_paths = [os.environ['PKG_CONFIG_LIBDIR'],
'/usr/%s/pkgconfig' % libdir, '/usr/share/pkgconfig',
'/usr/lib/%s-linux-gnu/pkgconfig' % arch]
new_env['PKG_CONFIG_PATH'] = ':'.join(search_paths)
|
flar2/m7-bulletproof
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual, hierarchical
tree of calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER enabled.
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too long: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
        has the name given by func. If no such parent is found,
        create the function as a new child of the root node.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
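# Example (added): a raw function-tracer line looks roughly like
#   'bash-16939 [000]  6075.461561: mutex_unlock <-tracing_buffers_read'
# which parseLine() turns into the tuple
#   ('6075.461561', 'mutex_unlock', 'tracing_buffers_read').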
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
Endika/edx-platform
|
refs/heads/master
|
lms/lib/courseware_search/test/test_lms_filter_generator.py
|
43
|
"""
Tests for the lms_filter_generator
"""
from mock import patch, Mock
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from lms.lib.courseware_search.lms_filter_generator import LmsSearchFilterGenerator
class LmsSearchFilterGeneratorTestCase(ModuleStoreTestCase):
""" Test case class to test search result processor """
def build_courses(self):
"""
Build up a course tree with multiple test courses
"""
self.courses = [
CourseFactory.create(
org='ElasticsearchFiltering',
course='ES101F',
run='test_run',
display_name='Elasticsearch Filtering test course',
),
CourseFactory.create(
org='FilterTest',
course='FT101',
run='test_run',
display_name='FilterTest test course',
)
]
self.chapter = ItemFactory.create(
parent_location=self.courses[0].location,
category='chapter',
display_name="Week 1",
publish_item=True,
)
self.chapter2 = ItemFactory.create(
parent_location=self.courses[1].location,
category='chapter',
display_name="Week 1",
publish_item=True,
)
def setUp(self):
super(LmsSearchFilterGeneratorTestCase, self).setUp()
self.build_courses()
self.user = UserFactory.create(username="jack", email="jack@fake.edx.org", password='test')
for course in self.courses:
CourseEnrollment.enroll(self.user, course.location.course_key)
def test_course_id_not_provided(self):
"""
Tests that we get the list of IDs of courses the user is enrolled in when the course ID is null or not provided
"""
field_dictionary, filter_dictionary, _ = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertTrue('start_date' in filter_dictionary)
self.assertIn(unicode(self.courses[0].id), field_dictionary['course'])
self.assertIn(unicode(self.courses[1].id), field_dictionary['course'])
def test_course_id_provided(self):
"""
Tests that we get the course ID when the course ID is provided
"""
field_dictionary, filter_dictionary, _ = LmsSearchFilterGenerator.generate_field_filters(
user=self.user,
course_id=unicode(self.courses[0].id)
)
self.assertTrue('start_date' in filter_dictionary)
self.assertEqual(unicode(self.courses[0].id), field_dictionary['course'])
def test_user_not_provided(self):
"""
Tests that we get empty list of courses in case the user is not provided
"""
field_dictionary, filter_dictionary, _ = LmsSearchFilterGenerator.generate_field_filters()
self.assertTrue('start_date' in filter_dictionary)
self.assertEqual(0, len(field_dictionary['course']))
def test_excludes_microsite(self):
"""
        By default there are two microsites to exclude: the test microsite
        and the microsite with logistration
"""
_, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertIn('org', exclude_dictionary)
exclude_orgs = exclude_dictionary['org']
self.assertEqual(2, len(exclude_orgs))
self.assertEqual('LogistrationX', exclude_orgs[0])
self.assertEqual('TestMicrositeX', exclude_orgs[1])
@patch('microsite_configuration.microsite.get_all_orgs', Mock(return_value=[]))
def test_excludes_no_microsite(self):
""" Test when no microsite is present - nothing to exclude """
_, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertNotIn('org', exclude_dictionary)
@patch('microsite_configuration.microsite.get_value', Mock(return_value='TestMicrositeX'))
def test_excludes_microsite_within(self):
field_dictionary, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertNotIn('org', exclude_dictionary)
self.assertIn('org', field_dictionary)
self.assertEqual('TestMicrositeX', field_dictionary['org'])
@patch(
'microsite_configuration.microsite.get_all_orgs',
Mock(return_value=["TestMicrosite1", "TestMicrosite2", "TestMicrosite3", "TestMicrosite4"])
)
def test_excludes_multi_microsites(self):
_, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertIn('org', exclude_dictionary)
exclude_orgs = exclude_dictionary['org']
self.assertEqual(4, len(exclude_orgs))
self.assertIn('TestMicrosite1', exclude_orgs)
self.assertIn('TestMicrosite2', exclude_orgs)
self.assertIn('TestMicrosite3', exclude_orgs)
self.assertIn('TestMicrosite4', exclude_orgs)
@patch(
'microsite_configuration.microsite.get_all_orgs',
Mock(return_value=["TestMicrosite1", "TestMicrosite2", "TestMicrosite3", "TestMicrosite4"])
)
@patch('microsite_configuration.microsite.get_value', Mock(return_value='TestMicrosite3'))
def test_excludes_multi_microsites_within(self):
field_dictionary, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertNotIn('org', exclude_dictionary)
self.assertIn('org', field_dictionary)
self.assertEqual('TestMicrosite3', field_dictionary['org'])
|
xy-VSFilter/xy-VSFilter
|
refs/heads/master
|
src/thirdparty/gtest/test/gtest_env_var_test.py
|
2408
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
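# For example, TestFlag('color', 'yes', 'auto') exports GTEST_COLOR=yes and
# expects GetFlag('color') to echo 'yes', then unsets the variable and
# expects the built-in default 'auto'.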
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
|
EDUlib/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/discussions/migrations/0003_alter_provider_filter_list.py
|
4
|
# Generated by Django 2.2.19 on 2021-03-23 14:46
from django.db import migrations, models
import django_mysql.models
class Migration(migrations.Migration):
dependencies = [
('discussions', '0002_add_provider_filter'),
]
operations = [
migrations.AlterField(
model_name='providerfilter',
name='allow',
field=django_mysql.models.ListCharField(models.CharField(choices=[('legacy', 'legacy'), ('piazza', 'piazza')], max_length=20), blank=True, help_text='Comma-separated list of providers to allow, eg: legacy,piazza', max_length=63, size=3, verbose_name='Allow List'),
),
migrations.AlterField(
model_name='providerfilter',
name='deny',
field=django_mysql.models.ListCharField(models.CharField(choices=[('legacy', 'legacy'), ('piazza', 'piazza')], max_length=20), blank=True, help_text='Comma-separated list of providers to deny, eg: legacy,piazza', max_length=63, size=3, verbose_name='Deny List'),
),
]
|
Asurada2015/TFAPI_translation
|
refs/heads/master
|
math_ops_basicoperation/tf_pow.py
|
1
|
import tensorflow as tf
"""tf.pow(x,y,name=None)
功能:计算x各元素的y次方。
输入:x,y为张量,可以为`float32`, `float64`, `int32`, `int64`,`complex64`,`complex128`类型。"""
x = tf.constant([[2, 3, 5], [2, 3, 5]], tf.float64)
y = tf.constant([[2, 3, 4]], tf.float64)
z = tf.pow(x, y)
sess = tf.Session()
print(sess.run(z))
sess.close()
"""[[ 4. 27. 625.]
[ 4. 27. 625.]]"""
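# Note the broadcasting at work: x has shape (2, 3) and y has shape (1, 3),
# so y's single row of exponents is applied to each row of x, yielding the
# (2, 3) result shown above.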
|
marcreyesph/scancode-toolkit
|
refs/heads/develop
|
src/scancode/pool.py
|
4
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
"""
Utilities and patches to create multiprocessing Process pools.
Apply proper monkeypatch to work around some bugs or limitations.
"""
"""
Monkeypatch Pool iterators so that Ctrl-C interrupts everything properly
derived from https://gist.github.com/aljungberg/626518
Copyright (c) Alexander Ljungberg. All rights reserved.
Modifications Copyright (c) 2017 nexB Inc. and others. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from multiprocessing import pool
def wrapped(func):
    # ensure that we do not double wrap
    if func.func_name != 'wrap':
        def wrap(self, timeout=None):
            # Waiting without a timeout blocks signal delivery on Python 2,
            # so Ctrl-C is swallowed; substituting a huge finite timeout
            # keeps KeyboardInterrupt deliverable while effectively waiting
            # forever.
            return func(self, timeout=timeout or 1e10)
        return wrap
    else:
        return func
pool.IMapIterator.next = wrapped(pool.IMapIterator.next)
pool.IMapIterator.__next__ = pool.IMapIterator.next
pool.IMapUnorderedIterator.next = wrapped(pool.IMapUnorderedIterator.next)
pool.IMapUnorderedIterator.__next__ = pool.IMapUnorderedIterator.next
def get_pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
return pool.Pool(processes, initializer, initargs, maxtasksperchild)
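# A minimal usage sketch (hypothetical worker function, not part of this
# module): iterating via imap_unordered through get_pool() keeps Ctrl-C
# responsive thanks to the monkeypatched iterators above.
def _demo_square(n):
    return n * n
if __name__ == '__main__':
    demo_pool = get_pool(processes=2)
    for result in demo_pool.imap_unordered(_demo_square, range(10)):
        print(result)
    demo_pool.close()
    demo_pool.join()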
|
Integral-Technology-Solutions/ConfigNOW-4.3
|
refs/heads/master
|
Lib/xml/sax/sax2exts.py
|
2
|
"""
Various extensions to the core SAX 2.0 API.
$Id: sax2exts.py,v 1.4 2001/03/03 07:30:06 loewis Exp $
"""
import saxexts,saxlib
# In SAX2, validation is turned-on through a property. Make sure
# that all parsers returned from this factory are validating
class ValidatingReaderFactory(saxexts.ParserFactory):
def make_parser(self, parser_list = []):
p = saxexts.ParserFactory.make_parser(self,parser_list)
p.setFeature(saxlib.feature_validation, 1)
return p
# --- XMLReader factory
XMLReaderFactory = saxexts.ParserFactory
# --- Creating parser factories
XMLParserFactory = XMLReaderFactory(["xml.sax.drivers2.drv_pyexpat",
"xml.sax.drivers2.drv_xmlproc"])
XMLValParserFactory = ValidatingReaderFactory(["xml.sax.drivers2.drv_xmlproc"])
HTMLParserFactory = XMLReaderFactory([])
SGMLParserFactory = XMLReaderFactory([])
def make_parser(parser_list = []):
return XMLParserFactory.make_parser(parser_list)
|
ishay2b/tensorflow
|
refs/heads/segnet
|
tensorflow/python/ops/distributions/identity_bijector.py
|
73
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Identity bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Identity",
]
class Identity(bijector.Bijector):
"""Compute Y = g(X) = X.
Example Use:
```python
# Create the Y=g(X)=X transform which is intended for Tensors with 1 batch
# ndim and 1 event ndim (i.e., vector of vectors).
identity = Identity(event_ndims=1)
x = [[1., 2],
[3, 4]]
x == identity.forward(x) == identity.inverse(x)
```
"""
def __init__(self, validate_args=False, event_ndims=0, name="identity"):
super(Identity, self).__init__(
is_constant_jacobian=True,
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
return x
def _inverse(self, y):
return y
def _inverse_log_det_jacobian(self, y):
return constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(0., dtype=x.dtype)
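# A minimal usage sketch (assumes values are evaluated in a session, TF 1.x
# style): Identity simply passes values through, so
#   bij = Identity(event_ndims=1)
#   bij.forward(x) == x and bij.inverse(y) == y,
# and both log-det-Jacobians evaluate to the constant 0.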
|
laurenbarker/SHARE
|
refs/heads/master
|
share/migrations/0010_auto_20161212_1418_d.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-12-12 14:18
from __future__ import unicode_literals
import db.deletion
from django.conf import settings
from django.db import migrations, models
import share.models.fields
class Migration(migrations.Migration):
dependencies = [
('share', '0010_auto_20161212_1418_c'),
]
operations = [
migrations.AlterField(
model_name='tag',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='tag',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.Tag'),
),
migrations.AlterField(
model_name='tag',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.TagVersion'),
),
migrations.AlterField(
model_name='tag',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_tag', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='tag',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_tag_version', to='share.TagVersion'),
),
migrations.AlterField(
model_name='tagversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_tagversion', to='share.Change'),
),
migrations.AlterField(
model_name='tagversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='tagversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='tagversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.Tag'),
),
migrations.AlterField(
model_name='tagversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.TagVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='award',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.Award'),
),
migrations.AlterField(
model_name='throughawards',
name='award_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AwardVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughawards', to='share.Change'),
),
migrations.AlterField(
model_name='throughawards',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughawards',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='funder',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughawards',
name='funder_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughAwards'),
),
migrations.AlterField(
model_name='throughawards',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughAwardsVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_throughawards', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='throughawards',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_throughawards_version', to='share.ThroughAwardsVersion'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='award',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.Award'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='award_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AwardVersion'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughawardsversion', to='share.Change'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='funder',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='funder_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughAwards'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughAwardsVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughcontributor', to='share.Change'),
),
migrations.AlterField(
model_name='throughcontributor',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughcontributor',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='related',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughcontributor',
name='related_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughContributor'),
),
migrations.AlterField(
model_name='throughcontributor',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughContributorVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_throughcontributor', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='throughcontributor',
name='subject',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughcontributor',
name='subject_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_throughcontributor_version', to='share.ThroughContributorVersion'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughcontributorversion', to='share.Change'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='related',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='related_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughContributor'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughContributorVersion'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='subject',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='subject_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughsubjects',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughsubjects', to='share.Change'),
),
migrations.AlterField(
model_name='throughsubjects',
name='creative_work',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='subject_relations', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='throughsubjects',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='throughsubjects',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughsubjects',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughsubjects',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughSubjects'),
),
migrations.AlterField(
model_name='throughsubjects',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughSubjectsVersion'),
),
migrations.AlterField(
model_name='throughsubjects',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_throughsubjects', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='throughsubjects',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_throughsubjects_version', to='share.ThroughSubjectsVersion'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughsubjectsversion', to='share.Change'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='creative_work',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughSubjects'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughSubjectsVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughtags', to='share.Change'),
),
migrations.AlterField(
model_name='throughtags',
name='creative_work',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='tag_relations', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='throughtags',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughtags',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughTags'),
),
migrations.AlterField(
model_name='throughtags',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughTagsVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_throughtags', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='throughtags',
name='tag',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='work_relations', to='share.Tag'),
),
migrations.AlterField(
model_name='throughtags',
name='tag_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.TagVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_throughtags_version', to='share.ThroughTagsVersion'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughtagsversion', to='share.Change'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='creative_work',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughTags'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughTagsVersion'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='tag',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.Tag'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='tag_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.TagVersion'),
),
migrations.AlterField(
model_name='workidentifier',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_workidentifier', to='share.Change'),
),
migrations.AlterField(
model_name='workidentifier',
name='creative_work',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='identifiers', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='workidentifier',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='workidentifier',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='workidentifier',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='workidentifier',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.WorkIdentifier'),
),
migrations.AlterField(
model_name='workidentifier',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.WorkIdentifierVersion'),
),
migrations.AlterField(
model_name='workidentifier',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_workidentifier', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='workidentifier',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_workidentifier_version', to='share.WorkIdentifierVersion'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_workidentifierversion', to='share.Change'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='creative_work',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.WorkIdentifier'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.WorkIdentifierVersion'),
),
]
|
Titulacion-Sistemas/PythonTitulacion-EV
|
refs/heads/master
|
Lib/encodings/tis_620.py
|
593
|
""" Python Character Mapping Codec tis_620 generated from 'python-mappings/TIS-620.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='tis-620',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
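### A minimal usage sketch (assumes the standard 'encodings' search path has
### registered this module under the name 'tis-620'):
### u'\u0e01'.encode('tis-620') -> '\xa1' (THAI CHARACTER KO KAI)
### '\xa1'.decode('tis-620') -> u'\u0e01'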
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
    u'\ufffe' # 0xA0 -> UNDEFINED
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
    u'\ufffe' # 0xDB -> UNDEFINED
    u'\ufffe' # 0xDC -> UNDEFINED
    u'\ufffe' # 0xDD -> UNDEFINED
    u'\ufffe' # 0xDE -> UNDEFINED
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
    u'\ufffe' # 0xFC -> UNDEFINED
    u'\ufffe' # 0xFD -> UNDEFINED
    u'\ufffe' # 0xFE -> UNDEFINED
    u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
andres-root/AIND
|
refs/heads/master
|
Therm2/rnn/my_answers.py
|
1
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import keras
# TODO: fill out the function below that transforms the input series
# and window-size into a set of input/output pairs for use with our RNN model
def window_transform_series(series, window_size):
# containers for input/output pairs
X = []
y = []
for i in range(len(series)):
fi = i + (window_size)
if fi <= (len(series) - 1):
window = series[i:fi]
X.append(window)
y.append(series[fi])
# reshape each
X = np.asarray(X)
X.shape = (np.shape(X)[0:2])
y = np.asarray(y)
y.shape = (len(y),1)
return X,y
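# A quick worked example (assumption: a plain list works too, since the loop
# only needs len() and slicing):
# window_transform_series([1, 2, 3, 4, 5], window_size=2)
# -> X rows [1, 2], [2, 3], [3, 4]; y rows [3], [4], [5]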
# TODO: build an RNN to perform regression on our time series input/output data
def build_part1_RNN(window_size):
model = Sequential()
model.add(LSTM(5, input_shape=(window_size, 1), activation='tanh'))
model.add(Dense(1, input_shape=(window_size, 1)))
model.compile(loss='mean_squared_error', optimizer='adam')
return model
### TODO: return the text input with only ascii lowercase and the punctuation given below included.
def cleaned_text(text):
allowed = ['a','b','c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z',' ', '!', ',', '.', ':', ';', '?']
text = [c for c in text if c in allowed]
return ''.join(text)
### TODO: fill out the function below that transforms the input text and window-size into a set of input/output pairs for use with our RNN model
def window_transform_text(text, window_size, step_size):
# containers for input/output pairs
inputs = []
outputs = []
current_step = 1
for i in range(len(text)):
if current_step == step_size or i == 0:
fi = i + (window_size)
if fi <= (len(text) - 1):
window = text[i:fi]
inputs.append(window)
outputs.append(text[fi])
current_step = 1
else:
current_step += 1
return inputs,outputs
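# A quick worked example (step_size=1, so every position is sampled):
# window_transform_text('abcde', window_size=2, step_size=1)
# -> inputs == ['ab', 'bc', 'cd'], outputs == ['c', 'd', 'e']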
# TODO build the required RNN model:
# a single LSTM hidden layer with softmax activation, categorical_crossentropy loss
def build_part2_RNN(window_size, num_chars):
model = Sequential()
model.add(LSTM(200, input_shape=(window_size, num_chars), activation='tanh'))
model.add(Dense(num_chars, activation='linear'))
model.add(Dense(num_chars, activation='softmax'))
return model
|
onelab-eu/sfa
|
refs/heads/geni-v3
|
clientbin/sfiAddLinks.py
|
2
|
#! /usr/bin/env python
import sys
from sfa.util.sfalogging import logger
from sfa.client.sfi_commands import Commands
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.version_manager import VersionManager
logger.enable_console()
command = Commands(usage="%prog [options] node1 node2...",
description="Add links to the RSpec. " +
"This command reads in an RSpec and outputs a modified " +
"RSpec. Use this to add links to your slivers")
command.add_linkfile_option()
command.prep()
if not command.opts.linkfile:
print "Missing link list -- exiting"
command.parser.print_help()
sys.exit(1)
if command.opts.infile:
infile=file(command.opts.infile)
else:
infile=sys.stdin
if command.opts.outfile:
outfile=file(command.opts.outfile,"w")
else:
outfile=sys.stdout
ad_rspec = RSpec(infile)
links = file(command.opts.linkfile).read().split('\n')
link_tuples = map(lambda x: tuple(x.split()), links)
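# For example, a link file line "node1 node2" becomes the tuple
# ('node1', 'node2') here (assumption: whitespace-separated node names, one
# link per line).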
version_manager = VersionManager()
try:
type = ad_rspec.version.type
version_num = ad_rspec.version.version
request_version = version_manager._get_version(type, version_num, 'request')
request_rspec = RSpec(version=request_version)
request_rspec.version.merge(ad_rspec)
request_rspec.version.add_link_requests(link_tuples)
except:
logger.log_exc("sfiAddLinks FAILED with links %s" % links)
sys.exit(1)
print >>outfile, request_rspec.toxml()
sys.exit(0)
|
Num98801/Linphone_lib
|
refs/heads/master
|
tools/python/unittests/test_call.py
|
26
|
from nose.tools import assert_equals
import linphone
from linphonetester import *
import os
import time
class TestCall:
def test_early_declined_call(self):
marie = CoreManager('marie_rc')
pauline = CoreManager('pauline_rc')
marie.lc.max_calls = 0
out_call = pauline.lc.invite_address(marie.identity)
# Wait until flexisip transfers the busy...
assert_equals(CoreManager.wait_for_until(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallError == 1, 33000), True)
assert_equals(pauline.stats.number_of_LinphoneCallError, 1)
#FIXME http://git.linphone.org/mantis/view.php?id=757
#assert_equals(out_call.reason, linphone.Reason.Busy)
if len(pauline.lc.call_logs) > 0:
out_call_log = pauline.lc.call_logs[0]
assert out_call_log is not None
assert_equals(out_call_log.status, linphone.CallStatus.Aborted)
def test_declined_call(self):
marie = CoreManager('marie_rc')
pauline = CoreManager('pauline_rc')
out_call = pauline.lc.invite_address(marie.identity)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: marie.stats.number_of_LinphoneCallIncomingReceived == 1), True)
in_call = marie.lc.current_call
assert in_call is not None
if in_call is not None:
marie.lc.terminate_call(in_call)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: marie.stats.number_of_LinphoneCallReleased == 1), True)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallReleased == 1), True)
assert_equals(marie.stats.number_of_LinphoneCallEnd, 1)
assert_equals(pauline.stats.number_of_LinphoneCallEnd, 1)
assert_equals(in_call.reason, linphone.Reason.Declined)
assert_equals(out_call.reason, linphone.Reason.Declined)
def test_cancelled_call(self):
marie = CoreManager('marie_rc')
pauline = CoreManager('pauline_rc')
out_call = pauline.lc.invite_address(marie.identity)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallOutgoingInit == 1), True)
pauline.lc.terminate_call(out_call)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallEnd == 1), True)
assert_equals(pauline.stats.number_of_LinphoneCallEnd, 1)
assert_equals(marie.stats.number_of_LinphoneCallIncomingReceived, 0)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallReleased == 1), True)
def test_early_cancelled_call(self):
marie = CoreManager('marie_rc')
pauline = CoreManager('empty_rc', check_for_proxies=False)
out_call = pauline.lc.invite_address(marie.identity)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallOutgoingInit == 1), True)
pauline.lc.terminate_call(out_call)
# Since everything is executed in a row, no response can be received from the server, thus the CANCEL cannot be sent.
# It will ring at Marie's side.
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallEnd == 1), True)
assert_equals(pauline.stats.number_of_LinphoneCallEnd, 1)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: marie.stats.number_of_LinphoneCallIncomingReceived == 1), True)
        # Now the CANCEL should have been sent and the call at Marie's side should terminate
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: marie.stats.number_of_LinphoneCallEnd == 1), True)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallReleased == 1), True)
def test_cancelled_ringing_call(self):
marie = CoreManager('marie_rc')
pauline = CoreManager('pauline_rc')
out_call = pauline.lc.invite_address(marie.identity)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: marie.stats.number_of_LinphoneCallIncomingReceived == 1), True)
pauline.lc.terminate_call(out_call)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: (pauline.stats.number_of_LinphoneCallReleased == 1) and (marie.stats.number_of_LinphoneCallReleased == 1)), True)
assert_equals(marie.stats.number_of_LinphoneCallEnd, 1)
assert_equals(pauline.stats.number_of_LinphoneCallEnd, 1)
def test_call_failed_because_of_codecs(self):
marie = CoreManager('marie_rc')
pauline = CoreManager('pauline_rc')
marie.disable_all_audio_codecs_except_one('pcmu')
pauline.disable_all_audio_codecs_except_one('pcma')
out_call = pauline.lc.invite_address(marie.identity)
assert_equals(CoreManager.wait_for(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallOutgoingInit == 1), True)
# flexisip will retain the 488 until the "urgent reply" timeout arrives.
assert_equals(CoreManager.wait_for_until(pauline, marie, lambda pauline, marie: pauline.stats.number_of_LinphoneCallError == 1, 7000), True)
assert_equals(out_call.reason, linphone.Reason.NotAcceptable)
assert_equals(marie.stats.number_of_LinphoneCallIncomingReceived, 0)
assert_equals(marie.stats.number_of_LinphoneCallReleased, 0)
def test_simple_call(self):
marie = CoreManager('marie_rc')
pauline = CoreManager('pauline_rc')
assert_equals(CoreManager.call(pauline, marie), True)
#liblinphone_tester_check_rtcp(marie,pauline);
CoreManager.end_call(marie, pauline)
|
jmolloy/pedigree
|
refs/heads/master
|
images/install/disk-contents/libraries/python2.6/getopt.py
|
167
|
# -*- coding: iso-8859-1 -*-
"""Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <liw@iki.fi>.
#
# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError('option --%s requires argument' % opt, opt)
optarg, args = args[0], args[1:]
elif optarg:
raise GetoptError('option --%s must not have an argument' % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError('option --%s not recognized' % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError('option --%s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
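# Illustrative calls (unambiguous abbreviations of long options are accepted):
#   long_has_args('al', ['alpha=', 'beta'])   -> (True, 'alpha')
#   long_has_args('beta', ['alpha=', 'beta']) -> (False, 'beta')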
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError('option -%s requires argument' % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError('option -%s not recognized' % opt, opt)
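# Illustrative calls against shortopts 'a:b' ('a' takes an argument, 'b' does not):
#   short_has_arg('a', 'a:b') -> True
#   short_has_arg('b', 'a:b') -> False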
if __name__ == '__main__':
import sys
print getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])
|
GinnyN/Team-Fortress-RPG-Generators
|
refs/heads/master
|
tests/regressiontests/admin_util/models.py
|
40
|
from django.db import models
class Article(models.Model):
"""
A simple Article model for testing
"""
site = models.ForeignKey('sites.Site', related_name="admin_articles")
title = models.CharField(max_length=100)
title2 = models.CharField(max_length=100, verbose_name="another name")
created = models.DateTimeField()
def test_from_model(self):
return "nothing"
def test_from_model_with_override(self):
return "nothing"
test_from_model_with_override.short_description = "not What you Expect"
class Count(models.Model):
num = models.PositiveSmallIntegerField()
parent = models.ForeignKey('self', null=True)
def __unicode__(self):
return unicode(self.num)
class Event(models.Model):
date = models.DateTimeField(auto_now_add=True)
class Location(models.Model):
event = models.OneToOneField(Event, verbose_name='awesome event')
class Guest(models.Model):
event = models.OneToOneField(Event)
name = models.CharField(max_length=255)
class Meta:
verbose_name = "awesome guest"
|
d0ugal-archive/python-rfxcom
|
refs/heads/master
|
tests/protocol/test_status.py
|
4
|
from unittest import TestCase
from rfxcom.protocol.status import Status
from rfxcom.exceptions import (InvalidPacketLength, UnknownPacketSubtype,
UnknownPacketType)
class StatusTestCase(TestCase):
def setUp(self):
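        # Interface-message status packet. Per the values asserted below:
        # byte 0 is the packet length (0x0D = 13), byte 1 the packet type
        # (0x01), byte 2 the sub type (0x00), byte 3 the sequence number
        # (0x01), byte 4 the command type (0x02), byte 5 the transceiver
        # type (0x53 = 83), byte 6 the firmware version (0x45 = 69); the
        # remaining bytes carry the protocol enable/disable flags.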
self.data = bytearray(b'\x0D\x01\x00\x01\x02\x53\x45\x00\x0C'
b'\x2F\x01\x01\x00\x00')
self.parser = Status()
def test_parse_bytes(self):
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
        self.assertEqual(result.pop('disabled_protocols'), [
'AD LightwaveRF',
'AE Blyss',
'ATI',
'BlindsT0',
'BlindsT1/T2/T3/T4',
'Byron SX',
'Display undecoded',
'FS20',
'FineOffset/Viking',
'Lighting4',
'Meiantech',
'Mertik',
'ProGuard',
'RFU6',
'RSL',
'Rubicson',
'Visonic',
])
        self.assertEqual(result.pop('enabled_protocols'), [
'AC',
'ARC',
'Hideki/UPM',
'HomeEasy EU',
'La Crosse',
'Oregon Scientific',
'X10',
])
        self.assertEqual(result, {
'packet_length': 13,
'packet_type': 1,
'packet_type_name': 'Interface message',
'sequence_number': 1,
'sub_type': 0,
'sub_type_name': 'Response on a mode command',
'transceiver_type': 83,
'transceiver_type_text': '433.92MHz transceiver',
'firmware_version': 69,
'command_type': 2,
})
        self.assertEqual(str(self.parser), "<Status ID:None>")
def test_validate_bytes_short(self):
data = self.data[:1]
with self.assertRaises(InvalidPacketLength):
self.parser.validate_packet(data)
    def test_validate_unknown_packet_type(self):
self.data[1] = 0xFF
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketType):
self.parser.validate_packet(self.data)
def test_validate_unknown_sub_type(self):
self.data[2] = 0xEE
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketSubtype):
self.parser.validate_packet(self.data)
def test_log_namer(self):
        self.assertEqual(self.parser.log.name, 'rfxcom.protocol.Status')
|
CiscoSystems/vespa
|
refs/heads/master
|
neutron/tests/unit/openvswitch/test_ovs_tunnel.py
|
4
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Dave Lapsley, Nicira Networks, Inc.
import contextlib
import mock
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
# Useful global dummy variables.
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
LS_ID = 42
LV_ID = 42
LV_IDS = [42, 43]
VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
VIF_MAC = '3c:09:24:1e:78:23'
OFPORT_NUM = 1
VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM,
VIF_ID, VIF_MAC, 'switch')
VIF_PORTS = {VIF_ID: VIF_PORT}
LVM = ovs_neutron_agent.LocalVLANMapping(LV_ID, 'gre', None, LS_ID, VIF_PORTS)
LVM_FLAT = ovs_neutron_agent.LocalVLANMapping(
LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
LVM_VLAN = ovs_neutron_agent.LocalVLANMapping(
LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
class DummyPort:
def __init__(self, interface_id):
self.interface_id = interface_id
class DummyVlanBinding:
def __init__(self, network_id, vlan_id):
self.network_id = network_id
self.vlan_id = vlan_id
class TunnelTest(base.BaseTestCase):
def setUp(self):
super(TunnelTest, self).setUp()
cfg.CONF.set_override('rpc_backend',
'neutron.openstack.common.rpc.impl_fake')
cfg.CONF.set_override('report_interval', 0, 'AGENT')
self.addCleanup(mock.patch.stopall)
self.INT_BRIDGE = 'integration_bridge'
self.TUN_BRIDGE = 'tunnel_bridge'
self.MAP_TUN_BRIDGE = 'tunnel_bridge_mapping'
self.NET_MAPPING = {'net1': self.MAP_TUN_BRIDGE}
self.INT_OFPORT = 11111
self.TUN_OFPORT = 22222
self.MAP_TUN_OFPORT = 33333
self.VETH_MTU = None
self.inta = mock.Mock()
self.intb = mock.Mock()
self.ovs_bridges = {self.INT_BRIDGE: mock.Mock(),
self.TUN_BRIDGE: mock.Mock(),
self.MAP_TUN_BRIDGE: mock.Mock(),
}
self.mock_bridge = mock.patch.object(ovs_lib, 'OVSBridge').start()
self.mock_bridge.side_effect = (lambda br_name, root_helper:
self.ovs_bridges[br_name])
self.mock_bridge_expected = [
mock.call(self.INT_BRIDGE, 'sudo'),
mock.call(self.MAP_TUN_BRIDGE, 'sudo'),
mock.call(self.TUN_BRIDGE, 'sudo'),
]
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge.get_local_port_mac.return_value = '000000000001'
self.mock_int_bridge_expected = [
mock.call.get_local_port_mac(),
mock.call.delete_port('patch-tun'),
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
]
self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE]
self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
self.mock_map_tun_bridge.add_port.return_value = None
self.mock_map_tun_bridge_expected = [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.delete_port('phy-tunnel_bridge_mapping'),
mock.call.add_port(self.intb),
]
self.mock_int_bridge.add_port.return_value = None
self.mock_int_bridge_expected += [
mock.call.delete_port('int-tunnel_bridge_mapping'),
mock.call.add_port(self.inta)
]
self.inta_expected = [mock.call.link.set_up()]
self.intb_expected = [mock.call.link.set_up()]
self.mock_int_bridge_expected += [
mock.call.add_flow(priority=2, in_port=None, actions='drop')
]
self.mock_map_tun_bridge_expected += [
mock.call.add_flow(priority=2, in_port=None, actions='drop')
]
self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE]
self.mock_tun_bridge_expected = [
mock.call.reset_bridge(),
mock.call.add_patch_port('patch-int', 'patch-tun'),
]
self.mock_int_bridge_expected += [
mock.call.add_patch_port('patch-tun', 'patch-int')
]
self.mock_int_bridge.add_patch_port.return_value = self.TUN_OFPORT
self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT
self.mock_tun_bridge_expected += [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1,
in_port=self.INT_OFPORT,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN),
mock.call.add_flow(priority=0, actions='drop'),
mock.call.add_flow(table=constants.PATCH_LV_TO_TUN,
dl_dst=UCAST_MAC,
actions="resubmit(,%s)" %
constants.UCAST_TO_TUN),
mock.call.add_flow(table=constants.PATCH_LV_TO_TUN,
dl_dst=BCAST_MAC,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN),
]
for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
self.mock_tun_bridge_expected.append(
mock.call.add_flow(
table=constants.TUN_TABLE[tunnel_type],
priority=0,
actions="drop"))
learned_flow = ("table=%s,"
"priority=1,"
"hard_timeout=300,"
"NXM_OF_VLAN_TCI[0..11],"
"NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
"load:0->NXM_OF_VLAN_TCI[],"
"load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
"output:NXM_OF_IN_PORT[]" %
constants.UCAST_TO_TUN)
self.mock_tun_bridge_expected += [
mock.call.add_flow(table=constants.LEARN_FROM_TUN,
priority=1,
actions="learn(%s),output:%s" %
(learned_flow, self.INT_OFPORT)),
mock.call.add_flow(table=constants.UCAST_TO_TUN,
priority=0,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN),
mock.call.add_flow(table=constants.FLOOD_TO_TUN,
priority=0,
actions="drop")
]
self.device_exists = mock.patch.object(ip_lib, 'device_exists').start()
self.device_exists.return_value = True
self.device_exists_expected = [
mock.call('tunnel_bridge_mapping', 'sudo'),
mock.call('int-tunnel_bridge_mapping', 'sudo'),
]
self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start()
self.ipdevice_expected = [
mock.call('int-tunnel_bridge_mapping', 'sudo'),
mock.call().link.delete()
]
self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start()
add_veth = self.ipwrapper.return_value.add_veth
add_veth.return_value = [self.inta, self.intb]
self.ipwrapper_expected = [
mock.call('sudo'),
mock.call().add_veth('int-tunnel_bridge_mapping',
'phy-tunnel_bridge_mapping')
]
self.get_bridges = mock.patch.object(ovs_lib, 'get_bridges').start()
self.get_bridges.return_value = [self.INT_BRIDGE,
self.TUN_BRIDGE,
self.MAP_TUN_BRIDGE]
self.get_bridges_expected = [
mock.call('sudo')
]
self.execute = mock.patch('neutron.agent.linux.utils.execute').start()
self.execute_expected = [mock.call(['/sbin/udevadm', 'settle',
'--timeout=10'])]
def _verify_mock_call(self, mock_obj, expected):
mock_obj.assert_has_calls(expected)
self.assertEqual(len(mock_obj.mock_calls), len(expected))
def _verify_mock_calls(self):
self._verify_mock_call(self.mock_bridge, self.mock_bridge_expected)
self._verify_mock_call(self.mock_int_bridge,
self.mock_int_bridge_expected)
self._verify_mock_call(self.mock_map_tun_bridge,
self.mock_map_tun_bridge_expected)
self._verify_mock_call(self.mock_tun_bridge,
self.mock_tun_bridge_expected)
self._verify_mock_call(self.device_exists, self.device_exists_expected)
self._verify_mock_call(self.ipdevice, self.ipdevice_expected)
self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected)
self._verify_mock_call(self.get_bridges, self.get_bridges_expected)
self._verify_mock_call(self.inta, self.inta_expected)
self._verify_mock_call(self.intb, self.intb_expected)
self._verify_mock_call(self.execute, self.execute_expected)
def test_construct(self):
ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
self._verify_mock_calls()
def test_construct_vxlan(self):
with mock.patch.object(ovs_lib, 'get_installed_ovs_klm_version',
return_value="1.10") as klm_ver:
with mock.patch.object(ovs_lib, 'get_installed_ovs_usr_version',
return_value="1.10") as usr_ver:
ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['vxlan'],
self.VETH_MTU)
klm_ver.assert_called_once_with()
usr_ver.assert_called_once_with('sudo')
self._verify_mock_calls()
def test_provision_local_vlan(self):
ofports = ','.join(TUN_OFPORTS[p_const.TYPE_GRE].values())
self.mock_tun_bridge_expected += [
mock.call.mod_flow(table=constants.FLOOD_TO_TUN,
priority=1,
dl_vlan=LV_ID,
actions="strip_vlan,"
"set_tunnel:%s,output:%s" %
(LS_ID, ofports)),
mock.call.add_flow(table=constants.TUN_TABLE['gre'],
priority=1,
tun_id=LS_ID,
actions="mod_vlan_vid:%s,resubmit(,%s)" %
(LV_ID, constants.LEARN_FROM_TUN)),
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set([LV_ID])
a.tun_br_ofports = TUN_OFPORTS
a.provision_local_vlan(NET_UUID, p_const.TYPE_GRE, None, LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat(self):
action_string = 'strip_vlan,normal'
self.mock_map_tun_bridge_expected.append(
mock.call.add_flow(priority=4, in_port=self.MAP_TUN_OFPORT,
dl_vlan=LV_ID, actions=action_string))
action_string = 'mod_vlan_vid:%s,normal' % LV_ID
self.mock_int_bridge_expected.append(
mock.call.add_flow(priority=3, in_port=self.INT_OFPORT,
dl_vlan=65535, actions=action_string))
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat_fail(self):
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net2', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan(self):
action_string = 'mod_vlan_vid:%s,normal' % LS_ID
self.mock_map_tun_bridge_expected.append(
mock.call.add_flow(priority=4, in_port=self.MAP_TUN_OFPORT,
dl_vlan=LV_ID, actions=action_string))
action_string = 'mod_vlan_vid:%s,normal' % LS_ID
self.mock_int_bridge_expected.append(
mock.call.add_flow(priority=3, in_port=self.INT_OFPORT,
dl_vlan=LV_ID, actions=action_string))
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan_fail(self):
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net2', LS_ID)
self._verify_mock_calls()
def test_reclaim_local_vlan(self):
self.mock_tun_bridge_expected += [
mock.call.delete_flows(
table=constants.TUN_TABLE['gre'], tun_id=LS_ID),
mock.call.delete_flows(dl_vlan=LVM.vlan)
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_flat(self):
self.mock_map_tun_bridge_expected.append(
mock.call.delete_flows(
in_port=self.MAP_TUN_OFPORT, dl_vlan=LVM_FLAT.vlan))
self.mock_int_bridge_expected.append(
mock.call.delete_flows(
dl_vlan=65535, in_port=self.INT_OFPORT))
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM_FLAT
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM_FLAT.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_vlan(self):
self.mock_map_tun_bridge_expected.append(
mock.call.delete_flows(
in_port=self.MAP_TUN_OFPORT, dl_vlan=LVM_VLAN.vlan))
self.mock_int_bridge_expected.append(
mock.call.delete_flows(
dl_vlan=LV_ID, in_port=self.INT_OFPORT))
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM_VLAN
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM_VLAN.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_port_bound(self):
self.mock_int_bridge_expected += [
mock.call.set_db_attribute('Port', VIF_PORT.port_name,
'tag', str(LVM.vlan)),
mock.call.delete_flows(in_port=VIF_PORT.ofport)
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.local_vlan_map[NET_UUID] = LVM
a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID)
self._verify_mock_calls()
def test_port_unbound(self):
with mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'reclaim_local_vlan') as reclaim_local_vlan:
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.local_vlan_map[NET_UUID] = LVM
a.port_unbound(VIF_ID, NET_UUID)
reclaim_local_vlan.assert_called_once_with(NET_UUID)
self._verify_mock_calls()
def test_port_dead(self):
self.mock_int_bridge_expected += [
mock.call.set_db_attribute(
'Port', VIF_PORT.port_name,
'tag', ovs_neutron_agent.DEAD_VLAN_TAG),
mock.call.add_flow(priority=2, in_port=VIF_PORT.ofport,
actions='drop')
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set([LV_ID])
a.local_vlan_map[NET_UUID] = LVM
a.port_dead(VIF_PORT)
self._verify_mock_calls()
def test_tunnel_update(self):
tunnel_port = '9999'
self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port
self.mock_tun_bridge_expected += [
mock.call.add_tunnel_port('gre-1', '10.0.10.1', '10.0.0.1',
'gre', 4789),
mock.call.add_flow(priority=1, in_port=tunnel_port,
actions='resubmit(,2)')
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.tunnel_update(
mock.sentinel.ctx, tunnel_id='1', tunnel_ip='10.0.10.1',
tunnel_type=p_const.TYPE_GRE)
self._verify_mock_calls()
def test_tunnel_update_self(self):
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.tunnel_update(
mock.sentinel.ctx, tunnel_id='1', tunnel_ip='10.0.0.1')
self._verify_mock_calls()
def test_daemon_loop(self):
reply2 = {'current': set(['tap0']),
'added': set([]),
'removed': set([])}
reply3 = {'current': set(['tap2']),
'added': set([]),
'removed': set([])}
with contextlib.nested(
mock.patch.object(log.ContextAdapter, 'exception'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'update_ports'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'process_network_ports')
) as (log_exception, update_ports, process_network_ports):
log_exception.side_effect = Exception(
'Fake exception to get out of the loop')
update_ports.side_effect = [reply2, reply3]
process_network_ports.side_effect = [
False, Exception('Fake exception to get out of the loop')]
q_agent = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1',
self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
# Hack to test loop
# We start method and expect it will raise after 2nd loop
# If something goes wrong, assert_has_calls below will catch it
try:
q_agent.daemon_loop()
except Exception:
pass
log_exception.assert_called_once_with("Error in agent event loop")
update_ports.assert_has_calls([
mock.call(set()),
mock.call(set(['tap0']))
])
process_network_ports.assert_has_calls([
mock.call({'current': set(['tap0']),
'removed': set([]),
'added': set([])}),
mock.call({'current': set(['tap2']),
'removed': set([]),
'added': set([])})
])
self._verify_mock_calls()
class TunnelTestWithMTU(TunnelTest):
def setUp(self):
super(TunnelTestWithMTU, self).setUp()
self.VETH_MTU = 1500
self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
|
amenonsen/ansible
|
refs/heads/devel
|
lib/ansible/modules/source_control/github_key.py
|
44
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: github_key
short_description: Manage GitHub access keys.
description:
- Creates, removes, or updates GitHub access keys.
version_added: "2.2"
options:
token:
description:
- GitHub Access Token with permission to list and create public keys.
required: true
name:
description:
- SSH key name
required: true
pubkey:
description:
- SSH public key value. Required when C(state=present).
state:
description:
- Whether to remove a key, ensure that it exists, or update its value.
choices: ['present', 'absent']
default: 'present'
force:
description:
- The default is C(yes), which will replace the existing remote key
if it's different than C(pubkey). If C(no), the key will only be
set if no key with the given C(name) exists.
type: bool
default: 'yes'
author: Robert Estelle (@erydo)
'''
RETURN = '''
deleted_keys:
description: An array of key objects that were deleted. Only present on state=absent
type: list
returned: When state=absent
sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
matching_keys:
description: An array of keys matching the specified name. Only present on state=present
type: list
returned: When state=present
sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
key:
description: Metadata about the key just created. Only present on state=present
type: dict
    returned: When state=present
sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}
'''
EXAMPLES = '''
- name: Read SSH public key to authorize
shell: cat /home/foo/.ssh/id_rsa.pub
register: ssh_pub_key
- name: Authorize key with GitHub
local_action:
module: github_key
name: Access Key for Some Machine
token: '{{ github_access_token }}'
pubkey: '{{ ssh_pub_key.stdout }}'
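# Additional illustrative example (not part of the original module docs):
# removing a previously authorized key by name.
- name: Remove key from GitHub
  local_action:
    module: github_key
    name: Access Key for Some Machine
    token: '{{ github_access_token }}'
    state: absent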
'''
import json
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
API_BASE = 'https://api.github.com'
class GitHubResponse(object):
def __init__(self, response, info):
self.content = response.read()
self.info = info
def json(self):
return json.loads(self.content)
def links(self):
links = {}
if 'link' in self.info:
link_header = self.info['link']
matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
for url, rel in matches:
links[rel] = url
return links
class GitHubSession(object):
def __init__(self, module, token):
self.module = module
self.token = token
def request(self, method, url, data=None):
headers = {
'Authorization': 'token %s' % self.token,
'Content-Type': 'application/json',
'Accept': 'application/vnd.github.v3+json',
}
response, info = fetch_url(
self.module, url, method=method, data=data, headers=headers)
if not (200 <= info['status'] < 400):
self.module.fail_json(
msg=(" failed to send request %s to %s: %s"
% (method, url, info['msg'])))
return GitHubResponse(response, info)
def get_all_keys(session):
url = API_BASE + '/user/keys'
result = []
while url:
r = session.request('GET', url)
result.extend(r.json())
url = r.links().get('next')
return result
def create_key(session, name, pubkey, check_mode):
if check_mode:
from datetime import datetime
now = datetime.utcnow()
return {
'id': 0,
'key': pubkey,
'title': name,
'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
'read_only': False,
'verified': False
}
else:
return session.request(
'POST',
API_BASE + '/user/keys',
data=json.dumps({'title': name, 'key': pubkey})).json()
def delete_keys(session, to_delete, check_mode):
if check_mode:
return
for key in to_delete:
session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])
def ensure_key_absent(session, name, check_mode):
to_delete = [key for key in get_all_keys(session) if key['title'] == name]
delete_keys(session, to_delete, check_mode=check_mode)
return {'changed': bool(to_delete),
'deleted_keys': to_delete}
def ensure_key_present(module, session, name, pubkey, force, check_mode):
all_keys = get_all_keys(session)
matching_keys = [k for k in all_keys if k['title'] == name]
deleted_keys = []
new_signature = pubkey.split(' ')[1]
for key in all_keys:
existing_signature = key['key'].split(' ')[1]
if new_signature == existing_signature and key['title'] != name:
module.fail_json(msg=(
"another key with the same content is already registered "
"under the name |{0}|").format(key['title']))
if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature:
delete_keys(session, matching_keys, check_mode=check_mode)
(deleted_keys, matching_keys) = (matching_keys, [])
if not matching_keys:
key = create_key(session, name, pubkey, check_mode=check_mode)
else:
key = matching_keys[0]
return {
'changed': bool(deleted_keys or not matching_keys),
'deleted_keys': deleted_keys,
'matching_keys': matching_keys,
'key': key
}
def main():
argument_spec = {
'token': {'required': True, 'no_log': True},
'name': {'required': True},
'pubkey': {},
'state': {'choices': ['present', 'absent'], 'default': 'present'},
'force': {'default': True, 'type': 'bool'},
}
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
token = module.params['token']
name = module.params['name']
state = module.params['state']
force = module.params['force']
pubkey = module.params.get('pubkey')
if pubkey:
pubkey_parts = pubkey.split(' ')
# Keys consist of a protocol, the key data, and an optional comment.
if len(pubkey_parts) < 2:
module.fail_json(msg='"pubkey" parameter has an invalid format')
elif state == 'present':
module.fail_json(msg='"pubkey" is required when state=present')
session = GitHubSession(module, token)
if state == 'present':
result = ensure_key_present(module, session, name, pubkey, force=force,
check_mode=module.check_mode)
elif state == 'absent':
result = ensure_key_absent(session, name, check_mode=module.check_mode)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
westial/restfulcomm
|
refs/heads/master
|
restfulcomm/http/jsonresponse.py
|
1
|
# -*- coding: utf-8 -*-
"""Value object product of a json format response got from a server as plain
text.
"""
import base64
import json
from restfulcomm.http.superjson import BaseJson
from restfulcomm.core.helpers import HttpHelper
from requests.models import Response
class JsonResponse(BaseJson):
def to_dict(self):
"""Return the object attributes dict. If content type is not plain text
encode the body by base64 codec"""
attributes = dict()
for key in self.__dir__():
value = getattr(self, key)
if key == 'body' and not HttpHelper.is_plain_content_type(
self.headers['Content-Type']):
value = base64.b64encode(value).decode()
attributes[key] = value
return attributes
def __init__(self):
self._status = None
self._body = None
self._headers = None
def __dir__(self):
return [
'status',
'body',
'headers'
]
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = value
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, value):
self._headers = value
@classmethod
def plain_factory(cls, content):
"""Given a plain text json formatted content sets the context attributes
Args:
content: str json formatted content
Return:
JsonResponse object
"""
        if isinstance(content, bytes):
content = content.decode('utf-8')
json_map = json.loads(content)
json_response = JsonResponse()
json_response.headers = json_map['headers']
json_response.status = json_map['status']
if 'body' in json_map:
if HttpHelper.is_plain_content_type(json_response.headers['Content-Type']):
json_response.body = json_map['body']
else:
json_response.body = base64.b64decode(json_map['body'])
else:
json_response.body = None
return json_response
@classmethod
def http_factory(cls, response: Response):
"""Return a JsonResponse object by the given HTTP Response
Args:
response: Response object
Return:
JsonResponse
"""
json_response = JsonResponse()
if response.encoding:
json_response.body = response.content.decode(response.encoding)
else:
json_response.body = response.content
json_response.status = response.status_code
json_response.headers = dict(response.headers)
return json_response
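# Illustrative round trip (assuming HttpHelper.is_plain_content_type treats
# text/plain as a plain content type):
#   raw = '{"status": 200, "headers": {"Content-Type": "text/plain"}, "body": "ok"}'
#   resp = JsonResponse.plain_factory(raw)
#   assert resp.status == 200 and resp.body == 'ok'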
|
aperigault/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/infinidat/infini_fs.py
|
44
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_fs
version_added: 2.3
short_description: Create, Delete or Modify filesystems on Infinibox
description:
- This module creates, deletes or modifies filesystems on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- File system name.
required: true
state:
description:
- Creates/Modifies file system when present or removes when absent.
required: false
default: present
choices: [ "present", "absent" ]
size:
description:
- File system size in MB, GB or TB units. See examples.
required: false
pool:
description:
- Pool that will host file system.
required: true
extends_documentation_fragment:
- infinibox
requirements:
- capacity
'''
EXAMPLES = '''
- name: Create new file system named foo under pool named bar
infini_fs:
name: foo
size: 1TB
pool: bar
state: present
user: admin
password: secret
system: ibox001
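# Additional illustrative example (not part of the original module docs):
# removing the same file system.
- name: Remove file system named foo from pool named bar
  infini_fs:
    name: foo
    pool: bar
    state: absent
    user: admin
    password: secret
    system: ibox001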
'''
RETURN = '''
'''
import traceback
CAPACITY_IMP_ERR = None
try:
from capacity import KiB, Capacity
HAS_CAPACITY = True
except ImportError:
CAPACITY_IMP_ERR = traceback.format_exc()
HAS_CAPACITY = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_pool(module, system):
"""Return Pool or None"""
try:
return system.pools.get(name=module.params['pool'])
except Exception:
return None
@api_wrapper
def get_filesystem(module, system):
"""Return Filesystem or None"""
try:
return system.filesystems.get(name=module.params['name'])
except Exception:
return None
@api_wrapper
def create_filesystem(module, system):
"""Create Filesystem"""
if not module.check_mode:
filesystem = system.filesystems.create(name=module.params['name'], pool=get_pool(module, system))
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
filesystem.update_size(size)
module.exit_json(changed=True)
@api_wrapper
def update_filesystem(module, filesystem):
"""Update Filesystem"""
changed = False
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
if filesystem.get_size() != size:
if not module.check_mode:
filesystem.update_size(size)
changed = True
module.exit_json(changed=changed)
@api_wrapper
def delete_filesystem(module, filesystem):
""" Delete Filesystem"""
if not module.check_mode:
filesystem.delete()
module.exit_json(changed=True)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
pool=dict(required=True),
size=dict()
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
if not HAS_CAPACITY:
module.fail_json(msg=missing_required_lib('capacity'), exception=CAPACITY_IMP_ERR)
if module.params['size']:
try:
Capacity(module.params['size'])
except Exception:
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
state = module.params['state']
system = get_system(module)
pool = get_pool(module, system)
filesystem = get_filesystem(module, system)
if pool is None:
module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
if state == 'present' and not filesystem:
create_filesystem(module, system)
elif state == 'present' and filesystem:
update_filesystem(module, filesystem)
elif state == 'absent' and filesystem:
delete_filesystem(module, filesystem)
elif state == 'absent' and not filesystem:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
shakamunyi/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/gaussian_conjugate_posteriors_test.py
|
5
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
distributions = tf.contrib.distributions
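# For reference, these tests exercise the standard conjugate update for a
# Gaussian likelihood with known sigma (a textbook result): given a
# N(mu0, sigma0**2) prior and n observations with sum s, the posterior is
# Gaussian with
#   variance  v = 1.0 / (1.0 / sigma0**2 + n / sigma**2)
#   mean      m = v * (mu0 / sigma0**2 + s / sigma**2)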
class GaussianTest(tf.test.TestCase):
def testGaussianConjugateKnownSigmaPosterior(self):
with tf.Session():
mu0 = tf.constant([3.0])
sigma0 = tf.constant([math.sqrt(10.0)])
sigma = tf.constant([math.sqrt(2.0)])
x = tf.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
s = tf.reduce_sum(x)
n = tf.size(x)
prior = distributions.Gaussian(mu=mu0, sigma=sigma0)
posterior = distributions.gaussian_conjugates_known_sigma_posterior(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Gaussian))
posterior_log_pdf = posterior.log_pdf(x).eval()
self.assertEqual(posterior_log_pdf.shape, (6,))
def testGaussianConjugateKnownSigmaPosteriorND(self):
with tf.Session():
batch_size = 6
mu0 = tf.constant([[3.0, -3.0]] * batch_size)
sigma0 = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
sigma = tf.constant([[math.sqrt(2.0)]] * batch_size)
x = tf.transpose(
tf.constant([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=tf.float32))
s = tf.reduce_sum(x)
n = tf.size(x)
prior = distributions.Gaussian(mu=mu0, sigma=sigma0)
posterior = distributions.gaussian_conjugates_known_sigma_posterior(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Gaussian))
posterior_log_pdf = posterior.log_pdf(x).eval()
self.assertEqual(posterior_log_pdf.shape, (6, 2))
def testGaussianConjugateKnownSigmaNDPosteriorND(self):
with tf.Session():
batch_size = 6
mu0 = tf.constant([[3.0, -3.0]] * batch_size)
sigma0 = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
sigma = tf.constant([[math.sqrt(2.0), math.sqrt(4.0)]] * batch_size)
x = tf.constant([
[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0],
[2.5, -2.5, -4.0, 0.0, 1.0, -2.0]], dtype=tf.float32)
s = tf.reduce_sum(x, reduction_indices=[1])
x = tf.transpose(x) # Reshape to shape (6, 2)
n = tf.constant([6] * 2)
prior = distributions.Gaussian(mu=mu0, sigma=sigma0)
posterior = distributions.gaussian_conjugates_known_sigma_posterior(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Gaussian))
# Calculate log_pdf under the 2 models
posterior_log_pdf = posterior.log_pdf(x)
self.assertEqual(posterior_log_pdf.get_shape(), (6, 2))
self.assertEqual(posterior_log_pdf.eval().shape, (6, 2))
def testGaussianConjugateKnownSigmaPredictive(self):
with tf.Session():
batch_size = 6
mu0 = tf.constant([3.0] * batch_size)
sigma0 = tf.constant([math.sqrt(10.0)] * batch_size)
sigma = tf.constant([math.sqrt(2.0)] * batch_size)
x = tf.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
s = tf.reduce_sum(x)
n = tf.size(x)
prior = distributions.Gaussian(mu=mu0, sigma=sigma0)
predictive = distributions.gaussian_congugates_known_sigma_predictive(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(predictive, distributions.Gaussian))
predictive_log_pdf = predictive.log_pdf(x).eval()
self.assertEqual(predictive_log_pdf.shape, (6,))
if __name__ == '__main__':
tf.test.main()
|
ayushin78/coala
|
refs/heads/master
|
tests/processes/BearRunningTest.py
|
21
|
import multiprocessing
import queue
import unittest
from coalib.bears.GlobalBear import GlobalBear
from coalib.bears.LocalBear import LocalBear
from coalib.processes.BearRunning import (
LOG_LEVEL, LogMessage, run, send_msg, task_done)
from coalib.processes.CONTROL_ELEMENT import CONTROL_ELEMENT
from coalib.results.Result import RESULT_SEVERITY, Result
from coalib.settings.Section import Section
class LocalTestBear(LocalBear):
def run(self, filename, file):
if filename == 'file1':
raise Exception('Just to throw anything here.')
return [Result.from_values('LocalTestBear',
'something went wrong',
filename)]
class SimpleBear(LocalBear):
def run(self,
filename,
file,
*args,
dependency_results=None,
**kwargs):
return [Result.from_values('SimpleBear',
'something went wrong',
filename),
# This result should not be passed to DependentBear
Result.from_values('FakeBear',
'something went wrong',
filename),
Result.from_values('SimpleBear',
'another thing went wrong',
filename)]
class DependentBear(LocalBear):
BEAR_DEPS = {SimpleBear}
def run(self,
filename,
file,
*args,
dependency_results=None,
**kwargs):
assert len(dependency_results['SimpleBear']) == 2
class SimpleGlobalBear(GlobalBear):
def run(self,
*args,
dependency_results=None,
**kwargs):
return [Result('SimpleGlobalBear', 'something went wrong'),
# This result should not be passed to DependentBear
Result('FakeBear', 'something went wrong'),
Result('SimpleGlobalBear', 'another thing went wrong')]
class DependentGlobalBear(GlobalBear):
BEAR_DEPS = {SimpleGlobalBear}
def run(self,
*args,
dependency_results=None,
**kwargs):
assert len(dependency_results['SimpleGlobalBear']) == 3
class GlobalTestBear(GlobalBear):
def run(self):
result = []
for file, contents in self.file_dict.items():
result.append(Result.from_values('GlobalTestBear',
'Files are bad in general!',
file,
severity=RESULT_SEVERITY.INFO))
return result
class EvilBear(LocalBear):
def execute(self, *args, **kwargs):
raise NotImplementedError
class UnexpectedBear1(LocalBear):
def run(self, filename, file):
return [1,
Result('UnexpectedBear1', 'test result')]
class UnexpectedBear2(LocalBear):
def run(self, filename, file):
return 1
class BearRunningUnitTest(unittest.TestCase):
def setUp(self):
self.settings = Section('name')
self.file_name_queue = queue.Queue()
self.local_bear_list = []
self.global_bear_list = []
self.global_bear_queue = queue.Queue()
self.file_dict = {}
manager = multiprocessing.Manager()
self.local_result_dict = manager.dict()
self.global_result_dict = manager.dict()
self.message_queue = queue.Queue()
self.control_queue = queue.Queue()
def test_queue_done_marking(self):
self.message_queue.put('test')
task_done(self.message_queue) # Should make the queue joinable
self.message_queue.join()
task_done('test') # Should pass silently
def test_messaging(self):
send_msg(self.message_queue,
0,
LOG_LEVEL.DEBUG,
'test',
'messag',
delimiter='-',
end='e')
self.assertEqual(self.message_queue.get(),
LogMessage(LOG_LEVEL.DEBUG, 'test-message'))
def test_dependencies(self):
self.local_bear_list.append(SimpleBear(self.settings,
self.message_queue))
self.local_bear_list.append(DependentBear(self.settings,
self.message_queue))
self.global_bear_list.append(SimpleGlobalBear({},
self.settings,
self.message_queue))
self.global_bear_list.append(DependentGlobalBear({},
self.settings,
self.message_queue))
self.global_bear_queue.put(1)
self.global_bear_queue.put(0)
self.file_name_queue.put('t')
self.file_dict['t'] = []
run(self.file_name_queue,
self.local_bear_list,
self.global_bear_list,
self.global_bear_queue,
self.file_dict,
self.local_result_dict,
self.global_result_dict,
self.message_queue,
self.control_queue)
try:
while True:
msg = self.message_queue.get(timeout=0)
self.assertEqual(msg.log_level, LOG_LEVEL.DEBUG)
except queue.Empty:
pass
def test_evil_bear(self):
self.local_bear_list.append(EvilBear(self.settings,
self.message_queue))
self.file_name_queue.put('t')
self.file_dict['t'] = []
run(self.file_name_queue,
self.local_bear_list,
self.global_bear_list,
self.global_bear_queue,
self.file_dict,
self.local_result_dict,
self.global_result_dict,
self.message_queue,
self.control_queue)
def test_strange_bear(self):
self.local_bear_list.append(UnexpectedBear1(self.settings,
self.message_queue))
self.local_bear_list.append(UnexpectedBear2(self.settings,
self.message_queue))
self.file_name_queue.put('t')
self.file_dict['t'] = []
run(self.file_name_queue,
self.local_bear_list,
self.global_bear_list,
self.global_bear_queue,
self.file_dict,
self.local_result_dict,
self.global_result_dict,
self.message_queue,
self.control_queue)
expected_messages = [LOG_LEVEL.DEBUG,
LOG_LEVEL.ERROR,
LOG_LEVEL.DEBUG,
LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING]
for msg in expected_messages:
self.assertEqual(msg, self.message_queue.get(timeout=0).log_level)
class BearRunningIntegrationTest(unittest.TestCase):
example_file = """a
b
c
d
"""
def setUp(self):
self.settings = Section('name')
self.file_name_queue = queue.Queue()
self.local_bear_list = []
self.global_bear_list = []
self.global_bear_queue = queue.Queue()
self.file_dict = {}
manager = multiprocessing.Manager()
self.local_result_dict = manager.dict()
self.global_result_dict = manager.dict()
self.message_queue = queue.Queue()
self.control_queue = queue.Queue()
self.file1 = 'file1'
self.file2 = 'arbitrary'
self.file_name_queue.put(self.file1)
self.file_name_queue.put(self.file2)
self.file_name_queue.put('invalid file')
self.local_bear_list.append(LocalTestBear(self.settings,
self.message_queue))
self.local_bear_list.append('not a valid bear')
self.file_dict[self.file1] = self.example_file
self.file_dict[self.file2] = self.example_file
self.global_bear_list.append(GlobalTestBear(self.file_dict,
self.settings,
self.message_queue))
self.global_bear_list.append('not a valid bear')
self.global_bear_queue.put(0)
self.global_bear_queue.put(1)
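    # test_run below asserts that run() emits control elements in the
    # order: one LOCAL element per local-result batch, then LOCAL_FINISHED,
    # then GLOBAL with the global results, then GLOBAL_FINISHED paired
    # with None.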
def test_run(self):
run(self.file_name_queue,
self.local_bear_list,
self.global_bear_list,
self.global_bear_queue,
self.file_dict,
self.local_result_dict,
self.global_result_dict,
self.message_queue,
self.control_queue)
expected_messages = [LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING,
LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING,
LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING,
LOG_LEVEL.ERROR,
LOG_LEVEL.DEBUG,
LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING]
for msg in expected_messages:
self.assertEqual(msg, self.message_queue.get(timeout=0).log_level)
local_result_expected = [[],
[Result.from_values('LocalTestBear',
'something went wrong',
'arbitrary')]
]
for expected in local_result_expected:
control_elem, index = self.control_queue.get()
self.assertEqual(control_elem, CONTROL_ELEMENT.LOCAL)
real = self.local_result_dict[index]
self.assertEqual(real, expected)
global_results_expected = [Result.from_values(
'GlobalTestBear',
'Files are bad in general!',
'file1',
severity=RESULT_SEVERITY.INFO),
Result.from_values(
'GlobalTestBear',
'Files are bad in general!',
'arbitrary',
severity=RESULT_SEVERITY.INFO)]
control_elem, index = self.control_queue.get()
self.assertEqual(control_elem, CONTROL_ELEMENT.LOCAL_FINISHED)
control_elem, index = self.control_queue.get()
self.assertEqual(control_elem, CONTROL_ELEMENT.GLOBAL)
real = self.global_result_dict[index]
self.assertEqual(sorted(global_results_expected), sorted(real))
control_elem, none = self.control_queue.get(timeout=0)
self.assertEqual(control_elem, CONTROL_ELEMENT.GLOBAL_FINISHED)
self.assertEqual(none, None)
# The invalid bear gets a None in that dict for dependency resolution
self.assertEqual(len(self.global_result_dict), 2)
self.assertEqual(len(self.local_result_dict),
len(local_result_expected))
self.assertRaises(queue.Empty, self.message_queue.get, timeout=0)
self.assertRaises(queue.Empty, self.control_queue.get, timeout=0)
|
cloudxaas/cloudauth
|
refs/heads/master
|
idp/__init__.py
|
1
|
#
__all__ =[]
|
telwertowski/Books-Mac-OS-X
|
refs/heads/master
|
Versions/Books_3.0b3/Amazon (DE).plugin/Contents/Resources/amazon.py
|
126
|
"""Python wrapper
for Amazon web APIs
This module allows you to access Amazon's web APIs,
to do things like search Amazon and get the results programmatically.
Described here:
http://www.amazon.com/webservices
You need an Amazon-provided license key to use these services.
Follow the link above to get one. These functions will look in
several places (in this order) for the license key:
- the "license_key" argument of each function
- the module-level LICENSE_KEY variable (call setLicense once to set it)
- an environment variable called AMAZON_LICENSE_KEY
- a file called ".amazonkey" in the current directory
- a file called "amazonkey.txt" in the current directory
- a file called ".amazonkey" in your home directory
- a file called "amazonkey.txt" in your home directory
- a file called ".amazonkey" in the same directory as amazon.py
- a file called "amazonkey.txt" in the same directory as amazon.py
Sample usage:
>>> import amazon
>>> amazon.setLicense('...') # must get your own key!
>>> pythonBooks = amazon.searchByKeyword('Python')
>>> pythonBooks[0].ProductName
u'Learning Python (Help for Programmers)'
>>> pythonBooks[0].URL
...
>>> pythonBooks[0].OurPrice
...
Other available functions:
- browseBestSellers
- searchByASIN
- searchByUPC
- searchByAuthor
- searchByArtist
- searchByActor
- searchByDirector
- searchByManufacturer
- searchByListMania
- searchSimilar
- searchByWishlist
Other usage notes:
- Most functions can take product_line as well, see source for possible values
- All functions can take type="lite" to get less detail in results
- All functions can take page=N to get second, third, fourth page of results
- All functions can take license_key="XYZ", instead of setting it globally
- All functions can take http_proxy="http://x/y/z" which overrides your system setting
"""
__author__ = "Mark Pilgrim (f8dy@diveintomark.org)"
__version__ = "0.64.1"
__cvsversion__ = "$Revision: 1.12 $"[11:-2]
__date__ = "$Date: 2004/07/02 13:24:09 $"[7:-2]
__copyright__ = "Copyright (c) 2002 Mark Pilgrim"
__license__ = "Python"
# Powersearch and return object type fix by Joseph Reagle <geek@goatee.net>
# Locale support by Michael Josephson <mike@josephson.org>
# Modification to _contentsOf to strip trailing whitespace when loading Amazon key
# from a file submitted by Patrick Phalen.
# Support for specifying locale and associates ID as search parameters and
# internationalisation fix for the SalesRank integer conversion by
# Christian Theune <ct@gocept.com>, gocept gmbh & co. kg
# Support for BlendedSearch contributed by Alex Choo
from xml.dom import minidom
import os, sys, getopt, cgi, urllib, string
try:
import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py
timeoutsocket.setDefaultSocketTimeout(10)
except ImportError:
pass
LICENSE_KEY = "1M21AJ49MF6Y0DJ4D1G2"
ASSOCIATE = "aetherialnu0a-20"
HTTP_PROXY = None
LOCALE = "us"
# don't touch the rest of these constants
class AmazonError(Exception): pass
class NoLicenseKey(Exception): pass
_amazonfile1 = ".amazonkey"
_amazonfile2 = "amazonkey.txt"
_licenseLocations = (
(lambda key: key, 'passed to the function in license_key variable'),
(lambda key: LICENSE_KEY, 'module-level LICENSE_KEY variable (call setLicense to set it)'),
(lambda key: os.environ.get('AMAZON_LICENSE_KEY', None), 'an environment variable called AMAZON_LICENSE_KEY'),
(lambda key: _contentsOf(os.getcwd(), _amazonfile1), '%s in the current directory' % _amazonfile1),
(lambda key: _contentsOf(os.getcwd(), _amazonfile2), '%s in the current directory' % _amazonfile2),
(lambda key: _contentsOf(os.environ.get('HOME', ''), _amazonfile1), '%s in your home directory' % _amazonfile1),
(lambda key: _contentsOf(os.environ.get('HOME', ''), _amazonfile2), '%s in your home directory' % _amazonfile2),
(lambda key: _contentsOf(_getScriptDir(), _amazonfile1), '%s in the amazon.py directory' % _amazonfile1),
(lambda key: _contentsOf(_getScriptDir(), _amazonfile2), '%s in the amazon.py directory' % _amazonfile2)
)
_supportedLocales = {
"us" : (None, "xml.amazon.com"),
"uk" : ("uk", "xml-eu.amazon.com"),
"de" : ("de", "xml-eu.amazon.com"),
"jp" : ("jp", "xml.amazon.co.jp"),
"ca" : ("ca", "xml.amazon.ca"),
"fr" : ("fr", "xml.amazon.fr")
}
## administrative functions
def version():
print """PyAmazon %(__version__)s
%(__copyright__)s
released %(__date__)s
""" % globals()
def setAssociate(associate):
global ASSOCIATE
ASSOCIATE=associate
def getAssociate(override=None):
return override or ASSOCIATE
## utility functions
def _checkLocaleSupported(locale):
    if locale not in _supportedLocales:
        raise AmazonError, ("Unsupported locale. Locale must be one of: %s" %
                            ", ".join(_supportedLocales))
def setLocale(locale):
"""set locale"""
global LOCALE
_checkLocaleSupported(locale)
LOCALE = locale
def getLocale(locale=None):
"""get locale"""
return locale or LOCALE
def setLicense(license_key):
"""set license key"""
global LICENSE_KEY
LICENSE_KEY = license_key
def getLicense(license_key = None):
"""get license key
license key can come from any number of locations;
see module docs for search order"""
for get, location in _licenseLocations:
rc = get(license_key)
if rc: return rc
raise NoLicenseKey, 'get a license key at http://www.amazon.com/webservices'
def setProxy(http_proxy):
"""set HTTP proxy"""
global HTTP_PROXY
HTTP_PROXY = http_proxy
def getProxy(http_proxy = None):
"""get HTTP proxy"""
return http_proxy or HTTP_PROXY
def getProxies(http_proxy = None):
http_proxy = getProxy(http_proxy)
if http_proxy:
proxies = {"http": http_proxy}
else:
proxies = None
return proxies
def _contentsOf(dirname, filename):
filename = os.path.join(dirname, filename)
if not os.path.exists(filename): return None
fsock = open(filename)
contents = fsock.read().strip()
fsock.close()
return contents
def _getScriptDir():
if __name__ == '__main__':
return os.path.abspath(os.path.dirname(sys.argv[0]))
else:
return os.path.abspath(os.path.dirname(sys.modules[__name__].__file__))
class Bag: pass
def unmarshal(element):
rc = Bag()
if isinstance(element, minidom.Element) and (element.tagName == 'Details'):
rc.URL = element.attributes["url"].value
childElements = [e for e in element.childNodes if isinstance(e, minidom.Element)]
if childElements:
for child in childElements:
key = child.tagName
if hasattr(rc, key):
                if not isinstance(getattr(rc, key), list):
setattr(rc, key, [getattr(rc, key)])
setattr(rc, key, getattr(rc, key) + [unmarshal(child)])
elif isinstance(child, minidom.Element) and (child.tagName == 'Details'):
# make the first Details element a key
setattr(rc,key,[unmarshal(child)])
#dbg: because otherwise 'hasattr' only tests
                #dbg: on the second occurrence: if there's a
#dbg: single return to a query, it's not a
#dbg: list. This module should always
#dbg: return a list of Details objects.
else:
setattr(rc, key, unmarshal(child))
else:
rc = "".join([e.data for e in element.childNodes if isinstance(e, minidom.Text)])
if element.tagName == 'SalesRank':
rc = rc.replace('.', '')
rc = rc.replace(',', '')
rc = rc.replace(' ', '')
rc = int(rc)
return rc
def buildURL(search_type, keyword, product_line, type, page, license_key, locale, associate, mode):
_checkLocaleSupported(locale)
url = "http://" + _supportedLocales[locale][1] + "/onca/xml3?f=xml"
url += "&t=%s" % associate
url += "&dev-t=%s" % license_key.strip()
url += "&type=%s" % type
if _supportedLocales[locale][0]:
url += "&locale=%s" % _supportedLocales[locale][0]
if page:
url += "&page=%s" % page
if product_line:
url += "&mode=%s" % mode
url += "&%s=%s" % (search_type, urllib.quote(keyword))
return url
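# Illustrative output (made-up key and associate tag; the "us" locale adds
# no &locale parameter):
#   buildURL("KeywordSearch", "Python", "books", "lite", 1,
#            "XXX", "us", "myassoc-20", "books")
#   -> "http://xml.amazon.com/onca/xml3?f=xml&t=myassoc-20&dev-t=XXX"
#      "&type=lite&page=1&mode=books&KeywordSearch=Python"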
## main functions
def search(search_type, keyword, product_line, type = "heavy", page = None,
license_key=None, http_proxy = None, locale = None, associate = None, mode = None):
"""search Amazon
You need a license key to call this function; see
http://www.amazon.com/webservices
to get one. Then you can either pass it to
this function every time, or set it globally; see the module docs for details.
Parameters:
keyword - keyword to search
search_type - in (KeywordSearch, BrowseNodeSearch, AsinSearch, UpcSearch, AuthorSearch, ArtistSearch, ActorSearch, DirectorSearch, ManufacturerSearch, ListManiaSearch, SimilaritySearch)
product_line - type of product to search for. restrictions based on search_type
UpcSearch - in (music, classical)
AuthorSearch - must be "books"
ArtistSearch - in (music, classical)
ActorSearch - in (dvd, vhs, video)
DirectorSearch - in (dvd, vhs, video)
ManufacturerSearch - in (electronics, kitchen, videogames, software, photo, pc-hardware)
    http_proxy (optional) - address of HTTP proxy to use for sending and receiving requests
Returns: list of Bags, each Bag may contain the following attributes:
Asin - Amazon ID ("ASIN" number) of this item
Authors - list of authors
Availability - "available", etc.
BrowseList - list of related categories
Catalog - catalog type ("Book", etc)
CollectiblePrice - ?, format "$34.95"
ImageUrlLarge - URL of large image of this item
ImageUrlMedium - URL of medium image of this item
ImageUrlSmall - URL of small image of this item
Isbn - ISBN number
ListPrice - list price, format "$34.95"
Lists - list of ListMania lists that include this item
Manufacturer - manufacturer
Media - media ("Paperback", "Audio CD", etc)
NumMedia - number of different media types in which this item is available
OurPrice - Amazon price, format "$24.47"
ProductName - name of this item
ReleaseDate - release date, format "09 April, 1999"
Reviews - reviews (AvgCustomerRating, plus list of CustomerReview with Rating, Summary, Content)
SalesRank - sales rank (integer)
SimilarProducts - list of Product, which is ASIN number
ThirdPartyNewPrice - ?, format "$34.95"
URL - URL of this item
"""
license_key = getLicense(license_key)
locale = getLocale(locale)
associate = getAssociate(associate)
url = buildURL(search_type, keyword, product_line, type, page,
license_key, locale, associate, mode)
proxies = getProxies(http_proxy)
u = urllib.FancyURLopener(proxies)
usock = u.open(url)
xmldoc = minidom.parse(usock)
# from xml.dom.ext import PrettyPrint
# PrettyPrint(xmldoc)
usock.close()
if search_type == "BlendedSearch":
data = unmarshal(xmldoc).BlendedSearch
else:
data = unmarshal(xmldoc).ProductInfo
if hasattr(data, 'ErrorMsg'):
raise AmazonError, data.ErrorMsg
else:
if search_type == "BlendedSearch":
# a list of ProductLine containing a list of ProductInfo
# containing a list of Details.
return data
else:
return data.Details
def searchByKeyword(keyword, product_line="books", type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("KeywordSearch", keyword, product_line, type, page, license_key, http_proxy, locale, associate, mode)
def browseBestSellers(browse_node, product_line="books", type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("BrowseNodeSearch", browse_node, product_line, type, page, license_key, http_proxy, locale, associate, mode)
def searchByASIN(ASIN, type="heavy", license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("AsinSearch", ASIN, None, type, None, license_key, http_proxy, locale, associate, mode)
def searchByUPC(UPC, type="heavy", license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("UpcSearch", UPC, None, type, None, license_key, http_proxy, locale, associate, mode)
def searchByAuthor(author, type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("AuthorSearch", author, "books", type, page, license_key, http_proxy, locale, associate, mode)
def searchByArtist(artist, product_line="music", type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
if product_line not in ("music", "classical"):
raise AmazonError, "product_line must be in ('music', 'classical')"
return search("ArtistSearch", artist, product_line, type, page, license_key, http_proxy, locale, associate, mode)
def searchByActor(actor, product_line="dvd", type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
if product_line not in ("dvd", "vhs", "video"):
raise AmazonError, "product_line must be in ('dvd', 'vhs', 'video')"
return search("ActorSearch", actor, product_line, type, page, license_key, http_proxy, locale, associate, mode)
def searchByDirector(director, product_line="dvd", type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
if product_line not in ("dvd", "vhs", "video"):
raise AmazonError, "product_line must be in ('dvd', 'vhs', 'video')"
return search("DirectorSearch", director, product_line, type, page, license_key, http_proxy, locale, associate, mode)
def searchByManufacturer(manufacturer, product_line="pc-hardware", type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
if product_line not in ("electronics", "kitchen", "videogames", "software", "photo", "pc-hardware"):
raise AmazonError, "product_line must be in ('electronics', 'kitchen', 'videogames', 'software', 'photo', 'pc-hardware')"
return search("ManufacturerSearch", manufacturer, product_line, type, page, license_key, http_proxy, locale, associate, mode)
def searchByListMania(listManiaID, type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("ListManiaSearch", listManiaID, None, type, page, license_key, http_proxy, locale, associate, mode)
def searchSimilar(ASIN, type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("SimilaritySearch", ASIN, None, type, page, license_key, http_proxy, locale, associate, mode)
def searchByWishlist(wishlistID, type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("WishlistSearch", wishlistID, None, type, page, license_key, http_proxy, locale, associate,mode)
def searchByPower(keyword, product_line="books", type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("PowerSearch", keyword, product_line, type, page, license_key, http_proxy, locale, associate, mode)
# >>> RecentKing = amazon.searchByPower('author:Stephen King and pubdate:2003')
# >>> SnowCrash = amazon.searchByPower('title:Snow Crash')
def searchByBlended(keyword, type="heavy", page=1, license_key=None, http_proxy=None, locale=None, associate=None, mode="books"):
return search("BlendedSearch", keyword, None, type, page, license_key, http_proxy, locale, associate, mode)
|
yeyanchao/calibre
|
refs/heads/master
|
src/calibre/ebooks/pdf/outline_writer.py
|
9
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from collections import defaultdict
class Outline(object):
def __init__(self, toc, items):
self.toc = toc
self.items = items
self.anchor_map = {}
self.pos_map = defaultdict(dict)
self.toc_map = {}
for item in items:
self.anchor_map[item] = anchors = set()
item_path = os.path.abspath(item).replace('/', os.sep)
if self.toc is not None:
for x in self.toc.flat():
if x.abspath != item_path: continue
x.outline_item_ = item
if x.fragment:
anchors.add(x.fragment)
def set_pos(self, item, anchor, pagenum, ypos):
self.pos_map[item][anchor] = (pagenum, ypos)
def get_pos(self, toc):
page, ypos = 0, 0
item = getattr(toc, 'outline_item_', None)
if item is not None:
# First use the item URL without fragment
page, ypos = self.pos_map.get(item, {}).get(None, (0, 0))
if toc.fragment:
amap = self.pos_map.get(item, None)
if amap is not None:
page, ypos = amap.get(toc.fragment, (page, ypos))
return page, ypos
def add_children(self, toc, parent):
for child in toc:
page, ypos = self.get_pos(child)
text = child.text or _('Page %d')%page
if page >= self.page_count:
page = self.page_count - 1
cn = parent.create(text, page, True)
self.add_children(child, cn)
def __call__(self, doc):
self.pos_map = dict(self.pos_map)
self.page_count = doc.page_count()
for child in self.toc:
page, ypos = self.get_pos(child)
text = child.text or _('Page %d')%page
if page >= self.page_count:
page = self.page_count - 1
node = doc.create_outline(text, page)
self.add_children(child, node)
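# --- Added standalone sketch; not part of the original module. ---
# The two-step lookup in get_pos() in isolation: first the page recorded
# for the bare item (anchor None), then a fragment-specific override if
# one exists. File name, anchor and positions are made-up values.
def _example_pos_lookup():
    pos_map = {'chap1.html': {None: (3, 0.0), 'sec2': (5, 0.25)}}
    page, ypos = pos_map.get('chap1.html', {}).get(None, (0, 0))
    page, ypos = pos_map['chap1.html'].get('sec2', (page, ypos))
    return page, ypos  # (5, 0.25)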
|
hguemar/cinder
|
refs/heads/master
|
cinder/brick/remotefs/remotefs.py
|
5
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remote filesystem client utilities."""
import hashlib
import os
import re
from oslo_concurrency import processutils as putils
import six
from cinder.brick import exception
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class RemoteFsClient(object):
def __init__(self, mount_type, root_helper,
execute=putils.execute, *args, **kwargs):
self._mount_type = mount_type
if mount_type == "nfs":
self._mount_base = kwargs.get('nfs_mount_point_base', None)
if not self._mount_base:
raise exception.InvalidParameterValue(
err=_('nfs_mount_point_base required'))
self._mount_options = kwargs.get('nfs_mount_options', None)
self._check_nfs_options()
elif mount_type == "cifs":
self._mount_base = kwargs.get('smbfs_mount_point_base', None)
if not self._mount_base:
raise exception.InvalidParameterValue(
err=_('smbfs_mount_point_base required'))
self._mount_options = kwargs.get('smbfs_mount_options', None)
elif mount_type == "glusterfs":
self._mount_base = kwargs.get('glusterfs_mount_point_base', None)
if not self._mount_base:
raise exception.InvalidParameterValue(
err=_('glusterfs_mount_point_base required'))
self._mount_options = None
else:
raise exception.ProtocolNotSupported(protocol=mount_type)
self.root_helper = root_helper
self.set_execute(execute)
def set_execute(self, execute):
self._execute = execute
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str
(in a hex format).
"""
return hashlib.md5(base_str).hexdigest()
def get_mount_point(self, device_name):
"""Get Mount Point.
:param device_name: example 172.18.194.100:/var/nfs
"""
return os.path.join(self._mount_base,
self._get_hash_str(device_name))
def _read_mounts(self):
(out, _err) = self._execute('mount', check_exit_code=0)
lines = out.split('\n')
mounts = {}
for line in lines:
tokens = line.split()
if 2 < len(tokens):
device = tokens[0]
mnt_point = tokens[2]
mounts[mnt_point] = device
return mounts
def mount(self, share, flags=None):
"""Mount given share."""
mount_path = self.get_mount_point(share)
if mount_path in self._read_mounts():
LOG.info(_LI('Already mounted: %s') % mount_path)
return
self._execute('mkdir', '-p', mount_path, check_exit_code=0)
if self._mount_type == 'nfs':
self._mount_nfs(share, mount_path, flags)
else:
self._do_mount(self._mount_type, share, mount_path,
self._mount_options, flags)
def _do_mount(self, mount_type, share, mount_path, mount_options=None,
flags=None):
"""Mounts share based on the specified params."""
mnt_cmd = ['mount', '-t', mount_type]
if mount_options is not None:
mnt_cmd.extend(['-o', mount_options])
if flags is not None:
mnt_cmd.extend(flags)
mnt_cmd.extend([share, mount_path])
self._execute(*mnt_cmd, root_helper=self.root_helper,
run_as_root=True, check_exit_code=0)
def _mount_nfs(self, nfs_share, mount_path, flags=None):
"""Mount nfs share using present mount types."""
mnt_errors = {}
# This loop allows us to first try to mount with NFS 4.1 for pNFS
# support but falls back to mount NFS 4 or NFS 3 if either the client
# or server do not support it.
for mnt_type in sorted(self._nfs_mount_type_opts.keys(), reverse=True):
options = self._nfs_mount_type_opts[mnt_type]
try:
self._do_mount('nfs', nfs_share, mount_path, options, flags)
LOG.debug('Mounted %(sh)s using %(mnt_type)s.'
% {'sh': nfs_share, 'mnt_type': mnt_type})
return
except Exception as e:
mnt_errors[mnt_type] = six.text_type(e)
LOG.debug('Failed to do %s mount.', mnt_type)
raise exception.BrickException(_("NFS mount failed for share %(sh)s. "
"Error - %(error)s")
% {'sh': nfs_share,
'error': mnt_errors})
def _check_nfs_options(self):
"""Checks and prepares nfs mount type options."""
self._nfs_mount_type_opts = {'nfs': self._mount_options}
nfs_vers_opt_patterns = ['^nfsvers', '^vers', '^v[\d]']
for opt in nfs_vers_opt_patterns:
if self._option_exists(self._mount_options, opt):
return
# pNFS requires NFS 4.1. The mount.nfs4 utility does not automatically
# negotiate 4.1 support, we have to ask for it by specifying two
# options: vers=4 and minorversion=1.
pnfs_opts = self._update_option(self._mount_options, 'vers', '4')
pnfs_opts = self._update_option(pnfs_opts, 'minorversion', '1')
self._nfs_mount_type_opts['pnfs'] = pnfs_opts
def _option_exists(self, options, opt_pattern):
"""Checks if the option exists in nfs options and returns position."""
options = [x.strip() for x in options.split(',')] if options else []
pos = 0
for opt in options:
pos = pos + 1
if re.match(opt_pattern, opt, flags=0):
return pos
return 0
def _update_option(self, options, option, value=None):
"""Update option if exists else adds it and returns new options."""
opts = [x.strip() for x in options.split(',')] if options else []
pos = self._option_exists(options, option)
if pos:
opts.pop(pos - 1)
opt = '%s=%s' % (option, value) if value else option
opts.append(opt)
return ",".join(opts) if len(opts) > 1 else opts[0]
|
gacarrillor/QGIS
|
refs/heads/master
|
python/plugins/grassprovider/ext/i_in_spotvgt.py
|
45
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
i_in_spotvgt.py
---------------
Date : April 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'April 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def processInputs(alg, parameters, context, feedback):
    # Here we apply the algorithm directly,
    # so we just need to get the projection of the layer!
layer = alg.parameterAsRasterLayer(parameters, 'input', context)
alg.setSessionProjectionFromLayer(layer)
|
ahotam/micropython
|
refs/heads/master
|
tests/basics/dict_construct.py
|
117
|
# dict constructor
d = dict()
print(d)
d = dict({1:2})
print(d)
d = dict(a=1)
print(d)
d = dict({1:2}, a=3)
print(d[1], d['a'])
d = dict([(1, 2)], a=3, b=4)
print(d[1], d['a'], d['b'])
|
kellinm/anaconda
|
refs/heads/master
|
pyanaconda/anaconda_log.py
|
3
|
#
# anaconda_log.py: Support for logging to multiple destinations with log
# levels.
#
# Copyright (C) 2000, 2001, 2002, 2005 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Chris Lumens <clumens@redhat.com>
# Matt Wilson <msw@redhat.com>
# Michael Fulbright <msf@redhat.com>
#
import logging
from logging.handlers import SysLogHandler, SocketHandler, SYSLOG_UDP_PORT
import os
import sys
import warnings
from pyanaconda.flags import flags
from pyanaconda.constants import LOGLVL_LOCK
DEFAULT_LEVEL = logging.INFO
ENTRY_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s"
STDOUT_FORMAT = "%(asctime)s %(message)s"
DATE_FORMAT = "%H:%M:%S"
MAIN_LOG_FILE = "/tmp/anaconda.log"
PROGRAM_LOG_FILE = "/tmp/program.log"
STORAGE_LOG_FILE = "/tmp/storage.log"
PACKAGING_LOG_FILE = "/tmp/packaging.log"
SENSITIVE_INFO_LOG_FILE = "/tmp/sensitive-info.log"
ANACONDA_SYSLOG_FACILITY = SysLogHandler.LOG_LOCAL1
from threading import Lock
program_log_lock = Lock()
logLevelMap = {"lock": LOGLVL_LOCK,
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL}
# sets autoSetLevel for the given handler
def autoSetLevel(handler, value):
handler.autoSetLevel = value
# all handlers of given logger with autoSetLevel == True are set to level
def setHandlersLevel(logr, level):
for handler in filter(lambda hdlr: hasattr(hdlr, "autoSetLevel") and hdlr.autoSetLevel, logr.handlers):
handler.setLevel(level)
class AnacondaSyslogHandler(SysLogHandler):
# syslog doesn't understand these level names
levelMap = {"ERR": "error",
"CRIT": "critical",
"LOCK": "debug"}
def __init__(self,
address=('localhost', SYSLOG_UDP_PORT),
facility=SysLogHandler.LOG_USER,
tag=''):
self.tag = tag
SysLogHandler.__init__(self, address, facility)
def emit(self, record):
original_msg = record.msg
record.msg = '%s: %s' %(self.tag, original_msg)
SysLogHandler.emit(self, record)
record.msg = original_msg
def mapPriority(self, level):
"""Map the priority level to a syslog level """
return self.levelMap.get(level, SysLogHandler.mapPriority(self, level))
class AnacondaSocketHandler(SocketHandler):
def makePickle(self, record):
return bytes(self.formatter.format(record) + "\n", "utf-8")
class AnacondaLog:
SYSLOG_CFGFILE = "/etc/rsyslog.conf"
VIRTIO_PORT = "/dev/virtio-ports/org.fedoraproject.anaconda.log.0"
def __init__(self):
self.loglevel = DEFAULT_LEVEL
self.remote_syslog = None
# Rename the loglevels so they are the same as in syslog.
logging.addLevelName(logging.WARNING, "WARN")
logging.addLevelName(logging.ERROR, "ERR")
logging.addLevelName(logging.CRITICAL, "CRIT")
logging.addLevelName(LOGLVL_LOCK, "LOCK")
# Create the base of the logger hierarchy.
self.anaconda_logger = logging.getLogger("anaconda")
self.addFileHandler(MAIN_LOG_FILE, self.anaconda_logger,
minLevel=logging.DEBUG)
warnings.showwarning = self.showwarning
# Create the storage logger.
storage_logger = logging.getLogger("blivet")
self.addFileHandler(STORAGE_LOG_FILE, storage_logger,
minLevel=logging.DEBUG)
# Set the common parameters for anaconda and storage loggers.
for logr in [self.anaconda_logger, storage_logger]:
logr.setLevel(logging.DEBUG)
self.forwardToSyslog(logr)
# External program output log
program_logger = logging.getLogger("program")
program_logger.setLevel(logging.DEBUG)
self.addFileHandler(PROGRAM_LOG_FILE, program_logger,
minLevel=logging.DEBUG)
self.forwardToSyslog(program_logger)
# Create the packaging logger.
packaging_logger = logging.getLogger("packaging")
packaging_logger.setLevel(LOGLVL_LOCK)
self.addFileHandler(PACKAGING_LOG_FILE, packaging_logger,
minLevel=logging.INFO,
autoLevel=True)
self.forwardToSyslog(packaging_logger)
# Create the yum logger and link it to packaging
yum_logger = logging.getLogger("yum")
yum_logger.setLevel(logging.DEBUG)
self.addFileHandler(PACKAGING_LOG_FILE, yum_logger,
minLevel=logging.DEBUG)
self.forwardToSyslog(yum_logger)
# Create the sensitive information logger
# * the sensitive-info.log file is not copied to the installed
# system, as it might contain sensitive information that
# should not be persistently stored by default
sensitive_logger = logging.getLogger("sensitive-info")
self.addFileHandler(SENSITIVE_INFO_LOG_FILE, sensitive_logger,
minLevel=logging.DEBUG)
# Create a second logger for just the stuff we want to dup on
# stdout. Anything written here will also get passed up to the
# parent loggers for processing and possibly be written to the
# log.
stdoutLogger = logging.getLogger("anaconda.stdout")
stdoutLogger.setLevel(logging.INFO)
# Add a handler for the duped stuff. No fancy formatting, thanks.
self.addFileHandler(sys.stdout, stdoutLogger,
fmtStr=STDOUT_FORMAT, minLevel=logging.INFO)
# Stderr logger
stderrLogger = logging.getLogger("anaconda.stderr")
stderrLogger.setLevel(logging.INFO)
self.addFileHandler(sys.stderr, stderrLogger,
fmtStr=STDOUT_FORMAT, minLevel=logging.INFO)
# Add a simple handler - file or stream, depending on what we're given.
def addFileHandler(self, dest, addToLogger, minLevel=DEFAULT_LEVEL,
fmtStr=ENTRY_FORMAT,
autoLevel=False):
try:
if isinstance(dest, str):
logfileHandler = logging.FileHandler(dest)
else:
logfileHandler = logging.StreamHandler(dest)
logfileHandler.setLevel(minLevel)
logfileHandler.setFormatter(logging.Formatter(fmtStr, DATE_FORMAT))
autoSetLevel(logfileHandler, autoLevel)
addToLogger.addHandler(logfileHandler)
except IOError:
pass
def forwardToSyslog(self, logr):
"""Forward everything that goes in the logger to the syslog daemon.
"""
if flags.imageInstall or flags.dirInstall:
# don't clutter up the system logs when doing an image install
return
syslogHandler = AnacondaSyslogHandler(
'/dev/log',
ANACONDA_SYSLOG_FACILITY,
logr.name)
syslogHandler.setLevel(logging.DEBUG)
logr.addHandler(syslogHandler)
# pylint: disable=redefined-builtin
def showwarning(self, message, category, filename, lineno,
file=sys.stderr, line=None):
""" Make sure messages sent through python's warnings module get logged.
The warnings mechanism is used by some libraries we use,
notably pykickstart.
"""
self.anaconda_logger.warning("%s", warnings.formatwarning(
message, category, filename, lineno, line))
def setup_remotelog(self, host, port):
remotelog = AnacondaSocketHandler(host, port)
remotelog.setFormatter(logging.Formatter(ENTRY_FORMAT, DATE_FORMAT))
remotelog.setLevel(logging.DEBUG)
logging.getLogger().addHandler(remotelog)
def restartSyslog(self):
# Import here instead of at the module level to avoid an import loop
from pyanaconda.iutil import execWithRedirect
execWithRedirect("systemctl", ["restart", "rsyslog.service"])
def updateRemote(self, remote_syslog):
"""Updates the location of remote rsyslogd to forward to.
Requires updating rsyslogd config and restarting rsyslog
"""
TEMPLATE = "*.* @@%s\n"
self.remote_syslog = remote_syslog
with open(self.SYSLOG_CFGFILE, 'a') as cfgfile:
forward_line = TEMPLATE % remote_syslog
cfgfile.write(forward_line)
self.restartSyslog()
def setupVirtio(self):
"""Setup virtio rsyslog logging.
"""
TEMPLATE = "*.* %s;anaconda_syslog\n"
vport = flags.cmdline.get('virtiolog') or self.VIRTIO_PORT
if not os.access(vport, os.W_OK):
return
with open(self.SYSLOG_CFGFILE, 'a') as cfgfile:
cfgfile.write(TEMPLATE % (vport,))
self.restartSyslog()
logger = None
def init():
global logger
logger = AnacondaLog()
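# --- Added standalone sketch; not part of the original module. ---
# The autoSetLevel/setHandlersLevel mechanism above in isolation:
# handlers flagged with autoSetLevel=True follow bulk level changes,
# while pinned handlers keep their own level. Logger name and levels
# are arbitrary demo values.
def _example_auto_level():
    demo = logging.getLogger("autolevel-demo")
    follower = logging.StreamHandler()
    pinned = logging.StreamHandler()
    autoSetLevel(follower, True)     # retargeted by setHandlersLevel
    autoSetLevel(pinned, False)      # keeps its own level
    pinned.setLevel(logging.DEBUG)
    demo.addHandler(follower)
    demo.addHandler(pinned)
    setHandlersLevel(demo, logging.WARNING)
    assert follower.level == logging.WARNING
    assert pinned.level == logging.DEBUG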
|
carlcarl/PyGithub
|
refs/heads/master
|
github/tests/Issue140.py
|
39
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import github
class Issue140(Framework.TestCase): # https://github.com/jacquev6/PyGithub/issues/140
def setUp(self):
Framework.TestCase.setUp(self)
self.repo = self.g.get_repo("twitter/bootstrap")
def testGetDirContentsThenLazyCompletionOfFile(self):
contents = self.repo.get_dir_contents("/js")
self.assertEqual(len(contents), 15)
n = 0
for content in contents:
if content.path == "js/bootstrap-affix.js":
self.assertEqual(len(content.content), 4722) # Lazy completion
n += 1
elif content.path == "js/tests":
self.assertEqual(content.content, None) # No completion at all
n += 1
self.assertEqual(n, 2)
def testGetFileContents(self):
contents = self.repo.get_file_contents("/js/bootstrap-affix.js")
self.assertEqual(contents.encoding, "base64")
self.assertEqual(contents.url, "https://api.github.com/repos/twitter/bootstrap/contents/js/bootstrap-affix.js")
self.assertEqual(len(contents.content), 4722)
def testGetDirContentsWithRef(self):
self.assertEqual(len(self.repo.get_dir_contents("/js", "8c7f9c66a7d12f47f50618ef420868fe836d0c33")), 15)
|
kived/kivy
|
refs/heads/master
|
examples/widgets/popup_with_kv.py
|
4
|
'''
Example to show a Popup usage with the content from kv lang.
'''
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.app import App
from kivy.lang import Builder
Builder.load_string('''
<CustomPopup>:
size_hint: .5, .5
auto_dismiss: False
title: 'Hello world'
Button:
text: 'Click me to dismiss'
on_press: root.dismiss()
''')
class CustomPopup(Popup):
pass
class TestApp(App):
def build(self):
b = Button(on_press=self.show_popup)
return b
def show_popup(self, b):
p = CustomPopup()
p.open()
TestApp().run()
|
jonathanstowe/XDG
|
refs/heads/master
|
lib/XDG/IconTheme.py
|
1
|
"""
Complete implementation of the XDG Icon Spec Version 0.8
http://standards.freedesktop.org/icon-theme-spec/
"""
import os, sys, time
from xdg.IniFile import *
from xdg.BaseDirectory import *
from xdg.Exceptions import *
import xdg.Config
class IconTheme(IniFile):
"Class to parse and validate IconThemes"
def __init__(self):
IniFile.__init__(self)
def __repr__(self):
return self.name
def parse(self, file):
IniFile.parse(self, file, ["Icon Theme", "KDE Icon Theme"])
self.dir = os.path.dirname(file)
(nil, self.name) = os.path.split(self.dir)
def getDir(self):
return self.dir
# Standard Keys
def getName(self):
return self.get('Name', locale=True)
def getComment(self):
return self.get('Comment', locale=True)
def getInherits(self):
return self.get('Inherits', list=True)
def getDirectories(self):
return self.get('Directories', list=True)
def getHidden(self):
return self.get('Hidden', type="boolean")
def getExample(self):
return self.get('Example')
# Per Directory Keys
def getSize(self, directory):
return self.get('Size', type="integer", group=directory)
def getContext(self, directory):
return self.get('Context', group=directory)
def getType(self, directory):
value = self.get('Type', group=directory)
if value:
return value
else:
return "Threshold"
def getMaxSize(self, directory):
value = self.get('MaxSize', type="integer", group=directory)
if value or value == 0:
return value
else:
return self.getSize(directory)
def getMinSize(self, directory):
value = self.get('MinSize', type="integer", group=directory)
if value or value == 0:
return value
else:
return self.getSize(directory)
def getThreshold(self, directory):
value = self.get('Threshold', type="integer", group=directory)
if value or value == 0:
return value
else:
return 2
# validation stuff
def checkExtras(self):
# header
if self.defaultGroup == "KDE Icon Theme":
self.warnings.append('[KDE Icon Theme]-Header is deprecated')
# file extension
if self.fileExtension == ".theme":
pass
elif self.fileExtension == ".desktop":
self.warnings.append('.desktop fileExtension is deprecated')
else:
self.warnings.append('Unknown File extension')
# Check required keys
# Name
try:
self.name = self.content[self.defaultGroup]["Name"]
except KeyError:
self.errors.append("Key 'Name' is missing")
# Comment
try:
self.comment = self.content[self.defaultGroup]["Comment"]
except KeyError:
self.errors.append("Key 'Comment' is missing")
# Directories
try:
self.directories = self.content[self.defaultGroup]["Directories"]
except KeyError:
self.errors.append("Key 'Directories' is missing")
def checkGroup(self, group):
# check if group header is valid
if group == self.defaultGroup:
pass
elif group in self.getDirectories():
try:
self.type = self.content[group]["Type"]
except KeyError:
self.type = "Threshold"
try:
self.name = self.content[group]["Name"]
except KeyError:
self.errors.append("Key 'Name' in Group '%s' is missing" % group)
elif not (re.match("^\[X-", group) and group.decode("utf-8", "ignore").encode("ascii", 'ignore') == group):
self.errors.append("Invalid Group name: %s" % group)
def checkKey(self, key, value, group):
# standard keys
if group == self.defaultGroup:
if re.match("^Name"+xdg.Locale.regex+"$", key):
pass
elif re.match("^Comment"+xdg.Locale.regex+"$", key):
pass
elif key == "Inherits":
self.checkValue(key, value, list=True)
elif key == "Directories":
self.checkValue(key, value, list=True)
elif key == "Hidden":
self.checkValue(key, value, type="boolean")
elif key == "Example":
self.checkValue(key, value)
elif re.match("^X-[a-zA-Z0-9-]+", key):
pass
else:
self.errors.append("Invalid key: %s" % key)
elif group in self.getDirectories():
if key == "Size":
self.checkValue(key, value, type="integer")
elif key == "Context":
self.checkValue(key, value)
elif key == "Type":
self.checkValue(key, value)
if value not in ["Fixed", "Scalable", "Threshold"]:
self.errors.append("Key 'Type' must be one out of 'Fixed','Scalable','Threshold', but is %s" % value)
elif key == "MaxSize":
self.checkValue(key, value, type="integer")
if self.type != "Scalable":
self.errors.append("Key 'MaxSize' give, but Type is %s" % self.type)
elif key == "MinSize":
self.checkValue(key, value, type="integer")
if self.type != "Scalable":
self.errors.append("Key 'MinSize' give, but Type is %s" % self.type)
elif key == "Threshold":
self.checkValue(key, value, type="integer")
if self.type != "Threshold":
self.errors.append("Key 'Threshold' give, but Type is %s" % self.type)
elif re.match("^X-[a-zA-Z0-9-]+", key):
pass
else:
self.errors.append("Invalid key: %s" % key)
class IconData(IniFile):
"Class to parse and validate IconData Files"
def __init__(self):
IniFile.__init__(self)
def __repr__(self):
return self.getDisplayName()
def parse(self, file):
IniFile.parse(self, file, ["Icon Data"])
# Standard Keys
def getDisplayName(self):
return self.get('DisplayName', locale=True)
def getEmbeddedTextRectangle(self):
return self.get('EmbeddedTextRectangle', list=True)
def getAttachPoints(self):
return self.get('AttachPoints', type="point", list=True)
# validation stuff
def checkExtras(self):
# file extension
if self.fileExtension != ".icon":
self.warnings.append('Unknown File extension')
def checkGroup(self, group):
# check if group header is valid
if not (group == self.defaultGroup \
or (re.match("^\[X-", group) and group.encode("ascii", 'ignore') == group)):
self.errors.append("Invalid Group name: %s" % group.encode("ascii", "replace"))
def checkKey(self, key, value, group):
# standard keys
if re.match("^DisplayName"+xdg.Locale.regex+"$", key):
pass
elif key == "EmbeddedTextRectangle":
self.checkValue(key, value, type="integer", list=True)
elif key == "AttachPoints":
self.checkValue(key, value, type="point", list=True)
elif re.match("^X-[a-zA-Z0-9-]+", key):
pass
else:
self.errors.append("Invalid key: %s" % key)
icondirs = []
for basedir in xdg_data_dirs:
icondirs.append(os.path.join(basedir, "icons"))
icondirs.append(os.path.join(basedir, "pixmaps"))
icondirs.append(os.path.expanduser("~/.icons"))
# just cache variables, they give a 10x speed improvement
themes = []
cache = dict()
dache = dict()
eache = dict()
def getIconPath(iconname, size = None, theme = None, extensions = ["png", "svg", "xpm"]):
global themes
if size == None:
size = xdg.Config.icon_size
if theme == None:
theme = xdg.Config.icon_theme
# if we have an absolute path, just return it
if os.path.isabs(iconname):
return iconname
# check if it has an extension and strip it
if os.path.splitext(iconname)[1][1:] in extensions:
iconname = os.path.splitext(iconname)[0]
# parse theme files
try:
if themes[0].name != theme:
themes = []
__addTheme(theme)
except IndexError:
__addTheme(theme)
# more caching (icon looked up in the last 5 seconds?)
tmp = "".join([iconname, str(size), theme, "".join(extensions)])
if tmp in eache:
if int(time.time() - eache[tmp][0]) >= xdg.Config.cache_time:
del eache[tmp]
else:
return eache[tmp][1]
for thme in themes:
icon = LookupIcon(iconname, size, thme, extensions)
if icon:
eache[tmp] = [time.time(), icon]
return icon
    # cache stuff again (directories looked up in the last 5 seconds?)
for directory in icondirs:
if (directory not in dache \
or (int(time.time() - dache[directory][1]) >= xdg.Config.cache_time \
and dache[directory][2] < os.path.getmtime(directory))) \
and os.path.isdir(directory):
dache[directory] = [os.listdir(directory), time.time(), os.path.getmtime(directory)]
for dir, values in dache.items():
for extension in extensions:
try:
if iconname + "." + extension in values[0]:
icon = os.path.join(dir, iconname + "." + extension)
eache[tmp] = [time.time(), icon]
return icon
except UnicodeDecodeError as e:
if debug:
raise e
else:
pass
# we haven't found anything? "hicolor" is our fallback
if theme != "hicolor":
icon = getIconPath(iconname, size, "hicolor")
eache[tmp] = [time.time(), icon]
return icon
def getIconData(path):
if os.path.isfile(path):
dirname = os.path.dirname(path)
basename = os.path.basename(path)
if os.path.isfile(os.path.join(dirname, basename + ".icon")):
data = IconData()
data.parse(os.path.join(dirname, basename + ".icon"))
return data
def __addTheme(theme):
for dir in icondirs:
if os.path.isfile(os.path.join(dir, theme, "index.theme")):
__parseTheme(os.path.join(dir,theme, "index.theme"))
break
elif os.path.isfile(os.path.join(dir, theme, "index.desktop")):
__parseTheme(os.path.join(dir,theme, "index.desktop"))
break
else:
if debug:
raise NoThemeError(theme)
def __parseTheme(file):
theme = IconTheme()
theme.parse(file)
themes.append(theme)
for subtheme in theme.getInherits():
__addTheme(subtheme)
def LookupIcon(iconname, size, theme, extensions):
# look for the cache
if theme.name not in cache:
cache[theme.name] = []
cache[theme.name].append(time.time() - (xdg.Config.cache_time + 1)) # [0] last time of lookup
cache[theme.name].append(0) # [1] mtime
cache[theme.name].append(dict()) # [2] dir: [subdir, [items]]
    # cache stuff (directory looked up in the last 5 seconds?)
if int(time.time() - cache[theme.name][0]) >= xdg.Config.cache_time:
cache[theme.name][0] = time.time()
for subdir in theme.getDirectories():
for directory in icondirs:
dir = os.path.join(directory,theme.name,subdir)
if (dir not in cache[theme.name][2] \
or cache[theme.name][1] < os.path.getmtime(os.path.join(directory,theme.name))) \
and subdir != "" \
and os.path.isdir(dir):
cache[theme.name][2][dir] = [subdir, os.listdir(dir)]
cache[theme.name][1] = os.path.getmtime(os.path.join(directory,theme.name))
for dir, values in cache[theme.name][2].items():
if DirectoryMatchesSize(values[0], size, theme):
for extension in extensions:
if iconname + "." + extension in values[1]:
return os.path.join(dir, iconname + "." + extension)
minimal_size = sys.maxint
closest_filename = ""
for dir, values in cache[theme.name][2].items():
distance = DirectorySizeDistance(values[0], size, theme)
if distance < minimal_size:
for extension in extensions:
if iconname + "." + extension in values[1]:
closest_filename = os.path.join(dir, iconname + "." + extension)
minimal_size = distance
return closest_filename
def DirectoryMatchesSize(subdir, iconsize, theme):
Type = theme.getType(subdir)
Size = theme.getSize(subdir)
Threshold = theme.getThreshold(subdir)
MinSize = theme.getMinSize(subdir)
MaxSize = theme.getMaxSize(subdir)
if Type == "Fixed":
return Size == iconsize
elif Type == "Scaleable":
return MinSize <= iconsize <= MaxSize
elif Type == "Threshold":
return Size - Threshold <= iconsize <= Size + Threshold
def DirectorySizeDistance(subdir, iconsize, theme):
Type = theme.getType(subdir)
Size = theme.getSize(subdir)
Threshold = theme.getThreshold(subdir)
MinSize = theme.getMinSize(subdir)
MaxSize = theme.getMaxSize(subdir)
if Type == "Fixed":
return abs(Size - iconsize)
elif Type == "Scalable":
if iconsize < MinSize:
return MinSize - iconsize
elif iconsize > MaxSize:
            return iconsize - MaxSize
return 0
elif Type == "Threshold":
if iconsize < Size - Threshold:
return MinSize - iconsize
elif iconsize > Size + Threshold:
return iconsize - MaxSize
return 0
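# --- Added worked example; not part of the original module. ---
# The size-matching rules above on made-up directory values, mirroring
# the icon theme spec: Fixed matches exactly, Threshold matches within
# Size +/- Threshold, and Scalable matches anywhere in
# [MinSize, MaxSize].
def _example_size_matching():
    iconsize = 34
    # (Type, Size, Threshold, MinSize, MaxSize)
    cases = [("Fixed", 48, 2, 48, 48),      # only 48 matches -> False
             ("Threshold", 32, 2, 32, 32),  # 30..34 matches  -> True
             ("Scalable", 48, 2, 16, 128)]  # 16..128 matches -> True
    for Type, Size, Threshold, MinSize, MaxSize in cases:
        if Type == "Fixed":
            match = Size == iconsize
        elif Type == "Scalable":
            match = MinSize <= iconsize <= MaxSize
        else:
            match = Size - Threshold <= iconsize <= Size + Threshold
        print Type, match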
|
aodarc/tennis_club
|
refs/heads/master
|
news/apps.py
|
45
|
from django.apps import AppConfig
class NewsConfig(AppConfig):
name = 'news'
|
ashemedai/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/timer.py
|
168
|
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from datetime import datetime
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
This callback module tells you how long your plays ran for.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'timer'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self.start_time = datetime.now()
def days_hours_minutes_seconds(self, runtime):
minutes = (runtime.seconds // 60) % 60
        r_seconds = runtime.seconds % 60
return runtime.days, runtime.seconds // 3600, minutes, r_seconds
def playbook_on_stats(self, stats):
self.v2_playbook_on_stats(stats)
def v2_playbook_on_stats(self, stats):
end_time = datetime.now()
runtime = end_time - self.start_time
self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(runtime)))
|
scottp-dpaw/django-preserialize
|
refs/heads/master
|
test_suite.py
|
2
|
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
import django # noqa
if django.VERSION >= (1, 7):
django.setup()
from django.core import management # noqa
management.call_command('test', 'tests')
|