| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
ar7z1/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/google/gcp_compute_forwarding_rule_facts.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_forwarding_rule_facts
description:
- Gather facts for GCP ForwardingRule
short_description: Gather facts for GCP ForwardingRule
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
A list of filter value pairs. Available filters are listed here
U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
Each additional filter in the list will be added as an AND condition
(filter1 and filter2)
region:
description:
- A reference to the region where the regional forwarding rule resides.
- This field is not applicable to global forwarding rules.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: Gather facts for a forwarding rule
gcp_compute_forwarding_rule_facts:
region: us-west1
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
IPAddress:
description:
- The IP address that this forwarding rule is serving on behalf of.
- Addresses are restricted based on the forwarding rule's load balancing scheme (EXTERNAL
or INTERNAL) and scope (global or regional).
- When the load balancing scheme is EXTERNAL, for global forwarding rules, the address
must be a global IP, and for regional forwarding rules, the address must live in
the same region as the forwarding rule. If this field is empty, an ephemeral IPv4
address from the same scope (global or regional) will be assigned. A regional forwarding
rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.
- When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address
belonging to the network/subnet configured for the forwarding rule. By default,
if this field is empty, an ephemeral internal IP address will be automatically allocated
from the IP range of the subnet or network configured for this forwarding rule.
- 'An address can be specified either by a literal IP address or a URL reference to
an existing Address resource. The following examples are all valid: * 100.1.2.3
* U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address)
* projects/project/regions/region/addresses/address * regions/region/addresses/address
* global/addresses/address * address .'
returned: success
type: str
IPProtocol:
description:
- The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH,
SCTP or ICMP.
- When the load balancing scheme is INTERNAL, only TCP and UDP are valid.
returned: success
type: str
backendService:
description:
- A reference to a BackendService to receive the matched traffic.
- This is used for internal load balancing.
- "(not used for external load balancing) ."
returned: success
type: dict
ipVersion:
description:
- The IP Version that will be used by this forwarding rule. Valid options are IPV4
or IPV6. This can only be specified for a global forwarding rule.
returned: success
type: str
loadBalancingScheme:
description:
- 'This signifies what the ForwardingRule will be used for and can only take the following
values: INTERNAL, EXTERNAL The value of INTERNAL means that this will be used for
Internal Network Load Balancing (TCP, UDP). The value of EXTERNAL means that this
will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)
.'
returned: success
type: str
name:
description:
- Name of the resource; provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
returned: success
type: str
network:
description:
- For internal load balancing, this field identifies the network that the load balanced
IP should belong to for this Forwarding Rule. If this field is not specified, the
default network will be used.
- This field is not used for external load balancing.
returned: success
type: dict
portRange:
description:
- This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy,
TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.
- Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to
ports in the specified range will be forwarded to target.
- Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port
ranges.
- 'Some types of forwarding target have constraints on the acceptable ports: * TargetHttpProxy:
80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465,
587, 700, 993, 995, 1883, 5222 * TargetSslProxy: 25, 43, 110,
143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway:
500, 4500 .'
returned: success
type: str
ports:
description:
- This field is used along with the backend_service field for internal load balancing.
- When the load balancing scheme is INTERNAL, a single port or a comma separated list
of ports can be configured. Only packets addressed to these ports will be forwarded
to the backends configured with this forwarding rule.
- You may specify a maximum of up to 5 ports.
returned: success
type: list
subnetwork:
description:
- A reference to a subnetwork.
- For internal load balancing, this field identifies the subnetwork that the load
balanced IP should belong to for this Forwarding Rule.
- If the network specified is in auto subnet mode, this field is optional. However,
if the network is in custom subnet mode, a subnetwork must be specified.
- This field is not used for external load balancing.
returned: success
type: dict
target:
description:
- A reference to a TargetPool resource to receive the matched traffic.
- For regional forwarding rules, this target must live in the same region as the forwarding
rule. For global forwarding rules, this target must be a global load balancing resource.
The forwarded traffic must be of a type appropriate to the target object.
- This field is not used for internal load balancing.
returned: success
type: dict
labelFingerprint:
description:
- The fingerprint used for optimistic locking of this resource. Used internally during
updates.
returned: success
type: str
networkTier:
description:
- 'The networking tier used for configuring this address. This field can take the
following values: PREMIUM or STANDARD. If this field is not specified, it is assumed
to be PREMIUM.'
returned: success
type: str
region:
description:
- A reference to the region where the regional forwarding rule resides.
- This field is not applicable to global forwarding rules.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(
argument_spec=dict(
filters=dict(type='list', elements='str'),
region=dict(required=True, type='str')
)
)
if 'scopes' not in module.params:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {
'items': items
}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/forwardingRules".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
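# Illustrative note (not part of the generated module): with
# filters=['name = test_object', 'region = us-west1'], query_options() would
# return '(name = test_object) (region = us-west1)', which the Compute API
# filter syntax evaluates as the AND of both conditions.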
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
jasonwee/asus-rt-n14uhp-mrtg
|
refs/heads/master
|
src/lesson_developer_tools/doctest_hashed_values.py
|
1
|
keys = ['a', 'aa', 'aaa']
print('dict:', {k: len(k) for k in keys})
print('set :', set(keys))
|
pfnet/chainer
|
refs/heads/master
|
chainerx_build_helper.py
|
4
|
# This script is based on pybind11's example script. See the original via the
# following URL: https://github.com/pybind/cmake_example/blob/master/setup.py
import distutils
import os
import platform
import re
import subprocess
import sys
import setuptools
from setuptools.command import build_ext
def emit_build_info(build_chainerx):
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'chainerx/_build_info.py')
with open(filename, mode='w') as f:
f.write('build_chainerx = {}\n'.format(build_chainerx))
class CMakeExtension(setuptools.Extension):
def __init__(self, name, build_targets, sourcedir=''):
setuptools.Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
self.build_targets = build_targets
class CMakeBuild(build_ext.build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError('CMake must be installed to build ChainerX')
cmake_version = distutils.version.LooseVersion(
re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError('CMake >= 3.1.0 is required to build ChainerX')
generator = os.getenv('CHAINERX_CMAKE_GENERATOR', '').lower()
if generator not in ['', 'ninja']:
raise RuntimeError("Generator %s is not supported." % generator)
self.use_ninja = generator == 'ninja'
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
# Decide the build type: release/debug
build_type = os.getenv('CHAINERX_BUILD_TYPE', None)
if build_type is not None:
# Use environment variable
pass
elif self.debug:
# Being built with `python setup.py build --debug`
build_type = 'Debug'
elif os.getenv('READTHEDOCS', None) == 'True':
# on ReadTheDocs
build_type = 'Debug'
else:
# default
build_type = 'Release'
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-GNinja'] if self.use_ninja else []
cmake_args += [
'-DCHAINERX_BUILD_PYTHON=1',
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
'-DCHAINERX_BUILD_TEST=OFF',
'-DCMAKE_BUILD_TYPE=' + build_type,
]
build_args = ['--config', build_type]
if platform.system() == 'Windows':
cmake_args += [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
build_type.upper(), extdir)]
if not self.use_ninja:
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
build_args += ['--']
build_args += ext.build_targets
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''), self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(
['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp,
env=env)
subprocess.check_call(
['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
def config_setup_kwargs(setup_kwargs, build_chainerx):
# TODO(imanishi): Call this function with setuptools.
emit_build_info(build_chainerx)
if not build_chainerx:
# `chainerx` package needs to be able to be imported even if ChainerX
# is unavailable.
setup_kwargs['packages'] += ['chainerx']
return
if sys.version_info < (3, 5):
raise RuntimeError(
'ChainerX is only available for Python 3.5 or later.')
setup_kwargs['packages'] += [
'chainerx',
'chainerx._docs',
'chainerx.creation',
'chainerx.manipulation',
'chainerx.math',
'chainerx.random',
'chainerx.testing',
]
setup_kwargs['package_data'] = {
'chainerx': ['py.typed', '*.pyi'],
}
setup_kwargs.update(dict(
cmdclass={'build_ext': CMakeBuild},
ext_modules=[CMakeExtension(
name='chainerx._core',
build_targets=['_core.so'],
sourcedir='chainerx_cc')],
))
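# Illustrative (hypothetical) usage from a setup.py, assuming this module is
# importable as chainerx_build_helper and the caller decides whether to build
# ChainerX (e.g. via an environment variable of its own choosing):
#
#   import setuptools
#   import chainerx_build_helper
#
#   setup_kwargs = dict(name='example-package', packages=['example_package'])
#   chainerx_build_helper.config_setup_kwargs(setup_kwargs, build_chainerx=True)
#   setuptools.setup(**setup_kwargs)
#
# The CMake build itself can be tuned through the environment variables read
# above: CHAINERX_BUILD_TYPE (Release/Debug) and CHAINERX_CMAKE_GENERATOR
# ('' or 'ninja').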
|
freeflightsim/ffs-app-engine
|
refs/heads/master
|
_freeflightsim.appspot.com.1/gdata/Crypto/Hash/MD5.py
|
226
|
# Just use the MD5 module from the Python standard library
__revision__ = "$Id: MD5.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $"
from md5 import *
import md5
if hasattr(md5, 'digestsize'):
digest_size = digestsize
del digestsize
del md5
|
michealcarrerweb/LHVent_app
|
refs/heads/master
|
hourly/models.py
|
1
|
from __future__ import unicode_literals
from django.db import models
class Hourly(models.Model):
hourly_base = models.IntegerField()
def __str__(self):
return str(self.hourly_base)
|
vipulkanade/EventbriteDjango
|
refs/heads/master
|
lib/python2.7/site-packages/django/test/html.py
|
220
|
"""
Comparing two html documents.
"""
from __future__ import unicode_literals
import re
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html_parser import HTMLParseError, HTMLParser
WHITESPACE = re.compile('\s+')
def normalize_whitespace(string):
return WHITESPACE.sub(' ', string)
@python_2_unicode_compatible
class Element(object):
def __init__(self, name, attributes):
self.name = name
self.attributes = sorted(attributes)
self.children = []
def append(self, element):
if isinstance(element, six.string_types):
element = force_text(element)
element = normalize_whitespace(element)
if self.children:
if isinstance(self.children[-1], six.string_types):
self.children[-1] += element
self.children[-1] = normalize_whitespace(self.children[-1])
return
elif self.children:
# removing the last child if it is only whitespace
# this can result in incorrect dom representations since
# whitespace between inline tags like <span> is significant
if isinstance(self.children[-1], six.string_types):
if self.children[-1].isspace():
self.children.pop()
if element:
self.children.append(element)
def finalize(self):
def rstrip_last_element(children):
if children:
if isinstance(children[-1], six.string_types):
children[-1] = children[-1].rstrip()
if not children[-1]:
children.pop()
children = rstrip_last_element(children)
return children
rstrip_last_element(self.children)
for i, child in enumerate(self.children):
if isinstance(child, six.string_types):
self.children[i] = child.strip()
elif hasattr(child, 'finalize'):
child.finalize()
def __eq__(self, element):
if not hasattr(element, 'name'):
return False
if hasattr(element, 'name') and self.name != element.name:
return False
if len(self.attributes) != len(element.attributes):
return False
if self.attributes != element.attributes:
# attributes without a value is same as attribute with value that
# equals the attributes name:
# <input checked> == <input checked="checked">
for i in range(len(self.attributes)):
attr, value = self.attributes[i]
other_attr, other_value = element.attributes[i]
if value is None:
value = attr
if other_value is None:
other_value = other_attr
if attr != other_attr or value != other_value:
return False
if self.children != element.children:
return False
return True
def __hash__(self):
return hash((self.name,) + tuple(a for a in self.attributes))
def __ne__(self, element):
return not self.__eq__(element)
def _count(self, element, count=True):
if not isinstance(element, six.string_types):
if self == element:
return 1
i = 0
for child in self.children:
# child is text content and element is also text content, then
# make a simple "text" in "text"
if isinstance(child, six.string_types):
if isinstance(element, six.string_types):
if count:
i += child.count(element)
elif element in child:
return 1
else:
i += child._count(element, count=count)
if not count and i:
return i
return i
def __contains__(self, element):
return self._count(element, count=False) > 0
def count(self, element):
return self._count(element, count=True)
def __getitem__(self, key):
return self.children[key]
def __str__(self):
output = '<%s' % self.name
for key, value in self.attributes:
if value:
output += ' %s="%s"' % (key, value)
else:
output += ' %s' % key
if self.children:
output += '>\n'
output += ''.join(six.text_type(c) for c in self.children)
output += '\n</%s>' % self.name
else:
output += ' />'
return output
def __repr__(self):
return six.text_type(self)
@python_2_unicode_compatible
class RootElement(Element):
def __init__(self):
super(RootElement, self).__init__(None, ())
def __str__(self):
return ''.join(six.text_type(c) for c in self.children)
class Parser(HTMLParser):
SELF_CLOSING_TAGS = ('br', 'hr', 'input', 'img', 'meta', 'spacer',
'link', 'frame', 'base', 'col')
def __init__(self):
HTMLParser.__init__(self)
self.root = RootElement()
self.open_tags = []
self.element_positions = {}
def error(self, msg):
raise HTMLParseError(msg, self.getpos())
def format_position(self, position=None, element=None):
if not position and element:
position = self.element_positions[element]
if position is None:
position = self.getpos()
if hasattr(position, 'lineno'):
position = position.lineno, position.offset
return 'Line %d, Column %d' % position
@property
def current(self):
if self.open_tags:
return self.open_tags[-1]
else:
return self.root
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
if tag not in self.SELF_CLOSING_TAGS:
self.handle_endtag(tag)
def handle_starttag(self, tag, attrs):
# Special case handling of 'class' attribute, so that comparisons of DOM
# instances are not sensitive to ordering of classes.
attrs = [
(name, " ".join(sorted(value.split(" "))))
if name == "class"
else (name, value)
for name, value in attrs
]
element = Element(tag, attrs)
self.current.append(element)
if tag not in self.SELF_CLOSING_TAGS:
self.open_tags.append(element)
self.element_positions[element] = self.getpos()
def handle_endtag(self, tag):
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
while element.name != tag:
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
def handle_data(self, data):
self.current.append(data)
def handle_charref(self, name):
self.current.append('&%s;' % name)
def handle_entityref(self, name):
self.current.append('&%s;' % name)
def parse_html(html):
"""
Takes a string that contains *valid* HTML and turns it into a Python object
structure that can be easily compared against other HTML on semantic
equivalence. Syntactical differences like which quotation is used on
arguments will be ignored.
"""
parser = Parser()
parser.feed(html)
parser.close()
document = parser.root
document.finalize()
# Removing ROOT element if it's not necessary
if len(document.children) == 1:
if not isinstance(document.children[0], six.string_types):
document = document.children[0]
return document
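# Illustrative note (not part of Django): because attribute order, class order
# and quoting style are normalized, a comparison such as
#   parse_html('<p class="b a" id=x>hi</p>') == parse_html("<p id='x' class='a b'>hi</p>")
# should evaluate to True, while differences in text content or tag structure
# make it False.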
|
ChopChopKodi/pelisalacarta
|
refs/heads/master
|
python/main-classic/lib/sambatools/pyasn1/debug.py
|
10
|
import logging
from pyasn1 import __version__
from pyasn1 import error
from pyasn1.compat.octets import octs2ints
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Printer:
def __init__(self, logger=None, handler=None, formatter=None):
if logger is None:
logger = logging.getLogger('pyasn1')
logger.setLevel(logging.DEBUG)
if handler is None:
handler = logging.StreamHandler()
if formatter is None:
formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
self.__logger = logger
def __call__(self, msg): self.__logger.debug(msg)
def __str__(self): return '<python built-in logging>'
if hasattr(logging, 'NullHandler'):
NullHandler = logging.NullHandler
else:
# Python 2.6 and older
class NullHandler(logging.Handler):
def emit(self, record):
pass
class Debug:
defaultPrinter = None
def __init__(self, *flags, **options):
self._flags = flagNone
if options.get('printer') is not None:
self._printer = options.get('printer')
elif self.defaultPrinter is not None:
self._printer = self.defaultPrinter
if 'loggerName' in options:
# route our logs to parent logger
self._printer = Printer(
logger=logging.getLogger(options['loggerName']),
handler=NullHandler()
)
else:
self._printer = Printer()
self('running pyasn1 version %s' % __version__)
for f in flags:
inverse = f and f[0] in ('!', '~')
if inverse:
f = f[1:]
try:
if inverse:
self._flags &= ~flagMap[f]
else:
self._flags |= flagMap[f]
except KeyError:
raise error.PyAsn1Error('bad debug flag %s' % f)
self('debug category \'%s\' %s' % (f, inverse and 'disabled' or 'enabled'))
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer(msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
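# Illustrative note (not part of pyasn1): Debug('encoder') enables only the
# encoder category, Debug('all', '!decoder') enables everything except the
# decoder category, and an unknown flag name raises error.PyAsn1Error.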
logger = 0
def setLogger(l):
global logger
logger = l
def hexdump(octets):
return ' '.join(
[ '%s%.2X' % (n%16 == 0 and ('\n%.5d: ' % n) or '', x)
for n,x in zip(range(len(octets)), octs2ints(octets)) ]
)
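# Illustrative note (not part of pyasn1): hexdump starts a new, offset-prefixed
# line every 16 octets, e.g. hexdump('\x01\x02') returns '\n00000: 01 02'.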
class Scope:
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)
def push(self, token):
self._list.append(token)
def pop(self):
return self._list.pop()
scope = Scope()
|
vrbala/timeseries-analysis
|
refs/heads/master
|
fib.py
|
1
|
#! /usr/bin/python
import sys
import time
from threading import Thread
import psutil
import random
def fib(n):
if n <= 1: return n
else: return fib(n-1) + fib(n-2)
if __name__ == "__main__":
fib(int(sys.argv[1]))
time.sleep(float(sys.argv[2]))
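# Illustrative usage (consistent with the argument handling above):
#   ./fib.py 30 2.5
# computes fib(30) recursively and then sleeps for 2.5 seconds.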
|
tempbottle/Firefly
|
refs/heads/master
|
firefly/_version.py
|
8
|
#coding:utf8
'''
Created on 2013-10-21
@author: lan (www.9miao.com)
'''
from twisted.python import versions
version = versions.Version('firefly', 1, 3, 3)
|
jjyycchh/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/stack_utils.py
|
215
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple routines for logging, obtaining thread stack information."""
import sys
import traceback
def log_thread_state(logger, name, thread_id, msg=''):
"""Log information about the given thread state."""
stack = _find_thread_stack(thread_id)
assert(stack is not None)
logger("")
logger("%s (tid %d) %s" % (name, thread_id, msg))
_log_stack(logger, stack)
logger("")
def _find_thread_stack(thread_id):
"""Returns a stack object that can be used to dump a stack trace for
the given thread id (or None if the id is not found)."""
for tid, stack in sys._current_frames().items():
if tid == thread_id:
return stack
return None
def _log_stack(logger, stack):
"""Log a stack trace to the logger callback."""
for filename, lineno, name, line in traceback.extract_stack(stack):
logger('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
logger(' %s' % line.strip())
def log_traceback(logger, tb):
stack = traceback.extract_tb(tb)
for frame_str in traceback.format_list(stack):
for line in frame_str.split('\n'):
if line:
logger(" %s" % line)
|
dsprenkels/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/mux.py
|
636
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for multiplexing extension.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
"""
import collections
import copy
import email
import email.parser
import logging
import math
import struct
import threading
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import parse_frame
from mod_pywebsocket.handshake import hybi
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
_MAX_CHANNEL_ID = 2 ** 29 - 1
_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
_HANDSHAKE_ENCODING_IDENTITY = 0
_HANDSHAKE_ENCODING_DELTA = 1
# We need only these status code for now.
_HTTP_BAD_RESPONSE_MESSAGES = {
common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
}
# DropChannel reason code
# TODO(bashi): Define all reason codes defined in the -05 draft.
_DROP_CODE_NORMAL_CLOSURE = 1000
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
"""Exception in handling multiplexing extension."""
pass
# Temporary
class MuxNotImplementedException(Exception):
"""Raised when a flow enters unimplemented code path."""
pass
class LogicalConnectionClosedException(Exception):
"""Raised when logical connection is gracefully closed."""
pass
class PhysicalConnectionError(Exception):
"""Raised when there is a physical connection error."""
def __init__(self, drop_code, message=''):
super(PhysicalConnectionError, self).__init__(
'code=%d, message=%r' % (drop_code, message))
self.drop_code = drop_code
self.message = message
class LogicalChannelError(Exception):
"""Raised when there is a logical channel error."""
def __init__(self, channel_id, drop_code, message=''):
super(LogicalChannelError, self).__init__(
'channel_id=%d, code=%d, message=%r' % (
channel_id, drop_code, message))
self.channel_id = channel_id
self.drop_code = drop_code
self.message = message
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
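# Illustrative note (not part of pywebsocket): the encoding above uses a
# variable-length format, e.g.
#   _encode_channel_id(5)   -> '\x05'      (1 byte,  ids < 2**7)
#   _encode_channel_id(300) -> '\x81\x2c'  (2 bytes, 0x8000 | 300)
# and read_channel_id() in _MuxFramePayloadParser reverses it.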
def _encode_number(number):
return create_length_header(number, False)
def _create_add_channel_response(channel_id, encoded_handshake,
encoding=0, rejected=False):
if encoding != 0 and encoding != 1:
raise ValueError('Invalid encoding %d' % encoding)
first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
(rejected << 4) | encoding)
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(len(encoded_handshake)) +
encoded_handshake)
return block
def _create_drop_channel(channel_id, code=None, message=''):
if len(message) > 0 and code is None:
raise ValueError('Code must be specified if message is specified')
first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
block = chr(first_byte) + _encode_channel_id(channel_id)
if code is None:
block += _encode_number(0) # Reason size
else:
reason = struct.pack('!H', code) + message
reason_size = _encode_number(len(reason))
block += reason_size + reason
return block
def _create_flow_control(channel_id, replenished_quota):
first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(replenished_quota))
return block
def _create_new_channel_slot(slots, send_quota):
if slots < 0 or send_quota < 0:
raise ValueError('slots and send_quota must be non-negative.')
first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
block = (chr(first_byte) +
_encode_number(slots) +
_encode_number(send_quota))
return block
def _create_fallback_new_channel_slot():
first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
return block
def _parse_request_text(request_text):
request_line, header_lines = request_text.split('\r\n', 1)
words = request_line.split(' ')
if len(words) != 3:
raise ValueError('Bad Request-Line syntax %r' % request_line)
[command, path, version] = words
if version != 'HTTP/1.1':
raise ValueError('Bad request version %r' % version)
# email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
# RFC 6455 refers to RFC 2616 for handshake parsing, and RFC 2616 refers to
# RFC 822.
headers = email.parser.Parser().parsestr(header_lines)
return command, path, version, headers
class _ControlBlock(object):
"""A structure that holds parsing result of multiplexing control block.
Control block specific attributes will be added by _MuxFramePayloadParser.
(e.g. encoded_handshake will be added for AddChannelRequest and
AddChannelResponse)
"""
def __init__(self, opcode):
self.opcode = opcode
class _MuxFramePayloadParser(object):
"""A class that parses multiplexed frame payload."""
def __init__(self, payload):
self._data = payload
self._read_position = 0
self._logger = util.get_class_logger(self)
def read_channel_id(self):
"""Reads channel id.
Raises:
ValueError: when the payload doesn't contain
valid channel id.
"""
remaining_length = len(self._data) - self._read_position
pos = self._read_position
if remaining_length == 0:
raise ValueError('Invalid channel id format')
channel_id = ord(self._data[pos])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining_length < 4:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!L',
self._data[pos:pos+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining_length < 3:
raise ValueError('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', self._data[pos+1:pos+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining_length < 2:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!H',
self._data[pos:pos+2])[0] & 0x3fff
channel_id_length = 2
self._read_position += channel_id_length
return channel_id
def read_inner_frame(self):
"""Reads an inner frame.
Raises:
PhysicalConnectionError: when the inner frame is invalid.
"""
if len(self._data) == self._read_position:
raise PhysicalConnectionError(
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
bits = ord(self._data[self._read_position])
self._read_position += 1
fin = (bits & 0x80) == 0x80
rsv1 = (bits & 0x40) == 0x40
rsv2 = (bits & 0x20) == 0x20
rsv3 = (bits & 0x10) == 0x10
opcode = bits & 0xf
payload = self.remaining_data()
# Consume rest of the message which is payload data of the original
# frame.
self._read_position = len(self._data)
return fin, rsv1, rsv2, rsv3, opcode, payload
def _read_number(self):
if self._read_position + 1 > len(self._data):
raise ValueError(
'Cannot read the first byte of number field')
number = ord(self._data[self._read_position])
if number & 0x80 == 0x80:
raise ValueError(
'The most significant bit of the first byte of number should '
'be unset')
self._read_position += 1
pos = self._read_position
if number == 127:
if pos + 8 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 8
number = struct.unpack('!Q', self._data[pos:pos+8])[0]
if number > 0x7FFFFFFFFFFFFFFF:
raise ValueError('Encoded number(%d) >= 2^63' % number)
if number <= 0xFFFF:
raise ValueError(
'%d should not be encoded by 9 bytes encoding' % number)
return number
if number == 126:
if pos + 2 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 2
number = struct.unpack('!H', self._data[pos:pos+2])[0]
if number <= 125:
raise ValueError(
'%d should not be encoded by 3 bytes encoding' % number)
return number
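# Illustrative note (not part of pywebsocket): this mirrors the WebSocket
# payload-length encoding with the MSB of the first byte reserved: values
# 0-125 fit in the first byte, a first byte of 126 is followed by a 2-byte
# big-endian value, and a first byte of 127 by an 8-byte big-endian value
# (which must be >= 2**16 and < 2**63).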
def _read_size_and_contents(self):
"""Reads data that consists of followings:
- the size of the contents encoded the same way as payload length
of the WebSocket Protocol with 1 bit padding at the head.
- the contents.
"""
try:
size = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
pos = self._read_position
if pos + size > len(self._data):
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Cannot read %d bytes data' % size)
self._read_position += size
return self._data[pos:pos+size]
def _read_add_channel_request(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x7
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
# Invalid encoding will be handled by MuxHandler.
encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoding = encoding
encoded_handshake = self._read_size_and_contents()
control_block.encoded_handshake = encoded_handshake
return control_block
def _read_add_channel_response(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x3
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.accepted = (first_byte >> 4) & 1
control_block.encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoded_handshake = self._read_size_and_contents()
return control_block
def _read_flow_control(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def _read_drop_channel(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
reason = self._read_size_and_contents()
if len(reason) == 0:
control_block.drop_code = None
control_block.drop_message = ''
elif len(reason) >= 2:
control_block.drop_code = struct.unpack('!H', reason[:2])[0]
control_block.drop_message = reason[2:]
else:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received DropChannel that contains only 1-byte reason')
return control_block
def _read_new_channel_slot(self, first_byte, control_block):
reserved = first_byte & 0x1e
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.fallback = first_byte & 1
try:
control_block.slots = self._read_number()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def read_control_blocks(self):
"""Reads control block(s).
Raises:
PhysicalConnectionError: when the payload contains invalid control
block(s).
StopIteration: when no control blocks left.
"""
while self._read_position < len(self._data):
first_byte = ord(self._data[self._read_position])
self._read_position += 1
opcode = (first_byte >> 5) & 0x7
control_block = _ControlBlock(opcode=opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
yield self._read_add_channel_request(first_byte, control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
yield self._read_add_channel_response(
first_byte, control_block)
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
yield self._read_flow_control(first_byte, control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
yield self._read_drop_channel(first_byte, control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
yield self._read_new_channel_slot(first_byte, control_block)
else:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_MUX_OPCODE,
'Invalid opcode %d' % opcode)
assert self._read_position == len(self._data)
raise StopIteration
def remaining_data(self):
"""Returns remaining data."""
return self._data[self._read_position:]
class _LogicalRequest(object):
"""Mimics mod_python request."""
def __init__(self, channel_id, command, path, protocol, headers,
connection):
"""Constructs an instance.
Args:
channel_id: the channel id of the logical channel.
command: HTTP request command.
path: HTTP request path.
headers: HTTP headers.
connection: _LogicalConnection instance.
"""
self.channel_id = channel_id
self.method = command
self.uri = path
self.protocol = protocol
self.headers_in = headers
self.connection = connection
self.server_terminated = False
self.client_terminated = False
def is_https(self):
"""Mimics request.is_https(). Returns False because this method is
used only by old protocols (hixie and hybi00).
"""
return False
class _LogicalConnection(object):
"""Mimics mod_python mp_conn."""
# For details, see the comment of set_read_state().
STATE_ACTIVE = 1
STATE_GRACEFULLY_CLOSED = 2
STATE_TERMINATED = 3
def __init__(self, mux_handler, channel_id):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
channel_id: channel id of this connection.
"""
self._mux_handler = mux_handler
self._channel_id = channel_id
self._incoming_data = ''
# - Protects _waiting_write_completion
# - Signals the thread waiting for completion of write by mux handler
self._write_condition = threading.Condition()
self._waiting_write_completion = False
self._read_condition = threading.Condition()
self._read_state = self.STATE_ACTIVE
def get_local_addr(self):
"""Getter to mimic mp_conn.local_addr."""
return self._mux_handler.physical_connection.get_local_addr()
local_addr = property(get_local_addr)
def get_remote_addr(self):
"""Getter to mimic mp_conn.remote_addr."""
return self._mux_handler.physical_connection.get_remote_addr()
remote_addr = property(get_remote_addr)
def get_memorized_lines(self):
"""Gets memorized lines. Not supported."""
raise MuxUnexpectedException('_LogicalConnection does not support '
'get_memorized_lines')
def write(self, data):
"""Writes data. mux_handler sends data asynchronously. The caller will
be suspended until the write is done.
Args:
data: data to be written.
Raises:
MuxUnexpectedException: when called before finishing the previous
write.
"""
try:
self._write_condition.acquire()
if self._waiting_write_completion:
raise MuxUnexpectedException(
'Logical connection %d is already waiting the completion '
'of write' % self._channel_id)
self._waiting_write_completion = True
self._mux_handler.send_data(self._channel_id, data)
self._write_condition.wait()
# TODO(tyoshino): Raise an exception if woke up by on_writer_done.
finally:
self._write_condition.release()
def write_control_data(self, data):
"""Writes data via the control channel. Don't wait finishing write
because this method can be called by mux dispatcher.
Args:
data: data to be written.
"""
self._mux_handler.send_control_data(data)
def on_write_data_done(self):
"""Called when sending data is completed."""
try:
self._write_condition.acquire()
if not self._waiting_write_completion:
raise MuxUnexpectedException(
'Invalid call of on_write_data_done for logical '
'connection %d' % self._channel_id)
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def on_writer_done(self):
"""Called by the mux handler when the writer thread has finished."""
try:
self._write_condition.acquire()
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def append_frame_data(self, frame_data):
"""Appends incoming frame data. Called when mux_handler dispatches
frame data to the corresponding application.
Args:
frame_data: incoming frame data.
"""
self._read_condition.acquire()
self._incoming_data += frame_data
self._read_condition.notify()
self._read_condition.release()
def read(self, length):
"""Reads data. Blocks until enough data has arrived via physical
connection.
Args:
length: length of data to be read.
Raises:
LogicalConnectionClosedException: when closing handshake for this
logical channel has been received.
ConnectionTerminatedException: when the physical connection has
closed, or an error is caused on the reader thread.
"""
self._read_condition.acquire()
while (self._read_state == self.STATE_ACTIVE and
len(self._incoming_data) < length):
self._read_condition.wait()
try:
if self._read_state == self.STATE_GRACEFULLY_CLOSED:
raise LogicalConnectionClosedException(
'Logical channel %d has closed.' % self._channel_id)
elif self._read_state == self.STATE_TERMINATED:
raise ConnectionTerminatedException(
'Receiving %d byte failed. Logical channel (%d) closed' %
(length, self._channel_id))
value = self._incoming_data[:length]
self._incoming_data = self._incoming_data[length:]
finally:
self._read_condition.release()
return value
def set_read_state(self, new_state):
"""Sets the state of this connection. Called when an event for this
connection has occurred.
Args:
new_state: state to be set. new_state must be one of followings:
- STATE_GRACEFULLY_CLOSED: when closing handshake for this
connection has been received.
- STATE_TERMINATED: when the physical connection has closed or
a DropChannel for this connection has been received.
"""
self._read_condition.acquire()
self._read_state = new_state
self._read_condition.notify()
self._read_condition.release()
class _InnerMessage(object):
"""Holds the result of _InnerMessageBuilder.build().
"""
def __init__(self, opcode, payload):
self.opcode = opcode
self.payload = payload
class _InnerMessageBuilder(object):
"""A class that holds the context of inner message fragmentation and
builds a message from fragmented inner frame(s).
"""
def __init__(self):
self._control_opcode = None
self._pending_control_fragments = []
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
def _handle_first(self, frame):
if frame.opcode == common.OPCODE_CONTINUATION:
raise InvalidFrameException('Sending invalid continuation opcode')
if common.is_control_opcode(frame.opcode):
return self._process_first_fragmented_control(frame)
else:
return self._process_first_fragmented_message(frame)
def _process_first_fragmented_control(self, frame):
self._control_opcode = frame.opcode
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_control
return None
return self._reassemble_fragmented_control()
def _process_first_fragmented_message(self, frame):
self._message_opcode = frame.opcode
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_message
return None
return self._reassemble_fragmented_message()
def _handle_fragmented_control(self, frame):
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented control '
'message' % frame.opcode)
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_control()
def _reassemble_fragmented_control(self):
opcode = self._control_opcode
payload = ''.join(self._pending_control_fragments)
self._control_opcode = None
self._pending_control_fragments = []
if self._message_opcode is not None:
self._frame_handler = self._handle_fragmented_message
else:
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def _handle_fragmented_message(self, frame):
# Sender can interleave a control message while sending fragmented
# messages.
if common.is_control_opcode(frame.opcode):
if self._control_opcode is not None:
raise MuxUnexpectedException(
'Should not reach here(Bug in builder)')
return self._process_first_fragmented_control(frame)
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented message' %
frame.opcode)
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_message()
def _reassemble_fragmented_message(self):
opcode = self._message_opcode
payload = ''.join(self._pending_message_fragments)
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def build(self, frame):
"""Build an inner message. Returns an _InnerMessage instance when
the given frame is the last fragmented frame. Returns None otherwise.
Args:
frame: an inner frame.
Raises:
InvalidFrameException: when received invalid opcode. (e.g.
receiving non continuation data opcode but the fin flag of
the previous inner frame was not set.)
"""
return self._frame_handler(frame)
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
def __init__(self, request, stream_options, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
stream_options: StreamOptions instance.
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
# Physical stream is responsible for masking.
stream_options.unmask_receive = False
Stream.__init__(self, request, stream_options)
self._send_closed = False
self._send_quota = send_quota
# - Protects _send_closed and _send_quota
# - Signals the thread waiting for send quota replenished
self._send_condition = threading.Condition()
# The opcode of the first frame in messages.
self._message_opcode = common.OPCODE_TEXT
# True when the last message was fragmented.
self._last_message_was_fragmented = False
self._receive_quota = receive_quota
self._write_inner_frame_semaphore = threading.Semaphore()
self._inner_message_builder = _InnerMessageBuilder()
def _create_inner_frame(self, opcode, payload, end=True):
frame = Frame(fin=end, opcode=opcode, payload=payload)
for frame_filter in self._options.outgoing_frame_filters:
frame_filter.filter(frame)
if len(payload) != len(frame.payload):
raise MuxUnexpectedException(
'Mux extension must not be used after extensions which change '
' frame boundary')
first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
(frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
return chr(first_byte) + frame.payload
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
write_position = 0
try:
# An inner frame will be fragmented if there is not enough send
# quota. This semaphore ensures that fragmented inner frames are
# sent in order on the logical channel.
# Note that frames that come from other logical channels or
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
self._write_inner_frame_semaphore.acquire()
# Consume an octet quota when this is the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
self._send_quota -= 1
finally:
self._send_condition.release()
while write_position < payload_length:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
'No quota. Waiting FlowControl message for %d.' %
self._request.channel_id)
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self.request._channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
inner_frame_end = (
end and
(write_position + write_length == payload_length))
inner_frame = self._create_inner_frame(
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
self._send_condition.release()
# Writing data will block the worker so we need to release
# _send_condition before writing.
self._logger.debug('Sending inner frame: %r' % inner_frame)
self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
except ValueError, e:
raise BadOperationException(e)
finally:
self._write_inner_frame_semaphore.release()
def replenish_send_quota(self, send_quota):
"""Replenish send quota."""
try:
self._send_condition.acquire()
if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
self._send_quota = 0
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
self._send_quota += send_quota
self._logger.debug('Replenished send quota for channel id %d: %d' %
(self._request.channel_id, self._send_quota))
finally:
self._send_condition.notify()
self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
if self._receive_quota < amount:
self._logger.debug('Violate quota on channel id %d: %d < %d' %
(self._request.channel_id,
self._receive_quota, amount))
return False
self._receive_quota -= amount
return True
def send_message(self, message, end=True, binary=False):
"""Override Stream.send_message."""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
if binary:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_TEXT
message = message.encode('utf-8')
for message_filter in self._options.outgoing_message_filters:
message = message_filter.filter(message, end, binary)
if self._last_message_was_fragmented:
if opcode != self._message_opcode:
raise BadOperationException('Message types are different in '
'frames for the same message')
opcode = common.OPCODE_CONTINUATION
else:
self._message_opcode = opcode
self._write_inner_frame(opcode, message, end)
self._last_message_was_fragmented = not end
def _receive_frame(self):
"""Overrides Stream._receive_frame.
In addition to calling Stream._receive_frame, this method adds the amount
of payload to receiving quota and sends FlowControl to the client.
We need to do it here because Stream.receive_message() handles
control frames internally.
"""
opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
amount = len(payload)
# Replenish extra one octet when receiving the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
amount += 1
self._receive_quota += amount
frame_data = _create_flow_control(self._request.channel_id,
amount)
self._logger.debug('Sending flow control for %d, replenished=%d' %
(self._request.channel_id, amount))
self._request.connection.write_control_data(frame_data)
return opcode, payload, fin, rsv1, rsv2, rsv3
def _get_message_from_frame(self, frame):
"""Overrides Stream._get_message_from_frame.
"""
try:
inner_message = self._inner_message_builder.build(frame)
except InvalidFrameException:
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
if inner_message is None:
return None
self._original_opcode = inner_message.opcode
return inner_message.payload
def receive_message(self):
"""Overrides Stream.receive_message."""
# Just call Stream.receive_message(), but catch
# LogicalConnectionClosedException, which is raised when the logical
# connection has closed gracefully.
try:
return Stream.receive_message(self)
except LogicalConnectionClosedException, e:
self._logger.debug('%s', e)
return None
def _send_closing_handshake(self, code, reason):
"""Overrides Stream._send_closing_handshake."""
body = create_closing_handshake_body(code, reason)
self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
(self._request.channel_id, code, reason))
self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
self._request.server_terminated = True
def send_ping(self, body=''):
"""Overrides Stream.send_ping"""
self._logger.debug('Sending ping on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PING, body, end=True)
self._ping_queue.append(body)
def _send_pong(self, body):
"""Overrides Stream._send_pong"""
self._logger.debug('Sending pong on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PONG, body, end=True)
def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
"""Overrides Stream.close_connection."""
# TODO(bashi): Implement
self._logger.debug('Closing logical connection %d' %
self._request.channel_id)
self._request.server_terminated = True
def stop_sending(self):
"""Stops accepting new send operation (_write_inner_frame)."""
self._send_condition.acquire()
self._send_closed = True
self._send_condition.notify()
self._send_condition.release()
class _OutgoingData(object):
"""A structure that holds data to be sent via physical connection and
origin of the data.
"""
def __init__(self, channel_id, data):
self.channel_id = channel_id
self.data = data
class _PhysicalConnectionWriter(threading.Thread):
"""A thread that is responsible for writing data to physical connection.
TODO(bashi): Make sure there is no thread-safety problem when the reader
    thread reads data from the same socket at the same time.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
# When set, make this thread stop accepting new data, flush pending
# data and exit.
self._stop_requested = False
# The close code of the physical connection.
self._close_code = common.STATUS_NORMAL_CLOSURE
# Deque for passing write data. It's protected by _deque_condition
# until _stop_requested is set.
self._deque = collections.deque()
# - Protects _deque, _stop_requested and _close_code
# - Signals threads waiting for them to be available
self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
"""Puts outgoing data.
Args:
data: _OutgoingData instance.
Raises:
BadOperationException: when the thread has been requested to
terminate.
"""
try:
self._deque_condition.acquire()
if self._stop_requested:
raise BadOperationException('Cannot write data anymore')
self._deque.append(data)
self._deque_condition.notify()
finally:
self._deque_condition.release()
def _write_data(self, outgoing_data):
message = (_encode_channel_id(outgoing_data.channel_id) +
outgoing_data.data)
try:
self._mux_handler.physical_stream.send_message(
message=message, end=True, binary=True)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._mux_handler.physical_connection.remote_addr,), e)
raise
# TODO(bashi): It would be better to block the thread that sends
# control data as well.
if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
try:
self._deque_condition.acquire()
while not self._stop_requested:
if len(self._deque) == 0:
self._deque_condition.wait()
continue
outgoing_data = self._deque.popleft()
self._deque_condition.release()
self._write_data(outgoing_data)
self._deque_condition.acquire()
# Flush deque.
#
# At this point, self._deque_condition is always acquired.
try:
while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
self._write_data(outgoing_data)
finally:
self._deque_condition.release()
# Close physical connection.
try:
                # Don't wait for the response here. The response will be read
# by the reader thread.
self._mux_handler.physical_stream.close_connection(
self._close_code, wait_response=False)
except Exception, e:
util.prepend_message_to_exception(
                    'Failed to close the physical connection: ', e)
raise
finally:
self._mux_handler.notify_writer_done()
def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
class _PhysicalConnectionReader(threading.Thread):
"""A thread that is responsible for reading data from physical connection.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
def run(self):
while True:
try:
physical_stream = self._mux_handler.physical_stream
message = physical_stream.receive_message()
if message is None:
break
# Below happens only when a data message is received.
opcode = physical_stream.get_last_received_opcode()
if opcode != common.OPCODE_BINARY:
self._mux_handler.fail_physical_connection(
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
'Received a text message on physical connection')
break
except ConnectionTerminatedException, e:
self._logger.debug('%s', e)
break
try:
self._mux_handler.dispatch_message(message)
except PhysicalConnectionError, e:
self._mux_handler.fail_physical_connection(
e.drop_code, e.message)
break
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
except Exception, e:
self._logger.debug(traceback.format_exc())
break
self._mux_handler.notify_reader_done()
class _Worker(threading.Thread):
"""A thread that is responsible for running the corresponding application
handler.
"""
def __init__(self, mux_handler, request):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
request: _LogicalRequest instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self._request = request
self.setDaemon(True)
def run(self):
self._logger.debug('Logical channel worker started. (id=%d)' %
self._request.channel_id)
try:
# Non-critical exceptions will be handled by dispatcher.
self._mux_handler.dispatcher.transfer_data(self._request)
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
finally:
self._mux_handler.notify_worker_done(self._request.channel_id)
class _MuxHandshaker(hybi.Handshaker):
"""Opening handshake processor for multiplexing."""
_DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
def __init__(self, request, dispatcher, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
hybi.Handshaker.__init__(self, request, dispatcher)
self._send_quota = send_quota
self._receive_quota = receive_quota
# Append headers which should not be included in handshake field of
# AddChannelRequest.
        # TODO(bashi): Decide whether we should raise an exception when
        # these headers are already included.
request.headers_in[common.UPGRADE_HEADER] = (
common.WEBSOCKET_UPGRADE_TYPE)
request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
str(common.VERSION_HYBI_LATEST))
request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
self._DUMMY_WEBSOCKET_KEY)
def _create_stream(self, stream_options):
"""Override hybi.Handshaker._create_stream."""
self._logger.debug('Creating logical stream for %d' %
self._request.channel_id)
return _LogicalStream(
self._request, stream_options, self._send_quota,
self._receive_quota)
def _create_handshake_response(self, accept):
"""Override hybi._create_handshake_response."""
response = []
response.append('HTTP/1.1 101 Switching Protocols\r\n')
# Upgrade and Sec-WebSocket-Accept should be excluded.
response.append('%s: %s\r\n' % (
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
if self._request.ws_protocol is not None:
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
if (self._request.ws_extensions is not None and
len(self._request.ws_extensions) != 0):
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(self._request.ws_extensions)))
response.append('\r\n')
return ''.join(response)
def _send_handshake(self, accept):
"""Override hybi.Handshaker._send_handshake."""
# Don't send handshake response for the default channel
if self._request.channel_id == _DEFAULT_CHANNEL_ID:
return
handshake_response = self._create_handshake_response(accept)
frame_data = _create_add_channel_response(
self._request.channel_id,
handshake_response)
self._logger.debug('Sending handshake response for %d: %r' %
(self._request.channel_id, frame_data))
self._request.connection.write_control_data(frame_data)
class _LogicalChannelData(object):
"""A structure that holds information about logical channel.
"""
def __init__(self, request, worker):
self.request = request
self.worker = worker
self.drop_code = _DROP_CODE_NORMAL_CLOSURE
self.drop_message = ''
class _HandshakeDeltaBase(object):
"""A class that holds information for delta-encoded handshake."""
def __init__(self, headers):
self._headers = headers
def create_headers(self, delta=None):
"""Creates request headers for an AddChannelRequest that has
delta-encoded handshake.
Args:
            delta: headers to override or add. An entry with an empty value
                removes that header from the delta base.
"""
headers = copy.copy(self._headers)
if delta:
for key, value in delta.items():
# The spec requires that a header with an empty value is
# removed from the delta base.
if len(value) == 0 and headers.has_key(key):
del headers[key]
else:
headers[key] = value
return headers
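# A hedged usage sketch (illustrative only, not part of the original module):
# shows how _HandshakeDeltaBase.create_headers() applies a delta to the stored
# base headers. The header names below are hypothetical.
def _handshake_delta_example():
    base = _HandshakeDeltaBase({'Host': 'example.com', 'X-Custom': 'a'})
    # An entry with an empty value removes that header from the base; any
    # other entry overrides or adds it.
    # Result: {'Host': 'example.com', 'Cookie': 'id=1'}
    return base.create_headers({'X-Custom': '', 'Cookie': 'id=1'})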
class _MuxHandler(object):
"""Multiplexing handler. When a handler starts, it launches three
    threads: the reader thread, the writer thread, and a worker thread.
The reader thread reads data from the physical stream, i.e., the
ws_stream object of the underlying websocket connection. The reader
thread interprets multiplexed frames and dispatches them to logical
channels. Methods of this class are mostly called by the reader thread.
The writer thread sends multiplexed frames which are created by
logical channels via the physical connection.
    The worker thread launched at the starting point handles the
    "Implicitly Opened Connection". If the multiplexing handler receives
an AddChannelRequest and accepts it, the handler will launch a new worker
thread and dispatch the request to it.
"""
def __init__(self, request, dispatcher):
"""Constructs an instance.
Args:
request: mod_python request of the physical connection.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
"""
self.original_request = request
self.dispatcher = dispatcher
self.physical_connection = request.connection
self.physical_stream = request.ws_stream
self._logger = util.get_class_logger(self)
self._logical_channels = {}
self._logical_channels_condition = threading.Condition()
# Holds client's initial quota
self._channel_slots = collections.deque()
self._handshake_base = None
self._worker_done_notify_received = False
self._reader = None
self._writer = None
def start(self):
"""Starts the handler.
Raises:
MuxUnexpectedException: when the handler already started, or when
opening handshake of the default channel fails.
"""
if self._reader or self._writer:
raise MuxUnexpectedException('MuxHandler already started')
self._reader = _PhysicalConnectionReader(self)
self._writer = _PhysicalConnectionWriter(self)
self._reader.start()
self._writer.start()
# Create "Implicitly Opened Connection".
logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
headers = copy.copy(self.original_request.headers_in)
# Add extensions for logical channel.
headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
common.format_extensions(
self.original_request.mux_processor.extensions()))
self._handshake_base = _HandshakeDeltaBase(headers)
logical_request = _LogicalRequest(
_DEFAULT_CHANNEL_ID,
self.original_request.method,
self.original_request.uri,
self.original_request.protocol,
self._handshake_base.create_headers(),
logical_connection)
# Client's send quota for the implicitly opened connection is zero,
# but we will send FlowControl later so set the initial quota to
# _INITIAL_QUOTA_FOR_CLIENT.
self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
send_quota = self.original_request.mux_processor.quota()
if not self._do_handshake_for_logical_request(
logical_request, send_quota=send_quota):
raise MuxUnexpectedException(
'Failed handshake on the default channel id')
self._add_logical_channel(logical_request)
# Send FlowControl for the implicitly opened connection.
frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
_INITIAL_QUOTA_FOR_CLIENT)
logical_request.connection.write_control_data(frame_data)
def add_channel_slots(self, slots, send_quota):
"""Adds channel slots.
Args:
slots: number of slots to be added.
send_quota: initial send quota for slots.
"""
self._channel_slots.extend([send_quota] * slots)
# Send NewChannelSlot to client.
frame_data = _create_new_channel_slot(slots, send_quota)
self.send_control_data(frame_data)
def wait_until_done(self, timeout=None):
"""Waits until all workers are done. Returns False when timeout has
occurred. Returns True on success.
Args:
timeout: timeout in sec.
"""
self._logical_channels_condition.acquire()
try:
while len(self._logical_channels) > 0:
self._logger.debug('Waiting workers(%d)...' %
len(self._logical_channels))
self._worker_done_notify_received = False
self._logical_channels_condition.wait(timeout)
if not self._worker_done_notify_received:
self._logger.debug('Waiting worker(s) timed out')
return False
finally:
self._logical_channels_condition.release()
# Flush pending outgoing data
self._writer.stop()
self._writer.join()
return True
    def notify_write_data_done(self, channel_id):
        """Called by the writer thread when a write operation has finished.
Args:
channel_id: objective channel id.
"""
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
channel_data.request.connection.on_write_data_done()
else:
self._logger.debug('Seems that logical channel for %d has gone'
% channel_id)
finally:
self._logical_channels_condition.release()
def send_control_data(self, data):
"""Sends data via the control channel.
Args:
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=_CONTROL_CHANNEL_ID, data=data))
def send_data(self, channel_id, data):
"""Sends data via given logical channel. This method is called by
worker threads.
Args:
channel_id: objective channel id.
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=channel_id, data=data))
def _send_drop_channel(self, channel_id, code=None, message=''):
frame_data = _create_drop_channel(channel_id, code, message)
self._logger.debug(
'Sending drop channel for channel id %d' % channel_id)
self.send_control_data(frame_data)
def _send_error_add_channel_response(self, channel_id, status=None):
if status is None:
status = common.HTTP_STATUS_BAD_REQUEST
if status in _HTTP_BAD_RESPONSE_MESSAGES:
message = _HTTP_BAD_RESPONSE_MESSAGES[status]
else:
self._logger.debug('Response message for %d is not found' % status)
message = '???'
response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
frame_data = _create_add_channel_response(channel_id,
encoded_handshake=response,
encoding=0, rejected=True)
self.send_control_data(frame_data)
def _create_logical_request(self, block):
if block.channel_id == _CONTROL_CHANNEL_ID:
# TODO(bashi): Raise PhysicalConnectionError with code 2006
# instead of MuxUnexpectedException.
raise MuxUnexpectedException(
'Received the control channel id (0) as objective channel '
'id for AddChannel')
if block.encoding > _HANDSHAKE_ENCODING_DELTA:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_REQUEST_ENCODING)
method, path, version, headers = _parse_request_text(
block.encoded_handshake)
if block.encoding == _HANDSHAKE_ENCODING_DELTA:
headers = self._handshake_base.create_headers(headers)
connection = _LogicalConnection(self, block.channel_id)
request = _LogicalRequest(block.channel_id, method, path, version,
headers, connection)
return request
def _do_handshake_for_logical_request(self, request, send_quota=0):
try:
receive_quota = self._channel_slots.popleft()
except IndexError:
raise LogicalChannelError(
request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
handshaker = _MuxHandshaker(request, self.dispatcher,
send_quota, receive_quota)
try:
handshaker.do_handshake()
except handshake.VersionException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(
request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return False
except handshake.HandshakeException, e:
# TODO(bashi): Should we _Fail the Logical Channel_ with 3001
# instead?
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id,
status=e.status)
return False
except handshake.AbortedByUserException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id)
return False
return True
def _add_logical_channel(self, logical_request):
try:
self._logical_channels_condition.acquire()
if logical_request.channel_id in self._logical_channels:
self._logger.debug('Channel id %d already exists' %
logical_request.channel_id)
raise PhysicalConnectionError(
_DROP_CODE_CHANNEL_ALREADY_EXISTS,
'Channel id %d already exists' %
logical_request.channel_id)
worker = _Worker(self, logical_request)
channel_data = _LogicalChannelData(logical_request, worker)
self._logical_channels[logical_request.channel_id] = channel_data
worker.start()
finally:
self._logical_channels_condition.release()
def _process_add_channel_request(self, block):
try:
logical_request = self._create_logical_request(block)
except ValueError, e:
self._logger.debug('Failed to create logical request: %r' % e)
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return
if self._do_handshake_for_logical_request(logical_request):
if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
# Update handshake base.
# TODO(bashi): Make sure this is the right place to update
# handshake base.
self._handshake_base = _HandshakeDeltaBase(
logical_request.headers_in)
self._add_logical_channel(logical_request)
else:
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
def _process_flow_control(self, block):
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.request.ws_stream.replenish_send_quota(
block.send_quota)
finally:
self._logical_channels_condition.release()
def _process_drop_channel(self, block):
self._logger.debug(
'DropChannel received for %d: code=%r, reason=%r' %
(block.channel_id, block.drop_code, block.drop_message))
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
# Close the logical channel
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
finally:
self._logical_channels_condition.release()
def _process_control_blocks(self, parser):
for control_block in parser.read_control_blocks():
opcode = control_block.opcode
self._logger.debug('control block received, opcode: %d' % opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
self._process_add_channel_request(control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received AddChannelResponse')
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
self._process_flow_control(control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
self._process_drop_channel(control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received NewChannelSlot')
else:
raise MuxUnexpectedException(
'Unexpected opcode %r' % opcode)
def _process_logical_frame(self, channel_id, parser):
self._logger.debug('Received a frame. channel id=%d' % channel_id)
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
# We must ignore the message for an inactive channel.
return
channel_data = self._logical_channels[channel_id]
fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
consuming_byte = len(payload)
if opcode != common.OPCODE_CONTINUATION:
consuming_byte += 1
if not channel_data.request.ws_stream.consume_receive_quota(
consuming_byte):
# The client violates quota. Close logical channel.
raise LogicalChannelError(
channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
mask=False)
frame_data = header + payload
channel_data.request.connection.append_frame_data(frame_data)
finally:
self._logical_channels_condition.release()
def dispatch_message(self, message):
"""Dispatches message. The reader thread calls this method.
Args:
message: a message that contains encapsulated frame.
Raises:
PhysicalConnectionError: if the message contains physical
connection level errors.
LogicalChannelError: if the message contains logical channel
level errors.
"""
parser = _MuxFramePayloadParser(message)
try:
channel_id = parser.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
if channel_id == _CONTROL_CHANNEL_ID:
self._process_control_blocks(parser)
else:
self._process_logical_frame(channel_id, parser)
def notify_worker_done(self, channel_id):
"""Called when a worker has finished.
Args:
channel_id: channel id corresponded with the worker.
"""
self._logger.debug('Worker for channel id %d terminated' % channel_id)
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
raise MuxUnexpectedException(
'Channel id %d not found' % channel_id)
channel_data = self._logical_channels.pop(channel_id)
finally:
self._worker_done_notify_received = True
self._logical_channels_condition.notify()
self._logical_channels_condition.release()
if not channel_data.request.server_terminated:
self._send_drop_channel(
channel_id, code=channel_data.drop_code,
message=channel_data.drop_message)
def notify_reader_done(self):
"""This method is called by the reader thread when the reader has
finished.
"""
self._logger.debug(
            'Terminating all logical connections waiting for incoming data '
'...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def notify_writer_done(self):
"""This method is called by the writer thread when the writer has
finished.
"""
self._logger.debug(
            'Terminating all logical connections waiting for write '
'completion ...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.on_writer_done()
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def fail_physical_connection(self, code, message):
"""Fail the physical connection.
Args:
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing the physical connection...')
self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
def fail_logical_channel(self, channel_id, code, message):
"""Fail a logical channel.
Args:
channel_id: channel id.
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing logical channel %d...' % channel_id)
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
# Close the logical channel. notify_worker_done() will be
# called later and it will send DropChannel.
channel_data.drop_code = code
channel_data.drop_message = message
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
else:
self._send_drop_channel(channel_id, code, message)
finally:
self._logical_channels_condition.release()
def use_mux(request):
return hasattr(request, 'mux_processor') and (
request.mux_processor.is_active())
def start(request, dispatcher):
mux_handler = _MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
_INITIAL_QUOTA_FOR_CLIENT)
mux_handler.wait_until_done()
# vi:sts=4 sw=4 et
|
mtunique/knows
|
refs/heads/master
|
algorithm/Bayes/naive_testing.py
|
1
|
__author__ = 'mxunique'
# coding: utf-8
from bayes_text import NaiveBayesClassifier
from basic_analyse import *
from std_settings import *
def train():
nbc = NaiveBayesClassifier()
nbc.set_tags_list()
nbc.set_classes_dict()
nbc.all_in_one_train(src_training_dirpath)
def predict():
nbc = NaiveBayesClassifier()
nbc.easy_init()
for file in get_file_list(src_testing_dirpath):
print "now predicting %s" % file
filepath = src_testing_dirpath + "/" + file
nbc.bayes_classify(doc=filepath)
print "======================="
def test():
nbc = NaiveBayesClassifier()
nbc.easy_init()
nbc.bayes_classify(string="", screen=True)
if __name__ == '__main__':
test()
pass
|
hassaanm/stock-trading
|
refs/heads/master
|
pybrain-pybrain-87c7ac3/pybrain/rl/explorers/explorer.py
|
3
|
__author__ = "Thomas Rueckstiess, ruecksti@in.tum.de"
from pybrain.structure.modules.module import Module
class Explorer(Module):
""" An Explorer object is used in Agents, receives the current state
and action (from the controller Module) and returns an explorative
        action that is executed instead of the given action.
"""
    def activate(self, state, action):
        """ The base class ignores the state and simply passes the action
            through the module. Subclasses should implement
            _forwardImplementation() to add exploration.
"""
return Module.activate(self, action)
def newEpisode(self):
""" Inform the explorer about the start of a new episode. """
pass
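# A hedged sketch (not part of the original pybrain module): the simplest
# possible explorer, which returns the controller's action unchanged. A real
# explorer (e.g. an epsilon-greedy one) would perturb `outbuf` here instead.
# Assumes the explorer is constructed with indim == outdim.
class PassThroughExplorer(Explorer):
    """ Adds no exploration; the proposed action is passed straight through. """
    def _forwardImplementation(self, inbuf, outbuf):
        # Copy the proposed action unchanged into the output buffer.
        outbuf[:] = inbuf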
|
jfterpstra/bluebottle
|
refs/heads/develop
|
bluebottle/payments_voucher/models.py
|
1
|
from django.conf import settings
from django.db import models
from decimal import Decimal
from bluebottle.payments.models import Payment
from djchoices.choices import DjangoChoices, ChoiceItem
from django.utils.translation import ugettext as _
from django_extensions.db.fields import ModificationDateTimeField, \
CreationDateTimeField
class VoucherPayment(Payment):
voucher = models.OneToOneField('payments_voucher.Voucher',
verbose_name=_("Voucher"),
related_name='payment')
class Meta:
ordering = ('-created', '-updated')
verbose_name = "Voucher Payment"
verbose_name_plural = "Voucher Payments"
def get_fee(self):
# Fix me. Get the fee from the payment that bought the related voucher.
return Decimal(0)
def get_method_name(self):
return 'Voucher'
def get_method_icon(self):
return 'images/payments_voucher/icons/icon-gift-card.svg'
class VoucherStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
paid = ChoiceItem('paid', label=_("Paid"))
cancelled = ChoiceItem('cancelled', label=_("Cancelled"))
cashed = ChoiceItem('cashed', label=_("Cashed"))
cashed_by_proxy = ChoiceItem('cashed_by_proxy', label=_("Cashed by us"))
class Voucher(models.Model):
class VoucherLanguages(DjangoChoices):
en = ChoiceItem('en', label=_("English"))
nl = ChoiceItem('nl', label=_("Dutch"))
amount = models.PositiveIntegerField(_("Amount"))
currency = models.CharField(_("Currency"), max_length=3, default='EUR')
language = models.CharField(_("Language"), max_length=2,
choices=VoucherLanguages.choices,
default=VoucherLanguages.en)
message = models.TextField(_("Message"), blank=True, default="",
max_length=500)
code = models.CharField(_("Code"), blank=True, default="", max_length=100)
status = models.CharField(_("Status"), max_length=20,
choices=VoucherStatuses.choices,
default=VoucherStatuses.new, db_index=True)
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
sender = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_("Sender"), related_name="buyer",
null=True, blank=True)
sender_email = models.EmailField(_("Sender email"))
sender_name = models.CharField(_("Sender name"), blank=True, default="",
max_length=100)
receiver = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_("Receiver"),
related_name="casher", null=True, blank=True)
receiver_email = models.EmailField(_("Receiver email"))
receiver_name = models.CharField(_("Receiver name"), blank=True, default="",
max_length=100)
order = models.ForeignKey('orders.Order', verbose_name=_("Order"),
help_text=_("The order that bought this voucher"),
null=True)
def __unicode__(self):
code = "New"
if self.code:
code = self.code
return code
|
illicitonion/givabit
|
refs/heads/master
|
lib/sdks/google_appengine_1.7.1/google_appengine/lib/grizzled/grizzled/file/__init__.py
|
19
|
"""
This module contains file- and path-related methods, classes, and modules.
"""
from __future__ import with_statement, absolute_import
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os as _os
import sys
import shutil
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['unlink_quietly', 'recursively_remove', 'copy_recursively',
'copy', 'touch', 'pathsplit', 'eglob', 'universal_path',
'native_path']
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def unlink_quietly(*paths):
"""
Like the standard ``os.unlink()`` function, this function attempts to
delete a file. However, it swallows any exceptions that occur during the
unlink operation, making it more suitable for certain uses (e.g.,
in ``atexit`` handlers).
:Parameters:
paths : str or list
path(s) to unlink
"""
def looper(*paths):
for i in paths:
if type(i) == list:
for path in i:
yield path
else:
yield i
for path in looper(*paths):
try:
_os.unlink(path)
except:
pass
def recursively_remove(dir):
"""
Recursively remove all files and directories below and including a
specified directory.
:Parameters:
dir : str
path to directory to remove
"""
if not _os.path.exists(dir):
return
shutil.rmtree(dir)
def list_recursively(dir):
"""
Recursively list the contents of a directory. Yields the contents of
the directory and all subdirectories. This method returns a generator,
so it evaluates its recursive walk lazily.
:Parameters:
dir : str
Path to directory to list
:raise ValueError: If ``dir`` does not exist, or if ``dir`` exists
but is not a directory.
"""
if not _os.path.isdir(dir):
raise ValueError, "%s is not a directory." % dir
    for f in _os.listdir(dir):
        path = _os.path.join(dir, f)
        if _os.path.isdir(path):
            for child in list_recursively(path):
                yield child
        else:
            yield f
def copy_recursively(source_dir, target_dir):
"""
Recursively copy a source directory (and all its contents) to a target
directory.
:Parameters:
source_dir : str
Source directory to copy recursively. This path must
exist and must specify a directory; otherwise, this
function throws a ``ValueError``
target_dir : str
Directory to which to copy the contents of ``source_dir``.
This directory must not already exist.
:raise ValueError: If: ``source_dir`` does not exist; ``source_dir`` exists
but is not a directory; or ``target_dir`` exists but is
not a directory.
"""
shutil.copytree(source_dir, target_dir)
def copy(files, target_dir, create_target=False):
"""
Copy one or more files to a target directory.
:Parameters:
files : str or list
single file path or a list of file paths to be copied
target_dir : str
path to target directory
create_target : bool
If ``True``, ``copy()`` will attempt to create the target directory
if it does not exist. If ``False``, ``copy()`` will throw an
exception if the target directory does not exist.
:raise OSError: ``target_dir`` does not exist, and ``create_target`` is
``False``
"""
if type(files) == str:
files = [files]
if not _os.path.exists(target_dir):
if create_target:
_os.mkdir(target_dir)
if _os.path.exists(target_dir) and (not _os.path.isdir(target_dir)):
raise OSError, 'Cannot copy files to non-directory "%s"' % target_dir
for f in files:
targetFile = _os.path.join(target_dir, _os.path.basename(f))
open(targetFile, 'wb').write(open(f, 'rb').read())
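# A hedged usage sketch (not part of the original grizzled API); the paths are
# hypothetical.
def _copy_example():
    # Copy two files into /tmp/backup, creating the directory if needed.
    copy(['README.txt', 'setup.py'], '/tmp/backup', create_target=True)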
def touch(files, times=None):
"""
Similar to the Unix *touch* command, this function:
- updates the access and modification times for any existing files
in a list of files
- creates any non-existent files in the list of files
If any file in the list is a directory, this function will throw an
exception.
:Parameters:
files : list or str
pathname or list of pathnames of files to be created or updated
times : tuple
tuple of the form (*atime*, *mtime*), identical to
what is passed to the standard ``os.utime()`` function.
If this tuple is ``None``, then the current time is used.
"""
if type(files) == str:
files = [files]
for f in files:
if _os.path.exists(f):
if not _os.path.isfile(f):
raise OSError, "Can't touch non-file \"%s\"" % f
_os.utime(f, times)
else:
# Doesn't exist. Create it.
open(f, 'wb').close()
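# A hedged usage sketch (not part of the original grizzled API); the path is
# hypothetical.
def _touch_example():
    # Create the stamp file if missing, otherwise update its atime/mtime.
    touch('/tmp/build.stamp')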
def pathsplit(path):
"""
Split a path into an array of path components, using the file separator
    ('/' on POSIX systems, '\\' on Windows) that's appropriate for the
underlying operating system. Does not take drive letters into account.
If there's a Windows drive letter in the path, it'll end up with the
first component.
:Parameters:
path : str
path to split. Can be relative or absolute
:rtype: list
:return: a list of path components
"""
result = []
(head, tail) = _os.path.split(path)
if (not head) or (head == path):
# No file separator. Done.
pass
else:
result = pathsplit(head)
if tail:
result += [tail]
return result
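# A hedged usage sketch (not part of the original grizzled API): pathsplit()
# peels components off the end of the path via os.path.split().
def _pathsplit_example():
    # On POSIX this returns ['foo', 'bar', 'baz.txt'].
    return pathsplit('foo/bar/baz.txt')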
def __find_matches(pattern_pieces, directory):
"""
Used by eglob.
"""
import glob
result = []
if not _os.path.isdir(directory):
return []
piece = pattern_pieces[0]
last = len(pattern_pieces) == 1
if piece == '**':
if not last:
remaining_pieces = pattern_pieces[1:]
for root, dirs, files in _os.walk(directory):
if last:
# At the end of a pattern, "**" just recursively matches
# directories.
result += [root]
else:
# Recurse downward, trying to match the rest of the
# pattern.
sub_result = __find_matches(remaining_pieces, root)
for partial_path in sub_result:
result += [partial_path]
else:
# Regular glob pattern.
matches = glob.glob(_os.path.join(directory, piece))
if len(matches) > 0:
if last:
for match in matches:
result += [match]
else:
remaining_pieces = pattern_pieces[1:]
for match in matches:
sub_result = __find_matches(remaining_pieces, match)
for partial_path in sub_result:
result += [partial_path]
# Normalize the paths.
for i in range(len(result)):
result[i] = _os.path.normpath(result[i])
return result
def eglob(pattern, directory='.'):
"""
    Extended glob function that supports all the wildcards supported
by the Python standard ``glob`` routine, as well as a special "**"
wildcard that recursively matches any directory. Examples:
+--------------+--------------------------------------------------------+
| \*\*/\*.py | all files ending in '.py' under the current directory |
+--------------+--------------------------------------------------------+
| foo/\*\*/bar | all files name 'bar' anywhere under subdirectory 'foo' |
+--------------+--------------------------------------------------------+
:Parameters:
pattern : str
The wildcard pattern. Must be a simple pattern with no directories.
directory : str
The directory in which to do the globbing.
:rtype: list
:return: A list of matched files, or an empty list for no match
"""
pieces = pathsplit(pattern)
return __find_matches(pieces, directory)
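# A hedged usage sketch (not part of the original grizzled API): eglob()
# splits the pattern with pathsplit() and lets __find_matches() expand the
# special "**" component recursively. The directory layout implied in the
# comment is hypothetical.
def _eglob_example():
    # Every .py file anywhere under 'src', e.g. ['src/a.py', 'src/pkg/b.py'].
    return eglob('**/*.py', directory='src')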
def universal_path(path):
"""
Converts a path name from its operating system-specific format to a
universal path notation. Universal path notation always uses a Unix-style
"/" to separate path elements. A universal path can be converted to a
native (operating system-specific) path via the ``native_path()``
function. Note that on POSIX-compliant systems, this function simply
returns the ``path`` parameter unmodified.
:Parameters:
path : str
the path to convert to universal path notation
:rtype: str
:return: the universal path.
"""
if _os.name != 'posix':
path = path.replace(_os.path.sep, '/')
return path
def native_path(path):
"""
Converts a path name from universal path notation to the operating
system-specific format. Universal path notation always uses a Unix-style
"/" to separate path elements. A native path can be converted to a
universal path via the ``universal_path()`` function. Note that on
POSIX-compliant systems, this function simply returns the ``path``
parameter unmodified.
:Parameters:
path : str
the path to convert to native path notation
:rtype: str
:return: the native path.
"""
if _os.name != 'posix':
path = path.replace('/', _os.path.sep)
return path
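# A hedged usage sketch (not part of the original grizzled API): round-trips a
# path between native and universal notation. On POSIX both calls return the
# path unchanged.
def _path_notation_example(native):
    universal = universal_path(native)  # e.g. 'a\\b\\c' -> 'a/b/c' on Windows
    return native_path(universal)       # back to the OS-specific form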
|
USGSDenverPychron/pychron
|
refs/heads/develop
|
pychron/canvas/designer/__init__.py
|
82
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
|
natefoo/pulsar
|
refs/heads/master
|
pulsar/client/staging/up.py
|
2
|
from io import open
from logging import getLogger
from os import sep
from os.path import (
abspath,
basename,
exists,
join,
relpath,
)
from re import escape, findall
from ..action_mapper import FileActionMapper
from ..action_mapper import MessageAction
from ..action_mapper import path_type
from ..job_directory import RemoteJobDirectory
from ..staging import CLIENT_INPUT_PATH_TYPES, COMMAND_VERSION_FILENAME
from ..util import directory_files
from ..util import PathHelper
log = getLogger(__name__)
def submit_job(client, client_job_description, job_config=None):
    """
    Stage a job's inputs via a FileStager and launch it through the given
    Pulsar client. Returns the (possibly remotely assigned) job id.
    """
file_stager = FileStager(client, client_job_description, job_config)
rebuilt_command_line = file_stager.get_command_line()
job_id = file_stager.job_id
launch_kwds = dict(
command_line=rebuilt_command_line,
dependencies_description=client_job_description.dependencies_description,
env=client_job_description.env,
)
container_info = None
if client_job_description.container:
container_info = {
"container_id": client_job_description.container,
}
container_info["guest_ports"] = client_job_description.guest_ports
launch_kwds["container_info"] = container_info
if client_job_description.remote_pulsar_app_config:
launch_kwds["pulsar_app_config"] = client_job_description.remote_pulsar_app_config
if file_stager.job_config:
launch_kwds["job_config"] = file_stager.job_config
remote_staging = {}
remote_staging_actions = file_stager.transfer_tracker.remote_staging_actions
if remote_staging_actions:
remote_staging["setup"] = remote_staging_actions
# Somehow make the following optional.
remote_staging["action_mapper"] = file_stager.action_mapper.to_dict()
remote_staging["client_outputs"] = client_job_description.client_outputs.to_dict()
if remote_staging:
launch_kwds["remote_staging"] = remote_staging
client.launch(**launch_kwds)
return job_id
class FileStager(object):
"""
    Objects of the FileStager class interact with a Pulsar client object to
stage the files required to run jobs on a remote Pulsar server.
**Parameters**
client : JobClient
Pulsar client object.
client_job_description : client_job_description
Description of client view of job to stage and execute remotely.
"""
def __init__(self, client, client_job_description, job_config):
"""
"""
self.client = client
self.command_line = client_job_description.command_line
self.config_files = client_job_description.config_files
self.client_inputs = client_job_description.client_inputs
self.output_files = client_job_description.output_files
if client_job_description.tool is not None:
self.tool_id = client_job_description.tool.id
self.tool_version = client_job_description.tool.version
self.tool_dir = abspath(client_job_description.tool.tool_dir)
else:
self.tool_id = None
self.tool_version = None
self.tool_dir = None
self.working_directory = client_job_description.working_directory
self.metadata_directory = client_job_description.metadata_directory
self.version_file = client_job_description.version_file
self.arbitrary_files = client_job_description.arbitrary_files
self.rewrite_paths = client_job_description.rewrite_paths
self.job_directory_files = client_job_description.job_directory_files
# Setup job inputs, these will need to be rewritten before
# shipping off to remote Pulsar server.
self.job_inputs = JobInputs(self.command_line, self.config_files)
self.action_mapper = FileActionMapper(client)
self.__handle_setup(job_config)
self.__setup_touch_outputs(client_job_description.touch_outputs)
self.transfer_tracker = TransferTracker(
client,
self.path_helper,
self.action_mapper,
self.job_inputs,
self.rewrite_paths,
self.job_directory,
)
self.__initialize_referenced_tool_files()
if self.rewrite_paths:
self.__initialize_referenced_arbitrary_files()
self.__upload_tool_files()
self.__upload_job_directory_files()
self.__upload_input_files()
self.__upload_working_directory_files()
self.__upload_metadata_directory_files()
self.__upload_arbitrary_files()
if self.rewrite_paths:
self.__initialize_output_file_renames()
self.__initialize_task_output_file_renames()
self.__initialize_config_file_renames()
self.__initialize_version_file_rename()
self.__handle_rewrites()
self.__upload_rewritten_config_files()
def __handle_setup(self, job_config):
if not job_config:
job_config = self.client.setup(self.tool_id, self.tool_version)
self.new_working_directory = job_config['working_directory']
self.new_outputs_directory = job_config['outputs_directory']
self.new_tool_directory = job_config.get('tools_directory', None)
self.new_configs_directory = job_config['configs_directory']
self.remote_separator = self.__parse_remote_separator(job_config)
self.path_helper = PathHelper(self.remote_separator)
        # If the remote Pulsar server assigned a job id, use that; otherwise
        # just use the locally assigned job_id.
galaxy_job_id = self.client.job_id
self.job_id = job_config.get('job_id', galaxy_job_id)
if self.job_id != galaxy_job_id:
# Remote Pulsar server assigned an id different than the
# Galaxy job id, update client to reflect this.
self.client.assign_job_id(self.job_id)
self.job_config = job_config
self.job_directory = self.__setup_job_directory()
def __setup_touch_outputs(self, touch_outputs):
self.job_config['touch_outputs'] = touch_outputs
def __parse_remote_separator(self, job_config):
separator = job_config.get("system_properties", {}).get("separator", None)
if not separator: # Legacy Pulsar
separator = job_config["path_separator"] # Poorly named
return separator
def __setup_job_directory(self):
if self.client.job_directory:
return self.client.job_directory
elif self.job_config.get('job_directory', None):
return RemoteJobDirectory(
remote_staging_directory=self.job_config['job_directory'],
remote_id=None,
remote_sep=self.remote_separator,
)
else:
return None
def __initialize_referenced_tool_files(self):
        # Was the following line only needed for the interpreter? Should we
        # disable it for 16.04+ tools?
self.referenced_tool_files = self.job_inputs.find_referenced_subfiles(self.tool_dir)
# If the tool was created with a correct $__tool_directory__ find those files and transfer
new_tool_directory = self.new_tool_directory
if not new_tool_directory:
return
for potential_tool_file in self.job_inputs.find_referenced_subfiles(new_tool_directory):
local_file = potential_tool_file.replace(new_tool_directory, self.tool_dir)
if exists(local_file):
self.referenced_tool_files.append(local_file)
def __initialize_referenced_arbitrary_files(self):
referenced_arbitrary_path_mappers = dict()
for mapper in self.action_mapper.unstructured_mappers():
mapper_pattern = mapper.to_pattern()
# TODO: Make more sophisticated, allow parent directories,
            # grabbing sibling files based on patterns, etc...
paths = self.job_inputs.find_pattern_references(mapper_pattern)
for path in paths:
if path not in referenced_arbitrary_path_mappers:
referenced_arbitrary_path_mappers[path] = mapper
for path, mapper in referenced_arbitrary_path_mappers.items():
action = self.action_mapper.action({"path": path}, path_type.UNSTRUCTURED, mapper)
unstructured_map = action.unstructured_map(self.path_helper)
self.arbitrary_files.update(unstructured_map)
def __upload_tool_files(self):
for referenced_tool_file in self.referenced_tool_files:
self.transfer_tracker.handle_transfer_path(referenced_tool_file, path_type.TOOL)
def __upload_job_directory_files(self):
for job_directory_file in self.job_directory_files:
self.transfer_tracker.handle_transfer_path(job_directory_file, path_type.JOBDIR)
def __upload_arbitrary_files(self):
for path, name in self.arbitrary_files.items():
self.transfer_tracker.handle_transfer_path(path, path_type.UNSTRUCTURED, name=name)
def __upload_input_files(self):
handled_inputs = set()
for client_input in self.client_inputs:
# TODO: use object identity to handle this.
path = client_input.path
if path in handled_inputs:
continue
if client_input.input_type == CLIENT_INPUT_PATH_TYPES.INPUT_PATH:
self.__upload_input_file(client_input.action_source)
handled_inputs.add(path)
elif client_input.input_type == CLIENT_INPUT_PATH_TYPES.INPUT_EXTRA_FILES_PATH:
self.__upload_input_extra_files(client_input.action_source)
handled_inputs.add(path)
elif client_input.input_type == CLIENT_INPUT_PATH_TYPES.INPUT_METADATA_PATH:
self.__upload_input_metadata_file(client_input.action_source)
handled_inputs.add(path)
else:
raise NotImplementedError()
def __upload_input_file(self, input_action_source):
if self.__stage_input(input_action_source):
self.transfer_tracker.handle_transfer_source(input_action_source, path_type.INPUT)
def __upload_input_extra_files(self, input_action_source):
if self.__stage_input(input_action_source):
            # TODO: needs to happen elsewhere if using remote object store staging
# but we don't have the action type yet.
self.transfer_tracker.handle_transfer_directory(path_type.INPUT, action_source=input_action_source)
def __upload_input_metadata_file(self, input_action_source):
if self.__stage_input(input_action_source):
# Name must match what is generated in remote_input_path_rewrite in path_mapper.
remote_name = "metadata_%s" % basename(input_action_source['path'])
self.transfer_tracker.handle_transfer_source(input_action_source, path_type.INPUT, name=remote_name)
def __upload_working_directory_files(self):
# Task manager stages files into working directory, these need to be
# uploaded if present.
directory = self.working_directory
if directory and exists(directory):
self.transfer_tracker.handle_transfer_directory(path_type.WORKDIR, directory=directory)
def __upload_metadata_directory_files(self):
directory = self.metadata_directory
if directory and exists(directory):
self.transfer_tracker.handle_transfer_directory(path_type.METADATA, directory=directory)
def __initialize_version_file_rename(self):
version_file = self.version_file
if version_file:
remote_path = self.path_helper.remote_join(self.new_outputs_directory, COMMAND_VERSION_FILENAME)
self.transfer_tracker.register_rewrite(version_file, remote_path, path_type.OUTPUT)
def __initialize_output_file_renames(self):
for output_file in self.output_files:
remote_path = self.path_helper.remote_join(self.new_outputs_directory, basename(output_file))
self.transfer_tracker.register_rewrite(output_file, remote_path, path_type.OUTPUT)
def __initialize_task_output_file_renames(self):
for output_file in self.output_files:
name = basename(output_file)
task_file = join(self.working_directory, name)
remote_path = self.path_helper.remote_join(self.new_working_directory, name)
self.transfer_tracker.register_rewrite(task_file, remote_path, path_type.OUTPUT_WORKDIR)
def __initialize_config_file_renames(self):
for config_file in self.config_files:
remote_path = self.path_helper.remote_join(self.new_configs_directory, basename(config_file))
self.transfer_tracker.register_rewrite(config_file, remote_path, path_type.CONFIG)
def __handle_rewrites(self):
"""
        For each file that has been transferred and renamed, update
        command_line and config files to reflect that rewrite.
"""
self.transfer_tracker.rewrite_input_paths()
def __upload_rewritten_config_files(self):
for config_file, new_config_contents in self.job_inputs.config_files.items():
self.transfer_tracker.handle_transfer_path(config_file, type=path_type.CONFIG, contents=new_config_contents)
def get_command_line(self):
"""
Returns the rewritten version of the command line to execute suitable
for remote host.
"""
return self.job_inputs.command_line
def __stage_input(self, source):
if not self.rewrite_paths:
return True
# If we have disabled path rewriting, just assume everything needs to be transferred,
# else check to ensure the file is referenced before transferring it.
return self.job_inputs.path_referenced(source['path'])
class JobInputs(object):
"""
Abstractions over dynamic inputs created for a given job (namely the command to
execute and created configfiles).
**Parameters**
command_line : str
Local command to execute for this job. (To be rewritten.)
config_files : str
Config files created for this job. (To be rewritten.)
>>> import tempfile
>>> tf = tempfile.NamedTemporaryFile()
>>> def setup_inputs(tf):
... open(tf.name, "w").write(u'''world /path/to/input '/path/to/moo' "/path/to/cow" the rest''')
... inputs = JobInputs(u"hello /path/to/input", [tf.name])
... return inputs
>>> inputs = setup_inputs(tf)
>>> inputs.rewrite_paths(u"/path/to/input", u'C:\\input')
>>> inputs.command_line == u'hello C:\\\\input'
True
>>> inputs.config_files[tf.name] == u'''world C:\\\\input '/path/to/moo' "/path/to/cow" the rest'''
True
>>> tf.close()
>>> tf = tempfile.NamedTemporaryFile()
>>> inputs = setup_inputs(tf)
>>> sorted(inputs.find_referenced_subfiles('/path/to')) == [u'/path/to/cow', u'/path/to/input', u'/path/to/moo']
True
>>> inputs.path_referenced('/path/to')
True
>>> inputs.path_referenced(u'/path/to')
True
>>> inputs.path_referenced('/path/to/input')
True
>>> inputs.path_referenced('/path/to/notinput')
False
>>> tf.close()
"""
def __init__(self, command_line, config_files):
self.command_line = command_line
self.config_files = {}
for config_file in config_files or []:
config_contents = _read(config_file)
self.config_files[config_file] = config_contents
def find_pattern_references(self, pattern):
referenced_files = set()
for input_contents in self.__items():
referenced_files.update(findall(pattern, input_contents))
return list(referenced_files)
def find_referenced_subfiles(self, directory):
"""
Return list of files below specified `directory` in job inputs. Could
use more sophisticated logic (match quotes to handle spaces, handle
subdirectories, etc...).
**Parameters**
directory : str
Full path to directory to search.
"""
if directory is None:
return []
pattern = r'''[\'\"]?(%s%s[^\s\'\"]+)[\'\"]?''' % (escape(directory), escape(sep))
return self.find_pattern_references(pattern)
def path_referenced(self, path):
pattern = r"%s" % path
found = False
for input_contents in self.__items():
if findall(pattern, input_contents):
found = True
break
return found
def rewrite_paths(self, local_path, remote_path):
"""
Rewrite references to `local_path` with `remote_path` in job inputs.
"""
self.__rewrite_command_line(local_path, remote_path)
self.__rewrite_config_files(local_path, remote_path)
def __rewrite_command_line(self, local_path, remote_path):
self.command_line = self.command_line.replace(local_path, remote_path)
def __rewrite_config_files(self, local_path, remote_path):
for config_file, contents in self.config_files.items():
self.config_files[config_file] = contents.replace(local_path, remote_path)
def __items(self):
items = [self.command_line]
items.extend(self.config_files.values())
return items
class TransferTracker(object):
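    # Tracks which local files are staged to the remote Pulsar server (either
    # uploaded directly via client.put_file() or queued as remote staging
    # actions) and records the resulting local -> remote path renames so that
    # rewrite_input_paths() can later rewrite the command line and config
    # files through JobInputs.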
def __init__(self, client, path_helper, action_mapper, job_inputs, rewrite_paths, job_directory):
self.client = client
self.path_helper = path_helper
self.action_mapper = action_mapper
self.job_inputs = job_inputs
self.rewrite_paths = rewrite_paths
self.job_directory = job_directory
self.file_renames = {}
self.remote_staging_actions = []
def handle_transfer_path(self, path, type, name=None, contents=None):
source = {"path": path}
return self.handle_transfer_source(source, type, name=name, contents=contents)
def handle_transfer_directory(self, type, directory=None, action_source=None):
        # TODO: needs to happen elsewhere if using remote object store staging
# but we don't have the action type yet.
if directory is None:
assert action_source is not None
action = self.__action_for_transfer(action_source, type, None)
if not action.staging_action_local and action.whole_directory_transfer_supported:
# If we're going to transfer the whole directory remotely, don't walk the files
# here.
# We could still rewrite paths and just not transfer the files.
assert not self.rewrite_paths
                self.__add_remote_staging_input(action, None, type)
return
directory = action_source['path']
else:
assert action_source is None
for directory_file_name in directory_files(directory):
directory_file_path = join(directory, directory_file_name)
remote_name = self.path_helper.remote_name(relpath(directory_file_path, directory))
self.handle_transfer_path(directory_file_path, type, name=remote_name)
def handle_transfer_source(self, source, type, name=None, contents=None):
action = self.__action_for_transfer(source, type, contents)
if action.staging_needed:
local_action = action.staging_action_local
if local_action:
path = source['path']
if not exists(path):
message = "Pulsar: __upload_input_file called on empty or missing dataset." + \
" No such file: [%s]" % path
log.debug(message)
return
response = self.client.put_file(path, type, name=name, contents=contents, action_type=action.action_type)
def get_path():
return response['path']
else:
path = source['path']
job_directory = self.job_directory
assert job_directory, "job directory required for action %s" % action
if not name:
# TODO: consider fetching this from source so an actual input path
# isn't needed. At least it isn't used though.
name = basename(path)
self.__add_remote_staging_input(action, name, type)
def get_path():
return job_directory.calculate_path(name, type)
register = self.rewrite_paths or type == 'tool' # Even if inputs not rewritten, tool must be.
if register:
self.register_rewrite_action(action, get_path(), force=True)
elif self.rewrite_paths:
path_rewrite = action.path_rewrite(self.path_helper)
if path_rewrite:
self.register_rewrite_action(action, path_rewrite, force=True)
# else: # No action for this file
def __add_remote_staging_input(self, action, name, type):
input_dict = dict(
name=name,
type=type,
action=action.to_dict(),
)
self.remote_staging_actions.append(input_dict)
def __action_for_transfer(self, source, type, contents):
if contents:
# If contents loaded in memory, no need to write out file and copy,
# just transfer.
action = MessageAction(contents=contents, client=self.client)
else:
path = source.get("path")
if path is not None and not exists(path):
message = "__action_for_transfer called on non-existent file - [%s]" % path
                log.warning(message)
raise Exception(message)
action = self.__action(source, type)
return action
def register_rewrite(self, local_path, remote_path, type, force=False):
action = self.__action({"path": local_path}, type)
self.register_rewrite_action(action, remote_path, force=force)
def register_rewrite_action(self, action, remote_path, force=False):
if action.staging_needed or force:
path = getattr(action, 'path', None)
if path:
self.file_renames[path] = remote_path
def rewrite_input_paths(self):
"""
        For each file that has been transferred and renamed, update
        command_line and config files to reflect that rewrite.
"""
for local_path, remote_path in self.file_renames.items():
self.job_inputs.rewrite_paths(local_path, remote_path)
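    # Illustrative note (not part of the original module): if /local/inputs/input1.dat
    # has been registered for rewrite to /remote/job/inputs/input1.dat, the command
    # line and any config file contents containing the former path are updated to the
    # latter via job_inputs.rewrite_paths(). The example paths are hypothetical.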
def __action(self, source, type):
return self.action_mapper.action(source, type)
def _read(path):
"""
    Utility method to quickly read small files (config files and tool
    wrappers) into memory as strings.
    """
    with open(path, "r", encoding="utf-8") as config_file:
        return config_file.read()
__all__ = ['submit_job']
|
tartavull/google-cloud-python
|
refs/heads/master
|
monitoring/tests/unit/test_label.py
|
3
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestLabelValueType(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.monitoring.label import LabelValueType
return LabelValueType
def test_one(self):
self.assertTrue(hasattr(self._get_target_class(), 'STRING'))
def test_names(self):
for name in self._get_target_class().__dict__:
if not name.startswith('_'):
self.assertEqual(getattr(self._get_target_class(), name), name)
class TestLabelDescriptor(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.monitoring.label import LabelDescriptor
return LabelDescriptor
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
def test_constructor(self):
KEY = 'response_code'
VALUE_TYPE = 'INT64'
DESCRIPTION = 'HTTP status code for the request.'
descriptor = self._make_one(key=KEY, value_type=VALUE_TYPE,
description=DESCRIPTION)
self.assertEqual(descriptor.key, KEY)
self.assertEqual(descriptor.value_type, VALUE_TYPE)
self.assertEqual(descriptor.description, DESCRIPTION)
def test_constructor_defaults(self):
KEY = 'response_code'
descriptor = self._make_one(key=KEY)
self.assertEqual(descriptor.key, KEY)
self.assertEqual(descriptor.value_type, 'STRING')
self.assertEqual(descriptor.description, '')
def test_from_dict(self):
KEY = 'response_code'
VALUE_TYPE = 'INT64'
DESCRIPTION = 'HTTP status code for the request.'
info = {
'key': KEY,
'valueType': VALUE_TYPE,
'description': DESCRIPTION,
}
descriptor = self._get_target_class()._from_dict(info)
self.assertEqual(descriptor.key, KEY)
self.assertEqual(descriptor.value_type, VALUE_TYPE)
self.assertEqual(descriptor.description, DESCRIPTION)
def test_from_dict_defaults(self):
KEY = 'response_code'
info = {'key': KEY}
descriptor = self._get_target_class()._from_dict(info)
self.assertEqual(descriptor.key, KEY)
self.assertEqual(descriptor.value_type, 'STRING')
self.assertEqual(descriptor.description, '')
def test_to_dict(self):
KEY = 'response_code'
VALUE_TYPE = 'INT64'
DESCRIPTION = 'HTTP status code for the request.'
descriptor = self._make_one(key=KEY, value_type=VALUE_TYPE,
description=DESCRIPTION)
expected = {
'key': KEY,
'valueType': VALUE_TYPE,
'description': DESCRIPTION,
}
self.assertEqual(descriptor._to_dict(), expected)
def test_to_dict_defaults(self):
KEY = 'response_code'
descriptor = self._make_one(key=KEY)
expected = {
'key': KEY,
'valueType': 'STRING',
}
self.assertEqual(descriptor._to_dict(), expected)
def test_equality(self):
KEY = 'response_code'
VALUE_TYPE = 'INT64'
DESCRIPTION = 'HTTP status code for the request.'
descriptor1 = self._make_one(key=KEY, value_type=VALUE_TYPE,
description=DESCRIPTION)
descriptor2 = self._make_one(key=KEY, value_type=VALUE_TYPE,
description=DESCRIPTION)
self.assertTrue(descriptor1 == descriptor2)
self.assertFalse(descriptor1 != descriptor2)
|
sahana/Turkey
|
refs/heads/master
|
modules/s3db/disease.py
|
5
|
# -*- coding: utf-8 -*-
""" Sahana Eden Disease Tracking Models
@copyright: 2014-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("DiseaseDataModel",
"CaseTrackingModel",
"ContactTracingModel",
"DiseaseStatsModel",
"disease_rheader",
)
import datetime
try:
# try stdlib (Python 2.6)
import json
except ImportError:
try:
# try external module
import simplejson as json
    except ImportError:
# fallback to pure-Python module
import gluon.contrib.simplejson as json
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3PopupLink
# Monitoring upgrades {new_level:previous_levels}
MONITORING_UPGRADE = {"OBSERVATION": ("NONE",
"FOLLOW-UP",
),
"DIAGNOSTICS": ("NONE",
"OBSERVATION",
"FOLLOW-UP",
),
"QUARANTINE": ("NONE",
"OBSERVATION",
"DIAGNOSTICS",
"FOLLOW-UP",
),
}
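# Illustrative note (not part of the original module): disease_upgrade_monitoring()
# below only applies an upgrade when the case's current level is listed for the new
# level, e.g. "OBSERVATION" in MONITORING_UPGRADE["QUARANTINE"] is True, so a case
# under OBSERVATION can be upgraded to QUARANTINE, while the reverse is not allowed.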
# =============================================================================
class DiseaseDataModel(S3Model):
names = ("disease_disease",
"disease_disease_id",
"disease_symptom",
"disease_symptom_id",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# =====================================================================
# Basic Disease Information
#
tablename = "disease_disease"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
Field("name",
requires = IS_NOT_EMPTY()
),
Field("short_name"),
Field("acronym"),
Field("code",
label = T("ICD-10-CM Code"),
),
Field("description", "text"),
Field("trace_period", "integer",
label = T("Trace Period before Symptom Debut (days)"),
),
Field("watch_period", "integer",
label = T("Watch Period after Exposure (days)"),
),
s3_comments(),
*s3_meta_fields())
represent = S3Represent(lookup=tablename)
disease_id = S3ReusableField("disease_id", "reference %s" % tablename,
label = T("Disease"),
represent = represent,
requires = IS_ONE_OF(db, "disease_disease.id",
represent,
),
sortby = "name",
comment = S3PopupLink(f = "disease",
tooltip = T("Add a new disease to the catalog"),
),
)
self.add_components(tablename,
disease_symptom = "disease_id",
)
self.configure(tablename,
deduplicate = self.disease_duplicate,
super_entity = "doc_entity",
)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Disease"),
title_display = T("Disease Information"),
title_list = T("Diseases"),
title_update = T("Edit Disease Information"),
title_upload = T("Import Disease Information"),
label_list_button = T("List Diseases"),
label_delete_button = T("Delete Disease Information"),
msg_record_created = T("Disease Information added"),
msg_record_modified = T("Disease Information updated"),
msg_record_deleted = T("Disease Information deleted"),
msg_list_empty = T("No Diseases currently registered"))
# =====================================================================
# Symptom Information
#
tablename = "disease_symptom"
define_table(tablename,
disease_id(),
Field("name"),
Field("description",
label = T("Short Description"),
),
Field("assessment",
label = T("Assessment method"),
),
*s3_meta_fields())
# @todo: refine to include disease name?
represent = S3Represent(lookup=tablename)
symptom_id = S3ReusableField("symptom_id", "reference %s" % tablename,
label = T("Symptom"),
represent = represent,
requires = IS_ONE_OF(db, "disease_symptom.id",
represent,
),
sortby = "name",
)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Symptom"),
title_display = T("Symptom Information"),
title_list = T("Symptoms"),
title_update = T("Edit Symptom Information"),
title_upload = T("Import Symptom Information"),
label_list_button = T("List Symptoms"),
label_delete_button = T("Delete Symptom Information"),
msg_record_created = T("Symptom Information added"),
msg_record_modified = T("Symptom Information updated"),
msg_record_deleted = T("Symptom Information deleted"),
msg_list_empty = T("No Symptom Information currently available"))
# Pass names back to global scope (s3.*)
return dict(disease_disease_id = disease_id,
disease_symptom_id = symptom_id,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(disease_disease_id = lambda **attr: dummy("disease_id"),
disease_symptom_id = lambda **attr: dummy("symptom_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def disease_duplicate(item):
"""
Disease import update detection
@param item: the import item
"""
data = item.data
code = data.get("code")
name = data.get("name")
table = item.table
queries = []
if code:
queries.append((table.code == code))
if name:
queries.append((table.name == name))
if queries:
query = reduce(lambda x, y: x | y, queries)
else:
return
rows = current.db(query).select(table.id,
table.code,
table.name)
duplicate = None
for row in rows:
if code and row.code == code:
duplicate = row.id
break
if name and row.name == name:
duplicate = row.id
if duplicate:
item.id = duplicate
item.method = item.METHOD.UPDATE
return
# =============================================================================
class CaseTrackingModel(S3Model):
names = ("disease_case",
"disease_case_id",
"disease_case_monitoring",
"disease_case_monitoring_symptom",
"disease_case_diagnostics",
)
def model(self):
# @todo: add treatment component?
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
configure = self.configure
add_components = self.add_components
person_id = self.pr_person_id
# =====================================================================
# Diagnosis Status
#
diagnosis_status = {"UNKNOWN": T("Unknown"),
"RISK": T("At Risk"),
"PROBABLE": T("Probable"),
"CONFIRMED-POS": T("Confirmed Positive"),
"CONFIRMED-NEG": T("Confirmed Negative"),
}
diagnosis_status_represent = S3Represent(options = diagnosis_status)
# =====================================================================
# Monitoring Levels
#
monitoring_levels = {"NONE": T("No Monitoring"),
# Clinical observation required:
"OBSERVATION": T("Observation"),
# Targeted diagnostics required:
"DIAGNOSTICS": T("Diagnostics"),
# Quarantine required:
"QUARANTINE": T("Quarantine"),
# Follow-up after recovery:
"FOLLOW-UP": T("Post-Recovery Follow-Up"),
}
monitoring_level_represent = S3Represent(options = monitoring_levels)
# =====================================================================
# Illness status
#
illness_status = {"UNKNOWN": T("Unknown, Not Checked"),
"ASYMPTOMATIC": T("Asymptomatic, Clinical Signs Negative"),
"SYMPTOMATIC": T("Symptomatic, Clinical Signs Positive"),
"SEVERE": T("Severely Ill, Clinical Signs Positive"),
"DECEASED": T("Deceased, Clinical Signs Positive"),
"RECOVERED": T("Recovered"),
}
illness_status_represent = S3Represent(options = illness_status)
# =====================================================================
# Case
#
tablename = "disease_case"
define_table(tablename,
Field("case_number", length=64,
requires = IS_EMPTY_OR(
IS_NOT_IN_DB(db, "disease_case.case_number")),
),
person_id(empty = False,
ondelete = "CASCADE",
),
self.disease_disease_id(),
#s3_date(), # date registered == created_on?
self.gis_location_id(),
# @todo: add site ID for registering site?
# Current illness status and symptom debut
Field("illness_status",
label = T("Current Illness Status"),
represent = illness_status_represent,
requires = IS_IN_SET(illness_status),
default = "UNKNOWN",
),
s3_date("symptom_debut",
label = T("Symptom Debut"),
),
# Current diagnosis status and date of last status update
Field("diagnosis_status",
label = T("Diagnosis Status"),
represent = diagnosis_status_represent,
requires = IS_IN_SET(diagnosis_status),
default = "UNKNOWN",
),
s3_date("diagnosis_date",
default = "now",
label = T("Diagnosis Date"),
),
# Current monitoring level and end date
Field("monitoring_level",
label = T("Current Monitoring Level"),
represent = monitoring_level_represent,
requires = IS_IN_SET(monitoring_levels),
default = "NONE",
),
s3_date("monitoring_until",
label = T("Monitoring required until"),
),
*s3_meta_fields())
# Reusable Field
represent = disease_CaseRepresent()
case_id = S3ReusableField("case_id", "reference %s" % tablename,
label = T("Case"),
represent = represent,
requires = IS_ONE_OF(db, "disease_case.id",
represent,
),
comment = S3PopupLink(f = "case",
tooltip = T("Add a new case"),
),
)
# Components
add_components(tablename,
disease_case_monitoring = "case_id",
disease_case_diagnostics = "case_id",
disease_tracing = "case_id",
disease_exposure = ({"name": "exposure",
"joinby": "person_id",
"pkey": "person_id",
},
{"name": "contact",
"joinby": "case_id",
},
),
)
report_fields = ["disease_id",
"location_id",
"illness_status",
"monitoring_level",
"diagnosis_status",
]
report_options = {"rows": report_fields,
"cols": report_fields,
"fact": [(T("Number of Cases"), "count(id)"),
],
"defaults": {"rows": "location_id",
"cols": "diagnosis_status",
"fact": "count(id)",
"totals": True,
},
}
filter_widgets = [S3TextFilter(["case_number",
"person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
],
label = T("Search"),
comment = T("Enter Case Number or Name"),
),
S3OptionsFilter("monitoring_level",
options = monitoring_levels,
),
S3OptionsFilter("diagnosis_status",
options = diagnosis_status,
),
S3LocationFilter("location_id",
),
]
configure(tablename,
create_onvalidation = self.case_create_onvalidation,
deduplicate = self.case_duplicate,
delete_next = URL(f="case", args=["summary"]),
filter_widgets = filter_widgets,
onaccept = self.case_onaccept,
report_options = report_options,
)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Case"),
title_display = T("Case Details"),
title_list = T("Cases"),
title_update = T("Edit Cases"),
title_upload = T("Import Cases"),
label_list_button = T("List Cases"),
label_delete_button = T("Delete Case"),
msg_record_created = T("Case added"),
msg_record_modified = T("Case updated"),
msg_record_deleted = T("Case deleted"),
msg_list_empty = T("No Cases currently registered"))
# =====================================================================
# Monitoring
#
tablename = "disease_case_monitoring"
define_table(tablename,
case_id(),
s3_datetime(default="now"),
Field("illness_status",
represent = illness_status_represent,
requires = IS_IN_SET(illness_status),
),
s3_comments(),
*s3_meta_fields())
# Reusable Field
represent = S3Represent(lookup=tablename, fields=["case_id"])
status_id = S3ReusableField("status_id", "reference %s" % tablename,
label = T("Case"),
represent = represent,
requires = IS_ONE_OF(db, "disease_case.id",
represent,
),
comment = S3PopupLink(f = "case",
tooltip = T("Add a new case"),
),
)
# Components
add_components(tablename,
disease_symptom = {"link": "disease_case_monitoring_symptom",
"joinby": "status_id",
"key": "symptom_id",
}
)
# Custom CRUD form
crud_fields = ["case_id",
"date",
"illness_status",
S3SQLInlineLink("symptom",
field = "symptom_id",
label = T("Symptoms"),
multiple = True,
),
"comments",
]
configure(tablename,
crud_form = S3SQLCustomForm(*crud_fields),
list_fields = ["date",
"illness_status",
(T("Symptoms"), "symptom.name"),
"comments",
],
onaccept = self.monitoring_onaccept,
)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Monitoring Update"),
title_display = T("Monitoring Update"),
title_list = T("Monitoring Updates"),
title_update = T("Edit Monitoring Update"),
title_upload = T("Import Monitoring Updates"),
label_list_button = T("List Monitoring Updates"),
label_delete_button = T("Delete Monitoring Update"),
msg_record_created = T("Monitoring Update added"),
msg_record_modified = T("Monitoring Update updated"),
msg_record_deleted = T("Monitoring Update deleted"),
msg_list_empty = T("No Monitoring Information currently available"))
# =====================================================================
# Monitoring <=> Symptom
#
tablename = "disease_case_monitoring_symptom"
define_table(tablename,
Field("status_id", "reference disease_case_monitoring",
requires = IS_ONE_OF(db, "disease_case_monitoring.id"),
),
self.disease_symptom_id(),
*s3_meta_fields())
# =====================================================================
# Diagnostics
#
probe_status = {"PENDING": T("Pending"),
"PROCESSED": T("Processed"),
"VALIDATED": T("Validated"),
"INVALID": T("Invalid"),
"LOST": T("Lost"),
}
tablename = "disease_case_diagnostics"
define_table(tablename,
case_id(),
# @todo: make a lookup table in DiseaseDataModel:
Field("probe_type"),
Field("probe_number", length = 64, unique = True,
),
s3_date("probe_date",
default = "now",
label = T("Probe Date"),
),
Field("probe_status",
represent = S3Represent(options = probe_status),
requires = IS_IN_SET(probe_status),
default = "PENDING",
),
# @todo: make a lookup table in DiseaseDataModel:
Field("test_type"),
Field("result"),
s3_date("result_date",
label = T("Result Date"),
),
Field("conclusion",
represent = diagnosis_status_represent,
requires = IS_EMPTY_OR(
IS_IN_SET(diagnosis_status)),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Diagnostic Test"),
title_display = T("Diagnostic Test Details"),
title_list = T("Diagnostic Tests"),
title_update = T("Edit Diagnostic Test Details"),
title_upload = T("Import Diagnostic Test Data"),
label_list_button = T("List Diagnostic Tests"),
label_delete_button = T("Delete Diagnostic Test"),
msg_record_created = T("Diagnostic Test added"),
msg_record_modified = T("Diagnostic Test updated"),
msg_record_deleted = T("Diagnostic Test deleted"),
msg_list_empty = T("No Diagnostic Tests currently registered"))
# =====================================================================
# Pass names back to global scope (s3.*)
return dict(disease_case_id = case_id)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(disease_case_id = lambda **attr: dummy("case_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def get_case(person_id, disease_id):
"""
Find the case record for a person for a disease
@param person_id: the person record ID
@param disease_id: the disease record ID
"""
ctable = current.s3db.disease_case
query = (ctable.person_id == person_id) & \
(ctable.disease_id == disease_id) & \
(ctable.deleted != True)
record = current.db(query).select(ctable.id,
ctable.case_number,
limitby = (0, 1)).first()
return record
# -------------------------------------------------------------------------
@classmethod
def case_create_onvalidation(cls, form):
"""
Make sure that there's only one case per person and disease
"""
formvars = form.vars
try:
case_id = formvars.id
person_id = formvars.person_id
        except AttributeError:
return
if "disease_id" not in formvars:
disease_id = current.s3db.disease_case.disease_id.default
else:
disease_id = formvars.disease_id
record = cls.get_case(person_id, disease_id)
if record and record.id != case_id:
error = current.T("This case is already registered")
link = A(record.case_number,
_href=URL(f="case", args=[record.id]))
form.errors.person_id = XML("%s: %s" % (error, link))
return
# -------------------------------------------------------------------------
@staticmethod
def case_duplicate(item):
"""
Case import update detection
@param item: the import item
"""
data = item.data
case_number = data.get("case_number")
person_id = data.get("person_id")
table = item.table
if case_number:
query = (table.case_number == case_number) & \
(table.deleted != True)
else:
disease_id = data.get("disease_id")
if person_id and disease_id:
query = (table.disease_id == disease_id) & \
(table.person_id == person_id) & \
(table.deleted != True)
else:
return
duplicate = current.db(query).select(table.id,
table.person_id,
limitby=(0, 1)).first()
if duplicate:
item.data.person_id = duplicate.person_id
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
# -------------------------------------------------------------------------
@staticmethod
def case_onaccept(form):
"""
Propagate status updates of the case to high-risk contacts
"""
formvars = form.vars
try:
record_id = formvars.id
except AttributeError:
return
disease_propagate_case_status(record_id)
return
# -------------------------------------------------------------------------
@staticmethod
def monitoring_onaccept(form):
"""
Update the illness status of the case from last monitoring entry
"""
formvars = form.vars
try:
record_id = formvars.id
except AttributeError:
return
db = current.db
s3db = current.s3db
ctable = s3db.disease_case
mtable = s3db.disease_case_monitoring
# Get the case ID
case_id = None
if "case_id" not in formvars:
query = (mtable.id == record_id)
row = db(query).select(mtable.case_id, limitby=(0, 1)).first()
if row:
case_id = row.case_id
else:
case_id = formvars.case_id
if not case_id:
return
query = (mtable.case_id == case_id) & \
(mtable.illness_status != None)
row = db(query).select(mtable.illness_status,
orderby = "disease_case_monitoring.date desc",
limitby = (0, 1)).first()
if row:
db(ctable.id == case_id).update(illness_status = row.illness_status)
# Propagate case status to contacts
disease_propagate_case_status(case_id)
return
# =============================================================================
class disease_CaseRepresent(S3Represent):
def __init__(self):
""" Constructor """
super(disease_CaseRepresent, self).__init__(lookup = "disease_case")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=[]):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
table = self.table
ptable = s3db.pr_person
dtable = s3db.disease_disease
left = [ptable.on(ptable.id == table.person_id),
dtable.on(dtable.id == table.disease_id)]
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(table.id,
table.case_number,
dtable.name,
dtable.short_name,
dtable.acronym,
ptable.first_name,
ptable.last_name,
left = left)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
try:
case_number = row[self.tablename].case_number
except AttributeError:
return row.case_number
disease_name = None
try:
disease = row["disease_disease"]
except AttributeError:
pass
else:
for field in ("acronym", "short_name", "name"):
if field in disease:
disease_name = disease[field]
if disease_name:
break
if disease_name and case_number:
case = "%s [%s]" % (case_number, disease_name)
elif disease_name:
case = "[%s]" % disease_name
else:
case = case_number
try:
person = row["pr_person"]
except AttributeError:
return case
full_name = s3_fullname(person)
if case:
return " ".join((case, full_name))
else:
return full_name
# =============================================================================
class ContactTracingModel(S3Model):
names = ("disease_tracing",
"disease_exposure",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
case_id = self.disease_case_id
# =====================================================================
# Tracing Information: when/where did a case pose risk for exposure?
#
# Processing Status
contact_tracing_status = {
"OPEN": T("Open"), # not all contacts identified yet
"COMPLETE": T("Complete"), # all contacts identified, closed
}
tablename = "disease_tracing"
define_table(tablename,
case_id(),
s3_datetime("start_date",
label = T("From"),
set_min="#disease_tracing_end_date",
),
s3_datetime("end_date",
label = T("To"),
set_max="#disease_tracing_start_date",
),
# @todo: add site_id?
self.gis_location_id(),
Field("circumstances", "text",
),
Field("status",
default = "OPEN",
label = T("Tracing Status"),
requires = IS_IN_SET(contact_tracing_status, zero=None),
represent = S3Represent(options=contact_tracing_status),
),
s3_comments(),
*s3_meta_fields())
# @todo: implement specific S3Represent class
represent = S3Represent(lookup=tablename, fields=["case_id"])
tracing_id = S3ReusableField("tracing_id", "reference %s" % tablename,
label = T("Tracing Record"),
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "disease_tracing.id",
represent,
)),
sortby = "date",
comment = S3PopupLink(f = "tracing",
tooltip = T("Add a new contact tracing information"),
),
)
self.add_components(tablename,
disease_exposure = "tracing_id",
)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Tracing Record"),
title_display = T("Tracing Details"),
title_list = T("Contact Tracings"),
title_update = T("Edit Tracing Information"),
title_upload = T("Import Tracing Information"),
label_list_button = T("List Tracing Record"),
label_delete_button = T("Delete Tracing Record"),
msg_record_created = T("Tracing Record added"),
msg_record_modified = T("Tracing Record updated"),
msg_record_deleted = T("Tracing Record deleted"),
msg_list_empty = T("No Contact Tracings currently registered"))
# =====================================================================
# Protection
#
protection_level = {"NONE": T("Unknown"),
"PARTIAL": T("Partial"),
"FULL": T("Full"),
}
protection_level_represent = S3Represent(options = protection_level)
# =====================================================================
# Exposure Type
#
exposure_type = {"UNKNOWN": T("Unknown"),
"DIRECT": T("Direct"),
"INDIRECT": T("Indirect"),
}
exposure_type_represent = S3Represent(options = exposure_type)
# =====================================================================
# Exposure Risk Level
#
exposure_risk = {"UNKNOWN": T("Unknown"),
"NONE": T("No known exposure"),
"LOW": T("Low risk exposure"),
"HIGH": T("High risk exposure"),
}
exposure_risk_represent = S3Represent(options = exposure_risk)
# =====================================================================
# Exposure: when and how was a person exposed to the disease?
#
tablename = "disease_exposure"
define_table(tablename,
case_id(),
tracing_id(),
self.pr_person_id(requires = IS_ADD_PERSON_WIDGET2(),
widget = S3AddPersonWidget2(controller="pr"),
),
s3_datetime(),
#self.gis_location_id(),
Field("exposure_type",
default = "UNKNOWN",
represent = exposure_type_represent,
requires = IS_IN_SET(exposure_type, zero=None),
),
Field("protection_level",
default = "NONE",
represent = protection_level_represent,
requires = IS_IN_SET(protection_level, zero=None),
),
Field("exposure_risk",
default = "LOW",
represent = exposure_risk_represent,
requires = IS_IN_SET(exposure_risk, zero=None),
),
Field("circumstances", "text"),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.exposure_onaccept,
)
crud_strings[tablename] = Storage(
label_create = T("Add Exposure Information"),
title_display = T("Exposure Details"),
title_list = T("Exposure Information"),
title_update = T("Edit Exposure Information"),
title_upload = T("Import Exposure Information"),
label_list_button = T("List Exposures"),
label_delete_button = T("Delete Exposure Information"),
msg_record_created = T("Exposure Information added"),
msg_record_modified = T("Exposure Information updated"),
msg_record_deleted = T("Exposure Information deleted"),
msg_list_empty = T("No Exposure Information currently registered"))
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
return {}
# -------------------------------------------------------------------------
@staticmethod
def exposure_onaccept(form):
"""
            Onaccept of exposure records: propagate the status of the
            related case to its high-risk contacts
            @param form: the FORM
"""
formvars = form.vars
try:
record_id = formvars.id
except AttributeError:
return
db = current.db
s3db = current.s3db
# We need case_id, person_id and exposure_risk from the current record
if "case_id" not in formvars:
etable = s3db.disease_exposure
row = db(etable.id == record_id).select(etable.case_id,
limitby = (0, 1)).first()
if not row:
return
case_id = row.case_id
else:
case_id = formvars.case_id
disease_propagate_case_status(case_id)
return
# =============================================================================
def disease_propagate_case_status(case_id):
"""
        Propagate the status of a case to its high-risk contacts: for every
        person with a high-risk exposure to this case during the risk period,
        create a case (or upgrade an existing one) with monitoring level
        OBSERVATION
        @param case_id: the disease_case record ID
"""
db = current.db
s3db = current.s3db
risk_status = ("SYMPTOMATIC", "SEVERE", "DECEASED", "RECOVERED")
# Get the case
ctable = s3db.disease_case
query = (ctable.id == case_id) & \
(ctable.deleted != True)
case = db(query).select(ctable.id,
ctable.created_on,
ctable.disease_id,
ctable.illness_status,
ctable.symptom_debut,
ctable.diagnosis_status,
ctable.diagnosis_date,
limitby = (0, 1)).first()
if case is None:
return
disease_id = case.disease_id
# Try to establish a symptom debut
symptom_debut = case.symptom_debut
if not symptom_debut:
# Get all monitoring entries for this case
mtable = s3db.disease_case_monitoring
query = (mtable.case_id == case_id) & \
(mtable.illness_status.belongs(risk_status)) & \
(mtable.deleted != True)
monitoring = db(query).select(mtable.date,
orderby = "disease_case_monitoring.date desc",
limitby = (0, 1)).first()
if monitoring:
symptom_debut = monitoring.date
if not symptom_debut and case.illness_status in risk_status:
symptom_debut = case.created_on
if not symptom_debut:
# Case is not known to ever have shown any symptoms
return
if case.diagnosis_status == "CONFIRMED-NEG" and \
case.diagnosis_date > symptom_debut:
# Case has been declared CONFIRMED-NEG after symptom debut
return
# Establish risk period (=symptom debut minus trace period)
dtable = s3db.disease_disease
query = (dtable.id == disease_id) & \
(dtable.deleted != True)
disease = db(query).select(dtable.trace_period,
dtable.watch_period,
limitby = (0, 1)).first()
if not disease:
return
trace_period = disease.trace_period
if trace_period:
risk_period_start = symptom_debut - datetime.timedelta(days = disease.trace_period)
else:
risk_period_start = symptom_debut
# Get all high-risk exposures after risk_period_start
etable = s3db.disease_exposure
query = (etable.case_id == case_id) & \
(etable.date >= risk_period_start) & \
(etable.exposure_risk == "HIGH") & \
(etable.deleted != True)
exposures = db(query).select(etable.person_id)
for exposure in exposures:
disease_create_case(disease_id,
exposure.person_id,
monitoring_level = "OBSERVATION",
)
return
# =============================================================================
def disease_create_case(disease_id, person_id, monitoring_level=None):
"""
        Find or create a case record for a person and disease; if a case
        already exists, optionally upgrade its monitoring level
        @param disease_id: the disease record ID
        @param person_id: the person record ID
        @param monitoring_level: the required minimum monitoring level
        @return: the disease_case record ID
"""
ctable = current.s3db.disease_case
query = (ctable.person_id == person_id) & \
(ctable.disease_id == disease_id) & \
(ctable.deleted != True)
case = current.db(query).select(ctable.id,
ctable.monitoring_level,
limitby = (0, 1)).first()
if case:
case_id = case.id
if monitoring_level is not None:
disease_upgrade_monitoring(case_id,
monitoring_level,
case=case,
)
else:
case_id = ctable.insert(disease_id = disease_id,
person_id = person_id,
monitoring_level = monitoring_level,
)
return case_id
# =============================================================================
def disease_upgrade_monitoring(case_id, level, case=None):
"""
        Upgrade the monitoring level of a case, provided its current level
        is one of the levels the new level may be upgraded from
        @param case_id: the disease_case record ID
        @param level: the new monitoring level
        @param case: the case (Row), if already loaded
"""
if level not in MONITORING_UPGRADE:
return False
else:
previous_levels = MONITORING_UPGRADE[level]
if case is None or "monitoring_level" not in case:
ctable = current.s3db.disease_case
query = (ctable.id == case_id) & \
(ctable.monitoring_level.belongs(previous_levels)) & \
(ctable.deleted != True)
case = current.db(query).select(ctable.id,
limitby = (0, 1)).first()
elif case.monitoring_level not in previous_levels:
return
if case:
case.update_record(monitoring_level = level)
return True
# =============================================================================
class DiseaseStatsModel(S3Model):
"""
Disease Statistics:
Cases:
Confirmed/Suspected/Probable
Deaths
"""
names = ("disease_statistic",
"disease_stats_data",
"disease_stats_aggregate",
"disease_stats_rebuild_all_aggregates",
"disease_stats_update_aggregates",
"disease_stats_update_location_aggregates",
)
def model(self):
T = current.T
db = current.db
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
location_id = self.gis_location_id
stats_parameter_represent = S3Represent(lookup="stats_parameter",
translate=True)
# ---------------------------------------------------------------------
# Disease Statistic Parameter
#
tablename = "disease_statistic"
define_table(tablename,
# Instance
super_link("parameter_id", "stats_parameter"),
Field("name",
label = T("Name"),
requires = IS_NOT_EMPTY(),
represent = lambda v: T(v) if v is not None \
else NONE,
),
s3_comments("description",
label = T("Description"),
),
*s3_meta_fields()
)
# CRUD Strings
ADD_STATISTIC = T("Add Statistic")
crud_strings[tablename] = Storage(
label_create = ADD_STATISTIC,
title_display = T("Statistic Details"),
title_list = T("Statistics"),
title_update = T("Edit Statistic"),
#title_upload = T("Import Statistics"),
label_list_button = T("List Statistics"),
msg_record_created = T("Statistic added"),
msg_record_modified = T("Statistic updated"),
msg_record_deleted = T("Statistic deleted"),
msg_list_empty = T("No statistics currently defined"))
configure(tablename,
deduplicate = self.disease_statistic_duplicate,
super_entity = "stats_parameter",
)
# ---------------------------------------------------------------------
# Disease Statistic Data
#
tablename = "disease_stats_data"
define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
instance_types = ("disease_statistic",),
label = T("Statistic"),
represent = stats_parameter_represent,
readable = True,
writable = True,
empty = False,
comment = S3PopupLink(c = "disease",
f = "statistic",
vars = {"child": "parameter_id"},
title = ADD_STATISTIC,
),
),
location_id(
requires = IS_LOCATION(),
widget = S3LocationAutocompleteWidget(),
),
Field("value", "double",
label = T("Value"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
requires = IS_NOT_EMPTY(),
),
s3_date(empty = False),
#Field("end_date", "date",
# # Just used for the year() VF
# readable = False,
# writable = False
# ),
# Link to Source
self.stats_source_id(),
s3_comments(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Disease Data"),
title_display = T("Disease Data Details"),
title_list = T("Disease Data"),
title_update = T("Edit Disease Data"),
title_upload = T("Import Disease Data"),
label_list_button = T("List Disease Data"),
msg_record_created = T("Disease Data added"),
msg_record_modified = T("Disease Data updated"),
msg_record_deleted = T("Disease Data deleted"),
msg_list_empty = T("No disease data currently available"))
levels = current.gis.get_relevant_hierarchy_levels()
location_fields = ["location_id$%s" % level for level in levels]
list_fields = ["parameter_id"]
list_fields.extend(location_fields)
list_fields.extend((("value",
"date",
"source_id",
)))
filter_widgets = [S3OptionsFilter("parameter_id",
label = T("Type"),
multiple = False,
# Not translateable
#represent = "%(name)s",
),
S3OptionsFilter("location_id$level",
label = T("Level"),
multiple = False,
# Not translateable
#represent = "%(name)s",
),
S3LocationFilter("location_id",
levels = levels,
),
]
report_options = Storage(rows = location_fields,
cols = ["parameter_id"],
fact = [(T("Value"), "sum(value)"),
],
defaults = Storage(rows = location_fields[0], # => L0 for multi-country, L1 for single country
cols = "parameter_id",
fact = "sum(value)",
totals = True,
chart = "breakdown:rows",
table = "collapse",
)
)
configure(tablename,
deduplicate = self.disease_stats_data_duplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
report_options = report_options,
# @ToDo: Wrapper function to call this for the record linked
# to the relevant place depending on whether approval is
# required or not. Disable when auth.override is True.
#onaccept = self.disease_stats_update_aggregates,
#onapprove = self.disease_stats_update_aggregates,
# @ToDo: deployment_setting
#requires_approval = True,
super_entity = "stats_data",
timeplot_options = {"defaults": {"event_start": "date",
"event_end": "date",
"fact": "cumulate(value)",
},
},
)
#----------------------------------------------------------------------
# Disease Aggregated data
#
# The data can be aggregated against:
# location, all the aggregated values across a number of locations
# thus for an L2 it will aggregate all the L3 values
# time, sum of all the disease_stats_data values up to this time.
# allowing us to report on cumulative values
aggregate_types = {1 : T("Time"),
2 : T("Location"),
}
tablename = "disease_stats_aggregate"
define_table(tablename,
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
empty = False,
instance_types = ("disease_statistic",),
label = T("Statistic"),
represent = S3Represent(lookup="stats_parameter"),
readable = True,
writable = True,
),
location_id(
requires = IS_LOCATION(),
widget = S3LocationAutocompleteWidget(),
),
Field("agg_type", "integer",
default = 1,
label = T("Aggregation Type"),
represent = lambda opt: \
aggregate_types.get(opt,
current.messages.UNKNOWN_OPT),
requires = IS_IN_SET(aggregate_types),
),
s3_date("date",
label = T("Start Date"),
),
Field("sum", "double",
label = T("Sum"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
),
*s3_meta_fields()
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(
disease_stats_rebuild_all_aggregates = self.disease_stats_rebuild_all_aggregates,
disease_stats_update_aggregates = self.disease_stats_update_aggregates,
disease_stats_update_location_aggregates = self.disease_stats_update_location_aggregates,
)
# -------------------------------------------------------------------------
@staticmethod
def disease_statistic_duplicate(item):
""" Import item de-duplication """
name = item.data.get("name")
table = item.table
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def disease_stats_data_duplicate(item):
""" Import item de-duplication """
data = item.data
parameter_id = data.get("parameter_id")
location_id = data.get("location_id")
date = data.get("date")
table = item.table
query = (table.date == date) & \
(table.location_id == location_id) & \
(table.parameter_id == parameter_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def disease_stats_rebuild_all_aggregates():
"""
This will delete all the disease_stats_aggregate records and
then rebuild them by triggering off a request for each
disease_stats_data record.
            This function is normally only run during prepop or postpop, so we
            don't need to worry about the aggregate data being unavailable for
            any length of time.
"""
# Check to see whether an existing task is running and if it is then kill it
db = current.db
ttable = db.scheduler_task
rtable = db.scheduler_run
wtable = db.scheduler_worker
query = (ttable.task_name == "disease_stats_update_aggregates") & \
(rtable.task_id == ttable.id) & \
(rtable.status == "RUNNING")
rows = db(query).select(rtable.id,
rtable.task_id,
rtable.worker_name)
now = current.request.utcnow
for row in rows:
db(wtable.worker_name == row.worker_name).update(status="KILL")
db(rtable.id == row.id).update(stop_time=now,
status="STOPPED")
db(ttable.id == row.task_id).update(stop_time=now,
status="STOPPED")
# Delete the existing aggregates
current.s3db.disease_stats_aggregate.truncate()
# Read all the disease_stats_data records
dtable = db.disease_stats_data
query = (dtable.deleted != True)
# @ToDo: deployment_setting to make this just the approved records
#query &= (dtable.approved_by != None)
records = db(query).select(dtable.parameter_id,
dtable.date,
dtable.value,
dtable.location_id,
)
# Fire off a rebuild task
current.s3task.async("disease_stats_update_aggregates",
vars = dict(records=records.json(), all=True),
timeout = 21600 # 6 hours
)
# -------------------------------------------------------------------------
@staticmethod
def disease_stats_update_aggregates(records=None, all=False):
"""
This will calculate the disease_stats_aggregates for the specified
records. Either all (when rebuild_all is invoked) or for the
individual parameter(s) at the specified location(s) when run
onaccept/onapprove.
@ToDo: onapprove/onaccept wrapper function.
This will get the raw data from disease_stats_data and generate
a disease_stats_aggregate record for the given time period.
The reason for doing this is so that all aggregated data can be
            obtained from a single table. So when displaying data for a
            particular location it is not necessary to check the aggregate
            table and then fall back to the data table - the aggregate
            table alone is sufficient.
            Once this has run, a complete set of aggregate records should
            exist for this parameter_id and location for every time period
            from the first data item until the current time period.
@ToDo: Add test cases to modules/unit_tests/s3db/disease.py
"""
if not records:
return
# Test to see which date format we have based on how we were called
if isinstance(records, basestring):
from_json = True
from dateutil.parser import parse
records = json.loads(records)
elif isinstance(records[0]["date"],
(datetime.date, datetime.datetime)):
from_json = False
else:
from_json = True
from dateutil.parser import parse
db = current.db
s3db = current.s3db
atable = db.disease_stats_aggregate
if not all:
# Read the database to get all the relevant records
# @ToDo: Complete this
return
dtable = s3db.disease_stats_data
# For each location/parameter pair, create a time-aggregate summing all
# the data so far
now = current.request.now
# Assemble raw data
earliest_period = now.date()
locations = {}
parameters = []
pappend = parameters.append
for record in records:
location_id = record["location_id"]
if location_id not in locations:
locations[location_id] = {}
parameter_id = record["parameter_id"]
if parameter_id not in parameters:
pappend(parameter_id)
if parameter_id not in locations[location_id]:
locations[location_id][parameter_id] = {}
if from_json:
date = parse(record["date"]) # produces a datetime
date = date.date()
else:
date = record["date"]
if date < earliest_period:
earliest_period = date
locations[location_id][parameter_id][date] = record["value"]
# Full range of dates
# 1 per day from the start of the data to the present day
from dateutil.rrule import rrule, DAILY
dates = rrule(DAILY, dtstart=earliest_period, until=now)
dates = [d.date() for d in dates]
# Add the sums
insert = atable.insert
lfield = atable.location_id
pfield = atable.parameter_id
dfield = atable.date
ifield = atable.id
_q = (atable.agg_type == 1)
for location_id in locations:
location = locations[location_id]
query = _q & (lfield == location_id)
for parameter_id in location:
parameter = location[parameter_id]
q = query & (pfield == parameter_id)
for d in dates:
values = []
vappend = values.append
for date in parameter:
if date <= d:
vappend(parameter[date])
values_sum = sum(values)
exists = db(q & (dfield == d)).select(ifield,
limitby=(0, 1))
if exists:
db(ifield == exists.first().id).update(sum = values_sum)
else:
insert(agg_type = 1, # Time
location_id = location_id,
parameter_id = parameter_id,
date = d,
sum = values_sum,
)
# For each location/parameter pair, build a location-aggregate for all
# ancestors, by level (immediate parents first).
# Ensure that we don't duplicate builds
# Do this for all dates between the changed date and the current date
# Get all the ancestors
# Read all the Paths
# NB Currently we're assuming that all Paths have been built correctly
gtable = db.gis_location
ifield = gtable.id
location_ids = locations.keys()
paths = db(ifield.belongs(location_ids)).select(gtable.path)
paths = [p.path.split("/") for p in paths]
        # Convert list of lists to a flattened list
        # (duplicates are harmless here, as it is only used in belongs() queries)
import itertools
ancestors = tuple(itertools.chain.from_iterable(paths))
# Remove locations which we already have data for
ancestors = [a for a in ancestors if a not in location_ids]
# Get all the children for each ancestor (immediate children not descendants)
pfield = gtable.parent
query = (gtable.deleted == False) & \
(pfield.belongs(ancestors))
all_children = db(query).select(ifield,
pfield)
# Read the levels
rows = db(ifield.belongs(ancestors)).select(ifield,
gtable.level)
L0 = []
L0_append = L0.append
L1 = []
L1_append = L1.append
L2 = []
L2_append = L2.append
L3 = []
L3_append = L3.append
L4 = []
L4_append = L4.append
for row in rows:
if row.level == "L0":
L0_append(row.id)
elif row.level == "L1":
L1_append(row.id)
elif row.level == "L2":
L2_append(row.id)
elif row.level == "L3":
L3_append(row.id)
elif row.level == "L4":
L4_append(row.id)
async = current.s3task.async
from gluon.serializers import json as jsons
dates = jsons(dates)
# Build the lowest level first
for level in (L4, L3, L2, L1):
for location_id in level:
children = [c.id for c in all_children if c.parent == location_id]
children = json.dumps(children)
for parameter_id in parameters:
async("disease_stats_update_location_aggregates",
args = [location_id, children, parameter_id, dates],
timeout = 1800 # 30m
)
# -------------------------------------------------------------------------
@staticmethod
def disease_stats_update_location_aggregates(location_id,
children,
parameter_id,
dates,
):
"""
Calculates the disease_stats_aggregate for a specific parameter at a
specific location over the range of dates.
@param location_id: location to aggregate at
@param children: locations to aggregate from
            @param parameter_id: the parameter to aggregate
@param dates: dates to aggregate for (as JSON string)
"""
db = current.db
atable = current.s3db.disease_stats_aggregate
ifield = atable.id
lfield = atable.location_id
pfield = atable.parameter_id
dfield = atable.date
children = json.loads(children)
# Get the most recent disease_stats_aggregate record for all child locations
# - doesn't matter whether this is a time or location aggregate
query = (pfield == parameter_id) & \
(atable.deleted != True) & \
(lfield.belongs(children))
rows = db(query).select(atable.sum,
dfield,
lfield,
orderby=(lfield, ~dfield),
# groupby avoids duplicate records for the same
# location, but is slightly slower than just
# skipping the duplicates in the loop below
#groupby=(lfield)
)
if not rows:
return
# Lookup which records already exist
query = (lfield == location_id) & \
(pfield == parameter_id)
existing = db(query).select(ifield,
dfield,
)
exists = {}
for e in existing:
exists[e.date] = e.id
from dateutil.parser import parse
dates = json.loads(dates)
insert = atable.insert
for date in dates:
# Collect the values, skip duplicate records for the
# same location => use the most recent one, which is
# the first row for each location as per the orderby
# in the query above
date = parse(date) # produces a datetime
date = date.date()
last_location = None
values = []
vappend = values.append
for row in rows:
if date < row.date:
# Skip
continue
new_location_id = row.location_id
if new_location_id != last_location:
last_location = new_location_id
vappend(row.sum)
if values:
# Aggregate the values
values_sum = sum(values)
else:
values_sum = 0
# Add or update the aggregated values in the database
attr = dict(agg_type = 2, # Location
sum = values_sum,
)
# Do we already have a record?
if date in exists:
db(ifield == exists[date]).update(**attr)
else:
# Insert new
insert(parameter_id = parameter_id,
location_id = location_id,
date = date,
**attr
)
# =============================================================================
def disease_rheader(r, tabs=None):
"""
Resource Header for Disease module
"""
T = current.T
if r.representation != "html":
return None
resourcename = r.name
if resourcename == "disease":
tabs = ((T("Basic Details"), None),
(T("Symptoms"), "symptom"),
(T("Documents"), "document"),
)
rheader_fields = (["name"],
["code"],
)
rheader = S3ResourceHeader(rheader_fields, tabs)(r)
elif resourcename == "case":
tabs = ((T("Basic Details"), None),
(T("Exposure"), "exposure"),
(T("Monitoring"), "case_monitoring"),
(T("Diagnostics"), "case_diagnostics"),
(T("Contacts"), "contact"),
(T("Tracing"), "tracing"),
)
rheader_fields = (["person_id"],
)
rheader = S3ResourceHeader(rheader_fields, tabs)(r)
elif resourcename == "tracing":
tabs = ((T("Basic Details"), None),
(T("Contact Persons"), "exposure"),
)
rheader_fields = (["case_id"],
)
rheader = S3ResourceHeader(rheader_fields, tabs)(r)
else:
rheader = ""
return rheader
# END =========================================================================
|
tempbottle/ironpython3
|
refs/heads/master
|
Src/StdLib/Lib/venv/__main__.py
|
124
|
import sys
from . import main
rc = 1
try:
main()
rc = 0
except Exception as e:
print('Error: %s' % e, file=sys.stderr)
sys.exit(rc)
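# Usage note (not part of the original module): this script is what runs when the
# interpreter is invoked as "python -m venv <directory>"; it delegates to venv.main()
# and exits with a non-zero status if environment creation fails.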
|
mariansoban/ardupilot
|
refs/heads/Copter-4.0.x-sobi-phl-8m
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/STM32F745xx.py
|
10
|
#!/usr/bin/env python
'''
these tables are generated from the STM32 datasheets for the
STM32F74x
'''
# additional build information for ChibiOS
build = {
"CHIBIOS_STARTUP_MK" : "os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32f7xx.mk",
"CHIBIOS_PLATFORM_MK" : "os/hal/ports/STM32/STM32F7xx/platform.mk"
}
# MCU parameters
mcu = {
# location of MCU serial number
'UDID_START' : 0x1FF0F420,
# ram map, as list of (address, size-kb, flags)
# flags of 1 means DMA-capable
# flags of 2 means faster memory for CPU intensive work
'RAM_MAP' : [
(0x20010000, 256, 0), # main memory, not DMA safe
(0x20000000, 64, 1), # DTCM memory, DMA safe
]
}
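# Illustrative reading of RAM_MAP above (not part of the original table): the entry
# (0x20000000, 64, 1) describes a 64 kB DTCM region starting at 0x20000000 whose
# flags value of 1 marks it as DMA-capable, per the flag convention noted above.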
DMA_Map = {
# format is (DMA_TABLE, StreamNum, Channel)
# extracted from tabula-STM32F745-222.csv
"ADC1" : [(2,0,0),(2,4,0)],
"ADC2" : [(2,2,1),(2,3,1)],
"ADC3" : [(2,0,2),(2,1,2)],
"CRYP_IN" : [(2,6,2)],
"CRYP_OUT" : [(2,5,2)],
"DAC1" : [(1,5,7)],
"DAC2" : [(1,6,7)],
"DCMI" : [(2,1,1),(2,7,1)],
"HASH_IN" : [(2,7,2)],
"I2C1_RX" : [(1,0,1),(1,5,1)],
"I2C1_TX" : [(1,6,1),(1,7,1)],
"I2C2_RX" : [(1,2,7),(1,3,7)],
"I2C2_TX" : [(1,7,7)],
"I2C3_RX" : [(1,1,1),(1,2,3)],
"I2C3_TX" : [(1,4,3)],
"I2C4" : [(1,2,2),(1,5,2)],
"QUADSPI" : [(2,7,3)],
"SAI1_A" : [(2,1,0),(2,3,0)],
"SAI1_B" : [(2,5,0),(2,4,1)],
"SAI2_A" : [(2,4,3)],
"SAI2_B" : [(2,7,0),(2,6,3)],
"SDMMC1" : [(2,3,4),(2,6,4)],
"SPDIFRX_CS" : [(1,6,0)],
"SPDIFRX_DT" : [(1,1,0)],
"SPI1_RX" : [(2,0,3),(2,2,3)],
"SPI1_TX" : [(2,3,3),(2,5,3)],
"SPI2_RX" : [(1,3,0)],
"SPI2_TX" : [(1,4,0)],
"SPI3_RX" : [(1,0,0),(1,2,0)],
"SPI3_TX" : [(1,5,0),(1,7,0)],
"SPI4_RX" : [(2,0,4),(2,3,5)],
"SPI4_TX" : [(2,1,4),(2,4,5)],
"SPI5_RX" : [(2,3,2),(2,5,7)],
"SPI5_TX" : [(2,4,2),(2,6,7)],
"SPI6_RX" : [(2,6,1)],
"SPI6_TX" : [(2,5,1)],
"TIM1_CH1" : [(2,6,0),(2,1,6),(2,3,6)],
"TIM1_CH2" : [(2,6,0),(2,2,6)],
"TIM1_CH3" : [(2,6,0),(2,6,6)],
"TIM1_CH4" : [(2,4,6)],
"TIM1_COM" : [(2,4,6)],
"TIM1_TRIG" : [(2,0,6),(2,4,6)],
"TIM1_UP" : [(2,5,6)],
"TIM2_CH1" : [(1,5,3)],
"TIM2_CH2" : [(1,6,3)],
"TIM2_CH3" : [(1,1,3)],
"TIM2_CH4" : [(1,6,3),(1,7,3)],
"TIM2_UP" : [(1,1,3),(1,7,3)],
"TIM3_CH1" : [(1,4,5)],
"TIM3_CH2" : [(1,5,5)],
"TIM3_CH3" : [(1,7,5)],
"TIM3_CH4" : [(1,2,5)],
"TIM3_TRIG" : [(1,4,5)],
"TIM3_UP" : [(1,2,5)],
"TIM4_CH1" : [(1,0,2)],
"TIM4_CH2" : [(1,3,2)],
"TIM4_CH3" : [(1,7,2)],
"TIM4_UP" : [(1,6,2)],
"TIM5_CH1" : [(1,2,6)],
"TIM5_CH2" : [(1,4,6)],
"TIM5_CH3" : [(1,0,6)],
"TIM5_CH4" : [(1,1,6),(1,3,6)],
"TIM5_TRIG" : [(1,1,6),(1,3,6)],
"TIM5_UP" : [(1,0,6),(1,6,6)],
"TIM6_UP" : [(1,1,7)],
"TIM7_UP" : [(1,2,1),(1,4,1)],
"TIM8_CH1" : [(2,2,0),(2,2,7)],
"TIM8_CH2" : [(2,2,0),(2,3,7)],
"TIM8_CH3" : [(2,2,0),(2,4,7)],
"TIM8_CH4" : [(2,7,7)],
"TIM8_COM" : [(2,7,7)],
"TIM8_TRIG" : [(2,7,7)],
"TIM8_UP" : [(2,1,7)],
"UART4_RX" : [(1,2,4)],
"UART4_TX" : [(1,4,4)],
"UART5_RX" : [(1,0,4)],
"UART5_TX" : [(1,7,4)],
"UART7_RX" : [(1,3,5)],
"UART7_TX" : [(1,1,5)],
"UART8_RX" : [(1,6,5)],
"UART8_TX" : [(1,0,5)],
"USART1_RX" : [(2,2,4),(2,5,4)],
"USART1_TX" : [(2,7,4)],
"USART2_RX" : [(1,5,4)],
"USART2_TX" : [(1,6,4)],
"USART3_RX" : [(1,1,4)],
"USART3_TX" : [(1,3,4),(1,4,7)],
"USART6_RX" : [(2,1,5),(2,2,5)],
"USART6_TX" : [(2,6,5),(2,7,5)],
"_RX" : [(1,2,2),(1,5,2)],
}
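# Illustrative reading of the DMA table above (not part of the original table): the
# entry "SPI1_RX" : [(2,0,3),(2,2,3)] means SPI1 receive traffic can be serviced by
# DMA2 Stream 0 Channel 3 or by DMA2 Stream 2 Channel 3.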
AltFunction_map = {
# format is PIN:FUNCTION : AFNUM
# extracted from tabula-AF-F745.csv
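    # e.g. (illustrative note, not in the extracted CSV) "PA0:TIM2_CH1" : 1 below
    # means pin PA0 provides the TIM2_CH1 function when alternate function 1 is
    # selected for that pin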
"PA0:ETH_MII_CRS" : 11,
"PA0:EVENTOUT" : 15,
"PA0:SAI2_SD_B" : 10,
"PA0:TIM2_CH1" : 1,
"PA0:TIM2_ETR" : 1,
"PA0:TIM5_CH1" : 2,
"PA0:TIM8_ETR" : 3,
"PA0:UART4_TX" : 8,
"PA0:USART2_CTS" : 7,
"PA1:ETH_MII_RX_CLK" : 11,
"PA1:ETH_RMII_REF_CLK" : 11,
"PA1:EVENTOUT" : 15,
"PA1:LCD_R2" : 14,
"PA1:QUADSPI_BK1_IO3" : 9,
"PA1:SAI2_MCK_B" : 10,
"PA1:TIM2_CH2" : 1,
"PA1:TIM5_CH2" : 2,
"PA1:UART4_RX" : 8,
"PA1:USART2_RTS" : 7,
"PA2:ETH_MDIO" : 11,
"PA2:EVENTOUT" : 15,
"PA2:LCD_R1" : 14,
"PA2:SAI2_SCK_B" : 8,
"PA2:TIM2_CH3" : 1,
"PA2:TIM5_CH3" : 2,
"PA2:TIM9_CH1" : 3,
"PA2:USART2_TX" : 7,
"PA3:ETH_MII_COL" : 11,
"PA3:EVENTOUT" : 15,
"PA3:LCD_B5" : 14,
"PA3:OTG_HS_ULPI_D0" : 10,
"PA3:TIM2_CH4" : 1,
"PA3:TIM5_CH4" : 2,
"PA3:TIM9_CH2" : 3,
"PA3:USART2_RX" : 7,
"PA4:DCMI_HSYNC" : 13,
"PA4:EVENTOUT" : 15,
"PA4:I2S1_WS" : 5,
"PA4:I2S3_WS" : 6,
"PA4:LCD_VSYNC" : 14,
"PA4:OTG_HS_SOF" : 12,
"PA4:SPI1_NSS" : 5,
"PA4:SPI3_NSS" : 6,
"PA4:USART2_CK" : 7,
"PA5:EVENTOUT" : 15,
"PA5:I2S1_CK" : 5,
"PA5:LCD_R4" : 14,
"PA5:OTG_HS_ULPI_CK" : 10,
"PA5:SPI1_SCK" : 5,
"PA5:TIM2_CH1" : 1,
"PA5:TIM2_ETR" : 1,
"PA5:TIM8_CH1N" : 3,
"PA6:DCMI_PIXCLK" : 13,
"PA6:EVENTOUT" : 15,
"PA6:LCD_G2" : 14,
"PA6:SPI1_MISO" : 5,
"PA6:TIM13_CH1" : 9,
"PA6:TIM1_BKIN" : 1,
"PA6:TIM3_CH1" : 2,
"PA6:TIM8_BKIN" : 3,
"PA7:ETH_MII_RX_DV" : 11,
"PA7:ETH_RMII_CRS_DV" : 11,
"PA7:EVENTOUT" : 15,
"PA7:FMC_SDNWE" : 12,
"PA7:I2S1_SD" : 5,
"PA7:SPI1_MOSI" : 5,
"PA7:TIM14_CH1" : 9,
"PA7:TIM1_CH1N" : 1,
"PA7:TIM3_CH2" : 2,
"PA7:TIM8_CH1N" : 3,
"PA8:EVENTOUT" : 15,
"PA8:I2C3_SCL" : 4,
"PA8:LCD_R6" : 14,
"PA8:MCO1" : 0,
"PA8:OTG_FS_SOF" : 10,
"PA8:TIM1_CH1" : 1,
"PA8:TIM8_BKIN2" : 3,
"PA8:USART1_CK" : 7,
"PA9:DCMI_D0" : 13,
"PA9:EVENTOUT" : 15,
"PA9:I2C3_SMBA" : 4,
"PA9:I2S2_CK" : 5,
"PA9:SPI2_SCK" : 5,
"PA9:TIM1_CH2" : 1,
"PA9:USART1_TX" : 7,
"PA10:DCMI_D1" : 13,
"PA10:EVENTOUT" : 15,
"PA10:OTG_FS_ID" : 10,
"PA10:TIM1_CH3" : 1,
"PA10:USART1_RX" : 7,
"PA11:CAN1_RX" : 9,
"PA11:EVENTOUT" : 15,
"PA11:LCD_R4" : 14,
"PA11:OTG_FS_DM" : 10,
"PA11:TIM1_CH4" : 1,
"PA11:USART1_CTS" : 7,
"PA12:CAN1_TX" : 9,
"PA12:EVENTOUT" : 15,
"PA12:LCD_R5" : 14,
"PA12:OTG_FS_DP" : 10,
"PA12:SAI2_FS_B" : 8,
"PA12:TIM1_ETR" : 1,
"PA12:USART1_RTS" : 7,
"PA13:EVENTOUT" : 15,
"PA13:JTMS-SWDIO" : 0,
"PA14:EVENTOUT" : 15,
"PA14:JTCK-SWCLK" : 0,
"PA15:EVENTOUT" : 15,
"PA15:HDMI-CEC" : 4,
"PA15:I2S1_WS" : 5,
"PA15:I2S3_WS" : 6,
"PA15:JTDI" : 0,
"PA15:SPI1_NSS" : 5,
"PA15:SPI3_NSS" : 6,
"PA15:TIM2_CH1" : 1,
"PA15:TIM2_ETR" : 1,
"PA15:UART4_RTS" : 8,
"PB0:ETH_MII_RXD2" : 11,
"PB0:EVENTOUT" : 15,
"PB0:LCD_R3" : 9,
"PB0:OTG_HS_ULPI_D1" : 10,
"PB0:TIM1_CH2N" : 1,
"PB0:TIM3_CH3" : 2,
"PB0:TIM8_CH2N" : 3,
"PB0:UART4_CTS" : 8,
"PB1:ETH_MII_RXD3" : 11,
"PB1:EVENTOUT" : 15,
"PB1:LCD_R6" : 9,
"PB1:OTG_HS_ULPI_D2" : 10,
"PB1:TIM1_CH3N" : 1,
"PB1:TIM3_CH4" : 2,
"PB1:TIM8_CH3N" : 3,
"PB2:EVENTOUT" : 15,
"PB2:I2S3_SD" : 7,
"PB2:QUADSPI_CLK" : 9,
"PB2:SAI1_SD_A" : 6,
"PB2:SPI3_MOSI" : 7,
"PB3:EVENTOUT" : 15,
"PB3:I2S1_CK" : 5,
"PB3:I2S3_CK" : 6,
"PB3:JTDO" : 0,
"PB3:SPI1_SCK" : 5,
"PB3:SPI3_SCK" : 6,
"PB3:TIM2_CH2" : 1,
"PB3:TRACESWO" : 0,
"PB4:EVENTOUT" : 15,
"PB4:I2S2_WS" : 7,
"PB4:NJTRST" : 0,
"PB4:SPI1_MISO" : 5,
"PB4:SPI2_NSS" : 7,
"PB4:SPI3_MISO" : 6,
"PB4:TIM3_CH1" : 2,
"PB5:CAN2_RX" : 9,
"PB5:DCMI_D10" : 13,
"PB5:ETH_PPS_OUT" : 11,
"PB5:EVENTOUT" : 15,
"PB5:FMC_SDCKE1" : 12,
"PB5:I2C1_SMBA" : 4,
"PB5:I2S1_SD" : 5,
"PB5:I2S3_SD" : 6,
"PB5:OTG_HS_ULPI_D7" : 10,
"PB5:SPI1_MOSI" : 5,
"PB5:SPI3_MOSI" : 6,
"PB5:TIM3_CH2" : 2,
"PB6:CAN2_TX" : 9,
"PB6:DCMI_D5" : 13,
"PB6:EVENTOUT" : 15,
"PB6:FMC_SDNE1" : 12,
"PB6:HDMI-CEC" : 3,
"PB6:I2C1_SCL" : 4,
"PB6:QUADSPI_BK1_NCS" : 10,
"PB6:TIM4_CH1" : 2,
"PB6:USART1_TX" : 7,
"PB7:DCMI_VSYNC" : 13,
"PB7:EVENTOUT" : 15,
"PB7:FMC_NL" : 12,
"PB7:I2C1_SDA" : 4,
"PB7:TIM4_CH2" : 2,
"PB7:USART1_RX" : 7,
"PB8:CAN1_RX" : 9,
"PB8:DCMI_D6" : 13,
"PB8:ETH_MII_TXD3" : 11,
"PB8:EVENTOUT" : 15,
"PB8:I2C1_SCL" : 4,
"PB8:LCD_B6" : 14,
"PB8:SDMMC1_D4" : 12,
"PB8:TIM10_CH1" : 3,
"PB8:TIM4_CH3" : 2,
"PB9:CAN1_TX" : 9,
"PB9:DCMI_D7" : 13,
"PB9:EVENTOUT" : 15,
"PB9:I2C1_SDA" : 4,
"PB9:I2S2_WS" : 5,
"PB9:LCD_B7" : 14,
"PB9:SDMMC1_D5" : 12,
"PB9:SPI2_NSS" : 5,
"PB9:TIM11_CH1" : 3,
"PB9:TIM4_CH4" : 2,
"PB10:ETH_MII_RX_ER" : 11,
"PB10:EVENTOUT" : 15,
"PB10:I2C2_SCL" : 4,
"PB10:I2S2_CK" : 5,
"PB10:LCD_G4" : 14,
"PB10:OTG_HS_ULPI_D3" : 10,
"PB10:SPI2_SCK" : 5,
"PB10:TIM2_CH3" : 1,
"PB10:USART3_TX" : 7,
"PB11:ETH_MII_TX_EN" : 11,
"PB11:ETH_RMII_TX_EN" : 11,
"PB11:EVENTOUT" : 15,
"PB11:I2C2_SDA" : 4,
"PB11:LCD_G5" : 14,
"PB11:OTG_HS_ULPI_D4" : 10,
"PB11:TIM2_CH4" : 1,
"PB11:USART3_RX" : 7,
"PB12:CAN2_RX" : 9,
"PB12:ETH_MII_TXD0" : 11,
"PB12:ETH_RMII_TXD0" : 11,
"PB12:EVENTOUT" : 15,
"PB12:I2C2_SMBA" : 4,
"PB12:I2S2_WS" : 5,
"PB12:OTG_HS_ID" : 12,
"PB12:OTG_HS_ULPI_D5" : 10,
"PB12:SPI2_NSS" : 5,
"PB12:TIM1_BKIN" : 1,
"PB12:USART3_CK" : 7,
"PB13:CAN2_TX" : 9,
"PB13:ETH_MII_TXD1" : 11,
"PB13:ETH_RMII_TXD1" : 11,
"PB13:EVENTOUT" : 15,
"PB13:I2S2_CK" : 5,
"PB13:OTG_HS_ULPI_D6" : 10,
"PB13:SPI2_SCK" : 5,
"PB13:TIM1_CH1N" : 1,
"PB13:USART3_CTS" : 7,
"PB14:EVENTOUT" : 15,
"PB14:OTG_HS_DM" : 12,
"PB14:SPI2_MISO" : 5,
"PB14:TIM12_CH1" : 9,
"PB14:TIM1_CH2N" : 1,
"PB14:TIM8_CH2N" : 3,
"PB14:USART3_RTS" : 7,
"PB15:EVENTOUT" : 15,
"PB15:I2S2_SD" : 5,
"PB15:OTG_HS_DP" : 12,
"PB15:RTC_REFIN" : 0,
"PB15:SPI2_MOSI" : 5,
"PB15:TIM12_CH2" : 9,
"PB15:TIM1_CH3N" : 1,
"PB15:TIM8_CH3N" : 3,
"PC0:EVENTOUT" : 15,
"PC0:FMC_SDNWE" : 12,
"PC0:LCD_R5" : 14,
"PC0:OTG_HS_ULPI_STP" : 10,
"PC0:SAI2_FS_B" : 8,
"PC1:ETH_MDC" : 11,
"PC1:EVENTOUT" : 15,
"PC1:I2S2_SD" : 5,
"PC1:SAI1_SD_A" : 6,
"PC1:SPI2_MOSI" : 5,
"PC1:TRACED0" : 0,
"PC2:ETH_MII_TXD2" : 11,
"PC2:EVENTOUT" : 15,
"PC2:FMC_SDNE0" : 12,
"PC2:OTG_HS_ULPI_DIR" : 10,
"PC2:SPI2_MISO" : 5,
"PC3:ETH_MII_TX_CLK" : 11,
"PC3:EVENTOUT" : 15,
"PC3:FMC_SDCKE0" : 12,
"PC3:I2S2_SD" : 5,
"PC3:OTG_HS_ULPI_NXT" : 10,
"PC3:SPI2_MOSI" : 5,
"PC4:ETH_MII_RXD0" : 11,
"PC4:ETH_RMII_RXD0" : 11,
"PC4:EVENTOUT" : 15,
"PC4:FMC_SDNE0" : 12,
"PC4:I2S1_MCK" : 5,
"PC4:SPDIFRX_IN2" : 8,
"PC5:ETH_MII_RXD1" : 11,
"PC5:ETH_RMII_RXD1" : 11,
"PC5:EVENTOUT" : 15,
"PC5:FMC_SDCKE0" : 12,
"PC5:SPDIFRX_IN3" : 8,
"PC6:DCMI_D0" : 13,
"PC6:EVENTOUT" : 15,
"PC6:I2S2_MCK" : 5,
"PC6:LCD_HSYNC" : 14,
"PC6:SDMMC1_D6" : 12,
"PC6:TIM3_CH1" : 2,
"PC6:TIM8_CH1" : 3,
"PC6:USART6_TX" : 8,
"PC7:DCMI_D1" : 13,
"PC7:EVENTOUT" : 15,
"PC7:I2S3_MCK" : 6,
"PC7:LCD_G6" : 14,
"PC7:SDMMC1_D7" : 12,
"PC7:TIM3_CH2" : 2,
"PC7:TIM8_CH2" : 3,
"PC7:USART6_RX" : 8,
"PC8:DCMI_D2" : 13,
"PC8:EVENTOUT" : 15,
"PC8:SDMMC1_D0" : 12,
"PC8:TIM3_CH3" : 2,
"PC8:TIM8_CH3" : 3,
"PC8:TRACED1" : 0,
"PC8:UART5_RTS" : 7,
"PC8:USART6_CK" : 8,
"PC9:DCMI_D3" : 13,
"PC9:EVENTOUT" : 15,
"PC9:I2C3_SDA" : 4,
"PC9:I2S_CKIN" : 5,
"PC9:MCO2" : 0,
"PC9:QUADSPI_BK1_IO0" : 9,
"PC9:SDMMC1_D1" : 12,
"PC9:TIM3_CH4" : 2,
"PC9:TIM8_CH4" : 3,
"PC9:UART5_CTS" : 7,
"PC10:DCMI_D8" : 13,
"PC10:EVENTOUT" : 15,
"PC10:I2S3_CK" : 6,
"PC10:LCD_R2" : 14,
"PC10:QUADSPI_BK1_IO1" : 9,
"PC10:SDMMC1_D2" : 12,
"PC10:SPI3_SCK" : 6,
"PC10:UART4_TX" : 8,
"PC10:USART3_TX" : 7,
"PC11:DCMI_D4" : 13,
"PC11:EVENTOUT" : 15,
"PC11:QUADSPI_BK2_NCS" : 9,
"PC11:SDMMC1_D3" : 12,
"PC11:SPI3_MISO" : 6,
"PC11:UART4_RX" : 8,
"PC11:USART3_RX" : 7,
"PC12:DCMI_D9" : 13,
"PC12:EVENTOUT" : 15,
"PC12:I2S3_SD" : 6,
"PC12:SDMMC1_CK" : 12,
"PC12:SPI3_MOSI" : 6,
"PC12:TRACED3" : 0,
"PC12:UART5_TX" : 8,
"PC12:USART3_CK" : 7,
"PC13:EVENTOUT" : 15,
"PC14:EVENTOUT" : 15,
"PC15:EVENTOUT" : 15,
"PD0:CAN1_RX" : 9,
"PD0:EVENTOUT" : 15,
"PD0:FMC_D2" : 12,
"PD1:CAN1_TX" : 9,
"PD1:EVENTOUT" : 15,
"PD1:FMC_D3" : 12,
"PD2:DCMI_D11" : 13,
"PD2:EVENTOUT" : 15,
"PD2:SDMMC1_CMD" : 12,
"PD2:TIM3_ETR" : 2,
"PD2:TRACED2" : 0,
"PD2:UART5_RX" : 8,
"PD3:DCMI_D5" : 13,
"PD3:EVENTOUT" : 15,
"PD3:FMC_CLK" : 12,
"PD3:I2S2_CK" : 5,
"PD3:LCD_G7" : 14,
"PD3:SPI2_SCK" : 5,
"PD3:USART2_CTS" : 7,
"PD4:EVENTOUT" : 15,
"PD4:FMC_NOE" : 12,
"PD4:USART2_RTS" : 7,
"PD5:EVENTOUT" : 15,
"PD5:FMC_NWE" : 12,
"PD5:USART2_TX" : 7,
"PD6:DCMI_D10" : 13,
"PD6:EVENTOUT" : 15,
"PD6:FMC_NWAIT" : 12,
"PD6:I2S3_SD" : 5,
"PD6:LCD_B2" : 14,
"PD6:SAI1_SD_A" : 6,
"PD6:SPI3_MOSI" : 5,
"PD6:USART2_RX" : 7,
"PD7:EVENTOUT" : 15,
"PD7:FMC_NE1" : 12,
"PD7:SPDIFRX_IN0" : 8,
"PD7:USART2_CK" : 7,
"PD8:EVENTOUT" : 15,
"PD8:FMC_D13" : 12,
"PD8:SPDIFRX_IN1" : 8,
"PD8:USART3_TX" : 7,
"PD9:EVENTOUT" : 15,
"PD9:FMC_D14" : 12,
"PD9:USART3_RX" : 7,
"PD10:EVENTOUT" : 15,
"PD10:FMC_D15" : 12,
"PD10:LCD_B3" : 14,
"PD10:USART3_CK" : 7,
"PD11:EVENTOUT" : 15,
"PD11:FMC_A16" : 12,
"PD11:FMC_CLE" : 12,
"PD11:I2C4_SMBA" : 4,
"PD11:QUADSPI_BK1_IO0" : 9,
"PD11:SAI2_SD_A" : 10,
"PD11:USART3_CTS" : 7,
"PD12:EVENTOUT" : 15,
"PD12:FMC_A17" : 12,
"PD12:FMC_ALE" : 12,
"PD12:I2C4_SCL" : 4,
"PD12:LPTIM1_IN1" : 3,
"PD12:QUADSPI_BK1_IO1" : 9,
"PD12:SAI2_FS_A" : 10,
"PD12:TIM4_CH1" : 2,
"PD12:USART3_RTS" : 7,
"PD13:EVENTOUT" : 15,
"PD13:FMC_A18" : 12,
"PD13:I2C4_SDA" : 4,
"PD13:LPTIM1_OUT" : 3,
"PD13:QUADSPI_BK1_IO3" : 9,
"PD13:SAI2_SCK_A" : 10,
"PD13:TIM4_CH2" : 2,
"PD14:EVENTOUT" : 15,
"PD14:FMC_D0" : 12,
"PD14:TIM4_CH3" : 2,
"PD14:UART8_CTS" : 8,
"PD15:EVENTOUT" : 15,
"PD15:FMC_D1" : 12,
"PD15:TIM4_CH4" : 2,
"PD15:UART8_RTS" : 8,
"PE0:DCMI_D2" : 13,
"PE0:EVENTOUT" : 15,
"PE0:FMC_NBL0" : 12,
"PE0:LPTIM1_ETR" : 3,
"PE0:SAI2_MCK_A" : 10,
"PE0:TIM4_ETR" : 2,
"PE0:UART8_RX" : 8,
"PE1:DCMI_D3" : 13,
"PE1:EVENTOUT" : 15,
"PE1:FMC_NBL1" : 12,
"PE1:LPTIM1_IN2" : 3,
"PE1:UART8_TX" : 8,
"PE2:ETH_MII_TXD3" : 11,
"PE2:EVENTOUT" : 15,
"PE2:FMC_A23" : 12,
"PE2:QUADSPI_BK1_IO2" : 9,
"PE2:SAI1_MCLK_A" : 6,
"PE2:SPI4_SCK" : 5,
"PE2:TRACECLK" : 0,
"PE3:EVENTOUT" : 15,
"PE3:FMC_A19" : 12,
"PE3:SAI1_SD_B" : 6,
"PE3:TRACED0" : 0,
"PE4:DCMI_D4" : 13,
"PE4:EVENTOUT" : 15,
"PE4:FMC_A20" : 12,
"PE4:LCD_B0" : 14,
"PE4:SAI1_FS_A" : 6,
"PE4:SPI4_NSS" : 5,
"PE4:TRACED1" : 0,
"PE5:DCMI_D6" : 13,
"PE5:EVENTOUT" : 15,
"PE5:FMC_A21" : 12,
"PE5:LCD_G0" : 14,
"PE5:SAI1_SCK_A" : 6,
"PE5:SPI4_MISO" : 5,
"PE5:TIM9_CH1" : 3,
"PE5:TRACED2" : 0,
"PE6:DCMI_D7" : 13,
"PE6:EVENTOUT" : 15,
"PE6:FMC_A22" : 12,
"PE6:LCD_G1" : 14,
"PE6:SAI1_SD_A" : 6,
"PE6:SAI2_MCK_B" : 10,
"PE6:SPI4_MOSI" : 5,
"PE6:TIM1_BKIN2" : 1,
"PE6:TIM9_CH2" : 3,
"PE6:TRACED3" : 0,
"PE7:EVENTOUT" : 15,
"PE7:FMC_D4" : 12,
"PE7:QUADSPI_BK2_IO0" : 10,
"PE7:TIM1_ETR" : 1,
"PE7:UART7_RX" : 8,
"PE8:EVENTOUT" : 15,
"PE8:FMC_D5" : 12,
"PE8:QUADSPI_BK2_IO1" : 10,
"PE8:TIM1_CH1N" : 1,
"PE8:UART7_TX" : 8,
"PE9:EVENTOUT" : 15,
"PE9:FMC_D6" : 12,
"PE9:QUADSPI_BK2_IO2" : 10,
"PE9:TIM1_CH1" : 1,
"PE9:UART7_RTS" : 8,
"PE10:EVENTOUT" : 15,
"PE10:FMC_D7" : 12,
"PE10:QUADSPI_BK2_IO3" : 10,
"PE10:TIM1_CH2N" : 1,
"PE10:UART7_CTS" : 8,
"PE11:EVENTOUT" : 15,
"PE11:FMC_D8" : 12,
"PE11:LCD_G3" : 14,
"PE11:SAI2_SD_B" : 10,
"PE11:SPI4_NSS" : 5,
"PE11:TIM1_CH2" : 1,
"PE12:EVENTOUT" : 15,
"PE12:FMC_D9" : 12,
"PE12:LCD_B4" : 14,
"PE12:SAI2_SCK_B" : 10,
"PE12:SPI4_SCK" : 5,
"PE12:TIM1_CH3N" : 1,
"PE13:EVENTOUT" : 15,
"PE13:FMC_D10" : 12,
"PE13:LCD_DE" : 14,
"PE13:SAI2_FS_B" : 10,
"PE13:SPI4_MISO" : 5,
"PE13:TIM1_CH3" : 1,
"PE14:EVENTOUT" : 15,
"PE14:FMC_D11" : 12,
"PE14:LCD_CLK" : 14,
"PE14:SAI2_MCK_B" : 10,
"PE14:SPI4_MOSI" : 5,
"PE14:TIM1_CH4" : 1,
"PE15:EVENTOUT" : 15,
"PE15:FMC_D12" : 12,
"PE15:LCD_R7" : 14,
"PE15:TIM1_BKIN" : 1,
"PF0:EVENTOUT" : 15,
"PF0:FMC_A0" : 12,
"PF0:I2C2_SDA" : 4,
"PF1:EVENTOUT" : 15,
"PF1:FMC_A1" : 12,
"PF1:I2C2_SCL" : 4,
"PF2:EVENTOUT" : 15,
"PF2:FMC_A2" : 12,
"PF2:I2C2_SMBA" : 4,
"PF3:EVENTOUT" : 15,
"PF3:FMC_A3" : 12,
"PF4:EVENTOUT" : 15,
"PF4:FMC_A4" : 12,
"PF5:EVENTOUT" : 15,
"PF5:FMC_A5" : 12,
"PF6:EVENTOUT" : 15,
"PF6:QUADSPI_BK1_IO3" : 9,
"PF6:SAI1_SD_B" : 6,
"PF6:SPI5_NSS" : 5,
"PF6:TIM10_CH1" : 3,
"PF6:UART7_RX" : 8,
"PF7:EVENTOUT" : 15,
"PF7:QUADSPI_BK1_IO2" : 9,
"PF7:SAI1_MCLK_B" : 6,
"PF7:SPI5_SCK" : 5,
"PF7:TIM11_CH1" : 3,
"PF7:UART7_TX" : 8,
"PF8:EVENTOUT" : 15,
"PF8:QUADSPI_BK1_IO0" : 10,
"PF8:SAI1_SCK_B" : 6,
"PF8:SPI5_MISO" : 5,
"PF8:TIM13_CH1" : 9,
"PF8:UART7_RTS" : 8,
"PF9:EVENTOUT" : 15,
"PF9:QUADSPI_BK1_IO1" : 10,
"PF9:SAI1_FS_B" : 6,
"PF9:SPI5_MOSI" : 5,
"PF9:TIM14_CH1" : 9,
"PF9:UART7_CTS" : 8,
"PF10:DCMI_D11" : 13,
"PF10:EVENTOUT" : 15,
"PF10:LCD_DE" : 14,
"PF11:DCMI_D12" : 13,
"PF11:EVENTOUT" : 15,
"PF11:FMC_SDNRAS" : 12,
"PF11:SAI2_SD_B" : 10,
"PF11:SPI5_MOSI" : 5,
"PF12:EVENTOUT" : 15,
"PF12:FMC_A6" : 12,
"PF13:EVENTOUT" : 15,
"PF13:FMC_A7" : 12,
"PF13:I2C4_SMBA" : 4,
"PF14:EVENTOUT" : 15,
"PF14:FMC_A8" : 12,
"PF14:I2C4_SCL" : 4,
"PF15:EVENTOUT" : 15,
"PF15:FMC_A9" : 12,
"PF15:I2C4_SDA" : 4,
"PG0:EVENTOUT" : 15,
"PG0:FMC_A10" : 12,
"PG1:EVENTOUT" : 15,
"PG1:FMC_A11" : 12,
"PG2:EVENTOUT" : 15,
"PG2:FMC_A12" : 12,
"PG3:EVENTOUT" : 15,
"PG3:FMC_A13" : 12,
"PG4:EVENTOUT" : 15,
"PG4:FMC_A14" : 12,
"PG4:FMC_BA0" : 12,
"PG5:EVENTOUT" : 15,
"PG5:FMC_A15" : 12,
"PG5:FMC_BA1" : 12,
"PG6:DCMI_D12" : 13,
"PG6:EVENTOUT" : 15,
"PG6:LCD_R7" : 14,
"PG7:DCMI_D13" : 13,
"PG7:EVENTOUT" : 15,
"PG7:FMC_INT" : 12,
"PG7:LCD_CLK" : 14,
"PG7:USART6_CK" : 8,
"PG8:ETH_PPS_OUT" : 11,
"PG8:EVENTOUT" : 15,
"PG8:FMC_SDCLK" : 12,
"PG8:SPDIFRX_IN2" : 7,
"PG8:SPI6_NSS" : 5,
"PG8:USART6_RTS" : 8,
"PG9:DCMI_VSYNC" : 13,
"PG9:EVENTOUT" : 15,
"PG9:FMC_NCE" : 12,
"PG9:FMC_NE2" : 12,
"PG9:QUADSPI_BK2_IO2" : 9,
"PG9:SAI2_FS_B" : 10,
"PG9:SPDIFRX_IN3" : 7,
"PG9:USART6_RX" : 8,
"PG10:DCMI_D2" : 13,
"PG10:EVENTOUT" : 15,
"PG10:FMC_NE3" : 12,
"PG10:LCD_B2" : 14,
"PG10:LCD_G3" : 9,
"PG10:SAI2_SD_B" : 10,
"PG11:DCMI_D3" : 13,
"PG11:ETH_MII_TX_EN" : 11,
"PG11:ETH_RMII_TX_EN" : 11,
"PG11:EVENTOUT" : 15,
"PG11:LCD_B3" : 14,
"PG11:SPDIFRX_IN0" : 7,
"PG12:EVENTOUT" : 15,
"PG12:FMC_NE4" : 12,
"PG12:LCD_B1" : 14,
"PG12:LCD_B4" : 9,
"PG12:LPTIM1_IN1" : 3,
"PG12:SPDIFRX_IN1" : 7,
"PG12:SPI6_MISO" : 5,
"PG12:USART6_RTS" : 8,
"PG13:ETH_MII_TXD0" : 11,
"PG13:ETH_RMII_TXD0" : 11,
"PG13:EVENTOUT" : 15,
"PG13:FMC_A24" : 12,
"PG13:LCD_R0" : 14,
"PG13:LPTIM1_OUT" : 3,
"PG13:SPI6_SCK" : 5,
"PG13:TRACED0" : 0,
"PG13:USART6_CTS" : 8,
"PG14:ETH_MII_TXD1" : 11,
"PG14:ETH_RMII_TXD1" : 11,
"PG14:EVENTOUT" : 15,
"PG14:FMC_A25" : 12,
"PG14:LCD_B0" : 14,
"PG14:LPTIM1_ETR" : 3,
"PG14:QUADSPI_BK2_IO3" : 9,
"PG14:SPI6_MOSI" : 5,
"PG14:TRACED1" : 0,
"PG14:USART6_TX" : 8,
"PG15:DCMI_D13" : 13,
"PG15:EVENTOUT" : 15,
"PG15:FMC_SDNCAS" : 12,
"PG15:USART6_CTS" : 8,
"PH0:EVENTOUT" : 15,
"PH1:EVENTOUT" : 15,
"PH2:ETH_MII_CRS" : 11,
"PH2:EVENTOUT" : 15,
"PH2:FMC_SDCKE0" : 12,
"PH2:LCD_R0" : 14,
"PH2:LPTIM1_IN2" : 3,
"PH2:QUADSPI_BK2_IO0" : 9,
"PH2:SAI2_SCK_B" : 10,
"PH3:ETH_MII_COL" : 11,
"PH3:EVENTOUT" : 15,
"PH3:FMC_SDNE0" : 12,
"PH3:LCD_R1" : 14,
"PH3:QUADSPI_BK2_IO1" : 9,
"PH3:SAI2_MCK_B" : 10,
"PH4:EVENTOUT" : 15,
"PH4:I2C2_SCL" : 4,
"PH4:OTG_HS_ULPI_NXT" : 10,
"PH5:EVENTOUT" : 15,
"PH5:FMC_SDNWE" : 12,
"PH5:I2C2_SDA" : 4,
"PH5:SPI5_NSS" : 5,
"PH6:DCMI_D8" : 13,
"PH6:ETH_MII_RXD2" : 11,
"PH6:EVENTOUT" : 15,
"PH6:FMC_SDNE1" : 12,
"PH6:I2C2_SMBA" : 4,
"PH6:SPI5_SCK" : 5,
"PH6:TIM12_CH1" : 9,
"PH7:DCMI_D9" : 13,
"PH7:ETH_MII_RXD3" : 11,
"PH7:EVENTOUT" : 15,
"PH7:FMC_SDCKE1" : 12,
"PH7:I2C3_SCL" : 4,
"PH7:SPI5_MISO" : 5,
"PH8:DCMI_HSYNC" : 13,
"PH8:EVENTOUT" : 15,
"PH8:FMC_D16" : 12,
"PH8:I2C3_SDA" : 4,
"PH8:LCD_R2" : 14,
"PH9:DCMI_D0" : 13,
"PH9:EVENTOUT" : 15,
"PH9:FMC_D17" : 12,
"PH9:I2C3_SMBA" : 4,
"PH9:LCD_R3" : 14,
"PH9:TIM12_CH2" : 9,
"PH10:DCMI_D1" : 13,
"PH10:EVENTOUT" : 15,
"PH10:FMC_D18" : 12,
"PH10:I2C4_SMBA" : 4,
"PH10:LCD_R4" : 14,
"PH10:TIM5_CH1" : 2,
"PH11:DCMI_D2" : 13,
"PH11:EVENTOUT" : 15,
"PH11:FMC_D19" : 12,
"PH11:I2C4_SCL" : 4,
"PH11:LCD_R5" : 14,
"PH11:TIM5_CH2" : 2,
"PH12:DCMI_D3" : 13,
"PH12:EVENTOUT" : 15,
"PH12:FMC_D20" : 12,
"PH12:I2C4_SDA" : 4,
"PH12:LCD_R6" : 14,
"PH12:TIM5_CH3" : 2,
"PH13:CAN1_TX" : 9,
"PH13:EVENTOUT" : 15,
"PH13:FMC_D21" : 12,
"PH13:LCD_G2" : 14,
"PH13:TIM8_CH1N" : 3,
"PH14:DCMI_D4" : 13,
"PH14:EVENTOUT" : 15,
"PH14:FMC_D22" : 12,
"PH14:LCD_G3" : 14,
"PH14:TIM8_CH2N" : 3,
"PH15:DCMI_D11" : 13,
"PH15:EVENTOUT" : 15,
"PH15:FMC_D23" : 12,
"PH15:LCD_G4" : 14,
"PH15:TIM8_CH3N" : 3,
}
ADC1_map = {
# format is PIN : ADC1_CHAN
# extracted from tabula-addfunc-F745.csv
"PA1" : 1,
"PA2" : 2,
"PA3" : 3,
"PA4" : 4,
"PA5" : 5,
"PA6" : 6,
"PA7" : 7,
"PB0" : 8,
"PB1" : 9,
"PC0" : 10,
"PC1" : 11,
"PC2" : 12,
"PC3" : 13,
"PC4" : 14,
"PC5" : 15,
}
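# Example lookups for the tables above (values taken directly from the entries):
#   AltFunction_map["PA8:TIM1_CH1"] -> 1    (TIM1_CH1 on PA8 is alternate function 1)
#   ADC1_map["PC0"]                 -> 10   (PC0 is ADC1 channel 10)
# The DMA entries near the top of this file appear to be (controller, stream, channel)
# tuples, e.g. "SPI6_RX" : [(2,6,1)] would mean DMA2, stream 6, channel 1.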
if __name__ == '__main__':
# test clock settings
def check_range(name, value, minv, maxv):
'''check range of a value'''
if value < minv:
print("Error: %s=%u is under min of %u" % (name, value, minv))
if value > maxv:
print("Error: %s=%u is over max of %u" % (name, value, maxv))
import sys
STM32_LSECLK_MIN = 32768
STM32_PLLIN_MAX = 2100000
STM32_PLLIN_MIN = 950000
STM32_PLLVCO_MAX = 432000000
STM32_PLLVCO_MIN = 192000000
STM32_PLLM_VALUE = 8
STM32_PLLN_VALUE = 432
STM32_PLLP_VALUE = 2
STM32_PLLQ_VALUE = 9
OSCILLATOR_HZ = int(sys.argv[1])
STM32_HSECLK = OSCILLATOR_HZ
STM32_PLLCLKIN = (STM32_HSECLK / STM32_PLLM_VALUE)
STM32_PLLVCO = (STM32_PLLCLKIN * STM32_PLLN_VALUE)
print('STM32_PLLCLKIN=%u' % STM32_PLLCLKIN)
print('STM32_PLLVCO=%u' % STM32_PLLVCO)
check_range('STM32_PLLCLKIN', STM32_PLLCLKIN, STM32_PLLIN_MIN, STM32_PLLIN_MAX)
check_range('STM32_PLLVCO', STM32_PLLVCO, STM32_PLLVCO_MIN, STM32_PLLVCO_MAX)
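# Worked example of the checks above, assuming an 8 MHz crystal is passed as argv[1]:
#   STM32_PLLCLKIN = 8000000 / 8   = 1000000   (inside the 950 kHz .. 2.1 MHz PLL input window)
#   STM32_PLLVCO   = 1000000 * 432 = 432000000 (exactly at the 432 MHz VCO maximum, so no error prints)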
|
gnowledge/ISON
|
refs/heads/master
|
objectapp/urls/sitemap.py
|
3
|
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Urls for the Objectapp sitemap"""
from django.conf.urls.defaults import url
from django.conf.urls.defaults import patterns
urlpatterns = patterns('objectapp.views.sitemap',
url(r'^$', 'sitemap',
{'template': 'objectapp/sitemap.html'},
name='objectapp_sitemap'),
)
|
ramitalat/odoo
|
refs/heads/8.0
|
addons/l10n_fr_rib/bank.py
|
335
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class res_partner_bank(osv.osv):
"""Add fields and behavior for French RIB"""
_inherit = "res.partner.bank"
def _check_key(self, cr, uid, ids):
"""Check the RIB key"""
for bank_acc in self.browse(cr, uid, ids):
# Ignore the accounts of type other than rib
if bank_acc.state != 'rib':
continue
# Fail if the needed values are empty or too short
if (not bank_acc.bank_code
or len(bank_acc.bank_code) != 5
or not bank_acc.office or len(bank_acc.office) != 5
or not bank_acc.rib_acc_number or len(bank_acc.rib_acc_number) != 11
or not bank_acc.key or len(bank_acc.key) != 2):
return False
# Get the rib data (without the key)
rib = "%s%s%s" % (bank_acc.bank_code, bank_acc.office, bank_acc.rib_acc_number)
# Translate letters into numbers according to a specific table
# (notice how s -> 2)
table = dict((ord(a), b) for a, b in zip(
u'abcdefghijklmnopqrstuvwxyz', u'12345678912345678923456789'))
rib = rib.lower().translate(table)
# compute the key
key = 97 - (100 * int(rib)) % 97
if int(bank_acc.key) != key:
raise osv.except_osv(_('Error!'),
_("The RIB key %s does not correspond to the other codes: %s %s %s.") % \
(bank_acc.key, bank_acc.bank_code, bank_acc.office, bank_acc.rib_acc_number) )
if bank_acc.acc_number:
if not self.is_iban_valid(cr, uid, bank_acc.acc_number):
raise osv.except_osv(_('Error!'), _("The IBAN %s is not valid.") % bank_acc.acc_number)
return True
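# Worked example of the key computation above (entirely synthetic values):
#   bank_code="00000", office="00000", rib_acc_number="00000000001"
#   rib = "000000000000000000001" -> int(rib) = 1
#   key = 97 - (100 * 1) % 97 = 97 - 3 = 94
# Letters in the account number are first mapped a..i -> 1..9, j..r -> 1..9 and
# s..z -> 2..9 (hence the "s -> 2" note above) before the modulo is taken.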
def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
"""Change the bank code"""
result = super(res_partner_bank, self).onchange_bank_id(cr, uid, ids, bank_id,
context=context)
if bank_id:
value = result.setdefault('value', {})
bank = self.pool.get('res.bank').browse(cr, uid, bank_id,
context=context)
value['bank_code'] = bank.rib_code
return result
_columns = {
'acc_number': fields.char('Account Number', size=64, required=False),
'rib_acc_number': fields.char('RIB account number', size=11, readonly=True,),
'bank_code': fields.char('Bank Code', size=64, readonly=True,),
'office': fields.char('Office Code', size=5, readonly=True,),
'key': fields.char('Key', size=2, readonly=True,
help="The key is a number allowing to check the "
"correctness of the other codes."),
}
_constraints = [(_check_key, 'The RIB and/or IBAN is not valid', ['rib_acc_number', 'bank_code', 'office', 'key'])]
class res_bank(osv.osv):
"""Add the bank code to make it easier to enter RIB data"""
_inherit = 'res.bank'
def name_search(self, cr, user, name, args=None, operator='ilike',
context=None, limit=80):
"""Search by bank code in addition to the standard search"""
# Get the standard results
results = super(res_bank, self).name_search(cr, user,
name, args=args, operator=operator, context=context, limit=limit)
# Get additional results using the RIB code
ids = self.search(cr, user, [('rib_code', operator, name)],
limit=limit, context=context)
# Merge the results
results = list(set(results + self.name_get(cr, user, ids, context)))
return results
_columns = {
'rib_code': fields.char('RIB Bank Code'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
meredith-digops/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/s3_bucket.py
|
40
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: s3_bucket
short_description: Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
description:
- Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
force:
description:
- When trying to delete a bucket, delete all keys in the bucket first (an s3 bucket must be empty for a successful deletion)
required: false
default: no
choices: [ 'yes', 'no' ]
name:
description:
- Name of the s3 bucket
required: true
default: null
policy:
description:
- The JSON policy as a string.
required: false
default: null
s3_url:
description:
- S3 URL endpoint for usage with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS
default: null
aliases: [ S3_URL ]
ceph:
description:
- Enable API compatibility with Ceph. It takes into account the S3 API subset working
with Ceph in order to provide the same module behaviour where possible.
version_added: "2.2"
requester_pays:
description:
- With Requester Pays buckets, the requester instead of the bucket owner pays the cost
of the request and the data download from the bucket.
required: false
default: no
choices: [ 'yes', 'no' ]
state:
description:
- Create or remove the s3 bucket
required: false
default: present
choices: [ 'present', 'absent' ]
tags:
description:
- tags dict to apply to bucket
required: false
default: null
versioning:
description:
- Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
required: false
default: null
choices: [ 'yes', 'no' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a simple s3 bucket
- s3_bucket:
name: mys3bucket
# Create a simple s3 bucket on Ceph Rados Gateway
- s3_bucket:
name: mys3bucket
s3_url: http://your-ceph-rados-gateway-server.xxx
ceph: true
# Remove an s3 bucket and any keys it contains
- s3_bucket:
name: mys3bucket
state: absent
force: yes
# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
- s3_bucket:
name: mys3bucket
policy: "{{ lookup('file','policy.json') }}"
requester_pays: yes
versioning: yes
tags:
example: tag1
another: tag2
'''
import json
import os
import traceback
import xml.etree.ElementTree as ET
import ansible.module_utils.six.moves.urllib.parse as urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec
from ansible.module_utils.ec2 import sort_json_policy_dict
try:
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location, S3Connection
from boto.s3.tagging import Tags, TagSet
from boto.exception import BotoServerError, S3CreateError, S3ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_request_payment_status(bucket):
response = bucket.get_request_payment()
root = ET.fromstring(response)
for message in root.findall('.//{http://s3.amazonaws.com/doc/2006-03-01/}Payer'):
payer = message.text
return (payer != "BucketOwner")
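# The request-payment response parsed above is XML along these lines (illustrative
# sketch only, not a verbatim AWS response):
#   <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
#     <Payer>BucketOwner</Payer>
#   </RequestPaymentConfiguration>
# so this helper returns False for "BucketOwner" and True for "Requester".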
def create_tags_container(tags):
tag_set = TagSet()
tags_obj = Tags()
for key, val in tags.items():
tag_set.add_tag(key, val)
tags_obj.add_tag_set(tag_set)
return tags_obj
def _create_or_update_bucket(connection, module, location):
policy = module.params.get("policy")
name = module.params.get("name")
requester_pays = module.params.get("requester_pays")
tags = module.params.get("tags")
versioning = module.params.get("versioning")
changed = False
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
try:
bucket = connection.create_bucket(name, location=location)
changed = True
except S3CreateError as e:
module.fail_json(msg=e.message)
# Versioning
versioning_status = bucket.get_versioning_status()
if versioning is not None:
if versioning and versioning_status.get('Versioning') != "Enabled":
try:
bucket.configure_versioning(versioning)
changed = True
versioning_status = bucket.get_versioning_status()
except S3ResponseError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc())
elif not versioning and versioning_status.get('Versioning') == "Enabled":
try:
bucket.configure_versioning(versioning)
changed = True
versioning_status = bucket.get_versioning_status()
except S3ResponseError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc())
# Requester pays
requester_pays_status = get_request_payment_status(bucket)
if requester_pays_status != requester_pays:
if requester_pays:
payer='Requester'
else:
payer='BucketOwner'
bucket.set_request_payment(payer=payer)
changed = True
requester_pays_status = get_request_payment_status(bucket)
# Policy
try:
current_policy = json.loads(bucket.get_policy())
except S3ResponseError as e:
if e.error_code == "NoSuchBucketPolicy":
current_policy = {}
else:
module.fail_json(msg=e.message)
if policy is not None:
if isinstance(policy, basestring):
policy = json.loads(policy)
if not policy:
bucket.delete_policy()
# only show changed if there was already a policy
changed = bool(current_policy)
elif sort_json_policy_dict(current_policy) != sort_json_policy_dict(policy):
try:
bucket.set_policy(json.dumps(policy))
changed = True
current_policy = json.loads(bucket.get_policy())
except S3ResponseError as e:
module.fail_json(msg=e.message)
# Tags
try:
current_tags = bucket.get_tags()
except S3ResponseError as e:
if e.error_code == "NoSuchTagSet":
current_tags = None
else:
module.fail_json(msg=e.message)
if current_tags is None:
current_tags_dict = {}
else:
current_tags_dict = dict((t.key, t.value) for t in current_tags[0])
if tags is not None:
if current_tags_dict != tags:
try:
if tags:
bucket.set_tags(create_tags_container(tags))
else:
bucket.delete_tags()
current_tags_dict = tags
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed, name=bucket.name, versioning=versioning_status,
requester_pays=requester_pays_status, policy=current_policy, tags=current_tags_dict)
def _destroy_bucket(connection, module):
force = module.params.get("force")
name = module.params.get("name")
changed = False
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
if e.error_code != "NoSuchBucket":
module.fail_json(msg=e.message)
else:
# Bucket already absent
module.exit_json(changed=changed)
if force:
try:
# Empty the bucket
for key in bucket.list():
key.delete()
except BotoServerError as e:
module.fail_json(msg=e.message)
try:
bucket = connection.delete_bucket(name)
changed = True
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def _create_or_update_bucket_ceph(connection, module, location):
#TODO: add update
name = module.params.get("name")
changed = False
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
try:
bucket = connection.create_bucket(name, location=location)
changed = True
except S3CreateError as e:
module.fail_json(msg=e.message)
if bucket:
module.exit_json(changed=changed)
else:
module.fail_json(msg='Unable to create bucket, no error from the API')
def _destroy_bucket_ceph(connection, module):
_destroy_bucket(connection, module)
def create_or_update_bucket(connection, module, location, flavour='aws'):
if flavour == 'ceph':
_create_or_update_bucket_ceph(connection, module, location)
else:
_create_or_update_bucket(connection, module, location)
def destroy_bucket(connection, module, flavour='aws'):
if flavour == 'ceph':
_destroy_bucket_ceph(connection, module)
else:
_destroy_bucket(connection, module)
def is_fakes3(s3_url):
""" Return True if s3_url has scheme fakes3:// """
if s3_url is not None:
return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
else:
return False
def is_walrus(s3_url):
""" Return True if it's Walrus endpoint, not S3
We assume anything other than *.amazonaws.com is Walrus"""
if s3_url is not None:
o = urlparse.urlparse(s3_url)
return not o.hostname.endswith('amazonaws.com')
else:
return False
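# Quick sanity notes for the two endpoint helpers above (hypothetical URLs):
#   is_fakes3('fakes3://localhost:4567')     -> True   (scheme is fakes3)
#   is_fakes3('https://s3.amazonaws.com')    -> False
#   is_walrus('https://objects.example.org') -> True   (host does not end in amazonaws.com)
#   is_walrus('https://s3.amazonaws.com')    -> False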
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
force=dict(required=False, default='no', type='bool'),
policy=dict(required=False, default=None, type='json'),
name=dict(required=True, type='str'),
requester_pays=dict(default='no', type='bool'),
s3_url=dict(aliases=['S3_URL'], type='str'),
state=dict(default='present', type='str', choices=['present', 'absent']),
tags=dict(required=False, default=None, type='dict'),
versioning=dict(default=None, type='bool'),
ceph=dict(default='no', type='bool')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
s3_url = module.params.get('s3_url')
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
ceph = module.params.get('ceph')
if ceph and not s3_url:
module.fail_json(msg='ceph flavour requires s3_url')
flavour = 'aws'
# Look at s3_url and tweak connection settings
# if connecting to Walrus or fakes3
try:
if s3_url and ceph:
ceph = urlparse.urlparse(s3_url)
connection = boto.connect_s3(
host=ceph.hostname,
port=ceph.port,
is_secure=ceph.scheme == 'https',
calling_format=OrdinaryCallingFormat(),
**aws_connect_params
)
flavour = 'ceph'
elif is_fakes3(s3_url):
fakes3 = urlparse.urlparse(s3_url)
connection = S3Connection(
is_secure=fakes3.scheme == 'fakes3s',
host=fakes3.hostname,
port=fakes3.port,
calling_format=OrdinaryCallingFormat(),
**aws_connect_params
)
elif is_walrus(s3_url):
walrus = urlparse.urlparse(s3_url).hostname
connection = boto.connect_walrus(walrus, **aws_connect_params)
else:
connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
except Exception as e:
module.fail_json(msg='Failed to connect to S3: %s' % str(e))
if connection is None: # this should never happen
module.fail_json(msg ='Unknown error, failed to create s3 connection, no information from boto.')
state = module.params.get("state")
if state == 'present':
create_or_update_bucket(connection, module, location, flavour=flavour)
elif state == 'absent':
destroy_bucket(connection, module, flavour=flavour)
if __name__ == '__main__':
main()
|
elainenaomi/sciwonc-dataflow-examples
|
refs/heads/master
|
dissertation2017/Experiment 1B/instances/11_1_workflow_full_10files_secondary_w1_3sh_3rs_with_annot_with_proj_3s_hash/calculateratio_8/ConfigDB_Calc_TEInfo_8.py
|
36
|
HOST = "ip-172-31-29-102.us-west-2.compute.internal:27017,ip-172-31-29-103.us-west-2.compute.internal:27017,ip-172-31-29-104.us-west-2.compute.internal:27017,ip-172-31-29-105.us-west-2.compute.internal:27017,ip-172-31-29-101.us-west-2.compute.internal:27017,ip-172-31-29-106.us-west-2.compute.internal:27017,ip-172-31-29-107.us-west-2.compute.internal:27017,ip-172-31-29-108.us-west-2.compute.internal:27017,ip-172-31-29-109.us-west-2.compute.internal:27017"
PORT = ""
USER = ""
PASSWORD = ""
DATABASE = "google"
READ_PREFERENCE = "primary"
COLLECTION_INPUT = "task_events_info"
PREFIX_COLUMN = "g_"
ATTRIBUTES = ["event type", "standard deviation memory", "standard deviation cpu", "standard deviation ratio", "average memory", "average cpu", "average ratio"]
SORT = ["_id.filepath", "_id.numline"]
OPERATION_TYPE = "GROUP_BY_COLUMN"
COLUMN = "event type"
VALUE = ["0","1","2","3","4","5","6","7","8"]
INPUT_FILE = "task_events_info.dat"
|
geekboxzone/mmallow_external_protobuf
|
refs/heads/geekbox
|
python/google/protobuf/internal/api_implementation_default_test.py
|
73
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test that the api_implementation defaults are what we expect."""
import os
import sys
# Clear environment implementation settings before the google3 imports.
os.environ.pop('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', None)
os.environ.pop('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION', None)
# pylint: disable=g-import-not-at-top
from google.apputils import basetest
from google.protobuf.internal import api_implementation
class ApiImplementationDefaultTest(basetest.TestCase):
if sys.version_info.major <= 2:
def testThatPythonIsTheDefault(self):
"""If -DPYTHON_PROTO_*IMPL* was given at build time, this may fail."""
self.assertEqual('python', api_implementation.Type())
else:
def testThatCppApiV2IsTheDefault(self):
"""If -DPYTHON_PROTO_*IMPL* was given at build time, this may fail."""
self.assertEqual('cpp', api_implementation.Type())
self.assertEqual(2, api_implementation.Version())
if __name__ == '__main__':
basetest.main()
|
gioman/QGIS
|
refs/heads/master
|
tests/src/python/test_qgspointdisplacementrenderer.py
|
1
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgspointdisplacementrenderer.py
-----------------------------
Date : September 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'September 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtCore import QSize
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsPointDisplacementRenderer,
QgsFontUtils,
QgsUnitTypes,
QgsMapUnitScale,
QgsMarkerSymbol,
QgsSingleSymbolRenderer,
QgsPointClusterRenderer,
QgsMapSettings,
QgsProperty,
QgsReadWriteContext,
QgsSymbolLayer
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
# Convenience instances in case you may need them
# not used in this test
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsPointDisplacementRenderer(unittest.TestCase):
def setUp(self):
myShpFile = os.path.join(TEST_DATA_DIR, 'points.shp')
self.layer = QgsVectorLayer(myShpFile, 'Points', 'ogr')
QgsProject.instance().addMapLayer(self.layer)
self.renderer = QgsPointDisplacementRenderer()
sym1 = QgsMarkerSymbol.createSimple({'color': '#ff00ff', 'size': '3', 'outline_style': 'no'})
renderer = QgsSingleSymbolRenderer(sym1)
self.renderer.setEmbeddedRenderer(renderer)
self.renderer.setCircleRadiusAddition(2)
self.renderer.setCircleWidth(1)
self.renderer.setCircleColor(QColor(0, 0, 0))
self.renderer.setCenterSymbol(QgsMarkerSymbol.createSimple({'color': '#ffff00', 'size': '3', 'outline_style': 'no'}))
self.layer.setRenderer(self.renderer)
rendered_layers = [self.layer]
self.mapsettings = QgsMapSettings()
self.mapsettings.setOutputSize(QSize(400, 400))
self.mapsettings.setOutputDpi(96)
self.mapsettings.setExtent(QgsRectangle(-123, 18, -70, 52))
self.mapsettings.setLayers(rendered_layers)
def tearDown(self):
QgsProject.instance().removeAllMapLayers()
def _setProperties(self, r):
""" set properties for a renderer for testing with _checkProperties"""
r.setLabelAttributeName('name')
f = QgsFontUtils.getStandardTestFont('Bold Oblique', 14)
r.setLabelFont(f)
r.setMaxLabelScaleDenominator(50000)
r.setLabelColor(QColor(255, 0, 0))
r.setTolerance(5)
r.setToleranceUnit(QgsUnitTypes.RenderMapUnits)
r.setToleranceMapUnitScale(QgsMapUnitScale(5, 15))
r.setCircleWidth(15)
r.setCircleColor(QColor(0, 255, 0))
r.setCircleRadiusAddition(2.5)
r.setPlacement(QgsPointDisplacementRenderer.ConcentricRings)
m = QgsMarkerSymbol()
m.setColor(QColor(0, 255, 0))
r.setCenterSymbol(m)
sym1 = QgsMarkerSymbol.createSimple({'color': '#fdbf6f'})
renderer = QgsSingleSymbolRenderer(sym1)
r.setEmbeddedRenderer(renderer)
def _checkProperties(self, r):
""" test properties of renderer against expected"""
self.assertEqual(r.labelAttributeName(), 'name')
f = QgsFontUtils.getStandardTestFont('Bold Oblique', 14)
self.assertEqual(r.labelFont().styleName(), f.styleName())
self.assertEqual(r.maxLabelScaleDenominator(), 50000)
self.assertEqual(r.labelColor(), QColor(255, 0, 0))
self.assertEqual(r.tolerance(), 5)
self.assertEqual(r.toleranceUnit(), QgsUnitTypes.RenderMapUnits)
self.assertEqual(r.toleranceMapUnitScale(), QgsMapUnitScale(5, 15))
self.assertEqual(r.circleWidth(), 15)
self.assertEqual(r.circleColor(), QColor(0, 255, 0))
self.assertEqual(r.circleRadiusAddition(), 2.5)
self.assertEqual(r.placement(), QgsPointDisplacementRenderer.ConcentricRings)
self.assertEqual(r.centerSymbol().color(), QColor(0, 255, 0))
self.assertEqual(r.embeddedRenderer().symbol().color().name(), '#fdbf6f')
def testGettersSetters(self):
""" test getters and setters """
r = QgsPointDisplacementRenderer()
self._setProperties(r)
self._checkProperties(r)
def testClone(self):
""" test cloning renderer """
r = QgsPointDisplacementRenderer()
self._setProperties(r)
c = r.clone()
self._checkProperties(c)
def testSaveCreate(self):
""" test saving and recreating from XML """
r = QgsPointDisplacementRenderer()
self._setProperties(r)
doc = QDomDocument("testdoc")
elem = r.save(doc, QgsReadWriteContext())
c = QgsPointDisplacementRenderer.create(elem, QgsReadWriteContext())
self._checkProperties(c)
def testConvert(self):
""" test renderer conversion """
# same type, should clone
r = QgsPointDisplacementRenderer()
self._setProperties(r)
c = QgsPointDisplacementRenderer.convertFromRenderer(r)
self._checkProperties(c)
# test conversion from cluster renderer
r = QgsPointClusterRenderer()
r.setTolerance(5)
r.setToleranceUnit(QgsUnitTypes.RenderMapUnits)
r.setToleranceMapUnitScale(QgsMapUnitScale(5, 15))
m = QgsMarkerSymbol()
m.setColor(QColor(0, 255, 0))
r.setClusterSymbol(m)
sym1 = QgsMarkerSymbol.createSimple({'color': '#fdbf6f'})
renderer = QgsSingleSymbolRenderer(sym1)
r.setEmbeddedRenderer(renderer)
# want to keep as many settings as possible when converting between cluster and displacement renderer
d = QgsPointDisplacementRenderer.convertFromRenderer(r)
self.assertEqual(d.tolerance(), 5)
self.assertEqual(d.toleranceUnit(), QgsUnitTypes.RenderMapUnits)
self.assertEqual(d.toleranceMapUnitScale(), QgsMapUnitScale(5, 15))
self.assertEqual(d.centerSymbol().color(), QColor(0, 255, 0))
self.assertEqual(d.embeddedRenderer().symbol().color().name(), '#fdbf6f')
def testRenderNoCluster(self):
self.layer.renderer().setTolerance(1)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_no_cluster')
self.assertTrue(renderchecker.runTest('displacement_no_cluster'))
def testRenderWithin(self):
self.layer.renderer().setTolerance(10)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster')
self.assertTrue(renderchecker.runTest('expected_displacement_cluster'))
def testRenderVariables(self):
""" test rendering with expression variables in marker """
self.layer.renderer().setTolerance(10)
old_marker = self.layer.renderer().centerSymbol().clone()
new_marker = QgsMarkerSymbol.createSimple({'color': '#ffff00', 'size': '3', 'outline_style': 'no'})
new_marker.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('@cluster_color'))
new_marker.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertySize, QgsProperty.fromExpression('@cluster_size*2'))
self.layer.renderer().setCenterSymbol(new_marker)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_variables')
result = renderchecker.runTest('expected_displacement_variables')
self.layer.renderer().setCenterSymbol(old_marker)
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
|
kisna72/django
|
refs/heads/master
|
tests/model_package/tests.py
|
380
|
from __future__ import unicode_literals
from django.db import connection, models
from django.db.backends.utils import truncate_name
from django.test import TestCase
from .models.article import Article, Site
from .models.publication import Publication
class Advertisement(models.Model):
customer = models.CharField(max_length=100)
publications = models.ManyToManyField("model_package.Publication", blank=True)
class ModelPackageTests(TestCase):
def test_m2m_tables_in_subpackage_models(self):
"""
Regression for #12168: models split into subpackages still get M2M
tables.
"""
p = Publication.objects.create(title="FooBar")
site = Site.objects.create(name="example.com")
a = Article.objects.create(headline="a foo headline")
a.publications.add(p)
a.sites.add(site)
a = Article.objects.get(id=a.pk)
self.assertEqual(a.id, a.pk)
self.assertEqual(a.sites.count(), 1)
def test_models_in_the_test_package(self):
"""
Regression for #12245 - Models can exist in the test package, too.
"""
p = Publication.objects.create(title="FooBar")
ad = Advertisement.objects.create(customer="Lawrence Journal-World")
ad.publications.add(p)
ad = Advertisement.objects.get(id=ad.pk)
self.assertEqual(ad.publications.count(), 1)
def test_automatic_m2m_column_names(self):
"""
Regression for #12386 - field names on the autogenerated intermediate
class that are specified as dotted strings don't retain any path
component for the field or column name.
"""
self.assertEqual(
Article.publications.through._meta.fields[1].name, 'article'
)
self.assertEqual(
Article.publications.through._meta.fields[1].get_attname_column(),
('article_id', 'article_id')
)
self.assertEqual(
Article.publications.through._meta.fields[2].name, 'publication'
)
self.assertEqual(
Article.publications.through._meta.fields[2].get_attname_column(),
('publication_id', 'publication_id')
)
self.assertEqual(
Article._meta.get_field('publications').m2m_db_table(),
truncate_name('model_package_article_publications', connection.ops.max_name_length()),
)
self.assertEqual(
Article._meta.get_field('publications').m2m_column_name(), 'article_id'
)
self.assertEqual(
Article._meta.get_field('publications').m2m_reverse_name(),
'publication_id'
)
|
coursemdetw/2014c2
|
refs/heads/master
|
wsgi/static/reeborg/src/libraries/brython/Lib/logging/__init__.py
|
733
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
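# Illustrative use of the level helpers above (the values are made up for the example):
#   addLevelName(25, 'NOTICE')
#   getLevelName(25)       -> 'NOTICE'
#   getLevelName(99)       -> 'Level 99'   (unknown levels fall back to "Level %s")
#   _checkLevel('NOTICE')  -> 25
#   _checkLevel('BOGUS')   raises ValueError, _checkLevel(1.5) raises TypeError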
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
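# Illustrative sketch (assumed helper, not in the original source): a record's
# __dict__ can be sent over a socket and rebuilt on the receiving side with
# makeLogRecord(), preserving the deferred formatting of msg/args.
def _example_make_log_record():
    original = LogRecord('demo', INFO, __file__, 42, 'value is %d', (7,), None)
    rebuilt = makeLogRecord(original.__dict__)
    return rebuilt.getMessage()               # -> 'value is 7'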
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
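# Sketch of the three style classes side by side (this example function is an
# editorial addition). Each style formats the same record dictionary; only the
# placeholder syntax differs.
def _example_format_styles():
    record = LogRecord('demo', INFO, __file__, 1, 'hello %s', ('world',), None)
    record.message = record.getMessage()      # the styles read record.__dict__
    percent = PercentStyle('%(levelname)s:%(message)s').format(record)
    braces = StrFormatStyle('{levelname}:{message}').format(record)
    dollars = StringTemplateStyle('${levelname}:${message}').format(record)
    return percent, braces, dollars           # each -> 'INFO:hello world'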
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
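# Minimal usage sketch for Formatter (hypothetical function; %-style shown):
# format() fills in record.message, adds asctime because the format string
# uses it, and would append exception/stack text if the record carried any.
def _example_formatter():
    record = LogRecord('demo', WARNING, __file__, 1, 'disk %s is full',
                       ('/dev/sda1',), None)
    fmt = Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')
    return fmt.format(record)   # e.g. '2015-01-01 12:00:00 WARNING demo: disk /dev/sda1 is full'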
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
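# Sketch of BufferingFormatter (editorial example): one Formatter is applied
# per record and the results are concatenated between the header and footer,
# which are both empty in the base class.
def _example_buffering_formatter():
    line_fmt = Formatter('%(levelname)s %(message)s\n')
    batch_fmt = BufferingFormatter(line_fmt)
    records = [LogRecord('demo', INFO, __file__, 1, 'msg %d', (i,), None)
               for i in range(2)]
    return batch_fmt.format(records)          # -> 'INFO msg 0\nINFO msg 1\n'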
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
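# Hierarchy sketch for Filter (function name is hypothetical): a filter named
# 'A.B' passes records from 'A.B' and its descendants but rejects siblings
# such as 'A.BB'.
def _example_filter_hierarchy():
    allow_ab = Filter('A.B')
    child = LogRecord('A.B.C', INFO, __file__, 1, 'allowed', (), None)
    sibling = LogRecord('A.BB', INFO, __file__, 1, 'rejected', (), None)
    return allow_ab.filter(child), allow_ab.filter(sibling)   # -> (True, False)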
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
.. versionchanged: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
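# Sketch of the 3.2 behaviour noted in the docstring above: a bare callable can
# act as a filter, no Filter subclass required (this example function is an
# editorial addition).
def _example_callable_filter():
    filterer = Filterer()
    filterer.addFilter(lambda record: record.levelno >= WARNING)
    dropped = LogRecord('demo', INFO, __file__, 1, 'dropped', (), None)
    kept = LogRecord('demo', WARNING, __file__, 1, 'kept', (), None)
    return filterer.filter(dropped), filterer.filter(kept)    # -> (False, True)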
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
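# Minimal Handler subclass sketch (hypothetical class, not part of the module):
# emit() is the only method a subclass must supply; here it simply collects the
# formatted records in memory.
class _ExampleListHandler(Handler):
    def __init__(self, level=NOTSET):
        Handler.__init__(self, level)
        self.messages = []
    def emit(self, record):
        self.messages.append(self.format(record))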
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
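# Usage sketch for StreamHandler (assumed example): any object with write() and
# flush() works as the target stream, not just sys.stderr.
def _example_stream_handler():
    target = io.StringIO()
    handler = StreamHandler(target)
    handler.setFormatter(Formatter('%(levelname)s:%(name)s:%(message)s'))
    handler.handle(LogRecord('demo', ERROR, __file__, 1, 'boom: %s',
                             ('oops',), None))
    return target.getvalue()                  # -> 'ERROR:demo:boom: oops\n'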
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
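# Sketch of the delay behaviour (the file path is hypothetical): with
# delay=True the file is only opened on the first emitted record, and close()
# is safe even if nothing was ever written.
def _example_file_handler(path='/tmp/example.log'):
    handler = FileHandler(path, mode='a', delay=True)   # nothing opened yet
    handler.setFormatter(Formatter('%(levelname)s %(message)s'))
    handler.handle(LogRecord('demo', INFO, __file__, 1,
                             'first record opens the file', (), None))
    handler.close()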
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(nm)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
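# Equivalence sketch for getChild() (editorial example; the logger names are
# hypothetical): both expressions below resolve to the same logger object.
def _example_get_child():
    parent = getLogger('app')
    return parent.getChild('db.session') is getLogger('app.db.session')   # -> True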
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
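# Sketch of setLoggerClass() with a hypothetical Logger subclass: loggers
# created afterwards by getLogger() gain the extra trace() convenience method.
# Note that setLoggerClass() changes module-wide state when called.
class _ExampleTraceLogger(Logger):
    def trace(self, msg, *args, **kwargs):
        # A DEBUG-level convenience wrapper, mirroring Logger.debug() above.
        if self.isEnabledFor(DEBUG):
            self._log(DEBUG, msg, args, **kwargs)
def _example_install_trace_logger():
    setLoggerClass(_ExampleTraceLogger)
    return getLogger('example.traced')        # an _ExampleTraceLogger instance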
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
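# Usage sketch for LoggerAdapter (the names and request id are hypothetical):
# the adapter injects its extra dict into every call, so a formatter on the
# underlying logger's handlers can reference %(request_id)s.
def _example_logger_adapter():
    adapter = LoggerAdapter(getLogger('example.web'), {'request_id': 'req-1234'})
    adapter.warning('payment declined for %s', 'order-42')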
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, a ValueError is raised.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
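# One-shot configuration sketch (an assumed example; the values are arbitrary):
# basicConfig() is a no-op if the root logger already has handlers, so calling
# it a second time is harmless.
def _example_basic_config():
    basicConfig(format='%(asctime)s %(levelname)-8s %(name)s %(message)s',
                datefmt='%H:%M:%S', level=DEBUG)
    getLogger('example.app').debug('root logger configured')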
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
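# Library pattern sketch for the docstring above (the package name is
# hypothetical): attach a NullHandler to the library's top-level logger so that
# applications which never configure logging see no one-off warning.
def _example_library_logger():
    library_logger = getLogger('examplelib')
    library_logger.addHandler(NullHandler())
    return library_logger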
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarning that redirects to logging; it will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
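# Round-trip sketch for captureWarnings() (editorial example): while capture is
# enabled, warnings.warn() output is logged to the 'py.warnings' logger instead
# of being written to sys.stderr.
def _example_capture_warnings():
    captureWarnings(True)
    warnings.warn('this goes to the py.warnings logger')
    captureWarnings(False)                    # restore the original behaviour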
|
aviaryan/open-event-orga-server
|
refs/heads/development
|
app/helpers/invoicing.py
|
8
|
from datetime import datetime
from app.helpers.cache import cache
from app.helpers.data import save_to_db
from app.helpers.helpers import get_count, represents_int
from app.helpers.payment import StripePaymentsManager, PayPalPaymentsManager
from app.models.discount_code import DiscountCode, EVENT
from app.models.event import Event
from app.models.event_invoice import EventInvoice
class InvoicingManager(object):
"""All event service fee invoicing related functions"""
@cache.memoize(50)
def get_invoice(self, invoice_id):
return EventInvoice.query.get(invoice_id)
@staticmethod
def get_invoice_by_identifier(identifier):
return EventInvoice.query.filter_by(identifier=identifier).first()
@staticmethod
def get_discount_codes():
return DiscountCode.query.filter_by(used_for=EVENT).all()
@staticmethod
def get_discount_code_used_count(discount_code_id):
return get_count(Event.query.filter_by(discount_code_id=discount_code_id))
@staticmethod
def get_discount_code(discount_code):
if represents_int(discount_code):
return DiscountCode.query.filter_by(id=discount_code).filter_by(used_for=EVENT).first()
else:
return DiscountCode.query.filter_by(code=discount_code).filter_by(used_for=EVENT).first()
@staticmethod
def get_invoices(event_id=None, status=None, from_date=None, to_date=None):
if event_id:
if status:
invoices = EventInvoice.query.filter_by(event_id=event_id).filter_by(status=status)
else:
invoices = EventInvoice.query.filter_by(event_id=event_id)
else:
if status:
invoices = EventInvoice.query.filter_by(status=status)
else:
invoices = EventInvoice.query
if from_date:
invoices = invoices.filter(EventInvoice.created_at >= from_date)
if to_date:
invoices = invoices.filter(EventInvoice.created_at <= to_date)
return invoices.all()
@staticmethod
def get_invoices_count(event_id, status='completed'):
return get_count(EventInvoice.query.filter_by(event_id=event_id).filter_by(status=status))
@staticmethod
def initiate_invoice_payment(form):
identifier = form['identifier']
email = form['email']
invoice = InvoicingManager.get_invoice_by_identifier(identifier)
if invoice:
user = invoice.user
if invoice.amount > 0 \
and (not invoice.paid_via
or (invoice.paid_via
and (invoice.paid_via == 'stripe'
or invoice.paid_via == 'paypal'))):
country = form['country']
address = form['address']
city = form['city']
state = form['state']
zipcode = form['zipcode']
invoice.address = address
invoice.city = city
invoice.state = state
invoice.country = country
invoice.zipcode = zipcode
invoice.status = 'initialized'
else:
invoice.status = 'completed'
invoice.completed_at = datetime.utcnow()
if not invoice.paid_via:
invoice.paid_via = 'free'
save_to_db(invoice)
return invoice
else:
return False
@staticmethod
def charge_stripe_invoice_payment(form):
invoice = InvoicingManager.get_invoice_by_identifier(form['identifier'])
invoice.stripe_token = form['stripe_token_id']
save_to_db(invoice)
charge = StripePaymentsManager.capture_payment(invoice, credentials=StripePaymentsManager.get_credentials())
if charge:
invoice.paid_via = 'stripe'
invoice.payment_mode = charge.source.object
invoice.brand = charge.source.brand
invoice.exp_month = charge.source.exp_month
invoice.exp_year = charge.source.exp_year
invoice.last4 = charge.source.last4
invoice.transaction_id = charge.id
invoice.status = 'completed'
invoice.completed_at = datetime.utcnow()
save_to_db(invoice)
return True, invoice
else:
return False, 'Error'
@staticmethod
def charge_paypal_invoice_payment(invoice):
payment_details = PayPalPaymentsManager \
.get_approved_payment_details(invoice, credentials=PayPalPaymentsManager.get_credentials())
if 'PAYERID' in payment_details:
capture_result = PayPalPaymentsManager \
.capture_payment(invoice, payment_details['PAYERID'],
credentials=PayPalPaymentsManager.get_credentials())
if capture_result['ACK'] == 'Success':
invoice.paid_via = 'paypal'
invoice.status = 'completed'
invoice.transaction_id = capture_result['PAYMENTINFO_0_TRANSACTIONID']
invoice.completed_at = datetime.utcnow()
save_to_db(invoice)
return True, invoice
else:
return False, capture_result['L_SHORTMESSAGE0']
else:
return False, 'Payer ID missing. Payment flow tampered.'
@staticmethod
def create_edit_discount_code(form, discount_code_id=None):
if not discount_code_id:
discount_code = DiscountCode()
else:
discount_code = InvoicingManager.get_discount_code(discount_code_id)
discount_code.code = form.get('code')
discount_code.value = form.get('value')
discount_code.type = 'percent'
discount_code.max_quantity = form.get('max_quantity', None)
discount_code.tickets_number = form.get('tickets_number')
discount_code.used_for = EVENT
discount_code.marketer_id = form.get('marketer')
discount_code.is_active = form.get('status', 'in_active') == 'active'
if discount_code.max_quantity == "":
discount_code.max_quantity = None
try:
discount_code.valid_from = datetime.strptime(form.get('start_date', None) + ' ' +
form.get('start_time', None), '%m/%d/%Y %H:%M')
except:
discount_code.valid_from = None
try:
discount_code.valid_till = datetime.strptime(form.get('end_date', None) + ' ' +
form.get('end_time', None), '%m/%d/%Y %H:%M')
except:
discount_code.valid_till = None
save_to_db(discount_code)
return discount_code
|
hexlism/xx_net
|
refs/heads/master
|
gae_proxy/server/lib/google/appengine/api/yaml_errors.py
|
19
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Errors used in the YAML API, which is used by app developers."""
class Error(Exception):
"""Base datastore yaml error type."""
class ProtocolBufferParseError(Error):
"""Error in protocol buffer parsing"""
class EmptyConfigurationFile(Error):
"""Tried to load empty configuration file."""
class MultipleConfigurationFile(Error):
"""Tried to load configuration file with multiple objects."""
class UnexpectedAttribute(Error):
"""Raised when an unexpected attribute is encounted."""
class DuplicateAttribute(Error):
"""Generated when an attribute is assigned to twice."""
class ListenerConfigurationError(Error):
"""Generated when there is a parsing problem due to configuration."""
class IllegalEvent(Error):
"""Raised when an unexpected event type is received by listener."""
class InternalError(Error):
"""Raised when an internal implementation error is detected."""
class EventListenerError(Error):
"""Top level exception raised by YAML listener.
Any exception raised within the process of parsing a YAML file via an
EventListener is caught and wrapped in an EventListenerError. The causing
exception is maintained, but additional useful information is saved which
can be used for reporting useful information to users.
Attributes:
cause: The original exception which caused the EventListenerError.
"""
def __init__(self, cause):
"""Initialize event-listener error."""
if hasattr(cause, 'args') and cause.args:
Error.__init__(self, *cause.args)
else:
Error.__init__(self, str(cause))
self.cause = cause
class EventListenerYAMLError(EventListenerError):
"""Generated specifically for yaml.error.YAMLError."""
class EventError(EventListenerError):
"""Generated specifically when an error occurs in event handler.
Attributes:
cause: The original exception which caused the EventListenerError.
event: Event being handled when exception occurred.
"""
def __init__(self, cause, event):
"""Initialize event-listener error."""
EventListenerError.__init__(self, cause)
self.event = event
def __str__(self):
return '%s\n%s' % (self.cause, self.event.start_mark)
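# Illustrative sketch (hypothetical helper, not part of the API): wrapping a
# parsing failure preserves the original exception on .cause while reusing its
# args for the error message.
def _example_wrap_listener_error():
    try:
        raise ValueError('bad scalar on line 3')
    except ValueError as err:
        wrapped = EventListenerError(err)
        return wrapped.cause is err, wrapped.args   # -> (True, ('bad scalar on line 3',))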
|
FluidityProject/fluidity
|
refs/heads/testharnessClean
|
tests/mms_rans_p2p1_keps_linearmomentum_cv/function_printer.py
|
6
|
from mms_rans_p2p1_keps_tools import *
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
import sys
'''
run using:
python function_printer.py AA BB CC DD .. n_rows
where:
AA, BB, CC, DD are names of functions in mms_rans_p2p1_keps_tools.py (any number can be entered)
n_rows is the number of rows to display the functions on
'''
functions = []
for arg in sys.argv[1:-1]:
functions.append(arg)
n_rows = int(sys.argv[-1])
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0.2, hspace=0.2)
res = 50
X = linspace(0.0, pi, res)
Y = linspace(0.0, pi, res)
x = [0,0]
data = empty([len(functions), res, res])
for z, function in enumerate(functions):
for j, x[0] in enumerate(X):
for i, x[1] in enumerate(Y):
data[z,i,j] = eval(function + '(x)')
plt.subplot(n_rows, len(functions)/n_rows + 1, z+1)
CS = plt.contour(X, Y, data[z])
plt.clabel(CS, inline=1, fontsize=10)
plt.title(functions[z])
plt.show()
|
lixiangning888/whole_project
|
refs/heads/master
|
modules/signatures/stealth_timelimit.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Accuvant, Inc. (bspengler@accuvant.com)
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class StealthTimeout(Signature):
name = "stealth_timeout"
description = "可能进行了时间有效期检查,检查本地时间后过早退出"
severity = 3
weight = 3
confidence = 80
categories = ["stealth"]
authors = ["Accuvant"]
minimum = "1.3"
evented = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.lastprocess = 0
self.systimeidx = 0
self.exitidx = 0
self.curidx = 0
def on_call(self, call, process):
if process is not self.lastprocess:
self.lastprocess = process
self.systimeidx = 0
self.exitidx = 0
self.curidx = 0
self.curidx += 1
if call["api"] == "GetSystemTimeAsFileTime" or call["api"] == "GetSystemTime" or call["api"] == "GetLocalTime" or call["api"] == "NtQuerySystemTime":
self.systimeidx = self.curidx
elif call["api"] == "NtTerminateProcess":
handle = self.get_argument(call, "ProcessHandle")
if handle == "0xffffffff" or handle == "0x00000000":
self.exitidx = self.curidx
if self.systimeidx and self.exitidx and self.systimeidx > (self.exitidx - 10):
self.data.append({"process" : process["process_name"] + ", PID " + str(process["process_id"])})
return True
return None
|
translate/virtaal
|
refs/heads/master
|
virtaal/models/basemodel.py
|
6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import gobject
class BaseModel(gobject.GObject):
"""Base class for all models."""
__gtype_name__ = "BaseModel"
__gsignals__ = {
"loaded": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ()),
"saved": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ()),
}
# INITIALIZERS #
def __init__(self):
gobject.GObject.__init__(self)
# ACCESSORS #
def is_modified(self):
return False
# METHODS #
def loaded(self):
"""Emits the "loaded" signal."""
self.emit('loaded')
def saved(self):
"""Emits the "saved" signal."""
self.emit('saved')
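# Minimal usage sketch (hypothetical subclass, not part of Virtaal): concrete
# models call loaded()/saved() once their I/O is done so that anything
# attached with connect() gets notified.
class _ExampleModel(BaseModel):
    def load(self, data):
        self.data = data
        self.loaded()  # emits the "loaded" signal to connected handlers
# model = _ExampleModel()
# model.connect('loaded', lambda m: setattr(m, 'was_loaded', True))
# model.load([1, 2, 3])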
|
soldag/home-assistant
|
refs/heads/dev
|
homeassistant/components/sleepiq/const.py
|
21
|
"""Define constants for the SleepIQ component."""
DOMAIN = "sleepiq"
IS_IN_BED = "is_in_bed"
SLEEP_NUMBER = "sleep_number"
SENSOR_TYPES = {SLEEP_NUMBER: "SleepNumber", IS_IN_BED: "Is In Bed"}
LEFT = "left"
RIGHT = "right"
SIDES = [LEFT, RIGHT]
|
drawks/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py
|
15
|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork_facts
version_added: "2.1"
short_description: Get virtual network facts.
description:
- Get facts for a specific virtual network or all virtual networks within a resource group.
options:
name:
description:
- Only show results for a specific virtual network.
resource_group:
description:
- Limit results by resource group. Required when filtering by name.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht) <house@redhat.com>"
- "Matt Davis (@nitzmahone) <mdavis@redhat.com>"
'''
EXAMPLES = '''
- name: Get facts for one virtual network
azure_rm_virtualnetwork_facts:
resource_group: myResourceGroup
name: secgroup001
- name: Get facts for all virtual networks
azure_rm_virtualnetwork_facts:
resource_group: myResourceGroup
- name: Get facts by tags
azure_rm_virtualnetwork_facts:
tags:
- testing
'''
RETURN = '''
azure_virtualnetworks:
description: List of virtual network dicts.
returned: always
type: list
example: [{
"etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"',
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet2001",
"location": "eastus2",
"name": "vnet2001",
"properties": {
"addressSpace": {
"addressPrefixes": [
"10.10.0.0/16"
]
},
"provisioningState": "Succeeded",
"resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612",
"subnets": []
},
"type": "Microsoft.Network/virtualNetworks"
}]
virtualnetworks:
description: List of virtual network dicts in the same format as the azure_rm_virtualnetwork module parameters.
returned: always
type: list
contains:
id:
description:
- Resource ID.
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet2001
type: str
address_prefixes:
description:
- List of IPv4 address ranges where each is formatted using CIDR notation.
sample: ["10.10.0.0/16"]
type: list
dns_servers:
description:
- Custom list of DNS servers.
type: list
sample: ["www.azure.com"]
location:
description:
- Valid azure location.
type: str
sample: eastus
tags:
description:
- Tags assigned to the resource. Dictionary of string:string pairs.
type: dict
sample: { "tag1": "abc" }
provisioning_state:
description:
- Provisioning state of the resource.
sample: Succeeded
type: str
name:
description:
- Name of the virtual network.
type: str
sample: foo
subnets:
description:
- Subnets associated with this virtual network.
type: list
contains:
id:
description:
- Resource ID.
type: str
name:
description:
- Resource Name.
type: str
provisioning_state:
description:
- Provisioning state of the resource.
type: str
sample: Succeeded
address_prefix:
description:
- The address prefix for the subnet.
network_security_group:
description:
- Existing security group id with which to associate the subnet.
type: str
route_table:
description:
- The reference of the RouteTable resource.
type: str
service_endpoints:
description:
- An array of service endpoints.
type: list
'''
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'VirtualNetwork'
class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list'),
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_virtualnetworks=[]),
virtualnetworks=[]
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name is not None:
results = self.get_item()
elif self.resource_group is not None:
results = self.list_resource_group()
else:
results = self.list_items()
self.results['ansible_facts']['azure_virtualnetworks'] = self.serialize(results)
self.results['virtualnetworks'] = self.curated(results)
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
results = []
try:
item = self.network_client.virtual_networks.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
results = [item]
return results
def list_resource_group(self):
self.log('List items for resource group')
try:
response = self.network_client.virtual_networks.list(self.resource_group)
except CloudError as exc:
self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(item)
return results
def list_items(self):
self.log('List all for items')
try:
response = self.network_client.virtual_networks.list_all()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(item)
return results
def serialize(self, raws):
self.log("Serialize all items")
return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else []
def curated(self, raws):
self.log("Format all items")
return [self.virtualnetwork_to_dict(x) for x in raws] if raws else []
def virtualnetwork_to_dict(self, vnet):
results = dict(
id=vnet.id,
name=vnet.name,
location=vnet.location,
tags=vnet.tags,
provisioning_state=vnet.provisioning_state
)
if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
results['dns_servers'] = []
for server in vnet.dhcp_options.dns_servers:
results['dns_servers'].append(server)
if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
results['address_prefixes'] = []
for space in vnet.address_space.address_prefixes:
results['address_prefixes'].append(space)
if vnet.subnets and len(vnet.subnets) > 0:
results['subnets'] = [self.subnet_to_dict(x) for x in vnet.subnets]
return results
def subnet_to_dict(self, subnet):
result = dict(
id=subnet.id,
name=subnet.name,
provisioning_state=subnet.provisioning_state,
address_prefix=subnet.address_prefix,
network_security_group=subnet.network_security_group.id if subnet.network_security_group else None,
route_table=subnet.route_table.id if subnet.route_table else None
)
if subnet.service_endpoints:
result['service_endpoints'] = [{'service': item.service, 'locations': item.locations} for item in subnet.service_endpoints]
return result
def main():
AzureRMNetworkInterfaceFacts()
if __name__ == '__main__':
main()
|
paolodedios/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/examples/v1/debug_fibonacci.py
|
22
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demo of the tfdbg curses UI: A TF network computing Fibonacci sequence."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow
from tensorflow.python import debug as tf_debug
tf = tensorflow.compat.v1
FLAGS = None
def main(_):
sess = tf.Session()
# Construct the TensorFlow network.
n0 = tf.Variable(
np.ones([FLAGS.tensor_size] * 2), dtype=tf.int32, name="node_00")
n1 = tf.Variable(
np.ones([FLAGS.tensor_size] * 2), dtype=tf.int32, name="node_01")
for i in xrange(2, FLAGS.length):
n0, n1 = n1, tf.add(n0, n1, name="node_%.2d" % i)
sess.run(tf.global_variables_initializer())
# Wrap the TensorFlow Session object for debugging.
if FLAGS.debug and FLAGS.tensorboard_debug_address:
raise ValueError(
"The --debug and --tensorboard_debug_address flags are mutually "
"exclusive.")
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)
def has_negative(_, tensor):
return np.any(tensor < 0)
sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
sess.add_tensor_filter("has_negative", has_negative)
elif FLAGS.tensorboard_debug_address:
sess = tf_debug.TensorBoardDebugWrapperSession(
sess, FLAGS.tensorboard_debug_address)
print("Fibonacci number at position %d:\n%s" % (FLAGS.length, sess.run(n1)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--tensor_size",
type=int,
default=1,
help="""\
Size of tensor. E.g., if the value is 30, the tensors will have shape
[30, 30].\
""")
parser.add_argument(
"--length",
type=int,
default=20,
help="Length of the fibonacci sequence to compute.")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
help="Use TensorFlow Debugger (tfdbg). Mutually exclusive with the "
"--tensorboard_debug_address flag.")
parser.add_argument(
"--tensorboard_debug_address",
type=str,
default=None,
help="Connect to the TensorBoard Debugger Plugin backend specified by "
"the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
"--debug flag.")
FLAGS, unparsed = parser.parse_known_args()
with tf.Graph().as_default():
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
bob-the-hamster/kivy
|
refs/heads/master
|
kivy/uix/selectableview.py
|
52
|
from kivy.properties import NumericProperty, BooleanProperty
class SelectableView(object):
'''The :class:`SelectableView` mixin is used with list items and other
classes that are to be instantiated in a list view or other classes
which use a selection-enabled adapter such as ListAdapter. select() and
deselect() can be overridden with display code to mark items as
selected or not, if desired.
'''
index = NumericProperty(-1)
'''The index into the underlying data list or the data item this view
represents.
'''
is_selected = BooleanProperty(False)
'''A SelectableView instance carries this property, which should be kept
in sync with the equivalent property of the data item it represents.
'''
def __init__(self, **kwargs):
super(SelectableView, self).__init__(**kwargs)
def select(self, *args):
'''The list item is responsible for updating the display when
being selected, if desired.
'''
self.is_selected = True
def deselect(self, *args):
'''The list item is responsible for updating the display when
being unselected, if desired.
'''
self.is_selected = False
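# Brief usage sketch (hypothetical widget, not part of Kivy): a concrete list
# item mixes SelectableView into a widget and overrides select()/deselect()
# to update its appearance, calling the mixin to keep is_selected in sync.
from kivy.uix.label import Label

class _ExampleSelectableLabel(SelectableView, Label):
    def select(self, *args):
        self.bold = True
        super(_ExampleSelectableLabel, self).select(*args)

    def deselect(self, *args):
        self.bold = False
        super(_ExampleSelectableLabel, self).deselect(*args)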
|
neiudemo1/django
|
refs/heads/master
|
tests/generic_relations_regress/models.py
|
269
|
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.deletion import ProtectedError
from django.utils.encoding import python_2_unicode_compatible
__all__ = ('Link', 'Place', 'Restaurant', 'Person', 'Address',
'CharLink', 'TextLink', 'OddRelation1', 'OddRelation2',
'Contact', 'Organization', 'Note', 'Company')
@python_2_unicode_compatible
class Link(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
def __str__(self):
return "Link to %s id=%s" % (self.content_type, self.object_id)
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=100)
links = GenericRelation(Link)
def __str__(self):
return "Place: %s" % self.name
@python_2_unicode_compatible
class Restaurant(Place):
def __str__(self):
return "Restaurant: %s" % self.name
@python_2_unicode_compatible
class Address(models.Model):
street = models.CharField(max_length=80)
city = models.CharField(max_length=50)
state = models.CharField(max_length=2)
zipcode = models.CharField(max_length=5)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
def __str__(self):
return '%s %s, %s %s' % (self.street, self.city, self.state, self.zipcode)
@python_2_unicode_compatible
class Person(models.Model):
account = models.IntegerField(primary_key=True)
name = models.CharField(max_length=128)
addresses = GenericRelation(Address)
def __str__(self):
return self.name
class CharLink(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.CharField(max_length=100)
content_object = GenericForeignKey()
class TextLink(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.TextField()
content_object = GenericForeignKey()
class OddRelation1(models.Model):
name = models.CharField(max_length=100)
clinks = GenericRelation(CharLink)
class OddRelation2(models.Model):
name = models.CharField(max_length=100)
tlinks = GenericRelation(TextLink)
# models for test_q_object_or:
class Note(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
note = models.TextField()
class Contact(models.Model):
notes = GenericRelation(Note)
class Organization(models.Model):
name = models.CharField(max_length=255)
contacts = models.ManyToManyField(Contact, related_name='organizations')
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=100)
links = GenericRelation(Link)
def __str__(self):
return "Company: %s" % self.name
# For testing #13085 fix, we also use Note model defined above
class Developer(models.Model):
name = models.CharField(max_length=15)
@python_2_unicode_compatible
class Team(models.Model):
name = models.CharField(max_length=15)
members = models.ManyToManyField(Developer)
def __str__(self):
return "%s team" % self.name
def __len__(self):
return self.members.count()
class Guild(models.Model):
name = models.CharField(max_length=15)
members = models.ManyToManyField(Developer)
def __nonzero__(self):
return self.members.count()
class Tag(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='g_r_r_tags')
object_id = models.CharField(max_length=15)
content_object = GenericForeignKey()
label = models.CharField(max_length=15)
class Board(models.Model):
name = models.CharField(primary_key=True, max_length=15)
class SpecialGenericRelation(GenericRelation):
def __init__(self, *args, **kwargs):
super(SpecialGenericRelation, self).__init__(*args, **kwargs)
self.editable = True
self.save_form_data_calls = 0
def save_form_data(self, *args, **kwargs):
self.save_form_data_calls += 1
class HasLinks(models.Model):
links = SpecialGenericRelation(Link)
class Meta:
abstract = True
class HasLinkThing(HasLinks):
pass
class A(models.Model):
flag = models.NullBooleanField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class B(models.Model):
a = GenericRelation(A)
class Meta:
ordering = ('id',)
class C(models.Model):
b = models.ForeignKey(B, models.CASCADE)
class Meta:
ordering = ('id',)
class D(models.Model):
b = models.ForeignKey(B, models.SET_NULL, null=True)
class Meta:
ordering = ('id',)
# Ticket #22998
class Node(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content = GenericForeignKey('content_type', 'object_id')
class Content(models.Model):
nodes = GenericRelation(Node)
related_obj = models.ForeignKey('Related', models.CASCADE)
class Related(models.Model):
pass
def prevent_deletes(sender, instance, **kwargs):
raise ProtectedError("Not allowed to delete.", [instance])
models.signals.pre_delete.connect(prevent_deletes, sender=Node)
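# Usage sketch (helper name is illustrative; assumes a configured database):
# the GenericForeignKey on Link resolves to any target model instance, and
# GenericRelation gives the reverse query from the target back to its links.
def _generic_link_example():
    place = Place.objects.create(name="Cafe")
    link = Link.objects.create(content_object=place)
    assert link.content_object == place   # generic foreign key lookup
    assert place.links.count() == 1       # reverse GenericRelation query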
|
manuelep/openshift_v3_test
|
refs/heads/master
|
wsgi/web2py/gluon/contrib/plural_rules/he.py
|
44
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for he (Hebrew)
nplurals=2 # Hebrew language has 2 forms:
# 1 singular and 1 plural
# Determine plural_id for number *n* as sequence of positive
# integers: 0,1,...
# NOTE! For singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return plural form of *word* using
# *plural_id* (which ALWAYS>0). This function will be executed
# for words (or phrases) not found in plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
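# Quick sanity sketch (not part of the original file): plural_id 0 selects the
# singular form and 1 the plural form.
assert get_plural_id(1) == 0
assert get_plural_id(0) == 1 and get_plural_id(5) == 1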
|
gmacchi93/serverInfoParaguay
|
refs/heads/master
|
apps/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/py.py
|
1323
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
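# Small usage sketch (sample data only, not part of html5lib); keys must be
# text strings, and prefix queries reuse the cached bisect bounds.
if __name__ == "__main__":
    t = Trie({u"app": 1, u"apple": 2, u"banana": 3})
    assert t.keys(u"app") == set([u"app", u"apple"])
    assert t.has_keys_with_prefix(u"ban")
    assert not t.has_keys_with_prefix(u"zzz")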
|
yawnosnorous/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/multiprocessing/forking.py
|
45
|
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import os
import sys
import signal
from multiprocessing import util, process
__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler']
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
if not Popen.thread_is_spawning():
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(self).__name__
)
#
# Try making some callable types picklable
#
from pickle import _Pickler as Pickler
class ForkingPickler(Pickler):
dispatch = Pickler.dispatch.copy()
@classmethod
def register(cls, type, reduce):
def dispatcher(self, obj):
rv = reduce(obj)
if isinstance(rv, str):
self.save_global(obj, rv)
else:
self.save_reduce(obj=obj, *rv)
cls.dispatch[type] = dispatcher
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
def f(self):
pass
ForkingPickler.register(type(_C().f), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
try:
from functools import partial
except ImportError:
pass
else:
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return partial(func, *args, **keywords)
ForkingPickler.register(partial, _reduce_partial)
#
# Unix
#
if sys.platform != 'win32':
import time
exit = os._exit
duplicate = os.dup
close = os.close
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self.pid = os.fork()
if self.pid == 0:
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, flag)
except os.error:
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if timeout is None:
return self.poll(0)
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = self.poll()
if res is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as e:
if self.wait(timeout=0.1) is None:
raise
@staticmethod
def thread_is_spawning():
return False
#
# Windows
#
else:
import _thread
import msvcrt
import _subprocess
import time
from pickle import dump, load, HIGHEST_PROTOCOL
from _multiprocessing import win32, Connection, PipeConnection
from .util import Finalize
def dump(obj, file, protocol=None):
ForkingPickler(file, protocol).dump(obj)
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
exit = win32.ExitProcess
close = win32.CloseHandle
#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
global _python_exe
_python_exe = exe
#
#
#
def duplicate(handle, target_process=None, inheritable=False):
if target_process is None:
target_process = _subprocess.GetCurrentProcess()
return _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle, target_process,
0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
).Detach()
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
_tls = _thread._local()
def __init__(self, process_obj):
# create pipe for communication with child
rfd, wfd = os.pipe()
# get handle for read end of the pipe and make it inheritable
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)
# start process
cmd = get_command_line() + [rhandle]
cmd = ' '.join('"%s"' % x for x in cmd)
hp, ht, pid, tid = _subprocess.CreateProcess(
_python_exe, cmd, None, None, 1, 0, None, None, None
)
ht.Close()
close(rhandle)
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
# send information to child
prep_data = get_preparation_data(process_obj._name)
to_child = os.fdopen(wfd, 'wb')
Popen._tls.process_handle = int(hp)
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del Popen._tls.process_handle
to_child.close()
@staticmethod
def thread_is_spawning():
return getattr(Popen._tls, 'process_handle', None) is not None
@staticmethod
def duplicate_for_child(handle):
return duplicate(handle, Popen._tls.process_handle)
def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _subprocess.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
if res == _subprocess.WAIT_OBJECT_0:
code = _subprocess.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
if self.returncode is None:
try:
_subprocess.TerminateProcess(int(self._handle), TERMINATE)
except WindowsError:
if self.wait(timeout=0.1) is None:
raise
#
#
#
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
assert len(argv) == 3
return True
else:
return False
def freeze_support():
'''
Run code for process object if this is not the main process
'''
if is_forking(sys.argv):
main()
sys.exit()
def get_command_line():
'''
Returns prefix of command line used for spawning a child process
'''
if process.current_process()._identity==() and is_forking(sys.argv):
raise RuntimeError('''
Attempt to start a new process before the current process
has finished its bootstrapping phase.
This probably means that you are on Windows and you have
forgotten to use the proper idiom in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.''')
if getattr(sys, 'frozen', False):
return [sys.executable, '--multiprocessing-fork']
else:
prog = 'from multiprocessing.forking import main; main()'
return [_python_exe, '-c', prog, '--multiprocessing-fork']
def main():
'''
Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
handle = int(sys.argv[-1])
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
from_parent = os.fdopen(fd, 'rb')
process.current_process()._inheriting = True
preparation_data = load(from_parent)
prepare(preparation_data)
self = load(from_parent)
process.current_process()._inheriting = False
from_parent.close()
exitcode = self._bootstrap()
exit(exitcode)
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object
'''
from .util import _logger, _log_to_stderr
d = dict(
name=name,
sys_path=sys.path,
sys_argv=sys.argv,
log_to_stderr=_log_to_stderr,
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().authkey,
)
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
if not WINEXE and not WINSERVICE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if not os.path.isabs(main_path) and \
process.ORIGINAL_DIR is not None:
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['main_path'] = os.path.normpath(main_path)
return d
#
# Make (Pipe)Connection picklable
#
def reduce_connection(conn):
if not Popen.thread_is_spawning():
raise RuntimeError(
'By default %s objects can only be shared between processes\n'
'using inheritance' % type(conn).__name__
)
return type(conn), (Popen.duplicate_for_child(conn.fileno()),
conn.readable, conn.writable)
ForkingPickler.register(Connection, reduce_connection)
ForkingPickler.register(PipeConnection, reduce_connection)
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
# XXX (ncoghlan): The following code makes several bogus
# assumptions regarding the relationship between __file__
# and a module's real name. See PEP 302 and issue #10845
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name == '__main__':
main_module = sys.modules['__main__']
main_module.__file__ = main_path
elif main_name != 'ipython':
# Main modules not actually called __main__.py may
# contain additional code that should still be executed
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in list(main_module.__dict__.values()):
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
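# Sketch of the idiom that the RuntimeError in get_command_line() asks for
# (user-level code, not part of this module); freeze_support() is a no-op
# unless the interpreter is running as a frozen Windows child process:
#
#     from multiprocessing import Process, freeze_support
#
#     def worker():
#         print('hello from the child')
#
#     if __name__ == '__main__':
#         freeze_support()
#         Process(target=worker).start()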
|
pdufour/sqlalchemy
|
refs/heads/master
|
lib/sqlalchemy/sql/ddl.py
|
57
|
# sql/ddl.py
# Copyright (C) 2009-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provides the hierarchy of DDL-defining schema items as well as routines
to invoke them for a create/drop call.
"""
from .. import util
from .elements import ClauseElement
from .base import Executable, _generative, SchemaVisitor, _bind_or_error
from ..util import topological
from .. import event
from .. import exc
class _DDLCompiles(ClauseElement):
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.ddl_compiler(dialect, self, **kw)
class DDLElement(Executable, _DDLCompiles):
"""Base class for DDL expression constructs.
This class is the base for the general purpose :class:`.DDL` class,
as well as the various create/drop clause constructs such as
:class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`,
etc.
:class:`.DDLElement` integrates closely with SQLAlchemy events,
introduced in :ref:`event_toplevel`. An instance of one is
itself an event receiving callable::
event.listen(
users,
'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql')
)
.. seealso::
:class:`.DDL`
:class:`.DDLEvents`
:ref:`event_toplevel`
:ref:`schema_ddl_sequences`
"""
_execution_options = Executable.\
_execution_options.union({'autocommit': True})
target = None
on = None
dialect = None
callable_ = None
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_ddl(self, multiparams, params)
def execute(self, bind=None, target=None):
"""Execute this DDL immediately.
Executes the DDL statement in isolation using the supplied
:class:`.Connectable` or
:class:`.Connectable` assigned to the ``.bind``
property, if not supplied. If the DDL has a conditional ``on``
criteria, it will be invoked with None as the event.
:param bind:
Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
:class:`.Connectable` must be present in the
``.bind`` property.
:param target:
Optional, defaults to None. The target SchemaItem for the
execute call. Will be passed to the ``on`` callable if any,
and may also provide string expansion data for the
statement. See ``execute_at`` for more information.
"""
if bind is None:
bind = _bind_or_error(self)
if self._should_execute(target, bind):
return bind.execute(self.against(target))
else:
bind.engine.logger.info(
"DDL execution skipped, criteria not met.")
@util.deprecated("0.7", "See :class:`.DDLEvents`, as well as "
":meth:`.DDLElement.execute_if`.")
def execute_at(self, event_name, target):
"""Link execution of this DDL to the DDL lifecycle of a SchemaItem.
Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance,
executing it when that schema item is created or dropped. The DDL
statement will be executed using the same Connection and transactional
context as the Table create/drop itself. The ``.bind`` property of
this statement is ignored.
:param event:
One of the events defined in the schema item's ``.ddl_events``;
e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'
:param target:
The Table or MetaData instance with which this DDLElement will
be associated.
A DDLElement instance can be linked to any number of schema items.
``execute_at`` builds on the ``append_ddl_listener`` interface of
:class:`.MetaData` and :class:`.Table` objects.
Caveat: Creating or dropping a Table in isolation will also trigger
any DDL set to ``execute_at`` that Table's MetaData. This may change
in a future release.
"""
def call_event(target, connection, **kw):
if self._should_execute_deprecated(event_name,
target, connection, **kw):
return connection.execute(self.against(target))
event.listen(target, "" + event_name.replace('-', '_'), call_event)
@_generative
def against(self, target):
"""Return a copy of this DDL against a specific schema item."""
self.target = target
@_generative
def execute_if(self, dialect=None, callable_=None, state=None):
"""Return a callable that will execute this
DDLElement conditionally.
Used to provide a wrapper for event listening::
event.listen(
metadata,
'before_create',
DDL("my_ddl").execute_if(dialect='postgresql')
)
:param dialect: May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something').execute_if(dialect='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something').execute_if(dialect=('postgresql', 'mysql'))
:param callable_: A callable, which will be invoked with
four positional arguments as well as optional keyword
arguments:
:ddl:
This DDL element.
:target:
The :class:`.Table` or :class:`.MetaData` object which is the
target of this event. May be None if the DDL is executed
explicitly.
:bind:
The :class:`.Connection` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/ dropped within a MetaData.create_all() or drop_all()
method call.
:state:
Optional keyword argument - will be the ``state`` argument
passed to this function.
:checkfirst:
Keyword argument, will be True if the 'checkfirst' flag was
set during the call to ``create()``, ``create_all()``,
``drop()``, ``drop_all()``.
If the callable returns a true value, the DDL statement will be
executed.
:param state: any value which will be passed to the callable\_
as the ``state`` keyword argument.
.. seealso::
:class:`.DDLEvents`
:ref:`event_toplevel`
"""
self.dialect = dialect
self.callable_ = callable_
self.state = state
def _should_execute(self, target, bind, **kw):
if self.on is not None and \
not self._should_execute_deprecated(None, target, bind, **kw):
return False
if isinstance(self.dialect, util.string_types):
if self.dialect != bind.engine.name:
return False
elif isinstance(self.dialect, (tuple, list, set)):
if bind.engine.name not in self.dialect:
return False
if (self.callable_ is not None and
not self.callable_(self, target, bind,
state=self.state, **kw)):
return False
return True
def _should_execute_deprecated(self, event, target, bind, **kw):
if self.on is None:
return True
elif isinstance(self.on, util.string_types):
return self.on == bind.engine.name
elif isinstance(self.on, (tuple, list, set)):
return bind.engine.name in self.on
else:
return self.on(self, event, target, bind, **kw)
def __call__(self, target, bind, **kw):
"""Execute the DDL as a ddl_listener."""
if self._should_execute(target, bind, **kw):
return bind.execute(self.against(target))
def _check_ddl_on(self, on):
if (on is not None and
(not isinstance(on, util.string_types + (tuple, list, set)) and
not util.callable(on))):
raise exc.ArgumentError(
"Expected the name of a database dialect, a tuple "
"of names, or a callable for "
"'on' criteria, got type '%s'." % type(on).__name__)
def bind(self):
if self._bind:
return self._bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class DDL(DDLElement):
"""A literal DDL statement.
Specifies literal SQL DDL to be executed by the database. DDL objects
function as DDL event listeners, and can be subscribed to those events
listed in :class:`.DDLEvents`, using either :class:`.Table` or
:class:`.MetaData` objects as targets. Basic templating support allows
a single DDL instance to handle repetitive tasks for multiple tables.
Examples::
from sqlalchemy import event, DDL
tbl = Table('users', metadata, Column('uid', Integer))
event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
connection.execute(drop_spow)
When operating on Table events, the following ``statement``
string substitutions are available::
%(table)s - the Table name, with any required quoting applied
%(schema)s - the schema name, with any required quoting applied
%(fullname)s - the Table name including schema, quoted if needed
The DDL's "context", if any, will be combined with the standard
substitutions noted above. Keys present in the context will override
the standard substitutions.
"""
__visit_name__ = "ddl"
def __init__(self, statement, on=None, context=None, bind=None):
"""Create a DDL statement.
:param statement:
A string or unicode string to be executed. Statements will be
processed with Python's string formatting operator. See the
``context`` argument and the ``execute_at`` method.
A literal '%' in a statement must be escaped as '%%'.
SQL bind parameters are not available in DDL statements.
:param on:
.. deprecated:: 0.7
See :meth:`.DDLElement.execute_if`.
Optional filtering criteria. May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something', on='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something', on=('postgresql', 'mysql'))
If a callable, it will be invoked with four positional arguments
as well as optional keyword arguments:
:ddl:
This DDL element.
:event:
The name of the event that has triggered this DDL, such as
'after-create' Will be None if the DDL is executed explicitly.
:target:
The ``Table`` or ``MetaData`` object which is the target of
this event. May be None if the DDL is executed explicitly.
:connection:
The ``Connection`` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/ dropped within a MetaData.create_all() or drop_all()
method call.
If the callable returns a true value, the DDL statement will be
executed.
:param context:
Optional dictionary, defaults to None. These values will be
available for use in string substitutions on the DDL statement.
:param bind:
Optional. A :class:`.Connectable`, used by
default when ``execute()`` is invoked without a bind argument.
.. seealso::
:class:`.DDLEvents`
:ref:`event_toplevel`
"""
if not isinstance(statement, util.string_types):
raise exc.ArgumentError(
"Expected a string or unicode SQL statement, got '%r'" %
statement)
self.statement = statement
self.context = context or {}
self._check_ddl_on(on)
self.on = on
self._bind = bind
def __repr__(self):
return '<%s@%s; %s>' % (
type(self).__name__, id(self),
', '.join([repr(self.statement)] +
['%s=%r' % (key, getattr(self, key))
for key in ('on', 'context')
if getattr(self, key)]))
class _CreateDropBase(DDLElement):
"""Base class for DDL constructs that represent CREATE and DROP or
equivalents.
The common theme of _CreateDropBase is a single
``element`` attribute which refers to the element
to be created or dropped.
"""
def __init__(self, element, on=None, bind=None):
self.element = element
self._check_ddl_on(on)
self.on = on
self.bind = bind
def _create_rule_disable(self, compiler):
"""Allow disable of _create_rule using a callable.
Pass to _create_rule using
util.portable_instancemethod(self._create_rule_disable)
to retain serializability.
"""
return False
class CreateSchema(_CreateDropBase):
"""Represent a CREATE SCHEMA statement.
.. versionadded:: 0.7.4
The argument here is the string name of the schema.
"""
__visit_name__ = "create_schema"
def __init__(self, name, quote=None, **kw):
"""Create a new :class:`.CreateSchema` construct."""
self.quote = quote
super(CreateSchema, self).__init__(name, **kw)
class DropSchema(_CreateDropBase):
"""Represent a DROP SCHEMA statement.
The argument here is the string name of the schema.
.. versionadded:: 0.7.4
"""
__visit_name__ = "drop_schema"
def __init__(self, name, quote=None, cascade=False, **kw):
"""Create a new :class:`.DropSchema` construct."""
self.quote = quote
self.cascade = cascade
super(DropSchema, self).__init__(name, **kw)
class CreateTable(_CreateDropBase):
"""Represent a CREATE TABLE statement."""
__visit_name__ = "create_table"
def __init__(
self, element, on=None, bind=None,
include_foreign_key_constraints=None):
"""Create a :class:`.CreateTable` construct.
:param element: a :class:`.Table` that's the subject
of the CREATE
:param on: See the description for 'on' in :class:`.DDL`.
:param bind: See the description for 'bind' in :class:`.DDL`.
:param include_foreign_key_constraints: optional sequence of
:class:`.ForeignKeyConstraint` objects that will be included
inline within the CREATE construct; if omitted, all foreign key
constraints that do not specify use_alter=True are included.
.. versionadded:: 1.0.0
"""
super(CreateTable, self).__init__(element, on=on, bind=bind)
self.columns = [CreateColumn(column)
for column in element.columns
]
self.include_foreign_key_constraints = include_foreign_key_constraints
class _DropView(_CreateDropBase):
"""Semi-public 'DROP VIEW' construct.
Used by the test suite for dialect-agnostic drops of views.
This object will eventually be part of a public "view" API.
"""
__visit_name__ = "drop_view"
class CreateColumn(_DDLCompiles):
"""Represent a :class:`.Column` as rendered in a CREATE TABLE statement,
via the :class:`.CreateTable` construct.
This is provided to support custom column DDL within the generation
of CREATE TABLE statements, by using the
compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel`
to extend :class:`.CreateColumn`.
Typical integration is to examine the incoming :class:`.Column`
object, and to redirect compilation if a particular flag or condition
is found::
from sqlalchemy import schema
from sqlalchemy.ext.compiler import compiles
@compiles(schema.CreateColumn)
def compile(element, compiler, **kw):
column = element.element
if "special" not in column.info:
return compiler.visit_create_column(element, **kw)
text = "%s SPECIAL DIRECTIVE %s" % (
column.name,
compiler.type_compiler.process(column.type)
)
default = compiler.get_column_default_string(column)
if default is not None:
text += " DEFAULT " + default
if not column.nullable:
text += " NOT NULL"
if column.constraints:
text += " ".join(
compiler.process(const)
for const in column.constraints)
return text
The above construct can be applied to a :class:`.Table` as follows::
from sqlalchemy import Table, MetaData, Column, Integer, String
from sqlalchemy import schema
metadata = MetaData()
table = Table('mytable', metadata,
Column('x', Integer, info={"special":True}, primary_key=True),
Column('y', String(50)),
Column('z', String(20), info={"special":True})
)
metadata.create_all(conn)
Above, the directives we've added to the :attr:`.Column.info` collection
will be detected by our custom compilation scheme::
CREATE TABLE mytable (
x SPECIAL DIRECTIVE INTEGER NOT NULL,
y VARCHAR(50),
z SPECIAL DIRECTIVE VARCHAR(20),
PRIMARY KEY (x)
)
The :class:`.CreateColumn` construct can also be used to skip certain
columns when producing a ``CREATE TABLE``. This is accomplished by
creating a compilation rule that conditionally returns ``None``.
This is essentially how to produce the same effect as using the
``system=True`` argument on :class:`.Column`, which marks a column
as an implicitly-present "system" column.
For example, suppose we wish to produce a :class:`.Table` which skips
rendering of the Postgresql ``xmin`` column against the Postgresql
backend, but on other backends does render it, in anticipation of a
triggered rule. A conditional compilation rule could skip this name only
on Postgresql::
from sqlalchemy.schema import CreateColumn
@compiles(CreateColumn, "postgresql")
def skip_xmin(element, compiler, **kw):
if element.element.name == 'xmin':
return None
else:
return compiler.visit_create_column(element, **kw)
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('xmin', Integer)
)
Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE``
which only includes the ``id`` column in the string; the ``xmin`` column
will be omitted, but only against the Postgresql backend.
.. versionadded:: 0.8.3 The :class:`.CreateColumn` construct supports
skipping of columns by returning ``None`` from a custom compilation
rule.
.. versionadded:: 0.8 The :class:`.CreateColumn` construct was added
to support custom column creation styles.
"""
__visit_name__ = 'create_column'
def __init__(self, element):
self.element = element
class DropTable(_CreateDropBase):
"""Represent a DROP TABLE statement."""
__visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
"""Represent a CREATE SEQUENCE statement."""
__visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
"""Represent a DROP SEQUENCE statement."""
__visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
"""Represent a CREATE INDEX statement."""
__visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
"""Represent a DROP INDEX statement."""
__visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
"""Represent an ALTER TABLE ADD CONSTRAINT statement."""
__visit_name__ = "add_constraint"
def __init__(self, element, *args, **kw):
super(AddConstraint, self).__init__(element, *args, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable)
class DropConstraint(_CreateDropBase):
"""Represent an ALTER TABLE DROP CONSTRAINT statement."""
__visit_name__ = "drop_constraint"
def __init__(self, element, cascade=False, **kw):
self.cascade = cascade
super(DropConstraint, self).__init__(element, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable)
class DDLBase(SchemaVisitor):
def __init__(self, connection):
self.connection = connection
class SchemaGenerator(DDLBase):
def __init__(self, dialect, connection, checkfirst=False,
tables=None, **kwargs):
super(SchemaGenerator, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def _can_create_table(self, table):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or \
not self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_create_sequence(self, sequence):
return self.dialect.supports_sequences and \
(
(not self.dialect.sequences_optional or
not sequence.optional) and
(
not self.checkfirst or
not self.dialect.has_sequence(
self.connection,
sequence.name,
schema=sequence.schema)
)
)
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
collection = sort_tables_and_constraints(
[t for t in tables if self._can_create_table(t)])
seq_coll = [s for s in metadata._sequences.values()
if s.column is None and self._can_create_sequence(s)]
event_collection = [
t for (t, fks) in collection if t is not None
]
metadata.dispatch.before_create(metadata, self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for seq in seq_coll:
self.traverse_single(seq, create_ok=True)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table, create_ok=True,
include_foreign_key_constraints=fkcs,
_is_metadata_operation=True)
else:
for fkc in fkcs:
self.traverse_single(fkc)
metadata.dispatch.after_create(metadata, self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
def visit_table(
self, table, create_ok=False,
include_foreign_key_constraints=None,
_is_metadata_operation=False):
if not create_ok and not self._can_create_table(table):
return
table.dispatch.before_create(
table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
if not self.dialect.supports_alter:
# e.g., don't omit any foreign key constraints
include_foreign_key_constraints = None
self.connection.execute(
CreateTable(
table,
include_foreign_key_constraints=include_foreign_key_constraints
))
if hasattr(table, 'indexes'):
for index in table.indexes:
self.traverse_single(index)
table.dispatch.after_create(
table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(AddConstraint(constraint))
def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not self._can_create_sequence(sequence):
return
self.connection.execute(CreateSequence(sequence))
def visit_index(self, index):
self.connection.execute(CreateIndex(index))
class SchemaDropper(DDLBase):
def __init__(self, dialect, connection, checkfirst=False,
tables=None, **kwargs):
super(SchemaDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
try:
unsorted_tables = [t for t in tables if self._can_drop_table(t)]
collection = list(reversed(
sort_tables_and_constraints(
unsorted_tables,
filter_fn=lambda constraint: False
if not self.dialect.supports_alter
or constraint.name is None
else None
)
))
except exc.CircularDependencyError as err2:
if not self.dialect.supports_alter:
util.warn(
"Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s, and backend does "
"not support ALTER. To restore at least a partial sort, "
"apply use_alter=True to ForeignKey and "
"ForeignKeyConstraint "
"objects involved in the cycle to mark these as known "
"cycles that will be ignored."
% (
", ".join(sorted([t.fullname for t in err2.cycles]))
)
)
collection = [(t, ()) for t in unsorted_tables]
else:
util.raise_from_cause(
exc.CircularDependencyError(
err2.args[0],
err2.cycles, err2.edges,
msg="Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s. Please ensure "
"that the ForeignKey and ForeignKeyConstraint objects "
"involved in the cycle have "
"names so that they can be dropped using "
"DROP CONSTRAINT."
% (
", ".join(sorted([t.fullname for t in err2.cycles]))
)
)
)
seq_coll = [
s
for s in metadata._sequences.values()
if s.column is None and self._can_drop_sequence(s)
]
event_collection = [
t for (t, fks) in collection if t is not None
]
metadata.dispatch.before_drop(
metadata, self.connection, tables=event_collection,
checkfirst=self.checkfirst, _ddl_runner=self)
for table, fkcs in collection:
if table is not None:
self.traverse_single(
table, drop_ok=True, _is_metadata_operation=True)
else:
for fkc in fkcs:
self.traverse_single(fkc)
for seq in seq_coll:
self.traverse_single(seq, drop_ok=True)
metadata.dispatch.after_drop(
metadata, self.connection, tables=event_collection,
checkfirst=self.checkfirst, _ddl_runner=self)
def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or self.dialect.has_table(
self.connection, table.name, schema=table.schema)
def _can_drop_sequence(self, sequence):
return self.dialect.supports_sequences and \
((not self.dialect.sequences_optional or
not sequence.optional) and
(not self.checkfirst or
self.dialect.has_sequence(
self.connection,
sequence.name,
schema=sequence.schema))
)
def visit_index(self, index):
self.connection.execute(DropIndex(index))
def visit_table(self, table, drop_ok=False, _is_metadata_operation=False):
if not drop_ok and not self._can_drop_table(table):
return
table.dispatch.before_drop(
table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
self.connection.execute(DropTable(table))
table.dispatch.after_drop(
table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation)
def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
self.connection.execute(DropConstraint(constraint))
def visit_sequence(self, sequence, drop_ok=False):
if not drop_ok and not self._can_drop_sequence(sequence):
return
self.connection.execute(DropSequence(sequence))
def sort_tables(tables, skip_fn=None, extra_dependencies=None):
"""sort a collection of :class:`.Table` objects based on dependency.
This is a dependency-ordered sort which will emit :class:`.Table`
objects such that they will follow their dependent :class:`.Table` objects.
Tables are dependent on another based on the presence of
:class:`.ForeignKeyConstraint` objects as well as explicit dependencies
added by :meth:`.Table.add_is_dependent_on`.
.. warning::
The :func:`.sort_tables` function cannot by itself accommodate
automatic resolution of dependency cycles between tables, which
are usually caused by mutually dependent foreign key constraints.
To resolve these cycles, either the
        :paramref:`.ForeignKeyConstraint.use_alter` parameter may be applied
to those constraints, or use the
:func:`.sql.sort_tables_and_constraints` function which will break
out foreign key constraints involved in cycles separately.
:param tables: a sequence of :class:`.Table` objects.
:param skip_fn: optional callable which will be passed a
:class:`.ForeignKey` object; if it returns True, this
constraint will not be considered as a dependency. Note this is
**different** from the same parameter in
:func:`.sort_tables_and_constraints`, which is
instead passed the owning :class:`.ForeignKeyConstraint` object.
:param extra_dependencies: a sequence of 2-tuples of tables which will
also be considered as dependent on each other.
.. seealso::
:func:`.sort_tables_and_constraints`
:meth:`.MetaData.sorted_tables` - uses this function to sort
"""
if skip_fn is not None:
def _skip_fn(fkc):
for fk in fkc.elements:
if skip_fn(fk):
return True
else:
return None
else:
_skip_fn = None
return [
t for (t, fkcs) in
sort_tables_and_constraints(
tables, filter_fn=_skip_fn, extra_dependencies=extra_dependencies)
if t is not None
]
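# --- Hedged usage sketch (editorial addition, not original library code) ---
# A minimal illustration of sort_tables() with an acyclic dependency: the
# "address" table carries a ForeignKey to "user", so "user" is emitted first.
# Table/Column/ForeignKey come from SQLAlchemy's public API; this helper is
# never called automatically.
def _example_sort_tables():  # pragma: no cover - illustration only
    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    meta = MetaData()
    user = Table("user", meta, Column("id", Integer, primary_key=True))
    address = Table(
        "address", meta,
        Column("id", Integer, primary_key=True),
        Column("user_id", Integer, ForeignKey("user.id")),
    )
    # Expected ordering: [user, address], mirroring MetaData.sorted_tables.
    return sort_tables([address, user])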
def sort_tables_and_constraints(
tables, filter_fn=None, extra_dependencies=None):
"""sort a collection of :class:`.Table` / :class:`.ForeignKeyConstraint`
objects.
This is a dependency-ordered sort which will emit tuples of
``(Table, [ForeignKeyConstraint, ...])`` such that each
:class:`.Table` follows its dependent :class:`.Table` objects.
Remaining :class:`.ForeignKeyConstraint` objects that are separate due to
    dependency rules not satisfied by the sort are emitted afterwards
as ``(None, [ForeignKeyConstraint ...])``.
Tables are dependent on another based on the presence of
:class:`.ForeignKeyConstraint` objects, explicit dependencies
added by :meth:`.Table.add_is_dependent_on`, as well as dependencies
    stated here using the :paramref:`~.sort_tables_and_constraints.filter_fn`
and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies`
parameters.
:param tables: a sequence of :class:`.Table` objects.
:param filter_fn: optional callable which will be passed a
:class:`.ForeignKeyConstraint` object, and returns a value based on
whether this constraint should definitely be included or excluded as
an inline constraint, or neither. If it returns False, the constraint
will definitely be included as a dependency that cannot be subject
to ALTER; if True, it will **only** be included as an ALTER result at
the end. Returning None means the constraint is included in the
table-based result unless it is detected as part of a dependency cycle.
:param extra_dependencies: a sequence of 2-tuples of tables which will
also be considered as dependent on each other.
.. versionadded:: 1.0.0
.. seealso::
:func:`.sort_tables`
"""
fixed_dependencies = set()
mutable_dependencies = set()
if extra_dependencies is not None:
fixed_dependencies.update(extra_dependencies)
remaining_fkcs = set()
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
remaining_fkcs.add(fkc)
continue
if filter_fn:
filtered = filter_fn(fkc)
if filtered is True:
remaining_fkcs.add(fkc)
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.add((dependent_on, table))
fixed_dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
try:
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies), tables,
deterministic_order=True
)
)
except exc.CircularDependencyError as err:
for edge in err.edges:
if edge in mutable_dependencies:
table = edge[1]
can_remove = [
fkc for fkc in table.foreign_key_constraints
if filter_fn is None or filter_fn(fkc) is not False]
remaining_fkcs.update(can_remove)
for fkc in can_remove:
dependent_on = fkc.referred_table
if dependent_on is not table:
mutable_dependencies.discard((dependent_on, table))
candidate_sort = list(
topological.sort(
fixed_dependencies.union(mutable_dependencies), tables,
deterministic_order=True
)
)
return [
(table, table.foreign_key_constraints.difference(remaining_fkcs))
for table in candidate_sort
] + [(None, list(remaining_fkcs))]
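# --- Hedged usage sketch (editorial addition, not original library code) ---
# With a mutual foreign key cycle and no use_alter flags or constraint names,
# the CircularDependencyError branch above breaks the cycle by pulling the
# involved constraints out; they are returned at the end under a None table.
def _example_sort_cycle():  # pragma: no cover - illustration only
    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    meta = MetaData()
    parent = Table(
        "parent", meta,
        Column("id", Integer, primary_key=True),
        Column("favorite_child_id", Integer, ForeignKey("child.id")),
    )
    child = Table(
        "child", meta,
        Column("id", Integer, primary_key=True),
        Column("parent_id", Integer, ForeignKey("parent.id")),
    )
    # Yields (Table, inline constraints) tuples followed by
    # (None, [constraints broken out of the cycle]).
    return sort_tables_and_constraints([parent, child])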
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/py2exe-0.6.8/test/setup_win32com_shell.py
|
3
|
import sys
# ModuleFinder can't handle runtime changes to __path__, but win32com uses them
try:
import modulefinder
import win32com
for p in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com", p)
for extra in ["win32com.shell"]: #,"win32com.mapi"
__import__(extra)
m = sys.modules[extra]
for p in m.__path__[1:]:
modulefinder.AddPackagePath(extra, p)
except ImportError:
# no build path setup, no worries.
pass
from distutils.core import setup
import py2exe
setup(
console=['%s'],
)
|
darolt/ndnSIMQoS
|
refs/heads/master
|
src/tap-bridge/bindings/modulegen_customizations.py
|
203
|
import os
def post_register_types(root_module):
enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',')
if 'TapBridge' not in enabled_features:
for clsname in ['TapBridge', 'TapBridgeHelper', 'TapBridgeFdReader']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
root_module.enums.remove(root_module['ns3::TapBridge::Mode'])
|
xindus40223115/w17_test
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/time.py
|
601
|
class _clock():
def __init__(self):
pass
def tick(self, framerate):
pass
def Clock():
return _clock()
def get_ticks(t):
pass
|
vmax-feihu/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/createsuperuser/models.py
|
12133432
| |
cloudbase/nova
|
refs/heads/master
|
nova/tests/unit/objects/test_instance_device_metadata.py
|
4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import objects
from nova.tests.unit.objects import test_objects
fake_net_interface_meta = objects.NetworkInterfaceMetadata(
mac='52:54:00:f6:35:8f',
tags=['mytag1'],
bus=objects.PCIDeviceBus(address='0000:00:03.0'))
fake_pci_disk_meta = objects.DiskMetadata(
bus=objects.PCIDeviceBus(address='0000:00:09.0'),
tags=['nfvfunc3'])
fake_obj_devices_metadata = objects.InstanceDeviceMetadata(
devices=[fake_net_interface_meta, fake_pci_disk_meta])
fake_devices_metadata = fake_obj_devices_metadata._to_json()
fake_db_metadata = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 1,
'device_metadata': fake_obj_devices_metadata._to_json()
}
fake_old_db_metadata = dict(fake_db_metadata) # copy
fake_old_db_metadata['device_metadata'] = jsonutils.dumps(
fake_devices_metadata)
def get_fake_obj_device_metadata(context):
fake_obj_devices_metadata_cpy = fake_obj_devices_metadata.obj_clone()
fake_obj_devices_metadata_cpy._context = context
return fake_obj_devices_metadata_cpy
class _TestInstanceDeviceMetadata(object):
def _check_object(self, obj_meta):
self.assertTrue(isinstance(obj_meta,
objects.NetworkInterfaceMetadata) or
isinstance(obj_meta, objects.DiskMetadata))
if isinstance(obj_meta, objects.NetworkInterfaceMetadata):
self.assertEqual(obj_meta.mac, '52:54:00:f6:35:8f')
self.assertEqual(obj_meta.tags, ['mytag1'])
            self.assertIsInstance(obj_meta.bus, objects.PCIDeviceBus)
self.assertEqual(obj_meta.bus.address, '0000:00:03.0')
elif isinstance(obj_meta, objects.DiskMetadata):
            self.assertIsInstance(obj_meta.bus, objects.PCIDeviceBus)
self.assertEqual(obj_meta.bus.address, '0000:00:09.0')
self.assertEqual(obj_meta.tags, ['nfvfunc3'])
@mock.patch('nova.db.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid(self, mock_get):
mock_get.return_value = fake_db_metadata
inst_meta = objects.InstanceDeviceMetadata
dev_meta = inst_meta.get_by_instance_uuid(
self.context, 'fake_uuid')
for obj_meta, fake_meta in zip(
dev_meta.devices,
fake_obj_devices_metadata.devices):
self._check_object(obj_meta)
def test_obj_from_db(self):
db_meta = fake_db_metadata['device_metadata']
metadata = objects.InstanceDeviceMetadata.obj_from_db(None, db_meta)
for obj_meta in metadata.devices:
self._check_object(obj_meta)
class TestInstanceDeviceMetadata(test_objects._LocalTest,
_TestInstanceDeviceMetadata):
pass
class TestInstanceDeviceMetadataRemote(test_objects._RemoteTest,
_TestInstanceDeviceMetadata):
pass
|
dhuang/incubator-airflow
|
refs/heads/master
|
airflow/api/common/experimental/__init__.py
|
1049
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jordanpina/Hax
|
refs/heads/master
|
node_modules/nodemon/travis_after_all.py
|
229
|
import os
import sys
import json
import time
import logging
try:
from functools import reduce
except ImportError:
pass
try:
import urllib.request as urllib2
except ImportError:
import urllib2
log = logging.getLogger("travis.leader")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
TRAVIS_JOB_NUMBER = 'TRAVIS_JOB_NUMBER'
TRAVIS_BUILD_ID = 'TRAVIS_BUILD_ID'
POLLING_INTERVAL = 'LEADER_POLLING_INTERVAL'
GITHUB_TOKEN = 'GITHUB_TOKEN'
# Travis API entry point, there are at least https://api.travis-ci.com and https://api.travis-ci.org
travis_entry = sys.argv[1] if len(sys.argv) > 1 else 'https://api.travis-ci.org'
build_id = os.getenv(TRAVIS_BUILD_ID)
polling_interval = int(os.getenv(POLLING_INTERVAL, '5'))
gh_token = os.getenv(GITHUB_TOKEN)
# assume, first job is the leader
def is_leader(job_number):
return job_number.endswith('.1')
job_number = os.getenv(TRAVIS_JOB_NUMBER)
if not job_number:
    # it seems that even builds with only one job never reach this branch
    log.fatal("Don't use leader detection for builds without a matrix")
exit(1)
elif is_leader(job_number):
log.info("This is a leader")
else:
# since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_MINION=YES")
log.info("This is a minion")
exit(0)
class MatrixElement(object):
def __init__(self, json_raw):
self.is_finished = json_raw['finished_at'] is not None
self.is_succeeded = json_raw['result'] == 0
self.number = json_raw['number']
self.is_leader = is_leader(self.number)
def matrix_snapshot(travis_token):
"""
:return: Matrix List
"""
headers = {'content-type': 'application/json', 'Authorization': 'token {}'.format(travis_token)}
req = urllib2.Request("{0}/builds/{1}".format(travis_entry, build_id), headers=headers)
response = urllib2.urlopen(req).read()
raw_json = json.loads(response.decode('utf-8'))
matrix_without_leader = [MatrixElement(job) for job in raw_json["matrix"] if not is_leader(job['number'])]
return matrix_without_leader
def wait_others_to_finish(travis_token):
def others_finished():
"""
        Checks whether all the other jobs have finished.
        The leader itself is never counted as finished, since it is the job running now.
        :return: tuple(all_finished: bool, list of job numbers still running)
"""
snapshot = matrix_snapshot(travis_token)
finished = [job.is_finished for job in snapshot if not job.is_leader]
return reduce(lambda a, b: a and b, finished), [job.number for job in snapshot if
not job.is_leader and not job.is_finished]
while True:
finished, waiting_list = others_finished()
if finished:
break
log.info("Leader waits for minions {0}...".format(waiting_list)) # just in case do not get "silence timeout"
time.sleep(polling_interval)
def get_token():
assert gh_token, 'GITHUB_TOKEN is not set'
data = {"github_token": gh_token}
headers = {'content-type': 'application/json', 'User-Agent': 'Travis/1.0'}
req = urllib2.Request("{0}/auth/github".format(travis_entry), json.dumps(data).encode('utf-8'), headers)
response = urllib2.urlopen(req).read()
travis_token = json.loads(response.decode('utf-8')).get('access_token')
return travis_token
try:
token = get_token()
wait_others_to_finish(token)
final_snapshot = matrix_snapshot(token)
log.info("Final Results: {0}".format([(e.number, e.is_succeeded) for e in final_snapshot]))
BUILD_AGGREGATE_STATUS = 'BUILD_AGGREGATE_STATUS'
others_snapshot = [el for el in final_snapshot if not el.is_leader]
if reduce(lambda a, b: a and b, [e.is_succeeded for e in others_snapshot]):
os.environ[BUILD_AGGREGATE_STATUS] = "others_succeeded"
elif reduce(lambda a, b: a and b, [not e.is_succeeded for e in others_snapshot]):
log.error("Others Failed")
os.environ[BUILD_AGGREGATE_STATUS] = "others_failed"
else:
log.warn("Others Unknown")
os.environ[BUILD_AGGREGATE_STATUS] = "unknown"
# since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_LEADER=YES {0}={1}".format(BUILD_AGGREGATE_STATUS, os.environ[BUILD_AGGREGATE_STATUS]))
except Exception as e:
log.fatal(e)
|
memtoko/django
|
refs/heads/master
|
django/contrib/auth/__init__.py
|
387
|
import inspect
import re
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.middleware.csrf import rotate_token
from django.utils.crypto import constant_time_compare
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
HASH_SESSION_KEY = '_auth_user_hash'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
return import_string(path)()
def _get_backends(return_tuples=False):
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
backends.append((backend, backend_path) if return_tuples else backend)
if not backends:
raise ImproperlyConfigured(
'No authentication backends have been defined. Does '
'AUTHENTICATION_BACKENDS contain anything?'
)
return backends
def get_backends():
return _get_backends(return_tuples=False)
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def _get_user_session_key(request):
# This value in the session is always serialized to a string, so we need
# to convert it back to Python whenever we access it.
return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend, backend_path in _get_backends(return_tuples=True):
try:
inspect.getcallargs(backend.authenticate, **credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
try:
user = backend.authenticate(**credentials)
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
return None
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = backend_path
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__,
credentials=_clean_credentials(credentials))
def login(request, user):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request. Note that data set during
the anonymous session is retained when the user logs in.
"""
session_auth_hash = ''
if user is None:
user = request.user
if hasattr(user, 'get_session_auth_hash'):
session_auth_hash = user.get_session_auth_hash()
if SESSION_KEY in request.session:
if _get_user_session_key(request) != user.pk or (
session_auth_hash and
request.session.get(HASH_SESSION_KEY) != session_auth_hash):
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
request.session[BACKEND_SESSION_KEY] = user.backend
request.session[HASH_SESSION_KEY] = session_auth_hash
if hasattr(request, 'user'):
request.user = user
rotate_token(request)
user_logged_in.send(sender=user.__class__, request=request, user=user)
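# --- Hedged usage sketch (editorial addition, not original framework code) -
# The usual pairing of authenticate() and login() inside a view, assuming a
# normal Django project; the POST field names are illustrative only.
def _example_login_view(request):  # pragma: no cover - illustration only
    user = authenticate(username=request.POST.get('username'),
                        password=request.POST.get('password'))
    if user is None:
        return False
    login(request, user)
    return True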
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if hasattr(user, 'is_authenticated') and not user.is_authenticated():
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
# remember language choice saved to session
language = request.session.get(LANGUAGE_SESSION_KEY)
request.session.flush()
if language is not None:
request.session[LANGUAGE_SESSION_KEY] = language
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user_model():
"""
Returns the User model that is active in this project.
"""
try:
return django_apps.get_model(settings.AUTH_USER_MODEL)
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL
)
def get_user(request):
"""
Returns the user model instance associated with the given request session.
If no user is retrieved an instance of `AnonymousUser` is returned.
"""
from .models import AnonymousUser
user = None
try:
user_id = _get_user_session_key(request)
backend_path = request.session[BACKEND_SESSION_KEY]
except KeyError:
pass
else:
if backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
user = backend.get_user(user_id)
# Verify the session
if ('django.contrib.auth.middleware.SessionAuthenticationMiddleware'
in settings.MIDDLEWARE_CLASSES and hasattr(user, 'get_session_auth_hash')):
session_hash = request.session.get(HASH_SESSION_KEY)
session_hash_verified = session_hash and constant_time_compare(
session_hash,
user.get_session_auth_hash()
)
if not session_hash_verified:
request.session.flush()
user = None
return user or AnonymousUser()
def get_permission_codename(action, opts):
"""
Returns the codename of the permission for the specified action.
"""
return '%s_%s' % (action, opts.model_name)
def update_session_auth_hash(request, user):
"""
Updating a user's password logs out all sessions for the user if
django.contrib.auth.middleware.SessionAuthenticationMiddleware is enabled.
This function takes the current request and the updated user object from
which the new session hash will be derived and updates the session hash
appropriately to prevent a password change from logging out the session
from which the password was changed.
"""
if hasattr(user, 'get_session_auth_hash') and request.user == user:
request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
default_app_config = 'django.contrib.auth.apps.AuthConfig'
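# --- Hedged usage sketch (editorial addition, not original framework code) -
# A password-change view keeps the current session valid by calling
# update_session_auth_hash() after saving the new password; the form field
# name below is illustrative only.
def _example_password_change(request):  # pragma: no cover - illustration only
    request.user.set_password(request.POST['new_password'])
    request.user.save()
    update_session_auth_hash(request, request.user)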
|
sgaist/django-haystack
|
refs/heads/master
|
haystack/backends/__init__.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
from copy import deepcopy
from time import time
from django.conf import settings
from django.db.models import Q
from django.db.models.base import ModelBase
from django.utils import six
from django.utils import tree
from django.utils.encoding import force_text
from haystack.constants import VALID_FILTERS, FILTER_SEPARATOR, DEFAULT_ALIAS
from haystack.exceptions import MoreLikeThisError, FacetingError
from haystack.models import SearchResult
from haystack.utils.loading import UnifiedIndex
from haystack.utils import get_model_ct
VALID_GAPS = ['year', 'month', 'day', 'hour', 'minute', 'second']
SPELLING_SUGGESTION_HAS_NOT_RUN = object()
def log_query(func):
"""
A decorator for pseudo-logging search queries. Used in the ``SearchBackend``
to wrap the ``search`` method.
"""
def wrapper(obj, query_string, *args, **kwargs):
start = time()
try:
return func(obj, query_string, *args, **kwargs)
finally:
stop = time()
if settings.DEBUG:
from haystack import connections
connections[obj.connection_alias].queries.append({
'query_string': query_string,
'additional_args': args,
'additional_kwargs': kwargs,
'time': "%.3f" % (stop - start),
'start': start,
'stop': stop,
})
return wrapper
class EmptyResults(object):
hits = 0
docs = []
def __len__(self):
return 0
def __getitem__(self, k):
if isinstance(k, slice):
return []
else:
raise IndexError("It's not here.")
class BaseSearchBackend(object):
"""
Abstract search engine base class.
"""
# Backends should include their own reserved words/characters.
RESERVED_WORDS = []
RESERVED_CHARACTERS = []
def __init__(self, connection_alias, **connection_options):
self.connection_alias = connection_alias
self.timeout = connection_options.get('TIMEOUT', 10)
self.include_spelling = connection_options.get('INCLUDE_SPELLING', False)
self.batch_size = connection_options.get('BATCH_SIZE', 1000)
self.silently_fail = connection_options.get('SILENTLY_FAIL', True)
self.distance_available = connection_options.get('DISTANCE_AVAILABLE', False)
def update(self, index, iterable, commit=True):
"""
Updates the backend when given a SearchIndex and a collection of
documents.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def remove(self, obj_or_string):
"""
Removes a document/object from the backend. Can be either a model
instance or the identifier (i.e. ``app_name.model_name.id``) in the
event the object no longer exists.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def clear(self, models=None, commit=True):
"""
Clears the backend of all documents/objects for a collection of models.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
@log_query
def search(self, query_string, **kwargs):
"""
Takes a query to search on and returns dictionary.
The query should be a string that is appropriate syntax for the backend.
The returned dictionary should contain the keys 'results' and 'hits'.
The 'results' value should be an iterable of populated SearchResult
objects. The 'hits' should be an integer count of the number of matched
results the search backend found.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None,
date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
within=None, dwithin=None, distance_point=None,
models=None, limit_to_registered_models=None,
result_class=None, **extra_kwargs):
# A convenience method most backends should include in order to make
# extension easier.
raise NotImplementedError
def prep_value(self, value):
"""
Hook to give the backend a chance to prep an attribute value before
sending it to the search engine. By default, just force it to unicode.
"""
return force_text(value)
def more_like_this(self, model_instance, additional_query_string=None, result_class=None):
"""
Takes a model object and returns results the backend thinks are similar.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError("Subclasses must provide a way to fetch similar record via the 'more_like_this' method if supported by the backend.")
def extract_file_contents(self, file_obj):
"""
Hook to allow backends which support rich-content types such as PDF,
Word, etc. extraction to process the provided file object and return
        the contents for indexing.
Returns None if metadata cannot be extracted; otherwise returns a
dictionary containing at least two keys:
:contents:
Extracted full-text content, if applicable
:metadata:
key:value pairs of text strings
"""
raise NotImplementedError("Subclasses must provide a way to extract metadata via the 'extract' method if supported by the backend.")
def build_schema(self, fields):
"""
Takes a dictionary of fields and returns schema information.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError("Subclasses must provide a way to build their schema.")
def build_models_list(self):
"""
Builds a list of models for searching.
The ``search`` method should use this and the ``django_ct`` field to
narrow the results (unless the user indicates not to). This helps ignore
        any results for models that are not currently handled and ensures
consistent caching.
"""
from haystack import connections
models = []
for model in connections[self.connection_alias].get_unified_index().get_indexed_models():
models.append(get_model_ct(model))
return models
# Alias for easy loading within SearchQuery objects.
SearchBackend = BaseSearchBackend
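# --- Hedged sketch (editorial addition, not original library code) ---------
# The minimal shape of a concrete backend: subclass BaseSearchBackend and
# fill in the methods documented above as "MUST be implemented". The bodies
# here are placeholders, not a working engine.
class _ExampleNullBackend(BaseSearchBackend):  # pragma: no cover
    def update(self, index, iterable, commit=True):
        pass  # push documents from ``iterable`` into the engine
    def remove(self, obj_or_string):
        pass  # delete a single document or identifier
    def clear(self, models=None, commit=True):
        pass  # wipe the index, optionally per model
    @log_query
    def search(self, query_string, **kwargs):
        return {'results': [], 'hits': 0}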
class SearchNode(tree.Node):
"""
Manages an individual condition within a query.
Most often, this will be a lookup to ensure that a certain word or phrase
appears in the documents being indexed. However, it also supports filtering
types (such as 'lt', 'gt', 'in' and others) for more complex lookups.
This object creates a tree, with children being a list of either more
``SQ`` objects or the expressions/values themselves.
"""
AND = 'AND'
OR = 'OR'
default = AND
# Start compat. Django 1.6 changed how ``tree.Node`` works, so we're going
# to patch back in the original implementation until time to rewrite this
# presents itself.
# See https://github.com/django/django/commit/d3f00bd.
def __init__(self, children=None, connector=None, negated=False):
"""
Constructs a new Node. If no connector is given, the default will be
used.
Warning: You probably don't want to pass in the 'negated' parameter. It
is NOT the same as constructing a node and calling negate() on the
result.
"""
self.children = children and children[:] or []
self.connector = connector or self.default
self.subtree_parents = []
self.negated = negated
# We need this because of django.db.models.query_utils.Q. Q. __init__() is
# problematic, but it is a natural Node subclass in all other respects.
def _new_instance(cls, children=None, connector=None, negated=False):
"""
This is called to create a new instance of this class when we need new
Nodes (or subclasses) in the internal code in this class. Normally, it
just shadows __init__(). However, subclasses with an __init__ signature
that is not an extension of Node.__init__ might need to implement this
method to allow a Node to create a new instance of them (if they have
any extra setting up to do).
"""
obj = SearchNode(children, connector, negated)
obj.__class__ = cls
return obj
_new_instance = classmethod(_new_instance)
def __str__(self):
if self.negated:
return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c in self.children]))
return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in self.children]))
def __deepcopy__(self, memodict):
"""
Utility method used by copy.deepcopy().
"""
obj = SearchNode(connector=self.connector, negated=self.negated)
obj.__class__ = self.__class__
obj.children = copy.deepcopy(self.children, memodict)
obj.subtree_parents = copy.deepcopy(self.subtree_parents, memodict)
return obj
def __len__(self):
"""
        The size of a node is the number of children it has.
"""
return len(self.children)
def __bool__(self):
"""
For truth value testing.
"""
return bool(self.children)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __contains__(self, other):
"""
        Returns True if 'other' is a direct child of this instance.
"""
return other in self.children
def add(self, node, conn_type):
"""
Adds a new node to the tree. If the conn_type is the same as the root's
current connector type, the node is added to the first level.
Otherwise, the whole tree is pushed down one level and a new root
connector is created, connecting the existing tree and the new node.
"""
if node in self.children and conn_type == self.connector:
return
if len(self.children) < 2:
self.connector = conn_type
if self.connector == conn_type:
if isinstance(node, SearchNode) and (node.connector == conn_type or len(node) == 1):
self.children.extend(node.children)
else:
self.children.append(node)
else:
obj = self._new_instance(self.children, self.connector, self.negated)
self.connector = conn_type
self.children = [obj, node]
def negate(self):
"""
Negate the sense of the root connector. This reorganises the children
so that the current node has a single child: a negated node containing
all the previous children. This slightly odd construction makes adding
new children behave more intuitively.
Interpreting the meaning of this negate is up to client code. This
method is useful for implementing "not" arrangements.
"""
self.children = [self._new_instance(self.children, self.connector, not self.negated)]
self.connector = self.default
def start_subtree(self, conn_type):
"""
Sets up internal state so that new nodes are added to a subtree of the
current node. The conn_type specifies how the sub-tree is joined to the
existing children.
"""
if len(self.children) == 1:
self.connector = conn_type
elif self.connector != conn_type:
self.children = [self._new_instance(self.children, self.connector, self.negated)]
self.connector = conn_type
self.negated = False
self.subtree_parents.append(self.__class__(self.children, self.connector, self.negated))
self.connector = self.default
self.negated = False
self.children = []
def end_subtree(self):
"""
Closes off the most recently unmatched start_subtree() call.
        This puts the current state into a node of the parent tree and restores
        the parent's state as the current instance's state.
"""
obj = self.subtree_parents.pop()
node = self.__class__(self.children, self.connector)
self.connector = obj.connector
self.negated = obj.negated
self.children = obj.children
self.children.append(node)
# End compat.
def __repr__(self):
return '<SQ: %s %s>' % (self.connector, self.as_query_string(self._repr_query_fragment_callback))
def _repr_query_fragment_callback(self, field, filter_type, value):
if six.PY3:
value = force_text(value)
else:
value = force_text(value).encode('utf8')
return "%s%s%s=%s" % (field, FILTER_SEPARATOR, filter_type, value)
def as_query_string(self, query_fragment_callback):
"""
Produces a portion of the search query from the current SQ and its
children.
"""
result = []
for child in self.children:
if hasattr(child, 'as_query_string'):
result.append(child.as_query_string(query_fragment_callback))
else:
expression, value = child
field, filter_type = self.split_expression(expression)
result.append(query_fragment_callback(field, filter_type, value))
conn = ' %s ' % self.connector
query_string = conn.join(result)
if query_string:
if self.negated:
query_string = 'NOT (%s)' % query_string
elif len(self.children) != 1:
query_string = '(%s)' % query_string
return query_string
def split_expression(self, expression):
"""Parses an expression and determines the field and filter type."""
parts = expression.split(FILTER_SEPARATOR)
field = parts[0]
if len(parts) == 1 or parts[-1] not in VALID_FILTERS:
filter_type = 'content'
else:
filter_type = parts.pop()
return (field, filter_type)
class SQ(Q, SearchNode):
"""
Manages an individual condition within a query.
Most often, this will be a lookup to ensure that a certain word or phrase
appears in the documents being indexed. However, it also supports filtering
types (such as 'lt', 'gt', 'in' and others) for more complex lookups.
"""
pass
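# --- Hedged usage sketch (editorial addition, not original library code) ---
# SQ objects compose with the usual Q operators (&, |, ~); rendering only
# needs a fragment callback such as a backend's build_query_fragment. The
# lambda below is a stand-in for illustration, assuming a Django version
# contemporary with this module.
def _example_sq_tree():  # pragma: no cover - illustration only
    sq = SQ(content='hello') & ~SQ(author='bob')
    # Roughly: '(content_content:hello AND NOT (author_content:bob))'
    return sq.as_query_string(
        lambda field, filter_type, value: '%s_%s:%s' % (field, filter_type, value))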
class BaseSearchQuery(object):
"""
A base class for handling the query itself.
This class acts as an intermediary between the ``SearchQuerySet`` and the
``SearchBackend`` itself.
The ``SearchQuery`` object maintains a tree of ``SQ`` objects. Each ``SQ``
object supports what field it looks up against, what kind of lookup (i.e.
the __'s), what value it's looking for, if it's a AND/OR/NOT and tracks
any children it may have. The ``SearchQuery.build_query`` method starts with
the root of the tree, building part of the final query at each node until
the full final query is ready for the ``SearchBackend``.
Backends should extend this class and provide implementations for
``build_query_fragment``, ``clean`` and ``run``. See the ``solr`` backend for an example
implementation.
"""
def __init__(self, using=DEFAULT_ALIAS):
self.query_filter = SearchNode()
self.order_by = []
self.models = set()
self.boost = {}
self.start_offset = 0
self.end_offset = None
self.highlight = False
self.facets = {}
self.date_facets = {}
self.query_facets = []
self.narrow_queries = set()
#: If defined, fields should be a list of field names - no other values
#: will be retrieved so the caller must be careful to include django_ct
#: and django_id when using code which expects those to be included in
#: the results
self.fields = []
# Geospatial-related information
self.within = {}
self.dwithin = {}
self.distance_point = {}
# Internal.
self._raw_query = None
self._raw_query_params = {}
self._more_like_this = False
self._mlt_instance = None
self._results = None
self._hit_count = None
self._facet_counts = None
self._stats = None
self._spelling_suggestion = SPELLING_SUGGESTION_HAS_NOT_RUN
self.spelling_query = None
self.result_class = SearchResult
self.stats = {}
from haystack import connections
self._using = using
self.backend = connections[self._using].get_backend()
def __str__(self):
return self.build_query()
def __getstate__(self):
"""For pickling."""
obj_dict = self.__dict__.copy()
del(obj_dict['backend'])
return obj_dict
def __setstate__(self, obj_dict):
"""For unpickling."""
from haystack import connections
self.__dict__.update(obj_dict)
self.backend = connections[self._using].get_backend()
def has_run(self):
"""Indicates if any query has been been run."""
return None not in (self._results, self._hit_count)
def build_params(self, spelling_query=None):
"""Generates a list of params to use when searching."""
kwargs = {
'start_offset': self.start_offset,
}
if self.order_by:
kwargs['sort_by'] = self.order_by
if self.end_offset is not None:
kwargs['end_offset'] = self.end_offset
if self.highlight:
kwargs['highlight'] = self.highlight
if self.facets:
kwargs['facets'] = self.facets
if self.date_facets:
kwargs['date_facets'] = self.date_facets
if self.query_facets:
kwargs['query_facets'] = self.query_facets
if self.narrow_queries:
kwargs['narrow_queries'] = self.narrow_queries
if spelling_query:
kwargs['spelling_query'] = spelling_query
elif self.spelling_query:
kwargs['spelling_query'] = self.spelling_query
if self.boost:
kwargs['boost'] = self.boost
if self.within:
kwargs['within'] = self.within
if self.dwithin:
kwargs['dwithin'] = self.dwithin
if self.distance_point:
kwargs['distance_point'] = self.distance_point
if self.result_class:
kwargs['result_class'] = self.result_class
if self.fields:
kwargs['fields'] = self.fields
if self.models:
kwargs['models'] = self.models
return kwargs
def run(self, spelling_query=None, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
final_query = self.build_query()
search_kwargs = self.build_params(spelling_query=spelling_query)
if kwargs:
search_kwargs.update(kwargs)
results = self.backend.search(final_query, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = self.post_process_facets(results)
self._spelling_suggestion = results.get('spelling_suggestion', None)
def run_mlt(self, **kwargs):
"""
Executes the More Like This. Returns a list of search results similar
to the provided document (and optionally query).
"""
if self._more_like_this is False or self._mlt_instance is None:
raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")
search_kwargs = {
'result_class': self.result_class,
}
if self.models:
search_kwargs['models'] = self.models
if kwargs:
search_kwargs.update(kwargs)
additional_query_string = self.build_query()
results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
def run_raw(self, **kwargs):
"""Executes a raw query. Returns a list of search results."""
search_kwargs = self.build_params()
search_kwargs.update(self._raw_query_params)
if kwargs:
search_kwargs.update(kwargs)
results = self.backend.search(self._raw_query, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = results.get('facets', {})
self._spelling_suggestion = results.get('spelling_suggestion', None)
def get_count(self):
"""
Returns the number of results the backend found for the query.
If the query has not been run, this will execute the query and store
the results.
"""
if self._hit_count is None:
# Limit the slice to 1 so we get a count without consuming
# everything.
if not self.end_offset:
self.end_offset = 1
if self._more_like_this:
# Special case for MLT.
self.run_mlt()
elif self._raw_query:
# Special case for raw queries.
self.run_raw()
else:
self.run()
return self._hit_count
def get_results(self, **kwargs):
"""
Returns the results received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._results is None:
if self._more_like_this:
# Special case for MLT.
self.run_mlt(**kwargs)
elif self._raw_query:
# Special case for raw queries.
self.run_raw(**kwargs)
else:
self.run(**kwargs)
return self._results
def get_facet_counts(self):
"""
Returns the facet counts received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._facet_counts is None:
self.run()
return self._facet_counts
def get_stats(self):
"""
Returns the stats received from the backend.
If the query has not been run, this will execute the query and store
        the results.
"""
if self._stats is None:
self.run()
return self._stats
def set_spelling_query(self, spelling_query):
self.spelling_query = spelling_query
def get_spelling_suggestion(self, preferred_query=None):
"""
Returns the spelling suggestion received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._spelling_suggestion is SPELLING_SUGGESTION_HAS_NOT_RUN:
self.run(spelling_query=preferred_query)
return self._spelling_suggestion
def boost_fragment(self, boost_word, boost_value):
"""Generates query fragment for boosting a single word/value pair."""
return "%s^%s" % (boost_word, boost_value)
def matching_all_fragment(self):
"""Generates the query that matches all documents."""
return '*'
def build_query(self):
"""
Interprets the collected query metadata and builds the final query to
be sent to the backend.
"""
final_query = self.query_filter.as_query_string(self.build_query_fragment)
if not final_query:
# Match all.
final_query = self.matching_all_fragment()
if self.boost:
boost_list = []
for boost_word, boost_value in self.boost.items():
boost_list.append(self.boost_fragment(boost_word, boost_value))
final_query = "%s %s" % (final_query, " ".join(boost_list))
return final_query
def combine(self, rhs, connector=SQ.AND):
if connector == SQ.AND:
self.add_filter(rhs.query_filter)
elif connector == SQ.OR:
self.add_filter(rhs.query_filter, use_or=True)
# Methods for backends to implement.
def build_query_fragment(self, field, filter_type, value):
"""
Generates a query fragment from a field, filter type and a value.
Must be implemented in backends as this will be highly backend specific.
"""
raise NotImplementedError("Subclasses must provide a way to generate query fragments via the 'build_query_fragment' method.")
# Standard methods to alter the query.
def clean(self, query_fragment):
"""
Provides a mechanism for sanitizing user input before presenting the
value to the backend.
A basic (override-able) implementation is provided.
"""
if not isinstance(query_fragment, six.string_types):
return query_fragment
words = query_fragment.split()
cleaned_words = []
for word in words:
if word in self.backend.RESERVED_WORDS:
                word = word.lower()
for char in self.backend.RESERVED_CHARACTERS:
word = word.replace(char, '\\%s' % char)
cleaned_words.append(word)
return ' '.join(cleaned_words)
def build_not_query(self, query_string):
if ' ' in query_string:
query_string = "(%s)" % query_string
return u"NOT %s" % query_string
def build_exact_query(self, query_string):
return u'"%s"' % query_string
def add_filter(self, query_filter, use_or=False):
"""
Adds a SQ to the current query.
"""
if use_or:
connector = SQ.OR
else:
connector = SQ.AND
if self.query_filter and query_filter.connector != connector and len(query_filter) > 1:
self.query_filter.start_subtree(connector)
subtree = True
else:
subtree = False
for child in query_filter.children:
if isinstance(child, tree.Node):
self.query_filter.start_subtree(connector)
self.add_filter(child)
self.query_filter.end_subtree()
else:
expression, value = child
self.query_filter.add((expression, value), connector)
connector = query_filter.connector
if query_filter.negated:
self.query_filter.negate()
if subtree:
self.query_filter.end_subtree()
def add_order_by(self, field):
"""Orders the search result by a field."""
self.order_by.append(field)
def clear_order_by(self):
"""
        Clears out all ordering that has already been added, reverting the
query to relevancy.
"""
self.order_by = []
def add_model(self, model):
"""
Restricts the query requiring matches in the given model.
This builds upon previous additions, so you can limit to multiple models
by chaining this method several times.
"""
if not isinstance(model, ModelBase):
raise AttributeError('The model being added to the query must derive from Model.')
self.models.add(model)
def set_limits(self, low=None, high=None):
"""Restricts the query by altering either the start, end or both offsets."""
if low is not None:
self.start_offset = int(low)
if high is not None:
self.end_offset = int(high)
def clear_limits(self):
"""Clears any existing limits."""
self.start_offset, self.end_offset = 0, None
def add_boost(self, term, boost_value):
"""Adds a boosted term and the amount to boost it to the query."""
self.boost[term] = boost_value
def raw_search(self, query_string, **kwargs):
"""
Runs a raw query (no parsing) against the backend.
This method causes the SearchQuery to ignore the standard query
generating facilities, running only what was provided instead.
Note that any kwargs passed along will override anything provided
to the rest of the ``SearchQuerySet``.
"""
self._raw_query = query_string
self._raw_query_params = kwargs
def more_like_this(self, model_instance):
"""
Allows backends with support for "More Like This" to return results
similar to the provided instance.
"""
self._more_like_this = True
self._mlt_instance = model_instance
def add_stats_query(self, stats_field, stats_facets):
"""Adds stats and stats_facets queries for the Solr backend."""
self.stats[stats_field] = stats_facets
def add_highlight(self, **kwargs):
"""Adds highlighting to the search results."""
self.highlight = kwargs or True
def add_within(self, field, point_1, point_2):
"""Adds bounding box parameters to search query."""
from haystack.utils.geo import ensure_point
self.within = {
'field': field,
'point_1': ensure_point(point_1),
'point_2': ensure_point(point_2),
}
def add_dwithin(self, field, point, distance):
"""Adds radius-based parameters to search query."""
from haystack.utils.geo import ensure_point, ensure_distance
self.dwithin = {
'field': field,
'point': ensure_point(point),
'distance': ensure_distance(distance),
}
def add_distance(self, field, point):
"""
Denotes that results should include distance measurements from the
point passed in.
"""
from haystack.utils.geo import ensure_point
self.distance_point = {
'field': field,
'point': ensure_point(point),
}
def add_field_facet(self, field, **options):
"""Adds a regular facet on a field."""
from haystack import connections
field_name = connections[self._using].get_unified_index().get_facet_fieldname(field)
self.facets[field_name] = options.copy()
def add_date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
"""Adds a date-based facet on a field."""
from haystack import connections
if gap_by not in VALID_GAPS:
raise FacetingError("The gap_by ('%s') must be one of the following: %s." % (gap_by, ', '.join(VALID_GAPS)))
details = {
'start_date': start_date,
'end_date': end_date,
'gap_by': gap_by,
'gap_amount': gap_amount,
}
self.date_facets[connections[self._using].get_unified_index().get_facet_fieldname(field)] = details
def add_query_facet(self, field, query):
"""Adds a query facet on a field."""
from haystack import connections
self.query_facets.append((connections[self._using].get_unified_index().get_facet_fieldname(field), query))
def add_narrow_query(self, query):
"""
Narrows a search to a subset of all documents per the query.
Generally used in conjunction with faceting.
"""
self.narrow_queries.add(query)
def set_result_class(self, klass):
"""
Sets the result class to use for results.
Overrides any previous usages. If ``None`` is provided, Haystack will
revert back to the default ``SearchResult`` object.
"""
if klass is None:
klass = SearchResult
self.result_class = klass
def post_process_facets(self, results):
# Handle renaming the facet fields. Undecorate and all that.
from haystack import connections
revised_facets = {}
field_data = connections[self._using].get_unified_index().all_searchfields()
for facet_type, field_details in results.get('facets', {}).items():
temp_facets = {}
for field, field_facets in field_details.items():
fieldname = field
if field in field_data and hasattr(field_data[field], 'get_facet_for_name'):
fieldname = field_data[field].get_facet_for_name()
temp_facets[fieldname] = field_facets
revised_facets[facet_type] = temp_facets
return revised_facets
def using(self, using=None):
"""
Allows for overriding which connection should be used. This
disables the use of routers when performing the query.
If ``None`` is provided, it has no effect on what backend is used.
"""
return self._clone(using=using)
def _reset(self):
"""
Resets the instance's internal state to appear as though no query has
been run before. Only need to tweak a few variables we check.
"""
self._results = None
self._hit_count = None
self._facet_counts = None
self._spelling_suggestion = SPELLING_SUGGESTION_HAS_NOT_RUN
def _clone(self, klass=None, using=None):
if using is None:
using = self._using
else:
from haystack import connections
klass = connections[using].query
if klass is None:
klass = self.__class__
clone = klass(using=using)
clone.query_filter = deepcopy(self.query_filter)
clone.order_by = self.order_by[:]
clone.models = self.models.copy()
clone.boost = self.boost.copy()
clone.highlight = self.highlight
clone.stats = self.stats.copy()
clone.facets = self.facets.copy()
clone.date_facets = self.date_facets.copy()
clone.query_facets = self.query_facets[:]
clone.narrow_queries = self.narrow_queries.copy()
clone.start_offset = self.start_offset
clone.end_offset = self.end_offset
clone.result_class = self.result_class
clone.within = self.within.copy()
clone.dwithin = self.dwithin.copy()
clone.distance_point = self.distance_point.copy()
clone._raw_query = self._raw_query
clone._raw_query_params = self._raw_query_params
clone.spelling_query = self.spelling_query
clone._more_like_this = self._more_like_this
clone._mlt_instance = self._mlt_instance
return clone
class BaseEngine(object):
backend = BaseSearchBackend
query = BaseSearchQuery
unified_index = UnifiedIndex
def __init__(self, using=None):
if using is None:
using = DEFAULT_ALIAS
self.using = using
self.options = settings.HAYSTACK_CONNECTIONS.get(self.using, {})
self.queries = []
self._index = None
self._backend = None
def get_backend(self):
if self._backend is None:
self._backend = self.backend(self.using, **self.options)
return self._backend
def reset_sessions(self):
"""Reset any transient connections, file handles, etc."""
self._backend = None
def get_query(self):
return self.query(using=self.using)
def reset_queries(self):
del self.queries[:]
def get_unified_index(self):
if self._index is None:
self._index = self.unified_index(self.options.get('EXCLUDED_INDEXES', []))
return self._index
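# --- Hedged configuration sketch (editorial addition, not library code) ----
# BaseEngine reads its options from settings.HAYSTACK_CONNECTIONS keyed by
# the connection alias. A minimal entry looks roughly like the following;
# the engine path is Haystack's bundled Solr backend and the URL is
# illustrative only.
_EXAMPLE_HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
        'URL': 'http://127.0.0.1:8983/solr/example',
    },
}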
|
sigmunau/nav
|
refs/heads/master
|
python/nav/smidumps/cisco_ietf_ip_mib.py
|
4
|
# python version 1.0 DO NOT EDIT
#
# Generated by smidump version 0.4.7:
#
# smidump -f python CISCO-IETF-IP-MIB
FILENAME = "/home/magnusme/mibs/v2/CISCO-IETF-IP-MIB.my"
MIB = {
"moduleName" : "CISCO-IETF-IP-MIB",
"CISCO-IETF-IP-MIB" : {
"nodetype" : "module",
"language" : "SMIv2",
"organization" :
"""Cisco Systems, Inc.""",
"contact" :
""" Cisco Systems
Customer Service
Postal: 170 West Tasman Drive
San Jose, CA 95134
USA
Phone: +1 800 553-NETS
Email: cs-ipv6@cisco.com""",
"description" :
"""The MIB module for managing IP and ICMP implementations,
but excluding the management of IP routes.""",
"revisions" : (
{
"date" : "2002-03-04 00:00",
"description" :
"""The initial version of this MIB module.""",
},
),
"identity node" : "ciscoIetfIpMIB",
},
"imports" : (
{"module" : "SNMPv2-SMI", "name" : "MODULE-IDENTITY"},
{"module" : "SNMPv2-SMI", "name" : "OBJECT-TYPE"},
{"module" : "SNMPv2-SMI", "name" : "Integer32"},
{"module" : "SNMPv2-SMI", "name" : "Unsigned32"},
{"module" : "SNMPv2-SMI", "name" : "Counter32"},
{"module" : "SNMPv2-TC", "name" : "PhysAddress"},
{"module" : "SNMPv2-TC", "name" : "TruthValue"},
{"module" : "SNMPv2-TC", "name" : "TimeStamp"},
{"module" : "SNMPv2-TC", "name" : "RowPointer"},
{"module" : "SNMPv2-TC", "name" : "TEXTUAL-CONVENTION"},
{"module" : "SNMPv2-CONF", "name" : "MODULE-COMPLIANCE"},
{"module" : "SNMPv2-CONF", "name" : "OBJECT-GROUP"},
{"module" : "INET-ADDRESS-MIB", "name" : "InetAddress"},
{"module" : "INET-ADDRESS-MIB", "name" : "InetAddressType"},
{"module" : "INET-ADDRESS-MIB", "name" : "InetAddressPrefixLength"},
{"module" : "IF-MIB", "name" : "InterfaceIndex"},
{"module" : "IF-MIB", "name" : "InterfaceIndexOrZero"},
{"module" : "IF-MIB", "name" : "ifIndex"},
{"module" : "CISCO-SMI", "name" : "ciscoExperiment"},
),
"typedefs" : {
"Ipv6AddrIfIdentifier" : {
"basetype" : "OctetString",
"status" : "current",
"ranges" : [
{
"min" : "0",
"max" : "8"
},
],
"range" : {
"min" : "0",
"max" : "8"
},
"format" : "2x:",
"description" :
"""This data type is used to model IPv6 address
interface identifiers. This is a binary string
of up to 8 octets in network byte-order.""",
},
"ScopeId" : {
"basetype" : "Unsigned32",
"status" : "current",
"description" :
"""A Scope Identifier identifies an instance of a specific
scope.
The scope identifier MUST disambiguate identical address
values. For link-local addresses, the scope identifier
will typically be the interface index (ifIndex as
defined in the IF-MIB) of the interface on which the
address is configured.
The scope identifier may contain the special value 0
which refers to the default scope. The default scope
may be used in cases where the valid scope identifier
is not known (e.g., a management application needs to
write a site-local InetAddressIPv6 address without
knowing the site identifier value). The default scope
SHOULD NOT be used as an easy way out in cases where
the scope identifier for a non-global IPv6 address is
known.""",
},
}, # typedefs
"nodes" : {
"ciscoIetfIpMIB" : {
"nodetype" : "node",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86",
"status" : "current",
}, # node
"ciscoIetfIpMIBObjects" : {
"nodetype" : "node",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1",
}, # node
"cIp" : {
"nodetype" : "node",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1",
}, # node
"cIpAddressPfxTable" : {
"nodetype" : "table",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1",
"status" : "current",
"description" :
"""Inet prefix table.""",
}, # table
"cIpAddressPfxEntry" : {
"nodetype" : "row",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1",
"status" : "current",
"linkage" : [
"cIpAddressPfxIfIndex",
"cIpAddressPfxType",
"cIpAddressPfxPfx",
"cIpAddressPfxLength",
],
"description" :
"""Inet prefix entry.""",
}, # row
"cIpAddressPfxIfIndex" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"IF-MIB", "name" : "InterfaceIndex"},
},
"access" : "noaccess",
"description" :
"""The interface on which this prefix is configured.""",
}, # column
"cIpAddressPfxType" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"INET-ADDRESS-MIB", "name" : "InetAddressType"},
},
"access" : "noaccess",
"description" :
"""The address type of cIpAddressPfxPfx. Only IPv4 and IPv6
addresses are expected.""",
}, # column
"cIpAddressPfxPfx" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "INET-ADDRESS-MIB",
"type" : "InetAddress",
},
"ranges" : [
{
"min" : "0",
"max" : "36"
},
],
"range" : {
"min" : "0",
"max" : "36"
},
},
},
"access" : "noaccess",
"description" :
"""The address prefix. Bits after cIpAddressPfxLength
must be zero.""",
}, # column
"cIpAddressPfxLength" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"INET-ADDRESS-MIB", "name" : "InetAddressPrefixLength"},
},
"access" : "noaccess",
"description" :
"""The prefix length associated with this prefix.""",
}, # column
"cIpAddressPfxOrigin" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"other" : {
"nodetype" : "namednumber",
"number" : "1"
},
"manual" : {
"nodetype" : "namednumber",
"number" : "2"
},
"wellknown" : {
"nodetype" : "namednumber",
"number" : "3"
},
"dhcp" : {
"nodetype" : "namednumber",
"number" : "4"
},
"routeradv" : {
"nodetype" : "namednumber",
"number" : "5"
},
},
},
"access" : "readonly",
"description" :
"""The origin of this prefix. manual(2) indicates a prefix
that was manually configured. wellknown(3) indicates a
well-known prefix, e.g. 169.254/16 for IPv4
autoconfiguration or fe80::/10 for IPv6 link-local
addresses. dhcp(4) indicates a prefix that was assigned
by a DHCP server. routeradv(5) indicates a prefix
learned from a router advertisement.""",
}, # column
"cIpAddressPfxOnLinkFlag" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TruthValue"},
},
"access" : "readonly",
"description" :
"""This object has the value 'true(1)', if this prefix
can be used for on-link determination and the value
'false(2)' otherwise.""",
}, # column
"cIpAddressPfxAutonomousFlag" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.7",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TruthValue"},
},
"access" : "readonly",
"description" :
"""Autonomous address configuration flag. When true(1),
indicates that this prefix can be used for autonomous
address configuration (i.e. can be used to form a local
interface address). If false(2), it is not used to
autoconfigure a local interface address.""",
}, # column
"cIpAddressPfxAdvPfdLifetime" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.8",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Unsigned32"},
},
"access" : "readonly",
"units" : "seconds",
"description" :
"""The length of time in seconds that this prefix will
remain preferred, i.e. time until deprecation. A
value of 4,294,967,295 represents infinity.
The address generated from a deprecated prefix should no
longer be used as a source address in new communications,
but packets received on such an interface are processed
as expected.""",
}, # column
"cIpAddressPfxAdvValidLifetime" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.1.1.9",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Unsigned32"},
},
"access" : "readonly",
"units" : "seconds",
"description" :
"""The length of time in seconds that this prefix will
remain valid, i.e. time until invalidation. A value of
4,294,967,295 represents infinity.
The address generated from an invalidated prefix should
not appear as the destination or source address of a
packet.""",
}, # column
"cIpAddressTable" : {
"nodetype" : "table",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2",
"status" : "current",
"description" :
"""Inet address table.""",
}, # table
"cIpAddressEntry" : {
"nodetype" : "row",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2.1",
"status" : "current",
"linkage" : [
"cIpAddressAddrType",
"cIpAddressAddr",
],
"description" :
"""Inet addr entry.""",
}, # row
"cIpAddressAddrType" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"INET-ADDRESS-MIB", "name" : "InetAddressType"},
},
"access" : "noaccess",
"description" :
"""The address type of cIpAddressAddr.""",
}, # column
"cIpAddressAddr" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "INET-ADDRESS-MIB",
"type" : "InetAddress",
},
"ranges" : [
{
"min" : "0",
"max" : "36"
},
],
"range" : {
"min" : "0",
"max" : "36"
},
},
},
"access" : "noaccess",
"description" :
"""The IP address to which this entry's addressing
information pertains.""",
}, # column
"cIpAddressIfIndex" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"IF-MIB", "name" : "InterfaceIndex"},
},
"access" : "readonly",
"description" :
"""The index value which uniquely identifies the interface
to which this entry is applicable. The interface
identified by a particular value of this index is the
same interface as identified by the same value of the
IF-MIB's ifIndex.""",
}, # column
"cIpAddressType" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2.1.4",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"unicast" : {
"nodetype" : "namednumber",
"number" : "1"
},
"anycast" : {
"nodetype" : "namednumber",
"number" : "2"
},
"broadcast" : {
"nodetype" : "namednumber",
"number" : "3"
},
},
},
"access" : "readonly",
"description" :
"""The type of address. broadcast(3) is not a valid value
for IPv6 addresses.""",
}, # column
"cIpAddressPrefix" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowPointer"},
},
"access" : "readonly",
"description" :
"""A pointer to the row in the prefix table to which this
address belongs. May be { 0 0 } if there is no such
row.""",
}, # column
"cIpAddressOrigin" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2.1.6",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"other" : {
"nodetype" : "namednumber",
"number" : "1"
},
"manual" : {
"nodetype" : "namednumber",
"number" : "2"
},
"wellknown" : {
"nodetype" : "namednumber",
"number" : "3"
},
"dhcp" : {
"nodetype" : "namednumber",
"number" : "4"
},
"linklayer" : {
"nodetype" : "namednumber",
"number" : "5"
},
"random" : {
"nodetype" : "namednumber",
"number" : "6"
},
},
},
"access" : "readonly",
"description" :
"""The origin of the address. manual(2) indicates that the
address was manually configured. wellknown(3) indicates
an address constructed from a well-known value, e.g. an
IANA-assigned anycast address. dhcp(4) indicates an
address that was assigned to this system by a DHCP
server. linklayer(5) indicates an address created by
IPv6 stateless autoconfiguration. random(6) indicates
an address chosen by random, e.g. an IPv4 address within
169.254/16, or an RFC 3041 privacy address.""",
}, # column
"cIpAddressStatus" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.2.1.7",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"preferred" : {
"nodetype" : "namednumber",
"number" : "1"
},
"deprecated" : {
"nodetype" : "namednumber",
"number" : "2"
},
"invalid" : {
"nodetype" : "namednumber",
"number" : "3"
},
"inaccessible" : {
"nodetype" : "namednumber",
"number" : "4"
},
"unknown" : {
"nodetype" : "namednumber",
"number" : "5"
},
"tentative" : {
"nodetype" : "namednumber",
"number" : "6"
},
"duplicate" : {
"nodetype" : "namednumber",
"number" : "7"
},
},
},
"access" : "readonly",
"description" :
"""Address status. The preferred(1) state indicates that
this is a valid address that can appear as the
destination or source address of a packet. The
deprecated(2) state indicates that this is a valid but
deprecated address that should no longer be used as a
source address in new communications, but packets
addressed to such an address are processed as expected.
The invalid(3) state indicates that this is not a valid
address and should not appear as the destination or
source address of a packet. The inaccessible(4) state
indicates that the address is not accessible because
the interface to which this address is assigned is not
operational. The tentative(6) state indicates the
uniqueness of the address on the link is being verified.
The duplicate(7) state indicates the address has been
determined to be non-unique on the link and so must not
be used.
In the absence of other information, an IPv4 address is
always preferred(1).""",
}, # column
"cInetNetToMediaTable" : {
"nodetype" : "table",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.3",
"status" : "current",
"description" :
"""The IP Address Translation table used for mapping from IP
addresses to physical addresses.
The Address Translation tables contain the IP address to
'physical' address equivalences. Some interfaces do not
use translation tables for determining address
equivalences (e.g., DDN-X.25 has an algorithmic method);
if all interfaces are of this type, then the Address
Translation table is empty, i.e., has zero entries.""",
}, # table
"cInetNetToMediaEntry" : {
"nodetype" : "row",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.3.1",
"create" : "true",
"status" : "current",
"linkage" : [
"ifIndex",
"cInetNetToMediaNetAddressType",
"cInetNetToMediaNetAddress",
],
"description" :
"""Each entry contains one IP address to `physical' address
equivalence.""",
}, # row
"cInetNetToMediaNetAddressType" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.3.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"INET-ADDRESS-MIB", "name" : "InetAddressType"},
},
"access" : "noaccess",
"description" :
"""The type of cInetNetToMediaNetAddress.""",
}, # column
"cInetNetToMediaNetAddress" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.3.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "INET-ADDRESS-MIB",
"type" : "InetAddress",
},
"ranges" : [
{
"min" : "0",
"max" : "36"
},
],
"range" : {
"min" : "0",
"max" : "36"
},
},
},
"access" : "noaccess",
"description" :
"""The IP Address corresponding to the media-dependent
`physical' address.""",
}, # column
"cInetNetToMediaPhysAddress" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.3.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "PhysAddress"},
},
"access" : "readwrite",
"description" :
"""The media-dependent `physical' address.""",
}, # column
"cInetNetToMediaLastUpdated" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.3.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TimeStamp"},
},
"access" : "readonly",
"description" :
"""The value of sysUpTime at the time this entry was last
updated. If this entry was updated prior to the last re-
initialization of the local network management subsystem,
then this object contains a zero value.""",
}, # column
"cInetNetToMediaType" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.3.1.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"other" : {
"nodetype" : "namednumber",
"number" : "1"
},
"invalid" : {
"nodetype" : "namednumber",
"number" : "2"
},
"dynamic" : {
"nodetype" : "namednumber",
"number" : "3"
},
"static" : {
"nodetype" : "namednumber",
"number" : "4"
},
"local" : {
"nodetype" : "namednumber",
"number" : "5"
},
},
},
"access" : "readwrite",
"description" :
"""The type of mapping.
Setting this object to the value invalid(2) has the
effect of invalidating the corresponding entry in the
cInetNetToMediaTable. That is, it effectively
disassociates the interface identified with said entry
from the mapping identified with said entry. It is an
implementation-specific matter as to whether the agent
removes an invalidated entry from the table.
Accordingly, management stations must be prepared to
receive tabular information from agents that corresponds
to entries not currently in use. Proper interpretation
of such entries requires examination of the relevant
cInetNetToMediaType object.
The 'dynamic(3)' type indicates that the IP address to
physical addresses mapping has been dynamically resolved
using e.g. IPv4 ARP or the IPv6 Neighbor Discovery
protocol.
The 'static(4)' type indicates that the mapping has been
statically configured. The 'local(5)' type indicates
that the mapping is provided for an entity's own
interface address.""",
}, # column
"cInetNetToMediaState" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.3.1.6",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"reachable" : {
"nodetype" : "namednumber",
"number" : "1"
},
"stale" : {
"nodetype" : "namednumber",
"number" : "2"
},
"delay" : {
"nodetype" : "namednumber",
"number" : "3"
},
"probe" : {
"nodetype" : "namednumber",
"number" : "4"
},
"invalid" : {
"nodetype" : "namednumber",
"number" : "5"
},
"unknown" : {
"nodetype" : "namednumber",
"number" : "6"
},
"incomplete" : {
"nodetype" : "namednumber",
"number" : "7"
},
},
},
"access" : "readonly",
"description" :
"""The Neighbor Unreachability Detection [3] state for the
interface when the address mapping in this entry is used.
If Neighbor Unreachability Detection is not in use (e.g.
for IPv4), this object is always unknown(6).""",
"reference>" :
"""RFC2461""",
}, # column
"cIpv6ScopeIdTable" : {
"nodetype" : "table",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4",
"status" : "current",
"description" :
"""The table used to describe IPv6 unicast and multicast
scope zones.""",
}, # table
"cIpv6ScopeIdEntry" : {
"nodetype" : "row",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1",
"status" : "current",
"linkage" : [
"cIpv6ScopeIdIfIndex",
],
"description" :
"""Each entry contains the list of scope identifiers on a
given interface.""",
}, # row
"cIpv6ScopeIdIfIndex" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"IF-MIB", "name" : "InterfaceIndex"},
},
"access" : "noaccess",
"description" :
"""The interface to which these scopes belong.""",
}, # column
"cIpv6ScopeIdLinkLocal" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for the link-local scope on this
interface.""",
}, # column
"cIpv6ScopeIdSubnetLocal" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for the subnet-local scope on this
interface.""",
}, # column
"cIpv6ScopeIdAdminLocal" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for the admin-local scope on this
interface.""",
}, # column
"cIpv6ScopeIdSiteLocal" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for the site-local scope on this
interface.""",
}, # column
"cIpv6ScopeId6" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for scope 6 on this interface.""",
}, # column
"cIpv6ScopeId7" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.7",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for scope 7 on this interface.""",
}, # column
"cIpv6ScopeIdOrganizationLocal" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.8",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for the orgainzation-local scope on
this interface.""",
}, # column
"cIpv6ScopeId9" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.9",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for scope 9 on this interface.""",
}, # column
"cIpv6ScopeIdA" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.10",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for scope A on this interface.""",
}, # column
"cIpv6ScopeIdB" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.11",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for scope B on this interface.""",
}, # column
"cIpv6ScopeIdC" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.12",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for scope C on this interface.""",
}, # column
"cIpv6ScopeIdD" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.1.4.1.13",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "ScopeId"},
},
"access" : "readonly",
"description" :
"""The Scope Identifier for scope D on this interface.""",
}, # column
"cIpv6" : {
"nodetype" : "node",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2",
}, # node
"cIpv6Forwarding" : {
"nodetype" : "scalar",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"forwarding" : {
"nodetype" : "namednumber",
"number" : "1"
},
"notForwarding" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""The indication of whether this entity is acting as an
IPv6 router in respect to the forwarding of datagrams
received by, but not addressed to, this entity. IPv6
routers forward datagrams. IPv6 hosts do not (except
those source-routed via the host).""",
}, # scalar
"cIpv6DefaultHopLimit" : {
"nodetype" : "scalar",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "255"
},
],
"range" : {
"min" : "0",
"max" : "255"
},
},
},
"access" : "readwrite",
"description" :
"""The default value inserted into the Hop Limit field of
the IPv6 header of datagrams originated at this entity,
whenever a Hop Limit value is not supplied by the
transport layer protocol.""",
}, # scalar
"cIpv6InterfaceTable" : {
"nodetype" : "table",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.3",
"status" : "current",
"description" :
"""The table containing per-interface IPv6-specific
information.""",
}, # table
"cIpv6InterfaceEntry" : {
"nodetype" : "row",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.3.1",
"status" : "current",
"linkage" : [
"cIpv6InterfaceIfIndex",
],
"description" :
"""An entry containing IPv6-specific information for a given
interface.""",
}, # row
"cIpv6InterfaceIfIndex" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.3.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"IF-MIB", "name" : "InterfaceIndex"},
},
"access" : "noaccess",
"description" :
"""The interface for which this row contains IPv6-specific
information.""",
}, # column
"cIpv6InterfaceEffectiveMtu" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.3.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Unsigned32"},
},
"access" : "readonly",
"units" : "octets",
"description" :
"""The size of the largest IPv6 packet which can be
sent/received on the interface, specified in octets.""",
}, # column
"cIpv6InterfaceReasmMaxSize" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.3.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Unsigned32",
"ranges" : [
{
"min" : "0",
"max" : "65535"
},
],
"range" : {
"min" : "0",
"max" : "65535"
},
},
},
"access" : "readonly",
"units" : "octets",
"description" :
"""The size of the largest IPv6 datagram which this entity
can re-assemble from incoming IPv6 fragmented datagrams
received on this interface.""",
}, # column
"cIpv6InterfaceIdentifier" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.3.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"CISCO-IETF-IP-MIB", "name" : "Ipv6AddrIfIdentifier"},
},
"access" : "readwrite",
"description" :
"""The Interface Identifier for this interface that is (at
least) unique on the link this interface is attached to.
The Interface Identifier is combined with an address
prefix to form an interface address.
By default, the Interface Identifier is autoconfigured
according to the rules of the link type this interface is
attached to.""",
}, # column
"cIpv6InterfaceIdentifierLength" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.3.1.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "64"
},
],
"range" : {
"min" : "0",
"max" : "64"
},
},
},
"access" : "readwrite",
"units" : "bits",
"description" :
"""The length of the Interface Identifier in bits.""",
}, # column
"cIpv6InterfacePhysicalAddress" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.2.3.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "PhysAddress"},
},
"access" : "readonly",
"description" :
"""The interface's physical address. For example, for an
IPv6 interface attached to an 802.x link, this object
normally contains a MAC address. Note that in some
cases this address may differ from the address of the
interface's protocol sub-layer. The interface's
media-specific MIB must define the bit and byte
ordering and the format of the value of this object.
For interfaces which do not have such an address
(e.g., a serial line), this object should contain
an octet string of zero length.""",
}, # column
"cIcmp" : {
"nodetype" : "node",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3",
}, # node
"cInetIcmpTable" : {
"nodetype" : "table",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.1",
"status" : "current",
"description" :
"""The table of generic ICMP counters. These counters may
be kept per-interface and/or system-wide.""",
}, # table
"cInetIcmpEntry" : {
"nodetype" : "row",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.1.1",
"status" : "current",
"linkage" : [
"cInetIcmpAFType",
"cInetIcmpIfIndex",
],
"description" :
"""A conceptual row in the inetIcmpTable.
A row with an inetIcmpIfIndex value of zero indicates a
system-wide value; a row with a non-zero inetIcmpIfIndex
indicates an interface-specific value. A system may
provide both system-wide and interface-specific values,
in which case it is important to note that the
system-wide value may not be equal to the sum of the
interface-specific value across all interfaces due to
e.g. dynamic interface creation/deletion.""",
}, # row
"cInetIcmpAFType" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"INET-ADDRESS-MIB", "name" : "InetAddressType"},
},
"access" : "noaccess",
"description" :
"""The IP address family of the statistics.""",
}, # column
"cInetIcmpIfIndex" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"IF-MIB", "name" : "InterfaceIndexOrZero"},
},
"access" : "noaccess",
"description" :
"""The ifindex of the interface, or zero for system-wide
stats.""",
}, # column
"cInetIcmpInMsgs" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter32"},
},
"access" : "readonly",
"description" :
"""The total number of ICMP messages which the entity
received. Note that this counter includes all those
counted by cInetIcmpInErrors.""",
}, # column
"cInetIcmpInErrors" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.1.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter32"},
},
"access" : "readonly",
"description" :
"""The number of ICMP messages which the entity received but
determined as having ICMP-specific errors (bad ICMP
checksums, bad length, etc.).""",
}, # column
"cInetIcmpOutMsgs" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.1.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter32"},
},
"access" : "readonly",
"description" :
"""The total number of ICMP messages which the entity
received. Note that this counter includes all those
counted by inetIcmpOutErrors.""",
}, # column
"cInetIcmpOutErrors" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.1.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter32"},
},
"access" : "readonly",
"description" :
"""The number of ICMP messages which this entity did not
send due to problems discovered within ICMP such as a
lack of buffers. This value should not include errors
discovered outside the ICMP layer such as the inability
of IP to route the resultant datagram. In some
implementations there may be no types of error which
contribute to this counter's value.""",
}, # column
"cInetIcmpMsgTable" : {
"nodetype" : "table",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.2",
"status" : "current",
"description" :
"""The table of per-message ICMP counters. These counters
may be kept per-interface and/or system-wide.""",
}, # table
"cInetIcmpMsgEntry" : {
"nodetype" : "row",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.2.1",
"status" : "current",
"linkage" : [
"cInetIcmpMsgAFType",
"cInetIcmpMsgIfIndex",
"cInetIcmpMsgType",
"cInetIcmpMsgCode",
],
"description" :
"""A conceptual row in the inetIcmpMsgTable.
A row with an inetIcmpMsgIfIndex value of zero indicates
a system-wide value; a row with a non-zero
cInetIcmpMsgIfIndex indicates an interface-specific
value. A system may provide both system-wide and
interface-specific values, in which case it is important
to note that the system-wide value may not be equal to
the sum of the interface-specific values across all
interfaces due to e.g. dynamic interface
creation/deletion.
If the system keeps track of individual ICMP code values
(e.g. destination unreachable, code administratively
prohibited), it creates several rows for each
inetIcmpMsgType, each with an appropriate value of
cInetIcmpMsgCode. A row with the special value of
cInetIcmpMsgCode, 256, counts all packets with type
cInetIcmpMsgType that aren't counted in rows with a value
of cInetIcmpMsgCode other than 256.""",
}, # row
"cInetIcmpMsgAFType" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"INET-ADDRESS-MIB", "name" : "InetAddressType"},
},
"access" : "noaccess",
"description" :
"""The IP address family of the statistics.""",
}, # column
"cInetIcmpMsgIfIndex" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"IF-MIB", "name" : "InterfaceIndexOrZero"},
},
"access" : "noaccess",
"description" :
"""The ifindex of the interface, or zero for system-wide
stats.""",
}, # column
"cInetIcmpMsgType" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.2.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "255"
},
],
"range" : {
"min" : "0",
"max" : "255"
},
},
},
"access" : "noaccess",
"description" :
"""The ICMP type field of the message type being counted by
this row.""",
}, # column
"cInetIcmpMsgCode" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.2.1.4",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "256"
},
],
"range" : {
"min" : "0",
"max" : "256"
},
},
},
"access" : "noaccess",
"description" :
"""The ICMP code field of the message type being counted by
this row, or the special value 256 if no specific ICMP
code is counted by this row.""",
}, # column
"cInetIcmpMsgInPkts" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.2.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter32"},
},
"access" : "readonly",
"description" :
"""The number of input packets for this AF, ifindex, type,
code.""",
}, # column
"cInetIcmpMsgOutPkts" : {
"nodetype" : "column",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.1.3.2.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter32"},
},
"access" : "readonly",
"description" :
"""The number of output packets for this AF, ifindex, type,
code.""",
}, # column
"ciscoIpMIBConformance" : {
"nodetype" : "node",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2",
}, # node
"ciscoIpMIBCompliances" : {
"nodetype" : "node",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.1",
}, # node
"ciscoIpMIBGroups" : {
"nodetype" : "node",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2",
}, # node
}, # nodes
"groups" : {
"ciscoIpAddressPfxGroup" : {
"nodetype" : "group",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2.1",
"status" : "current",
"members" : {
"cIpAddressPfxOrigin" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpAddressPfxOnLinkFlag" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpAddressPfxAutonomousFlag" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpAddressPfxAdvPfdLifetime" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpAddressPfxAdvValidLifetime" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
}, # members
"description" :
"""The ip version independent address prefix group of entries
providing for basic management of IP prefixes.""",
}, # group
"ciscoIpAddressGroup" : {
"nodetype" : "group",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2.2",
"status" : "current",
"members" : {
"cIpAddressIfIndex" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpAddressType" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpAddressPrefix" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpAddressOrigin" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpAddressStatus" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
}, # members
"description" :
"""The ip version independent address group of entries
providing for basic management of IP addresses.""",
}, # group
"ciscoInetNetToMediaGroup" : {
"nodetype" : "group",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2.3",
"status" : "current",
"members" : {
"cInetNetToMediaPhysAddress" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cInetNetToMediaLastUpdated" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cInetNetToMediaType" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cInetNetToMediaState" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
}, # members
"description" :
"""The group of entries providing IP address to physical
address mapping.""",
}, # group
"ciscoInetIcmpGroup" : {
"nodetype" : "group",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2.4",
"status" : "current",
"members" : {
"cInetIcmpInMsgs" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cInetIcmpInErrors" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cInetIcmpOutMsgs" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cInetIcmpOutErrors" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
}, # members
"description" :
"""The group of entries providing version independent
per-interface ICMP specific counters.""",
}, # group
"ciscoInetIcmpMsgGroup" : {
"nodetype" : "group",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2.5",
"status" : "current",
"members" : {
"cInetIcmpMsgInPkts" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cInetIcmpMsgOutPkts" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
}, # members
"description" :
"""The group of entries providing version independent
per-interface ICMP msg type specific counters.""",
}, # group
"ciscoIpv6GeneralGroup" : {
"nodetype" : "group",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2.6",
"status" : "current",
"members" : {
"cIpv6Forwarding" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6DefaultHopLimit" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
}, # members
"description" :
"""The IPv6 group of objects providing for basic
management of IPv6 entities.""",
}, # group
"ciscoIpv6InterfaceGroup" : {
"nodetype" : "group",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2.7",
"status" : "current",
"members" : {
"cIpv6InterfaceEffectiveMtu" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6InterfaceReasmMaxSize" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6InterfaceIdentifier" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6InterfaceIdentifierLength" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6InterfacePhysicalAddress" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
}, # members
"description" :
"""The IPv6 group of objects providing IPv6 interface
specific statistics.""",
}, # group
"ciscoIpv6ScopeGroup" : {
"nodetype" : "group",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.2.8",
"status" : "current",
"members" : {
"cIpv6ScopeIdLinkLocal" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeIdSubnetLocal" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeIdAdminLocal" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeIdSiteLocal" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeId6" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeId7" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeIdOrganizationLocal" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeId9" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeIdA" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeIdB" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeIdC" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
"cIpv6ScopeIdD" : {
"nodetype" : "member",
"module" : "CISCO-IETF-IP-MIB"
},
}, # members
"description" :
"""The group of objects for managing IPv6 scope zones.""",
}, # group
}, # groups
"compliances" : {
"ciscoIpMIBCompliance" : {
"nodetype" : "compliance",
"moduleName" : "CISCO-IETF-IP-MIB",
"oid" : "1.3.6.1.4.1.9.10.86.2.1.1",
"status" : "current",
"description" :
"""The compliance statement for systems which implement
IPv6 OR IPv4.""",
"requires" : {
"ciscoIpAddressPfxGroup" : {
"nodetype" : "mandatory",
"module" : "CISCO-IETF-IP-MIB"
},
"ciscoIpAddressGroup" : {
"nodetype" : "mandatory",
"module" : "CISCO-IETF-IP-MIB"
},
"ciscoInetNetToMediaGroup" : {
"nodetype" : "mandatory",
"module" : "CISCO-IETF-IP-MIB"
},
"ciscoInetIcmpGroup" : {
"nodetype" : "mandatory",
"module" : "CISCO-IETF-IP-MIB"
},
"ciscoInetIcmpMsgGroup" : {
"nodetype" : "mandatory",
"module" : "CISCO-IETF-IP-MIB"
},
"ciscoIpv6GeneralGroup" : {
"nodetype" : "optional",
"module" : "CISCO-IETF-IP-MIB",
"description" :
"""This group is mandatory only when IPv6 is implemented
on the system.""",
},
"ciscoIpv6InterfaceGroup" : {
"nodetype" : "optional",
"module" : "CISCO-IETF-IP-MIB",
"description" :
"""This group is mandatory only when IPv6 is implemented
on the system.""",
},
"ciscoIpv6ScopeGroup" : {
"nodetype" : "optional",
"module" : "CISCO-IETF-IP-MIB",
"description" :
"""The group is mandatory onle when IPv6 scoped architecture
has been implemented on the system.""",
},
}, # requires
}, # compliance
}, # compliances
}
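
# --- Hedged usage sketch (not part of the generated MIB data above) ---
# The dictionary above follows the layout commonly produced by MIB-to-Python
# dumpers: top-level "nodes", "groups", and "compliances" maps, where each
# node entry carries "nodetype", "oid", "access", and "description". The
# helper below is only a sketch of how such a structure could be queried; the
# name of the top-level variable holding the dict is not shown in this
# excerpt, so the dict is passed in as a plain argument.
def columns_of_table(mib, table_name):
    """Return the names of column nodes whose OIDs fall under `table_name`.

    `mib` is assumed to be the dictionary defined above, e.g.
    columns_of_table(mib, "cInetIcmpTable") would yield
    ['cInetIcmpAFType', 'cInetIcmpIfIndex', 'cInetIcmpInErrors', ...].
    """
    nodes = mib.get("nodes", {})
    table = nodes.get(table_name)
    if table is None or table.get("nodetype") != "table":
        return []
    prefix = table["oid"] + "."
    return sorted(name for name, node in nodes.items()
                  if node.get("nodetype") == "column"
                  and node.get("oid", "").startswith(prefix))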
|
ajack13/tournament
|
refs/heads/master
|
tournament_test.py
|
20
|
#!/usr/bin/env python
#
# Test cases for tournament.py
from tournament import *
def testDeleteMatches():
deleteMatches()
print "1. Old matches can be deleted."
def testDelete():
deleteMatches()
deletePlayers()
print "2. Player records can be deleted."
def testCount():
deleteMatches()
deletePlayers()
c = countPlayers()
if c == '0':
raise TypeError(
"countPlayers() should return numeric zero, not string '0'.")
if c != 0:
raise ValueError("After deleting, countPlayers should return zero.")
print "3. After deleting, countPlayers() returns zero."
def testRegister():
deleteMatches()
deletePlayers()
registerPlayer("Chandra Nalaar")
c = countPlayers()
if c != 1:
raise ValueError(
"After one player registers, countPlayers() should be 1.")
print "4. After registering a player, countPlayers() returns 1."
def testRegisterCountDelete():
deleteMatches()
deletePlayers()
registerPlayer("Markov Chaney")
registerPlayer("Joe Malik")
registerPlayer("Mao Tsu-hsi")
registerPlayer("Atlanta Hope")
c = countPlayers()
if c != 4:
raise ValueError(
"After registering four players, countPlayers should be 4.")
deletePlayers()
c = countPlayers()
if c != 0:
raise ValueError("After deleting, countPlayers should return zero.")
print "5. Players can be registered and deleted."
def testStandingsBeforeMatches():
deleteMatches()
deletePlayers()
registerPlayer("Melpomene Murray")
registerPlayer("Randy Schwartz")
standings = playerStandings()
if len(standings) < 2:
raise ValueError("Players should appear in playerStandings even before "
"they have played any matches.")
elif len(standings) > 2:
raise ValueError("Only registered players should appear in standings.")
if len(standings[0]) != 4:
raise ValueError("Each playerStandings row should have four columns.")
[(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings
if matches1 != 0 or matches2 != 0 or wins1 != 0 or wins2 != 0:
raise ValueError(
"Newly registered players should have no matches or wins.")
if set([name1, name2]) != set(["Melpomene Murray", "Randy Schwartz"]):
raise ValueError("Registered players' names should appear in standings, "
"even if they have no matches played.")
print "6. Newly registered players appear in the standings with no matches."
def testReportMatches():
deleteMatches()
deletePlayers()
registerPlayer("Bruno Walton")
registerPlayer("Boots O'Neal")
registerPlayer("Cathy Burton")
registerPlayer("Diane Grant")
standings = playerStandings()
[id1, id2, id3, id4] = [row[0] for row in standings]
reportMatch(id1, id2)
reportMatch(id3, id4)
standings = playerStandings()
for (i, n, w, m) in standings:
if m != 1:
raise ValueError("Each player should have one match recorded.")
if i in (id1, id3) and w != 1:
raise ValueError("Each match winner should have one win recorded.")
elif i in (id2, id4) and w != 0:
raise ValueError("Each match loser should have zero wins recorded.")
print "7. After a match, players have updated standings."
def testPairings():
deleteMatches()
deletePlayers()
registerPlayer("Twilight Sparkle")
registerPlayer("Fluttershy")
registerPlayer("Applejack")
registerPlayer("Pinkie Pie")
standings = playerStandings()
[id1, id2, id3, id4] = [row[0] for row in standings]
reportMatch(id1, id2)
reportMatch(id3, id4)
pairings = swissPairings()
if len(pairings) != 2:
raise ValueError(
"For four players, swissPairings should return two pairs.")
[(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4)] = pairings
correct_pairs = set([frozenset([id1, id3]), frozenset([id2, id4])])
actual_pairs = set([frozenset([pid1, pid2]), frozenset([pid3, pid4])])
if correct_pairs != actual_pairs:
raise ValueError(
"After one match, players with one win should be paired.")
print "8. After one match, players with one win are paired."
if __name__ == '__main__':
testDeleteMatches()
testDelete()
testCount()
testRegister()
testRegisterCountDelete()
testStandingsBeforeMatches()
testReportMatches()
testPairings()
print "Success! All tests pass!"
|
Y3K/django
|
refs/heads/master
|
tests/context_processors/views.py
|
346
|
from django.shortcuts import render
from .models import DebugObject
def request_processor(request):
return render(request, 'context_processors/request_attrs.html')
def debug_processor(request):
context = {'debug_objects': DebugObject.objects}
return render(request, 'context_processors/debug.html', context)
|
Vogtinator/micropython
|
refs/heads/nspire
|
tests/import/pkg3/mod2.py
|
103
|
print("mod2 __name__:", __name__)
print("in mod2")
def foo():
print("mod2.foo()")
|
raymondxyang/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/init_ops.py
|
32
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util.deprecation import deprecated
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None, partition_info=None):
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary.
It will typically be the output of `get_config`.
Returns:
An Initializer instance.
"""
return cls(**config)
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return array_ops.zeros(shape, dtype)
def get_config(self):
return {"dtype": self.dtype.name}
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return array_ops.ones(shape, dtype)
def get_config(self):
return {"dtype": self.dtype.name}
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` following the desired `shape` of the
new tensor (see examples below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the desired shape of the
tensor. In the case where the total number of elements in `value` is less
than the number of elements required by the tensor shape, the last element
in `value` will be used to fill the remaining entries. If the total number of
elements in `value` is greater than the number of elements required by the
tensor shape, the initializer will raise a `ValueError`.
Args:
value: A Python scalar, list of values, or a N-dimensional numpy array. All
elements of the initialized variable will be set to the corresponding
value in the `value` argument.
dtype: The data type.
verify_shape: Boolean that enables verification of the shape of `value`. If
`True`, the initializer will throw an error if the shape of `value` is not
compatible with the shape of the initialized tensor.
Examples:
The following example can be rewritten using a numpy.ndarray instead
of the `value` list, even reshaped, as shown in the two commented lines
below the `value` list initialization.
```python
>>> import numpy as np
>>> import tensorflow as tf
>>> value = [0, 1, 2, 3, 4, 5, 6, 7]
>>> # value = np.array(value)
>>> # value = value.reshape([2, 4])
>>> init = tf.constant_initializer(value)
>>> print('fitting shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
fitting shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
>>> print('larger shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
larger shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 7. 7. 7. 7.]]
>>> print('smaller shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 3], initializer=init)
ValueError: Too many elements provided. Needed at most 6, but received 8
>>> print('shape verification:')
>>> init_verify = tf.constant_initializer(value, verify_shape=True)
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init_verify)
TypeError: Expected Tensor's shape: (3, 4), got (8,).
```
"""
def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False):
self.value = value
self.dtype = dtypes.as_dtype(dtype)
self._verify_shape = verify_shape
def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None):
if dtype is None:
dtype = self.dtype
if verify_shape is None:
verify_shape = self._verify_shape
return constant_op.constant(
self.value, dtype=dtype, shape=shape, verify_shape=verify_shape)
def get_config(self):
# We don't include `verify_shape` for compatibility with Keras.
# `verify_shape` should be passed as an argument to `__call__` rather
# than as a constructor argument: conceptually it isn't a property
# of the initializer.
return {"value": self.value, "dtype": self.dtype.name}
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range
of random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type.
"""
def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32):
self.minval = minval
self.maxval = maxval
self.seed = seed
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.random_uniform(
shape, self.minval, self.maxval, dtype, seed=self.seed)
def get_config(self):
return {
"minval": self.minval,
"maxval": self.maxval,
"seed": self.seed,
"dtype": self.dtype.name
}
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
self.mean = mean
self.stddev = stddev
self.seed = seed
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.random_normal(
shape, self.mean, self.stddev, dtype, seed=self.seed)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed,
"dtype": self.dtype.name
}
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `random_normal_initializer`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
self.mean = mean
self.stddev = stddev
self.seed = seed
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.truncated_normal(
shape, self.mean, self.stddev, dtype, seed=self.seed)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed,
"dtype": self.dtype.name
}
class UniformUnitScaling(Initializer):
"""Initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. If the input is `x` and the operation `x * W`,
and we want to initialize `W` uniformly at random, we need to pick `W` from
[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]
to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
A similar calculation for convolutional networks gives an analogous result
with `dim` equal to the product of the first 3 dimensions. When
nonlinearities are present, we need to multiply this by a constant `factor`.
See [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
([pdf](http://arxiv.org/pdf/1412.6558.pdf)) for deeper motivation, experiments
and the calculation of constants. In section 2.3 there, the constants were
numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.
Args:
factor: Float. A multiplicative factor by which the values will be scaled.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
@deprecated(None,
"Use tf.initializers.variance_scaling instead with distribution="
"uniform to get equivalent behavior.")
def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
self.factor = factor
self.seed = seed
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
input_size = 1.0
# Estimating input size is not possible to do perfectly, but we try.
# The estimate, obtained by multiplying all dimensions but the last one,
# is the right thing for matrix multiply and convolutions (see above).
for dim in scale_shape[:-1]:
input_size *= float(dim)
# Avoid errors when initializing zero-size tensors.
input_size = max(input_size, 1.0)
max_val = math.sqrt(3 / input_size) * self.factor
return random_ops.random_uniform(
shape, -max_val, max_val, dtype, seed=self.seed)
def get_config(self):
return {"factor": self.factor, "seed": self.seed, "dtype": self.dtype.name}
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
With `distribution="normal"`, samples are drawn from a truncated normal
distribution centered on zero, with `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Args:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Raises:
ValueError: In case of an invalid value for the "scale", mode" or
"distribution" arguments.
"""
def __init__(self,
scale=1.0,
mode="fan_in",
distribution="normal",
seed=None,
dtype=dtypes.float32):
if scale <= 0.:
raise ValueError("`scale` must be positive float.")
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Invalid `mode` argument:", mode)
distribution = distribution.lower()
if distribution not in {"normal", "uniform"}:
raise ValueError("Invalid `distribution` argument:", distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
scale = self.scale
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
fan_in, fan_out = _compute_fans(scale_shape)
if self.mode == "fan_in":
scale /= max(1., fan_in)
elif self.mode == "fan_out":
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
if self.distribution == "normal":
stddev = math.sqrt(scale)
return random_ops.truncated_normal(
shape, 0.0, stddev, dtype, seed=self.seed)
else:
limit = math.sqrt(3.0 * scale)
return random_ops.random_uniform(
shape, -limit, limit, dtype, seed=self.seed)
def get_config(self):
return {
"scale": self.scale,
"mode": self.mode,
"distribution": self.distribution,
"seed": self.seed,
"dtype": self.dtype.name
}
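# --- Hedged sketch (illustration only, not an API of this module) ---
# The docstring of VarianceScaling above maps (scale, mode, distribution) to
# either a stddev or a uniform limit. The helper below reproduces that
# arithmetic for a 2-D weight shape so the mapping can be checked by hand; it
# mirrors VarianceScaling.__call__ but performs no sampling.
def _variance_scaling_params(scale, mode, distribution, fan_in, fan_out):
  if mode == "fan_in":
    n = max(1., fan_in)
  elif mode == "fan_out":
    n = max(1., fan_out)
  else:  # "fan_avg"
    n = max(1., (fan_in + fan_out) / 2.)
  if distribution == "normal":
    return "stddev", math.sqrt(scale / n)
  return "limit", math.sqrt(3. * scale / n)
# Example: _variance_scaling_params(2.0, "fan_in", "normal", 784, 256)
# returns ("stddev", 0.0505...), i.e. sqrt(2 / 784).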
class Orthogonal(Initializer):
"""Initializer that generates an orthogonal matrix.
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
uniform random numbers. If the matrix has fewer rows than columns then the
output will have orthogonal rows. Otherwise, the output will have orthogonal
columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Args:
gain: multiplicative factor to apply to the orthogonal matrix
dtype: The type of the output.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
"""
def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
self.gain = gain
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
self.seed = seed
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize must be "
"at least two-dimensional")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows,
num_cols)
# Generate a random matrix
a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
# Compute the qr factorization
q, r = linalg_ops.qr(a, full_matrices=False)
# Make Q uniform
d = array_ops.diag_part(r)
ph = d / math_ops.abs(d)
q *= ph
if num_rows < num_cols:
q = array_ops.matrix_transpose(q)
return self.gain * array_ops.reshape(q, shape)
def get_config(self):
return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name}
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Only use for 2D matrices.
Args:
gain: Multiplicative factor to apply to the identity matrix.
dtype: The type of the output.
"""
def __init__(self, gain=1.0, dtype=dtypes.float32):
self.gain = gain
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
def __call__(self, shape, dtype=None, partition_info=None):
full_shape = shape if partition_info is None else partition_info.full_shape
if len(full_shape) != 2:
raise ValueError(
"Identity matrix initializer can only be used for 2D matrices.")
if dtype is None:
dtype = self.dtype
initializer = linalg_ops.eye(*full_shape, dtype=dtype)
if partition_info is not None:
initializer = array_ops.slice(initializer, partition_info.var_offset,
shape)
return self.gain * initializer
def get_config(self):
return {"gain": self.gain, "dtype": self.dtype.name}
# Aliases.
# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
uniform_unit_scaling_initializer = UniformUnitScaling
variance_scaling_initializer = VarianceScaling
orthogonal_initializer = Orthogonal
identity_initializer = Identity
# pylint: enable=invalid-name
def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Args:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(
scale=1.0, mode="fan_avg", distribution="uniform", seed=seed, dtype=dtype)
def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Args:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(
scale=1.0, mode="fan_avg", distribution="normal", seed=seed, dtype=dtype)
# Utility functions.
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
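# Worked example (added, hypothetical shapes): for a 2D convolution kernel of
# shape (3, 3, 64, 128) the receptive field size is 3 * 3 = 9, so
#   fan_in  = 64 * 9  = 576
#   fan_out = 128 * 9 = 1152
# while a dense kernel of shape (784, 10) gives fan_in = 784 and fan_out = 10.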
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
|
moio/spacewalk
|
refs/heads/master
|
client/tools/rhncfg/config_common/file_utils.py
|
1
|
#
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import stat
import time
import tempfile
import base64
import difflib
import pwd
import grp
try:
from selinux import lgetfilecon
except:
# on rhel4 we do not support selinux
def lgetfilecon(path):
return [0, '']
from config_common import utils
from config_common.local_config import get as get_config
class FileProcessor:
file_struct_fields = {
'file_contents' : None,
'delim_start' : None,
'delim_end' : None,
}
def __init__(self):
pass
def process(self, file_struct, directory=None, strict_ownership=1):
# Older servers will not return directories; if filetype is missing,
# assume file
if file_struct.get('filetype') == 'directory':
return None, None
if directory:
directory += os.path.split(file_struct['path'])[0]
if file_struct.get('filetype') == 'symlink':
if not file_struct.has_key('symlink'):
raise Exception, "Missing key symlink"
(fullpath, dirs_created, fh) = maketemp(prefix=".rhn-cfg-tmp",
directory=directory, symlink=file_struct['symlink'])
return fullpath, dirs_created
for k in self.file_struct_fields.keys():
if not file_struct.has_key(k):
# XXX
raise Exception, "Missing key %s" % k
encoding = ''
if file_struct.has_key('encoding'):
encoding = file_struct['encoding']
contents = file_struct['file_contents']
if contents and (encoding == 'base64'):
contents = base64.decodestring(contents)
delim_start = file_struct['delim_start']
delim_end = file_struct['delim_end']
if ('checksum' in file_struct
and 'checksum_type' in file_struct
and 'verify_contents' in file_struct
and file_struct['verify_contents']):
if file_struct['checksum'] != utils.getContentChecksum(
file_struct['checksum_type'], contents):
raise Exception, "Corrupt file received: Content checksums do not match!"
elif ('md5sum' in file_struct and 'verify_contents' in file_struct
and file_struct['verify_contents']):
if file_struct['md5sum'] != utils.getContentChecksum(
'md5', contents):
raise Exception, "Corrupt file received: Content checksums do not match!"
elif ('verify_contents' in file_struct
and file_struct['verify_contents']):
raise Exception, "Corrupt file received: missing checksum information!"
fh = None
(fullpath, dirs_created, fh) = maketemp(prefix=".rhn-cfg-tmp",
directory=directory)
try:
fh.write(contents)
except Exception:
if fh:
fh.close() # don't leak fds...
raise
else:
fh.close()
# try to set mtime and ctime of the file to
# the last modified time on the server
if file_struct.has_key('modified'):
try:
modified = xmlrpc_time(file_struct['modified'].value)
epoch_time = time.mktime(modified)
os.utime(fullpath, (epoch_time, epoch_time))
except (ValueError, AttributeError):
# we can't parse modified time
pass
return fullpath, dirs_created
def diff(self, file_struct):
self._validate_struct(file_struct)
temp_file, temp_dirs = self.process(file_struct)
path = file_struct['path']
sectx_result = ''
owner_result = ''
group_result = ''
perm_result = ''
result = ''
stat_err = 0
try:
cur_stat = os.lstat(path)
except:
stat_err = 1
if file_struct['filetype'] != 'symlink':
if not stat_err:
#check for owner differences
cur_uid = cur_stat[stat.ST_UID]
try:
cur_user = pwd.getpwuid(cur_uid)[0]
except KeyError:
# Orphan UID with no name, return unknown
cur_user = "unknown(UID %d)" % (cur_uid,)
else:
cur_user = "missing"
if cur_user == file_struct['username']:
owner_result = ""
else:
owner_result = "User name differ: actual: [%s], expected: [%s]\n" % (cur_user, file_struct['username'])
if not stat_err:
#check for group differences
cur_gid = cur_stat[stat.ST_GID]
try:
cur_group = grp.getgrgid(cur_gid)[0]
except KeyError:
# Orphan GID with no name, return unknown
cur_group = "unknown(GID %d)" % (cur_gid,)
else:
cur_group = "missing"
if cur_group == file_struct['groupname']:
group_result = ""
else:
group_result = "Group name differ: actual: [%s], expected: [%s]\n" % (cur_group, file_struct['groupname'])
#check for permissions differences
if not stat_err:
cur_perm = str(oct(stat.S_IMODE(cur_stat[stat.ST_MODE])))
else:
cur_perm = "missing"
#rip off the leading '0' from the mode returned by stat()
if cur_perm[0] == '0':
cur_perm = cur_perm[1:]
#perm_status gets displayed with the verbose option.
if cur_perm == str(file_struct['filemode']):
perm_result = ""
else:
perm_result = "File mode differ: actual: [%s], expected: [%s]\n" % (cur_perm, file_struct['filemode'])
try:
cur_sectx = lgetfilecon(path)[1]
except OSError: # working around BZ 690238
cur_sectx = None
if cur_sectx == None:
cur_sectx = ''
if file_struct.has_key('selinux_ctx') and file_struct['selinux_ctx']:
if cur_sectx != file_struct['selinux_ctx']:
sectx_result = "SELinux contexts differ: actual: [%s], expected: [%s]\n" % (cur_sectx, file_struct['selinux_ctx'])
if file_struct['filetype'] == 'directory':
if os.path.isdir(file_struct['path']):
result = ''
else:
result = "Deployed directory is no longer a directory!"
elif file_struct['filetype'] == 'symlink':
try:
curlink = os.readlink(path)
newlink = os.readlink(temp_file)
if curlink == newlink:
result = ''
else:
result = "Link targets differ for [%s]: actual: [%s], expected: [%s]\n" % (path, curlink, newlink)
except OSError, e:
if e.errno == 22:
result = "Deployed symlink is no longer a symlink!"
else:
raise e
else:
result = ''.join(diff(temp_file, path,
display_diff=get_config('display_diff')))
if temp_file:
os.unlink(temp_file)
return owner_result + group_result + perm_result + sectx_result + result
def _validate_struct(self, file_struct):
for k in self.file_struct_fields.keys():
if not file_struct.has_key(k):
# XXX
raise Exception, "Missing key %s" % k
def diff(src, dst, srcname=None, dstname=None, display_diff=False):
def f_content(path, name):
statinfo = None
if os.access(path, os.R_OK):
f = open(path, 'U')
content = f.readlines()
f.close()
statinfo = os.stat(path)
f_time = time.ctime(statinfo.st_mtime)
if content and content[-1] and content[-1][-1] != "\n":
content[-1] += "\n"
else:
content = []
f_time = time.ctime(0)
if not name:
name = path
return (content, name, f_time, statinfo)
(src_content, src_name, src_time, src_stat) = f_content(src, srcname)
(dst_content, dst_name, dst_time, dst_stat) = f_content(dst, dstname)
diff_u = difflib.unified_diff(src_content, dst_content,
src_name, dst_name,
src_time, dst_time)
ret_list = list(diff_u)
# don't return the diff if the file is not readable by everyone
# for security reasons.
if (len(ret_list) > 0 # if differences exist
and not display_diff # and we have not explicitly decided to display
and (dst_stat == None # file is not there or not readable to root
or (dst_stat.st_uid == 0 # file is owned by root
and not dst_stat.st_mode & stat.S_IROTH))): # not read-all
ret_list = [
"Differences exist in a file that is not readable by all. ",
"Re-deployment of configuration file is recommended.\n"]
return ret_list
def maketemp(prefix=None, directory=None, symlink=None):
"""Creates a temporary file (guaranteed to be new), using the
specified prefix.
Returns the filename, the directories created (if any), and an open stream (None when a symlink is created instead)
"""
if not directory:
directory = tempfile.gettempdir()
dirs_created = None
if not os.path.exists(directory):
dirs_created = utils.mkdir_p(directory)
if not prefix:
# Create the file in /tmp by default
prefix = 'rhncfg-tempfile'
file_prefix = "%s-%s-" % (prefix, os.getpid())
(fd, filename) = tempfile.mkstemp(prefix=file_prefix, dir=directory)
if symlink:
os.unlink(filename)
os.symlink(symlink, filename)
open_file = None
else:
open_file = os.fdopen(fd, "w+")
return filename, dirs_created, open_file
# Duplicated from backend/common/fileutils.py to remove dependency requirement.
# If making changes make them there too.
FILETYPE2CHAR = {
'file' : '-',
'directory' : 'd',
'symlink' : 'l',
'chardev' : 'c',
'blockdev' : 'b',
}
# Duplicated from backend/common/fileutils.py to remove dependency requirement.
# If making changes make them there too.
def _ifelse(cond, thenval, elseval):
if cond:
return thenval
else:
return elseval
# Duplicated from backend/common/fileutils.py to remove dependency requirement.
# If making changes make them there too.
def ostr_to_sym(octstr, ftype):
""" Convert filemode in octets (like '644') to string like "ls -l" ("-rwxrw-rw-")
ftype is one of: file, directory, symlink, chardev, blockdev.
"""
mode = int(str(octstr), 8)
symstr = FILETYPE2CHAR.get(ftype, '?')
symstr += _ifelse(mode & stat.S_IRUSR, 'r', '-')
symstr += _ifelse(mode & stat.S_IWUSR, 'w', '-')
symstr += _ifelse(mode & stat.S_IXUSR,
_ifelse(mode & stat.S_ISUID, 's', 'x'),
_ifelse(mode & stat.S_ISUID, 'S', '-'))
symstr += _ifelse(mode & stat.S_IRGRP, 'r', '-')
symstr += _ifelse(mode & stat.S_IWGRP, 'w', '-')
symstr += _ifelse(mode & stat.S_IXGRP,
_ifelse(mode & stat.S_ISGID, 's', 'x'),
_ifelse(mode & stat.S_ISGID, 'S', '-'))
symstr += _ifelse(mode & stat.S_IROTH, 'r', '-')
symstr += _ifelse(mode & stat.S_IWOTH, 'w', '-')
symstr += _ifelse(mode & stat.S_IXOTH,
_ifelse(mode & stat.S_ISVTX, 't', 'x'),
_ifelse(mode & stat.S_ISVTX, 'T', '-'))
return symstr
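# Usage sketch (added; the values below are illustrative, not from the original code):
#   ostr_to_sym('644', 'file')      -> '-rw-r--r--'
#   ostr_to_sym('755', 'directory') -> 'drwxr-xr-x'
#   ostr_to_sym('4755', 'file')     -> '-rwsr-xr-x'   (setuid bit renders as 's')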
# Duplicated from backend/common/fileutils.py to remove dependency requirement.
# If making changes make them there too.
def f_date(dbiDate):
return "%04d-%02d-%02d %02d:%02d:%02d" % (dbiDate.year, dbiDate.month,
dbiDate.day, dbiDate.hour, dbiDate.minute, dbiDate.second)
def xmlrpc_time(xtime):
if xtime[8] == 'T':
# oracle backend: 20130304T23:19:17
timefmt='%Y%m%dT%H:%M:%S'
else:
# postgresql backend format: 2014-02-28 18:47:31.506953+01:00
timefmt='%Y-%m-%d %H:%M:%S'
xtime = xtime[:19]
return time.strptime(xtime, timefmt)
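# Illustrative examples (added): both server formats parse to the same kind of
# struct_time; fractional seconds and timezone offsets are discarded because only
# the first 19 characters are kept for the postgresql format.
#   xmlrpc_time('20130304T23:19:17')
#   xmlrpc_time('2014-02-28 18:47:31.506953+01:00')
# both return a time.struct_time usable with time.mktime(), as done in
# FileProcessor.process() above.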
|
comepradz/pybrain
|
refs/heads/master
|
pybrain/rl/environments/twoplayergames/tasks/handicaptask.py
|
25
|
from __future__ import print_function
__author__ = 'Tom Schaul, tom@idsia.ch'
from .capturetask import CaptureGameTask
from pybrain.rl.environments.twoplayergames.capturegameplayers.captureplayer import CapturePlayer
from pybrain.rl.environments.twoplayergames.capturegameplayers import ModuleDecidingPlayer
from pybrain.rl.environments.twoplayergames.capturegame import CaptureGame
# TODO: parametrize hard-coded variables.
# TODO: also allow handicap-advantage
class HandicapCaptureTask(CaptureGameTask):
""" Play against an opponent, and try to beat it with it having the maximal
number of handicap stones:
The score for this task is not the percentage of wins, but the achieved handicap against
the opponent when the results stabilize.
Stabilize: if after minimum of 6 games at the same handicap H, > 80% were won
by the player, increase the handicap. If <20% decrease it.
If the system fluctuates between H and H+1, with at least 10 games played on each level,
assert H+0.5 as handicap.
the score = 2 * #handicaps + proportion of wins at that level. """
maxGames = 200
averageOverGames = 1
minEvals = 5
def __init__(self, *args, **kargs):
CaptureGameTask.__init__(self, *args, **kargs)
self.size = self.env.size
# the maximal handicap given is a full line of stones along the second line.
self.maxHandicaps = (self.size - 2) * 2 + (self.size - 4) * 2
def winProp(self, h):
w, t, wms, lms = self.results[h]
if t > 0:
res = (w - t / 2.) / (t / 2.)
if wms > 0:
res += 0.1 / (t * wms)
if lms > 0:
res -= 1. / (t * lms)
return res
else:
return 0.
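# Worked example (added, hypothetical numbers): with results[h] = [5, 6, 30, 12]
# (5 wins out of 6 games), the base term is (5 - 6/2.) / (6/2.) ~= 0.667, nudged
# slightly by the move-count terms; since the result stays above 0.6 and
# 6 >= minEvals, goUp(h) below holds for that handicap.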
def goUp(self, h):
""" ready to go up one handicap? """
if self.results[h][1] >= self.minEvals:
return self.winProp(h) > 0.6
return False
def goDown(self, h):
""" have to go down one handicap? """
if self.results[h][1] >= self.minEvals:
return self.winProp(h) < -0.6
return False
def bestHandicap(self):
return max(self.results.keys()) - 1
def fluctuating(self):
""" Is the highest handicap unstable? """
high = self.bestHandicap()
if high > 0:
if self.results[high][1] >= 2 * self.minEvals and self.results[high - 1][1] >= 2 * self.minEvals:
return self.goUp(high - 1) and self.goDown(high)
return False
def stable(self, h):
return (self.fluctuating()
or (self.results[h][1] >= 2 * self.minEvals and (not self.goUp(h)) and (not self.goDown(h)))
or (self.results[h][1] >= 2 * self.minEvals and self.goUp(h) and h >= self.maxHandicaps)
or (self.results[h][1] >= 2 * self.minEvals and self.goDown(h) and h == 0))
def addResult(self, h, win, moves):
if h + 1 not in self.results:
self.results[h + 1] = [0, 0, 0, 0]
self.results[h][1] += 1
if win == True:
self.results[h][0] += 1
self.results[h][2] += moves
else:
self.results[h][3] += moves
def reset(self):
# stores [wins, total, sum(moves-til-win), sum(moves-til-lose)]
# for each handicap-key
self.results = {0: [0, 0, 0, 0]}
def f(self, player):
if not isinstance(player, CapturePlayer):
player = ModuleDecidingPlayer(player, self.env, greedySelection=True)
player.color = CaptureGame.WHITE
self.opponent.color = CaptureGame.BLACK
self.reset()
current = 0
games = 0
while games < self.maxGames and not self.stable(current):
games += 1
self.env.reset()
self.env.giveHandicap(current , self.opponent.color)
self.env.playToTheEnd(self.opponent, player)
win = self.env.winner == player.color
self.addResult(current, win, self.env.movesDone)
if self.goUp(current) and current < self.maxHandicaps:
current += 1
elif self.goDown(current) and current > 1:
current -= 1
high = self.bestHandicap()
# the scale goes from -1 to (the highest handicap + 1)
if not self.fluctuating():
return high + self.winProp(high)
else:
return (high - 0.5) + (self.winProp(high) + self.winProp(high - 1)) / 2.
if __name__ == '__main__':
from pybrain.rl.environments.twoplayergames.capturegameplayers import RandomCapturePlayer, KillingPlayer
h = HandicapCaptureTask(4, opponentStart=False)
p1 = RandomCapturePlayer(h.env)
p1 = KillingPlayer(h.env)
print((h(p1)))
print((h.results))
print((h.winProp(0)))
print((h.winProp(1)))
|
wangxuan007/flasky
|
refs/heads/master
|
venv/lib/python2.7/site-packages/setuptools/command/build_ext.py
|
314
|
from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from distutils.errors import DistutilsError
from distutils import log
import os
import sys
import itertools
from setuptools.extension import Library
try:
# Attempt to use Cython for building extensions, if available
from Cython.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
try:
# Python 2.7 or >=3.2
from sysconfig import _CONFIG_VARS
except ImportError:
from distutils.sysconfig import get_config_var
get_config_var("LDSHARED") # make sure _config_vars is initialized
del get_config_var
from distutils.sysconfig import _config_vars as _CONFIG_VARS
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
import dl
use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
except ImportError:
pass
if_dl = lambda s: s if have_rtld else ''
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,
os.path.basename(filename))
src_filename = os.path.join(self.build_lib, filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
if isinstance(ext, Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext, Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = self.shlibs and self.links_to_dynamic(ext) or False
ns = ltd and use_stubs and not isinstance(ext, Library)
ext._links_to_dynamic = ltd
ext._needs_stub = ns
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib, filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
if sys.platform == "darwin":
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = (
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext, Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self, ext)
def build_extension(self, ext):
ext._convert_pyx_sources_to_lang()
_compiler = self.compiler
try:
if isinstance(ext, Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self, ext)
if ext._needs_stub:
cmd = self.get_finalized_command('build_py').build_lib
self.write_stub(cmd, ext)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
return any(pkg + libname in libnames for libname in ext.libraries)
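# Illustrative note (added, hypothetical names): if self.shlibs contains a Library
# whose _full_name is 'pkg.foo' and `ext` has _full_name 'pkg.bar' with
# ext.libraries == ['foo'], then pkg == 'pkg.' and 'pkg.foo' is found in libnames,
# so links_to_dynamic(ext) returns True.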
def get_outputs(self):
return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
def __get_stubs_outputs(self):
# assemble the base name for each extension that needs a stub
ns_ext_bases = (
os.path.join(self.build_lib, *ext._full_name.split('.'))
for ext in self.extensions
if ext._needs_stub
)
# pair each base with the extension
pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
return list(base + fnext for base, fnext in pairs)
def __get_output_extensions(self):
yield '.py'
yield '.pyc'
if self.get_finalized_command('build_py').optimize:
yield '.pyo'
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s", ext._full_name,
output_dir)
stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
'.py')
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file + " already exists! Please delete.")
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
'\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp" + if_dl(", dl"),
" __file__ = pkg_resources.resource_filename"
"(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
])
)
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name == 'nt':
# Build shared libraries
#
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
# libraries=None, library_dirs=None, runtime_library_dirs=None,
# export_symbols=None, extra_preargs=None, extra_postargs=None,
# build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
|
geometalab/OSM-Crosswalk-Detection
|
refs/heads/master
|
tests/data/osm/__init__.py
|
12133432
| |
jonathonwalz/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/module_utils/baz1/__init__.py
|
12133432
| |
jfietkau/Streets4MPI
|
refs/heads/master
|
settings.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# settings.py
# Copyright 2012 Julian Fietkau <http://www.julian-fietkau.de/>,
# Joachim Nitschke
#
# This file is part of Streets4MPI.
#
# Streets4MPI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Streets4MPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Streets4MPI. If not, see <http://www.gnu.org/licenses/>.
#
settings = {
# technical settings
"osm_file" : "osm/test.osm",
"logging" : "stdout",
"persist_traffic_load" : True,
"random_seed" : 3756917, # set to None to use system time
# simulation settings
"max_simulation_steps" : 10,
"number_of_residents" : 100,
"use_residential_origins" : False,
# period over which the traffic is distributed (24h = the whole day)
"traffic_period_duration" : 8, # h
"car_length" : 4, # m
"min_breaking_distance" : 0.001, # m
# take braking deceleration for asphalt
# see http://www.bense-jessen.de/Infos/Page10430/page10430.html
"braking_deceleration" : 7.5, # m/s²
"steps_between_street_construction" : 10,
"trip_volume" : 1
}
|
dustymabe/ansible-modules-core
|
refs/heads/devel
|
network/openswitch/ops_template.py
|
7
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: ops_template
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Push configuration to OpenSwitch
description:
- The OpenSwitch platform provides a library for pushing JSON structured
configuration files into the current running-config. This module
will read the current configuration from OpenSwitch and compare it
against a provided candidate configuration. If there are changes, the
candidate configuration is merged with the current configuration and
pushed into OpenSwitch
extends_documentation_fragment: openswitch
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will search for the source
file in role or playbook root folder in templates directory.
required: true
force:
description:
- The force argument instructs the module to not consider the
current device's running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
required: false
default: false
choices: ['yes', 'no']
backup:
description:
- When this argument is configured true, the module will backup
the running-config from the node prior to making any changes.
The backup file will be written to backups/ in
the root of the playbook directory.
required: false
default: false
choices: ['yes', 'no']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
"""
EXAMPLES = """
- name: set hostname with file lookup
ops_template:
src: ./hostname.json
backup: yes
remote_user: admin
become: yes
- name: set hostname with var
ops_template:
src: "{{ config }}"
remote_user: admin
become: yes
"""
RETURN = """
updates:
description: The list of configuration updates to be merged
returned: always
type: dict
sample: {obj, obj}
responses:
description: returns the responses when configuring using cli
returned: when transport == cli
type: list
sample: [...]
"""
import collections
import copy
def compare(this, other):
parents = [item.text for item in this.parents]
for entry in other:
if this == entry:
return None
return this
def expand(obj, queue):
block = [item.raw for item in obj.parents]
block.append(obj.raw)
current_level = queue
for b in block:
if b not in current_level:
current_level[b] = collections.OrderedDict()
current_level = current_level[b]
for c in obj.children:
if c.raw not in current_level:
current_level[c.raw] = collections.OrderedDict()
def flatten(data, obj):
for k, v in data.items():
obj.append(k)
flatten(v, obj)
return obj
def get_config(module):
config = module.params['config'] or dict()
if not config and not module.params['force']:
config = module.config
return config
def sort(val):
if isinstance(val, (list, set)):
return sorted(val)
return val
def diff(this, other, path=None):
updates = list()
path = path or list()
for key, value in this.items():
if key not in other:
other_value = other.get(key)
updates.append((list(path), key, value, other_value))
else:
if isinstance(this[key], dict):
path.append(key)
updates.extend(diff(this[key], other[key], list(path)))
path.pop()
else:
other_value = other.get(key)
if sort(this[key]) != sort(other_value):
updates.append((list(path), key, value, other_value))
return updates
def merge(changeset, config=None):
config = config or dict()
for path, key, value, _ in changeset:
current_level = config
for part in path:
if part not in current_level:
current_level[part] = dict()
current_level = current_level[part]
current_level[key] = value
return config
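# Worked example (added, hypothetical data): given
#   src    = {'system': {'hostname': 'switch1'}}
#   config = {'system': {'hostname': 'switch0'}}
# diff(src, config) yields [(['system'], 'hostname', 'switch1', 'switch0')], and
# merge(that_changeset, config) folds the new value back into the running config,
# producing {'system': {'hostname': 'switch1'}}.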
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='str'),
force=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
config=dict(type='dict'),
)
mutually_exclusive = [('config', 'backup'), ('config', 'force')]
module = get_module(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
result = dict(changed=False)
contents = get_config(module)
result['_backup'] = copy.deepcopy(module.config)
if module.params['transport'] in ['ssh', 'rest']:
config = contents
src = module.from_json(module.params['src'])
changeset = diff(src, config)
candidate = merge(changeset, config)
updates = dict()
for path, key, new_value, old_value in changeset:
path = '%s.%s' % ('.'.join(path), key)
updates[path] = str(new_value)
result['updates'] = updates
if changeset:
if not module.check_mode:
module.configure(config)
result['changed'] = True
else:
config = module.parse_config(config)
candidate = module.parse_config(module.params['src'])
commands = collections.OrderedDict()
toplevel = [c.text for c in config]
for line in candidate:
if line.text in ['!', '']:
continue
if not line.parents:
if line.text not in toplevel:
expand(line, commands)
else:
item = compare(line, config)
if item:
expand(item, commands)
commands = flatten(commands, list())
if commands:
if not module.check_mode:
commands = [str(c).strip() for c in commands]
response = module.configure(commands)
result['responses'] = response
result['changed'] = True
result['updates'] = commands
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.shell import *
from ansible.module_utils.openswitch import *
if __name__ == '__main__':
main()
|
HydrelioxGitHub/home-assistant
|
refs/heads/dev
|
tests/components/air_quality/test_air_quality.py
|
11
|
"""The tests for the Air Quality component."""
from homeassistant.components.air_quality import (
ATTR_ATTRIBUTION, ATTR_N2O,
ATTR_OZONE, ATTR_PM_10)
from homeassistant.setup import async_setup_component
async def test_state(hass):
"""Test Air Quality state."""
config = {
'air_quality': {
'platform': 'demo',
}
}
assert await async_setup_component(hass, 'air_quality', config)
state = hass.states.get('air_quality.demo_air_quality_home')
assert state is not None
assert state.state == '14'
async def test_attributes(hass):
"""Test Air Quality attributes."""
config = {
'air_quality': {
'platform': 'demo',
}
}
assert await async_setup_component(hass, 'air_quality', config)
state = hass.states.get('air_quality.demo_air_quality_office')
assert state is not None
data = state.attributes
assert data.get(ATTR_PM_10) == 16
assert data.get(ATTR_N2O) is None
assert data.get(ATTR_OZONE) is None
assert data.get(ATTR_ATTRIBUTION) == \
'Powered by Home Assistant'
|
yongshengwang/hue
|
refs/heads/master
|
desktop/core/ext-py/Paste-2.0.1/paste/util/finddata.py
|
50
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
from __future__ import print_function
import os
import sys
from fnmatch import fnmatchcase
from distutils.util import convert_path
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print("Directory %s ignored by pattern %s"
% (fn, pattern), file=sys.stderr)
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, '__init__.py'))
and not prefix):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print("File %s ignored by pattern %s"
% (fn, pattern), file=sys.stderr)
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
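# Usage sketch (added; `mypkg` is a hypothetical package name): the returned mapping
# can be passed straight to distutils/setuptools, e.g.
#   setup(..., package_data=find_package_data(where='.', package='mypkg'))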
if __name__ == '__main__':
import pprint
pprint.pprint(
find_package_data(show_ignored=True))
|
louisdorard/papiseval
|
refs/heads/master
|
BigmlKfold.py
|
1
|
import GenericKfold
from bigml.api import BigML
from bigml.model import Model
from bigml.api import check_resource
import numpy as np
import os
## Class for doing a k-fold cross validation test over BigML
class BigmlKfold(GenericKfold.GenericKfold):
def __init__(self, api):
self.api = api
## Method which is used to train a BigML regression or classification model
# @param self the object pointer
# @param inputs the inputs
# @param outputs the outputs
# @param train the integer array of positions for the data used for training
# @return a list containing the source, the dataset, the model (bigml objects) and the local model
def train_model(self, inputs, outputs, train):
# Create a file with the trained data
f = open("./data_train.csv", "w")
for x0, y0 in zip(inputs[train],outputs[train]):
y0 = np.array(y0)
line = ",".join(np.insert(x0, len(x0), y0))
f.write(line+"\n")
f.close()
# Use the training file created previously to train a BigML model
source = check_resource(self.api.create_source('./data_train.csv',
{
'term_analysis' : {"enabled": False},
'source_parser' : {"locale": "en-US"}
}), self.api.get_source)
dataset = check_resource(self.api.create_dataset(source), self.api.get_dataset)
model = check_resource(self.api.create_model(dataset), self.api.get_model)
local_model = Model(model)
return [source,dataset, model, local_model]
## Method which is used to make predictions using a BigML model
# @param self the object pointer
# @param model an object used to interact with the trained model
# @param inputs the inputs
# @param test the integer array of positions for the data used for testing
# @return a list of predictions for the test outputs given the test inputs
def make_predictions(self, model, inputs, test):
predictions_list = []
# Loop over the inputs in the test set to make predictions based on them
for x0 in inputs[test]:
# We build the input data for predictions
input_data = {}
for i in range(0, len(x0)):
input_data["field"+str(i+1)] = x0[i]
# Make prediction
prediction = model.predict(input_data)
# Add predictions for current model to the list
predictions_list.append(prediction)
return predictions_list
## Method to clean what has been created
# @param self the object pointer
# @param objects the objects needed to clean: source, dataset, model (bigml objects)
def clean(self, objects):
self.api.delete_source(objects.pop(0))
self.api.delete_dataset(objects.pop(0))
self.api.delete_model(objects.pop(0))
os.remove("./data_train.csv")
|
truongdq/chainer
|
refs/heads/master
|
chainer/functions/evaluation/accuracy.py
|
18
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Accuracy(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim >= 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
t_type.shape[0] == x_type.shape[0],
)
for i in range(2, x_type.ndim.eval()):
type_check.expect(x_type.shape[i] == 1)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
y, t = inputs
y = y.reshape(len(y), -1) # flatten
pred = y.argmax(axis=1)
return xp.asarray((pred == t).mean(dtype='f')),
def accuracy(y, t):
"""Computes muticlass classification accuracy of the minibatch.
Args:
y (Variable): Variable holding a matrix whose (i, j)-th element
indicates the score of the class j at the i-th example.
t (Variable): Variable holding an int32 vector of groundtruth labels.
Returns:
Variable: A variable holding a scalar array of the accuracy.
.. note:: This function is non-differentiable.
"""
return Accuracy()(y, t)
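# Illustrative usage (added, not part of the original module):
#   import numpy as np
#   from chainer import Variable
#   y = Variable(np.array([[0.1, 0.9], [0.8, 0.2]], dtype=np.float32))
#   t = Variable(np.array([1, 0], dtype=np.int32))
#   accuracy(y, t).data   # -> array(1.0, dtype=float32): both rows predicted correctly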
|
praba230890/frappe
|
refs/heads/develop
|
frappe/website/doctype/web_form/test_web_form.py
|
49
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.website.render import build_page
from frappe.website.doctype.web_form.web_form import accept
test_records = frappe.get_test_records('Web Form')
class TestWebForm(unittest.TestCase):
def setUp(self):
frappe.conf.disable_website_cache = True
frappe.local.path = None
def tearDown(self):
frappe.conf.disable_website_cache = False
frappe.local.path = None
def test_basic(self):
frappe.set_user("Guest")
html = build_page("manage-events")
self.assertTrue("Please login to create a new Event" in html)
def test_logged_in(self):
frappe.set_user("Administrator")
html = build_page("manage-events")
self.assertFalse("Please login to create a new Event" in html)
self.assertTrue('"/manage-events?new=1"' in html)
def test_new(self):
frappe.set_user("Administrator")
frappe.local.form_dict.new = 1
html = build_page("manage-events")
self.assertTrue('name="subject"' in html)
def test_accept(self):
frappe.set_user("Administrator")
frappe.form_dict.web_form = "manage-events"
frappe.form_dict.doctype = "Event"
frappe.form_dict.subject = "_Test Event Web Form"
frappe.form_dict.description = "_Test Event Description"
frappe.form_dict.starts_on = "2014-09-09"
accept()
self.event_name = frappe.db.get_value("Event",
{"subject": "_Test Event Web Form"})
self.assertTrue(self.event_name)
def test_edit(self):
self.test_accept()
frappe.form_dict.web_form = "manage-events"
frappe.form_dict.doctype = "Event"
frappe.form_dict.name = self.event_name
frappe.form_dict.subject = "_Test Event Web Form"
frappe.form_dict.description = "_Test Event Description 1"
frappe.form_dict.starts_on = "2014-09-09"
self.assertNotEquals(frappe.db.get_value("Event",
self.event_name, "description"), frappe.form_dict.description)
accept()
self.assertEquals(frappe.db.get_value("Event",
self.event_name, "description"), frappe.form_dict.description)
|
huang195/kubernetes
|
refs/heads/master
|
hack/update_owners.py
|
40
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import csv
import re
import json
import os
import random
import subprocess
import sys
import time
import urllib2
import zlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
OWNERS_PATH = os.path.abspath(
os.path.join(BASE_DIR, '..', 'test', 'test_owners.csv'))
GCS_URL_BASE = 'https://storage.googleapis.com/kubernetes-test-history/'
SKIP_MAINTAINERS = {
'a-robinson', 'aronchick', 'bgrant0607-nocc', 'david-mcmahon',
'goltermann', 'sarahnovotny'}
def normalize(name):
name = re.sub(r'\[.*?\]|\{.*?\}', '', name)
name = re.sub(r'\s+', ' ', name)
return name.strip()
def get_test_history(days_ago):
url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json',
time.gmtime(time.time() - days_ago * 24 * 60 * 60))
resp = urllib2.urlopen(url)
content = resp.read()
if resp.headers.get('content-encoding') == 'gzip':
content = zlib.decompress(content, 15 | 16)
return json.loads(content)
def get_test_names_from_test_history():
test_names = set()
for days_ago in range(4):
test_history = get_test_history(days_ago)
test_names.update(normalize(name) for name in test_history['test_names'])
return test_names
def get_test_names_from_local_files():
tests_json = subprocess.check_output(['go', 'run', 'test/list/main.go', '-json'])
tests = json.loads(tests_json)
return {normalize(t['Name'] + (' ' + t['TestName'] if 'k8s.io/' not in t['Name'] else ''))
for t in tests}
def load_owners(fname):
owners = {}
with open(fname) as f:
for n, (name, owner, random_assignment) in enumerate(csv.reader(f)):
if n == 0:
continue # header
owners[normalize(name)] = (owner, int(random_assignment))
return owners
def write_owners(fname, owners):
with open(fname, 'w') as f:
out = csv.writer(f, lineterminator='\n')
out.writerow(['name', 'owner', 'auto-assigned'])
sort_key = lambda (k, v): (k != 'DEFAULT', k) # put 'DEFAULT' first.
items = sorted(owners.items(), key=sort_key)
for name, (owner, random_assignment) in items:
out.writerow([name, owner, int(random_assignment)])
def get_maintainers():
# Github doesn't seem to support team membership listing without a key with
# org admin privileges. Instead, we do it manually:
# Open https://github.com/orgs/kubernetes/teams/kubernetes-maintainers
# Run this in the js console:
# [].slice.call(document.querySelectorAll('.team-member-username a')).map(
# e => e.textContent.trim())
ret = {"alex-mohr", "apelisse", "aronchick", "bgrant0607", "bgrant0607-nocc",
"bprashanth", "brendandburns", "caesarxuchao", "childsb", "cjcullen",
"david-mcmahon", "davidopp", "dchen1107", "deads2k", "derekwaynecarr",
"eparis", "erictune", "fabioy", "fejta", "fgrzadkowski", "freehan",
"gmarek", "grodrigues3", "ingvagabund", "ixdy", "janetkuo", "jbeda",
"jessfraz", "jingxu97", "jlowdermilk", "jsafrane", "jszczepkowski",
"justinsb", "kargakis", "Kashomon", "kevin-wangzefeng", "krousey",
"lavalamp", "liggitt", "luxas", "madhusudancs", "maisem", "matchstick",
"mbohlool", "mikedanese", "mml", "mtaufen", "mwielgus", "ncdc",
"nikhiljindal", "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole",
"Random-Liu", "rmmh", "roberthbailey", "saad-ali", "smarterclayton",
"soltysh", "spxtr", "sttts", "thelinuxfoundation", "thockin",
"timothysc", "timstclair", "vishh", "wojtek-t", "xiang90", "yifan-gu",
"yujuhong", "zmerlynn"}
return sorted(ret - SKIP_MAINTAINERS)
def detect_github_username():
origin_url = subprocess.check_output(['git', 'config', 'remote.origin.url'])
m = re.search(r'github.com[:/](.*)/', origin_url)
if m and m.group(1) != 'kubernetes':
return m.group(1)
raise ValueError('unable to determine GitHub user from '
'`git config remote.origin.url` output, run with --user instead')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--history', action='store_true', help='Generate test list from result history.')
parser.add_argument('--user', help='User to assign new tests to (or RANDOM, default: current GitHub user).')
parser.add_argument('--addonly', action='store_true', help='Only add missing tests, do not change existing.')
parser.add_argument('--check', action='store_true', help='Exit with a nonzero status if the test list has changed.')
options = parser.parse_args()
if options.history:
test_names = get_test_names_from_test_history()
else:
test_names = get_test_names_from_local_files()
test_names.add('DEFAULT')
test_names = sorted(test_names)
owners = load_owners(OWNERS_PATH)
outdated_tests = sorted(set(owners) - set(test_names))
new_tests = sorted(set(test_names) - set(owners))
maintainers = get_maintainers()
print '# OUTDATED TESTS (%d):' % len(outdated_tests)
print '\n'.join('%s -- %s%s' %
(t, owners[t][0], ['', ' (random)'][owners[t][1]])
for t in outdated_tests)
print '# NEW TESTS (%d):' % len(new_tests)
print '\n'.join(new_tests)
if options.check:
if new_tests or outdated_tests:
print
print 'ERROR: the test list has changed'
sys.exit(1)
sys.exit(0)
if not options.user:
options.user = detect_github_username()
for name in outdated_tests:
owners.pop(name)
if not options.addonly:
print '# UNEXPECTED MAINTAINERS ',
print '(randomly assigned, but not in kubernetes-maintainers)'
for name, (owner, random_assignment) in sorted(owners.iteritems()):
if random_assignment and owner not in maintainers:
print '%-16s %s' % (owner, name)
owners.pop(name)
print
owner_counts = collections.Counter(
owner for name, (owner, random) in owners.iteritems()
if owner in maintainers)
for test_name in set(test_names) - set(owners):
random_assignment = True
if options.user.lower() == 'random':
new_owner, _count = random.choice(owner_counts.most_common()[-4:])
else:
new_owner = options.user
random_assignment = False
owner_counts[new_owner] += 1
owners[test_name] = (new_owner, random_assignment)
if options.user.lower() == 'random':
print '# Tests per maintainer:'
for owner, count in owner_counts.most_common():
print '%-20s %3d' % (owner, count)
write_owners(OWNERS_PATH, owners)
if __name__ == '__main__':
main()
|
iivic/BoiseStateX
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_import.py
|
27
|
# -*- coding: utf-8 -*-
import datetime
import ddt
import unittest
from fs.memoryfs import MemoryFS
from lxml import etree
from mock import Mock, patch
from django.utils.timezone import UTC
from xmodule.xml_module import is_pointer_tag
from opaque_keys.edx.locations import Location
from xmodule.modulestore import only_xmodules
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore, LibraryXMLModuleStore
from xmodule.modulestore.inheritance import compute_inherited_metadata
from xmodule.x_module import XModuleMixin
from xmodule.fields import Date
from xmodule.tests import DATA_DIR
from xmodule.modulestore.inheritance import InheritanceMixin
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xblock.core import XBlock
from xblock.fields import Scope, String, Integer
from xblock.runtime import KvsFieldData, DictKeyValueStore
ORG = 'test_org'
COURSE = 'test_course'
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS())
def __init__(self, load_error_modules, library=False):
if library:
xmlstore = LibraryXMLModuleStore("data_dir", source_dirs=[], load_error_modules=load_error_modules)
else:
xmlstore = XMLModuleStore("data_dir", source_dirs=[], load_error_modules=load_error_modules)
course_id = SlashSeparatedCourseKey(ORG, COURSE, 'test_run')
course_dir = "test_dir"
error_tracker = Mock()
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=course_id,
course_dir=course_dir,
error_tracker=error_tracker,
load_error_modules=load_error_modules,
mixins=(InheritanceMixin, XModuleMixin),
field_data=KvsFieldData(DictKeyValueStore()),
)
def render_template(self, _template, _context):
raise Exception("Shouldn't be called")
class BaseCourseTestCase(unittest.TestCase):
'''Make sure module imports work properly, including for malformed inputs'''
@staticmethod
def get_system(load_error_modules=True, library=False):
'''Get a dummy system'''
return DummySystem(load_error_modules, library=library)
def get_course(self, name):
"""Get a test course by directory name. If there's more than one, error."""
print "Importing {0}".format(name)
modulestore = XMLModuleStore(
DATA_DIR,
source_dirs=[name],
xblock_mixins=(InheritanceMixin,),
xblock_select=only_xmodules,
)
courses = modulestore.get_courses()
self.assertEquals(len(courses), 1)
return courses[0]
class GenericXBlock(XBlock):
"""XBlock for testing pure xblock xml import"""
has_children = True
field1 = String(default="something", scope=Scope.user_state)
field2 = Integer(scope=Scope.user_state)
@ddt.ddt
class PureXBlockImportTest(BaseCourseTestCase):
"""
Tests of import pure XBlocks (not XModules) from xml
"""
def assert_xblocks_are_good(self, block):
"""Assert a number of conditions that must be true for `block` to be good."""
scope_ids = block.scope_ids
self.assertIsNotNone(scope_ids.usage_id)
self.assertIsNotNone(scope_ids.def_id)
for child_id in block.children:
child = block.runtime.get_block(child_id)
self.assert_xblocks_are_good(child)
@XBlock.register_temp_plugin(GenericXBlock)
@ddt.data(
"<genericxblock/>",
"<genericxblock field1='abc' field2='23' />",
"<genericxblock field1='abc' field2='23'><genericxblock/></genericxblock>",
)
@patch('xmodule.x_module.XModuleMixin.location')
def test_parsing_pure_xblock(self, xml, mock_location):
system = self.get_system(load_error_modules=False)
descriptor = system.process_xml(xml)
self.assertIsInstance(descriptor, GenericXBlock)
self.assert_xblocks_are_good(descriptor)
self.assertFalse(mock_location.called)
class ImportTestCase(BaseCourseTestCase):
date = Date()
def test_fallback(self):
'''Check that malformed xml loads as an ErrorDescriptor.'''
# Use an exotic character to also flush out Unicode issues.
bad_xml = u'''<sequential display_name="oops\N{SNOWMAN}"><video url="hi"></sequential>'''
system = self.get_system()
descriptor = system.process_xml(bad_xml)
self.assertEqual(descriptor.__class__.__name__, 'ErrorDescriptorWithMixins')
def test_unique_url_names(self):
'''Check that each error gets its very own url_name'''
bad_xml = '''<sequential display_name="oops"><video url="hi"></sequential>'''
bad_xml2 = '''<sequential url_name="oops"><video url="hi"></sequential>'''
system = self.get_system()
descriptor1 = system.process_xml(bad_xml)
descriptor2 = system.process_xml(bad_xml2)
self.assertNotEqual(descriptor1.location, descriptor2.location)
# Check that each vertical gets its very own url_name
bad_xml = '''<vertical display_name="abc"><problem url_name="exam1:2013_Spring:abc"/></vertical>'''
bad_xml2 = '''<vertical display_name="abc"><problem url_name="exam2:2013_Spring:abc"/></vertical>'''
descriptor1 = system.process_xml(bad_xml)
descriptor2 = system.process_xml(bad_xml2)
self.assertNotEqual(descriptor1.location, descriptor2.location)
def test_reimport(self):
'''Make sure an already-exported error xml tag loads properly'''
self.maxDiff = None
bad_xml = '''<sequential display_name="oops"><video url="hi"></sequential>'''
system = self.get_system()
descriptor = system.process_xml(bad_xml)
node = etree.Element('unknown')
descriptor.add_xml_to_node(node)
re_import_descriptor = system.process_xml(etree.tostring(node))
self.assertEqual(re_import_descriptor.__class__.__name__, 'ErrorDescriptorWithMixins')
self.assertEqual(descriptor.contents, re_import_descriptor.contents)
self.assertEqual(descriptor.error_msg, re_import_descriptor.error_msg)
def test_fixed_xml_tag(self):
"""Make sure a tag that's been fixed exports as the original tag type"""
# create a error tag with valid xml contents
root = etree.Element('error')
good_xml = '''<sequential display_name="fixed"><video url="hi"/></sequential>'''
root.text = good_xml
xml_str_in = etree.tostring(root)
# load it
system = self.get_system()
descriptor = system.process_xml(xml_str_in)
# export it
node = etree.Element('unknown')
descriptor.add_xml_to_node(node)
# Now make sure the exported xml is a sequential
self.assertEqual(node.tag, 'sequential')
def course_descriptor_inheritance_check(self, descriptor, from_date_string, unicorn_color, url_name):
"""
Checks to make sure that metadata inheritance on a course descriptor is respected.
"""
# pylint: disable=protected-access
print(descriptor, descriptor._field_data)
self.assertEqual(descriptor.due, ImportTestCase.date.from_json(from_date_string))
# Check that the child inherits due correctly
child = descriptor.get_children()[0]
self.assertEqual(child.due, ImportTestCase.date.from_json(from_date_string))
# Need to convert the value to canonical JSON before comparing.
self.assertEqual(
ImportTestCase.date.to_json(ImportTestCase.date.from_json(from_date_string)),
child.xblock_kvs.inherited_settings['due']
)
# Now export and check things
descriptor.runtime.export_fs = MemoryFS()
node = etree.Element('unknown')
descriptor.add_xml_to_node(node)
# Check that the exported xml is just a pointer
print("Exported xml:", etree.tostring(node))
self.assertTrue(is_pointer_tag(node))
# but it's a special case course pointer
self.assertEqual(node.attrib['course'], COURSE)
self.assertEqual(node.attrib['org'], ORG)
# Does the course still have unicorns?
with descriptor.runtime.export_fs.open('course/{url_name}.xml'.format(url_name=url_name)) as f:
course_xml = etree.fromstring(f.read())
self.assertEqual(course_xml.attrib['unicorn'], unicorn_color)
# the course and org tags should be _only_ in the pointer
self.assertNotIn('course', course_xml.attrib)
self.assertNotIn('org', course_xml.attrib)
# did we successfully strip the url_name from the definition contents?
self.assertNotIn('url_name', course_xml.attrib)
# Does the chapter tag now have a due attribute?
# hardcoded path to child
with descriptor.runtime.export_fs.open('chapter/ch.xml') as f:
chapter_xml = etree.fromstring(f.read())
self.assertEqual(chapter_xml.tag, 'chapter')
self.assertNotIn('due', chapter_xml.attrib)
def test_metadata_import_export(self):
"""Two checks:
- unknown metadata is preserved across import-export
- inherited metadata doesn't leak to children.
"""
system = self.get_system()
from_date_string = 'March 20 17:00'
url_name = 'test1'
unicorn_color = 'purple'
start_xml = '''
<course org="{org}" course="{course}"
due="{due}" url_name="{url_name}" unicorn="{unicorn_color}">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>'''.format(
due=from_date_string, org=ORG, course=COURSE, url_name=url_name, unicorn_color=unicorn_color
)
descriptor = system.process_xml(start_xml)
compute_inherited_metadata(descriptor)
self.course_descriptor_inheritance_check(descriptor, from_date_string, unicorn_color, url_name)
def test_library_metadata_import_export(self):
"""Two checks:
- unknown metadata is preserved across import-export
- inherited metadata doesn't leak to children.
"""
system = self.get_system(library=True)
from_date_string = 'March 26 17:00'
url_name = 'test2'
unicorn_color = 'rainbow'
start_xml = '''
<library org="TestOrg" library="TestLib" display_name="stuff">
<course org="{org}" course="{course}"
due="{due}" url_name="{url_name}" unicorn="{unicorn_color}">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>
</library>'''.format(
due=from_date_string, org=ORG, course=COURSE, url_name=url_name, unicorn_color=unicorn_color
)
descriptor = system.process_xml(start_xml)
# pylint: disable=protected-access
original_unwrapped = descriptor._unwrapped_field_data
LibraryXMLModuleStore.patch_descriptor_kvs(descriptor)
# '_unwrapped_field_data' is reset in `patch_descriptor_kvs`
# pylint: disable=protected-access
self.assertIsNot(original_unwrapped, descriptor._unwrapped_field_data)
compute_inherited_metadata(descriptor)
# Check the course module, since it has inheritance
descriptor = descriptor.get_children()[0]
self.course_descriptor_inheritance_check(descriptor, from_date_string, unicorn_color, url_name)
def test_metadata_no_inheritance(self):
"""
Checks that the default value of None (for due) does not get marked as inherited when a
course is the root block.
"""
system = self.get_system()
url_name = 'test1'
start_xml = '''
<course org="{org}" course="{course}"
url_name="{url_name}" unicorn="purple">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>'''.format(org=ORG, course=COURSE, url_name=url_name)
descriptor = system.process_xml(start_xml)
compute_inherited_metadata(descriptor)
self.course_descriptor_no_inheritance_check(descriptor)
def test_library_metadata_no_inheritance(self):
"""
Checks that the default value of None (for due) does not get marked as inherited when a
library is the root block.
"""
system = self.get_system()
url_name = 'test1'
start_xml = '''
<library org="TestOrg" library="TestLib" display_name="stuff">
<course org="{org}" course="{course}"
url_name="{url_name}" unicorn="purple">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>
</library>'''.format(org=ORG, course=COURSE, url_name=url_name)
descriptor = system.process_xml(start_xml)
LibraryXMLModuleStore.patch_descriptor_kvs(descriptor)
compute_inherited_metadata(descriptor)
# Run the checks on the course node instead.
descriptor = descriptor.get_children()[0]
self.course_descriptor_no_inheritance_check(descriptor)
def course_descriptor_no_inheritance_check(self, descriptor):
"""
Verifies that a default value of None (for due) does not get marked as inherited.
"""
self.assertEqual(descriptor.due, None)
# Check that the child does not inherit a value for due
child = descriptor.get_children()[0]
self.assertEqual(child.due, None)
# Check that the child hasn't started yet
self.assertLessEqual(
datetime.datetime.now(UTC()),
child.start
)
def override_metadata_check(self, descriptor, child, course_due, child_due):
"""
Verifies that the due date can be overridden at the child level.
"""
self.assertEqual(descriptor.due, ImportTestCase.date.from_json(course_due))
self.assertEqual(child.due, ImportTestCase.date.from_json(child_due))
# Test inherited metadata. Due does not appear here (because explicitly set on child).
self.assertEqual(
ImportTestCase.date.to_json(ImportTestCase.date.from_json(course_due)),
child.xblock_kvs.inherited_settings['due']
)
def test_metadata_override_default(self):
"""
Checks that the due date can be overridden at the child level when a course is the root.
"""
system = self.get_system()
course_due = 'March 20 17:00'
child_due = 'April 10 00:00'
url_name = 'test1'
start_xml = '''
<course org="{org}" course="{course}"
due="{due}" url_name="{url_name}" unicorn="purple">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>'''.format(due=course_due, org=ORG, course=COURSE, url_name=url_name)
descriptor = system.process_xml(start_xml)
child = descriptor.get_children()[0]
# pylint: disable=protected-access
child._field_data.set(child, 'due', child_due)
compute_inherited_metadata(descriptor)
self.override_metadata_check(descriptor, child, course_due, child_due)
def test_library_metadata_override_default(self):
"""
Checks that the due date can be overridden at the child level when a library is the root.
"""
system = self.get_system()
course_due = 'March 20 17:00'
child_due = 'April 10 00:00'
url_name = 'test1'
start_xml = '''
<library org="TestOrg" library="TestLib" display_name="stuff">
<course org="{org}" course="{course}"
due="{due}" url_name="{url_name}" unicorn="purple">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>
</library>'''.format(due=course_due, org=ORG, course=COURSE, url_name=url_name)
descriptor = system.process_xml(start_xml)
LibraryXMLModuleStore.patch_descriptor_kvs(descriptor)
# Chapter is two levels down here.
child = descriptor.get_children()[0].get_children()[0]
# pylint: disable=protected-access
child._field_data.set(child, 'due', child_due)
compute_inherited_metadata(descriptor)
descriptor = descriptor.get_children()[0]
self.override_metadata_check(descriptor, child, course_due, child_due)
def test_is_pointer_tag(self):
"""
Check that is_pointer_tag works properly.
"""
yes = ["""<html url_name="blah"/>""",
"""<html url_name="blah"></html>""",
"""<html url_name="blah"> </html>""",
"""<problem url_name="blah"/>""",
"""<course org="HogwartsX" course="Mathemagics" url_name="3.14159"/>"""]
no = ["""<html url_name="blah" also="this"/>""",
"""<html url_name="blah">some text</html>""",
"""<problem url_name="blah"><sub>tree</sub></problem>""",
"""<course org="HogwartsX" course="Mathemagics" url_name="3.14159">
<chapter>3</chapter>
</course>
"""]
for xml_str in yes:
print "should be True for {0}".format(xml_str)
self.assertTrue(is_pointer_tag(etree.fromstring(xml_str)))
for xml_str in no:
print "should be False for {0}".format(xml_str)
self.assertFalse(is_pointer_tag(etree.fromstring(xml_str)))
def test_metadata_inherit(self):
"""Make sure that metadata is inherited properly"""
print "Starting import"
course = self.get_course('toy')
def check_for_key(key, node, value):
"recursive check for presence of key"
print "Checking {0}".format(node.location.to_deprecated_string())
self.assertEqual(getattr(node, key), value)
for c in node.get_children():
check_for_key(key, c, value)
check_for_key('graceperiod', course, course.graceperiod)
def test_policy_loading(self):
"""Make sure that when two courses share content with the same
org and course names, policy applies to the right one."""
toy = self.get_course('toy')
two_toys = self.get_course('two_toys')
self.assertEqual(toy.url_name, "2012_Fall")
self.assertEqual(two_toys.url_name, "TT_2012_Fall")
toy_ch = toy.get_children()[0]
two_toys_ch = two_toys.get_children()[0]
self.assertEqual(toy_ch.display_name, "Overview")
self.assertEqual(two_toys_ch.display_name, "Two Toy Overview")
# Also check that the grading policy loaded
self.assertEqual(two_toys.grade_cutoffs['C'], 0.5999)
# Also check that keys from policy are run through the
# appropriate attribute maps -- 'graded' should be True, not 'true'
self.assertEqual(toy.graded, True)
def test_definition_loading(self):
"""When two courses share the same org and course name and
both have a module with the same url_name, the definitions shouldn't clash.
TODO (vshnayder): once we have a CMS, this shouldn't
happen--locations should uniquely name definitions. But in
our imperfect XML world, it can (and likely will) happen."""
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['toy', 'two_toys'])
location = Location("edX", "toy", "2012_Fall", "video", "Welcome", None)
toy_video = modulestore.get_item(location)
location_two = Location("edX", "toy", "TT_2012_Fall", "video", "Welcome", None)
two_toy_video = modulestore.get_item(location_two)
self.assertEqual(toy_video.youtube_id_1_0, "p2Q6BrNhdh8")
self.assertEqual(two_toy_video.youtube_id_1_0, "p2Q6BrNhdh9")
def test_colon_in_url_name(self):
"""Ensure that colons in url_names convert to file paths properly"""
print "Starting import"
# Not using get_courses because we need the modulestore object too afterward
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['toy'])
courses = modulestore.get_courses()
self.assertEquals(len(courses), 1)
course = courses[0]
print "course errors:"
for (msg, err) in modulestore.get_course_errors(course.id):
print msg
print err
chapters = course.get_children()
self.assertEquals(len(chapters), 5)
ch2 = chapters[1]
self.assertEquals(ch2.url_name, "secret:magic")
print "Ch2 location: ", ch2.location
also_ch2 = modulestore.get_item(ch2.location)
self.assertEquals(ch2, also_ch2)
print "making sure html loaded"
loc = course.id.make_usage_key('html', 'secret:toylab')
html = modulestore.get_item(loc)
self.assertEquals(html.display_name, "Toy lab")
def test_unicode(self):
"""Check that courses with unicode characters in filenames and in
org/course/name import properly. Currently, this means: (a) Having
files with unicode names does not prevent import; (b) if files are not
loaded because of unicode filenames, there are appropriate
exceptions/errors to that effect."""
print "Starting import"
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['test_unicode'])
courses = modulestore.get_courses()
self.assertEquals(len(courses), 1)
course = courses[0]
print "course errors:"
# Expect to find an error/exception about characters in "®esources"
expect = "InvalidKeyError"
errors = [
(msg.encode("utf-8"), err.encode("utf-8"))
for msg, err
in modulestore.get_course_errors(course.id)
]
self.assertTrue(any(
expect in msg or expect in err
for msg, err in errors
))
chapters = course.get_children()
self.assertEqual(len(chapters), 4)
def test_url_name_mangling(self):
"""
Make sure that url_names are only mangled once.
"""
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['toy'])
toy_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
course = modulestore.get_course(toy_id)
chapters = course.get_children()
ch1 = chapters[0]
sections = ch1.get_children()
self.assertEqual(len(sections), 4)
for i in (2, 3):
video = sections[i]
# Name should be 'video_{hash}'
print "video {0} url_name: {1}".format(i, video.url_name)
self.assertEqual(len(video.url_name), len('video_') + 12)
def test_poll_and_conditional_import(self):
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['conditional_and_poll'])
course = modulestore.get_courses()[0]
chapters = course.get_children()
ch1 = chapters[0]
sections = ch1.get_children()
self.assertEqual(len(sections), 1)
conditional_location = course.id.make_usage_key('conditional', 'condone')
module = modulestore.get_item(conditional_location)
self.assertEqual(len(module.children), 1)
poll_location = course.id.make_usage_key('poll_question', 'first_poll')
module = modulestore.get_item(poll_location)
self.assertEqual(len(module.get_children()), 0)
self.assertEqual(module.voted, False)
self.assertEqual(module.poll_answer, '')
self.assertEqual(module.poll_answers, {})
self.assertEqual(
module.answers,
[
{'text': u'Yes', 'id': 'Yes'},
{'text': u'No', 'id': 'No'},
{'text': u"Don't know", 'id': 'Dont_know'}
]
)
def test_error_on_import(self):
'''Check that when load_error_modules is False, an exception is raised rather than an ErrorModule being returned'''
bad_xml = '''<sequential display_name="oops"><video url="hi"></sequential>'''
system = self.get_system(False)
self.assertRaises(etree.XMLSyntaxError, system.process_xml, bad_xml)
def test_graphicslidertool_import(self):
'''
Check to see if definition_from_xml in gst_module.py
works properly. Pulls data from the graphic_slider_tool directory
in the test data directory.
'''
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['graphic_slider_tool'])
sa_id = SlashSeparatedCourseKey("edX", "gst_test", "2012_Fall")
location = sa_id.make_usage_key("graphical_slider_tool", "sample_gst")
gst_sample = modulestore.get_item(location)
render_string_from_sample_gst_xml = """
<slider var="a" style="width:400px;float:left;"/>\
<plot style="margin-top:15px;margin-bottom:15px;"/>""".strip()
self.assertIn(render_string_from_sample_gst_xml, gst_sample.data)
def test_word_cloud_import(self):
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['word_cloud'])
course = modulestore.get_courses()[0]
chapters = course.get_children()
ch1 = chapters[0]
sections = ch1.get_children()
self.assertEqual(len(sections), 1)
location = course.id.make_usage_key('word_cloud', 'cloud1')
module = modulestore.get_item(location)
self.assertEqual(len(module.get_children()), 0)
self.assertEqual(module.num_inputs, 5)
self.assertEqual(module.num_top_words, 250)
def test_cohort_config(self):
"""
Check that cohort config parsing works right.
Note: The cohort config on the CourseModule is no longer used.
See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
modulestore = XMLModuleStore(DATA_DIR, source_dirs=['toy'])
toy_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
course = modulestore.get_course(toy_id)
# No config -> False
self.assertFalse(course.is_cohorted)
# empty config -> False
course.cohort_config = {}
self.assertFalse(course.is_cohorted)
# false config -> False
course.cohort_config = {'cohorted': False}
self.assertFalse(course.is_cohorted)
# and finally...
course.cohort_config = {'cohorted': True}
self.assertTrue(course.is_cohorted)
|
sbfnk/epiwork-website
|
refs/heads/master
|
apps/pollster/management/commands/epiweb_migrate_intake.py
|
1
|
from optparse import make_option
from django.core.management.base import CommandError, BaseCommand
import cPickle as pickle
import MySQLdb
def yearmonth(date):
if not date:
return ''
return "%d/%02d" % (date.year, date.month)
def setmulti(intake, dest, data, src, index):
if src not in data:
return
val = int(data[src][index-1])
if val != 99:
setattr(intake, "%s_%d" % (dest, index), True)
setattr(intake, "%s_%d_open" % (dest, index), val)
class Command(BaseCommand):
help = 'Migrate legacy intake survey responses from an epiwork database into the pollster intake survey.'
option_list = BaseCommand.option_list + (
make_option(None, '--host', action='store', type="string",
dest='host',
help='Source database host'),
make_option('-p', '--password', action='store', type="string",
dest='password',
help='Source database password'),
make_option('-d', '--database', action='store', type="string",
dest='database',
help='Source database name'),
make_option('-u', '--username', action='store', type="string",
dest='username',
help='User name to connect to the source database'),
)
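# Example invocation (editor's note; database, user and host names are placeholders):
#   python manage.py epiweb_migrate_intake -d epiwork_legacy -u epiwork -p secret --host db.example.org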
def convert(self, data, intake):
intake.Q1 = data["IntakeQ1"]
intake.Q2 = yearmonth(data["IntakeQ2"])
intake.Q3 = data["IntakeQ3"]
intake.Q4b = data["IntakeQ4"]
if data.get("IntakeQ5"):
intake.Q5_1 = 1 in data["IntakeQ5"]
intake.Q5_2 = 2 in data["IntakeQ5"]
intake.Q5_3 = 3 in data["IntakeQ5"]
setmulti(intake, "Q6", data, "IntakeQ6", 1)
setmulti(intake, "Q6", data, "IntakeQ6", 2)
setmulti(intake, "Q6", data, "IntakeQ6", 3)
setmulti(intake, "Q6", data, "IntakeQ6", 4)
setmulti(intake, "Q6", data, "IntakeQ6", 5)
intake.Q6b = data.get("IntakeQ6b")
if data.get("IntakeQ7"):
intake.Q7_0 = data["IntakeQ7"] == 0
intake.Q7_1 = data["IntakeQ7"] == 1
intake.Q7_2 = data["IntakeQ7"] == 2
intake.Q7_3 = data["IntakeQ7"] == 3
intake.Q7_4 = data["IntakeQ7"] == 4
intake.Q7_5 = data["IntakeQ7"] == 5
intake.Q7b = data["IntakeQ7b"]
if intake.Q7b == 4:
intake.Q7b = 3
if intake.Q7b == 5:
intake.Q7b = 4
intake.Q9 = data["IntakeQ8"]
intake.Q10 = data["IntakeQ10"]
q10b = data.get("IntakeQ10b")
if q10b:
intake.Q10b = '1'
intake.Q10b_1_open = q10b
else:
intake.Q10b = '0'
if data.get("IntakeQ10c"):
intake.Q10c_0 = 0 in data["IntakeQ10c"]
intake.Q10c_1 = 1 in data["IntakeQ10c"]
intake.Q10c_2 = 2 in data["IntakeQ10c"]
intake.Q10c_3 = 3 in data["IntakeQ10c"]
intake.Q10c_4 = 4 in data["IntakeQ10c"]
intake.Q10c_5 = 5 in data["IntakeQ10c"]
intake.Q10c_6 = 6 in data["IntakeQ10c"]
intake.Q10c_7 = 7 in data["IntakeQ10c"]
intake.Q10c_8 = 8 in data["IntakeQ10c"]
intake.Q10c_9 = 9 in data["IntakeQ10c"]
if data.get("IntakeQ12"):
intake.Q11_1 = 0 in data["IntakeQ12"]
intake.Q11_2 = 1 in data["IntakeQ12"]
intake.Q11_3 = 2 in data["IntakeQ12"]
intake.Q11_4 = 3 in data["IntakeQ12"]
intake.Q11_5 = 4 in data["IntakeQ12"]
intake.Q11_6 = 5 in data["IntakeQ12"]
intake.Q12 = data["IntakeQ13"]
intake.Q12b = data["IntakeQ13b"]
q14 = data.get("IntakeQ14")
if q14 == 0:
intake.Q13 = 0
elif q14 == 1:
intake.Q13 = 2
elif q14 == 2:
intake.Q13 = 3
elif q14 == 3:
intake.Q13 = 4
if data.get("IntakeQ15"):
intake.Q14_0 = 0 in data["IntakeQ15"]
intake.Q14_1 = 1 in data["IntakeQ15"]
intake.Q14_2 = 2 in data["IntakeQ15"]
intake.Q14_3 = 3 in data["IntakeQ15"]
intake.Q8 = data["IntakeQ18"]
intake.Q15_0 = data["IntakeQ16"] == 0
intake.Q15_1 = data["IntakeQ16"] == 1
intake.Q15_2 = data["IntakeQ16"] == 2
intake.Q15_3 = data["IntakeQ16"] == 3
intake.Q15_4 = data["IntakeQ16"] == 4
if data.get("IntakeQ17"):
intake.Q16_0 = 0 in data["IntakeQ17"]
intake.Q16_1 = 1 in data["IntakeQ17"]
intake.Q16_2 = 2 in data["IntakeQ17"]
intake.Q16_3 = 3 in data["IntakeQ17"]
intake.Q16_4 = 4 in data["IntakeQ17"]
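# Editor's note: load_profiles() below returns a list of dicts with keys
# "user_id", "updated", "survey_id", "data" (the unpickled legacy answers) and
# "user" (the matching survey_surveyuser row, or None if it is missing).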
def load_profiles(self, options):
verbosity = options.get('verbosity')
database = options.get('database')
host = options.get('host') or ''
username = options.get('username')
password = options.get('password') or ''
db = MySQLdb.connect(user=username, passwd=password, host=host, db=database)
cursor = db.cursor()
surveyusers = {}
cursor.execute("""
SELECT SU.id, SUU.user_id, SU.global_id, SU.deleted, SU.last_participation_id, SU.last_participation_date, SU.name
FROM survey_surveyuser SU, survey_surveyuser_user SUU
WHERE SUU.surveyuser_id = SU.id""")
for surveyuser in cursor.fetchall():
this = {
"id": surveyuser[0],
"user_id": surveyuser[1],
"global_id": surveyuser[2],
"deleted": surveyuser[3],
"last_participation_id": surveyuser[4],
"last_participation_date": surveyuser[5],
"name": surveyuser[6]
}
surveyusers[this["id"]] = this
profiles = []
cursor.execute("""
SELECT user_id, updated, survey_id, data
FROM survey_profile""")
for profile in cursor.fetchall():
this = {
"user_id": profile[0],
"updated": profile[1],
"survey_id": profile[2],
"data": profile[3]
}
if this["data"]:
this["data"] = pickle.loads(this["data"])
this["user"] = surveyusers.get(this["user_id"])
if verbosity > 0 and not this["user"]:
self.stderr.write("missing user %s\n" % (this["user_id"], ))
profiles.append(this)
return profiles
def handle(self, *args, **options):
from apps.pollster import models
verbosity = int(options.get('verbosity'))
if not options.get('database'):
raise CommandError("you need to specify the source database")
Intake = models.Survey.get_by_shortname('intake').as_model()
profiles = self.load_profiles(options)
count = 0
for p in profiles:
count += 1
if verbosity > 1:
self.stdout.write("importing %s of %s\n" % (count, len(profiles)))
u = p["user"]
if u and verbosity > 2:
self.stdout.write("%s (user %s, global_id %s)\n" % (u["name"], u["user_id"], u["global_id"]))
if p["updated"]:
data = p["data"]
if verbosity > 2:
self.stdout.write(" compiled on %s: %s\n" % (p["updated"], data))
intake = Intake()
intake.user = u["user_id"]
intake.global_id = u["global_id"]
intake.timestamp = p["updated"]
intake.channel = 'migrate-intake'
self.convert(data, intake)
intake.save()
|
chafique-delli/OpenUpgrade
|
refs/heads/master
|
addons/website_forum_doc/models/documentation.py
|
52
|
# -*- coding: utf-8 -*-
import openerp
from openerp.osv import osv, fields
class Documentation(osv.Model):
_name = 'forum.documentation.toc'
_description = 'Documentation ToC'
_inherit = ['website.seo.metadata']
_order = "parent_left"
_parent_order = "sequence, name"
_parent_store = True
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_columns = {
'sequence': fields.integer('Sequence'),
'display_name': fields.function(_name_get_fnc, type="char", string='Full Name'),
'name': fields.char('Name', required=True, translate=True),
'introduction': fields.html('Introduction', translate=True),
'parent_id': fields.many2one('forum.documentation.toc', 'Parent Table Of Content', ondelete='cascade'),
'child_ids': fields.one2many('forum.documentation.toc', 'parent_id', 'Children Table Of Content'),
'parent_left': fields.integer('Left Parent', select=True),
'parent_right': fields.integer('Right Parent', select=True),
'post_ids': fields.one2many('forum.post', 'documentation_toc_id', 'Posts'),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
class DocumentationStage(osv.Model):
_name = 'forum.documentation.stage'
_description = 'Post Stage'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence'),
'name': fields.char('Stage Name', required=True, translate=True),
}
class Post(osv.Model):
_inherit = 'forum.post'
_columns = {
'documentation_toc_id': fields.many2one('forum.documentation.toc', 'Documentation ToC', ondelete='set null'),
'documentation_stage_id': fields.many2one('forum.documentation.stage', 'Documentation Stage'),
'color': fields.integer('Color Index')
}
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('forum.documentation.stage')
stage_ids = stage_obj.search(cr, uid, [], context=context)
result = stage_obj.name_get(cr, uid, stage_ids, context=context)
return result, {}
_group_by_full = {
'documentation_stage_id': _read_group_stage_ids,
}
|
Balachan27/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex_multi_apps/app1/4_auto.py
|
385
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("app1", "3_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
RudolfCardinal/crate
|
refs/heads/master
|
crate_anon/crateweb/research/migrations/0004_auto_20170212_0137.py
|
1
|
#!/usr/bin/env python
"""
crate_anon/crateweb/research/migrations/0004_auto_20170212_0137.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <http://www.gnu.org/licenses/>.
===============================================================================
**Research app, migration 0004.**
"""
# Generated by Django 1.10.5 on 2017-02-12 01:37
from __future__ import unicode_literals
from cardinal_pythonlib.django.fields.jsonclassfield import JsonClassField
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('research', '0003_patientexplorer_patientexploreraudit'),
]
operations = [
migrations.AlterField(
model_name='patientexplorer',
name='patient_multiquery',
field=JsonClassField(null=True, verbose_name='PatientMultiQuery as JSON'), # noqa
),
migrations.AlterField(
model_name='query',
name='args',
field=JsonClassField(null=True, verbose_name='SQL arguments (as JSON)'), # noqa
),
]
|
code4hk/hk-news-scrapper
|
refs/heads/master
|
parsers/baseparser.py
|
1
|
from bs4 import BeautifulSoup
from util import logger
import asyncio
import aiohttp
import re
log = logger.get(__name__)
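# grab_url() below fetches a page with aiohttp and retries on transient failures,
# sleeping a little longer before each new attempt; once max_retry attempts are
# exhausted it raises RuntimeError. Illustrative use inside a coroutine (editor's
# note, URL is a placeholder):
#   html = yield from grab_url('http://news.example.com/article/1')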
@asyncio.coroutine
def grab_url(url, max_retry=5):
text = None
retry = False
try:
# todo:
# TimeoutError: [Errno 60] Operation timed out
# Fatal read error on socket transport
response = yield from aiohttp.request('GET', url)
text = yield from response.read()
assert response.status == 200
except (AssertionError, aiohttp.ClientOSError, aiohttp.ClientResponseError):
yield from asyncio.sleep(6-max_retry)
retry = True
if retry:
if max_retry == 0:
raise RuntimeError('Too many attempts to download %s' % url)
return (yield from grab_url(url, max_retry - 1))
log.debug('Retrieved %s', url)
return text
class BaseParser(object):
code = None
name = None
domain = None
feeder_pattern = ''
feeder_pages = [] # index page for news
date = None
title = None
body = None
lang = 'zh_Hant'
encoding = 'utf-8'
real_article = True # If set to False, ignore this article
def __init__(self, url):
self.url = url
@asyncio.coroutine
def parse(self):
html = yield from grab_url(self.url)
self._parse(html)
return self
def _parse(self, html):
"""Should take html and populate self.(date, title, byline, body)
If the article isn't valid, set self.real_article to False and return.
"""
raise NotImplementedError()
def __str__(self):
return u'\n'.join((self.date, self.title, self.body,))
@classmethod
def soup(cls, html):
return BeautifulSoup(html, from_encoding=cls.encoding)
@classmethod
@asyncio.coroutine
def _get_all_page(cls, url):
"""Take the article list url and return a list of urls corresponding to all pages
"""
raise NotImplementedError()
@classmethod
@asyncio.coroutine
def feed_urls(cls):
all_urls = []
coroutines = [cls._get_all_page(feeder_url) for feeder_url in cls.feeder_pages]
for coroutine in asyncio.as_completed(coroutines):
for page in (yield from coroutine):
try:
source = yield from grab_url(page)
urls = [a.get('href') or '' for a in cls.soup(source).findAll('a')]
urls = [url if '://' in url else "http://{}{}".format(cls.domain, url) for url in urls]
all_urls += [url for url in urls if re.search(cls.feeder_pattern, url)]
except RuntimeError:
log.info("Can't load page {}. skipping...".format(page))
return all_urls
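# Editor's sketch (not part of the original module): a concrete parser subclasses
# BaseParser, points feeder_pages at an index page, sets feeder_pattern to match
# article URLs, and implements _parse(); the selectors below are hypothetical.
#
#   class ExampleParser(BaseParser):
#       domain = 'news.example.com'
#       feeder_pages = ['http://news.example.com/latest']
#       feeder_pattern = r'/article/\d+'
#
#       def _parse(self, html):
#           soup = self.soup(html)
#           title = soup.find('h1')
#           body = soup.find('div', class_='article-body')
#           if not title or not body:
#               self.real_article = False
#               return
#           self.title = title.get_text(strip=True)
#           self.body = body.get_text('\n', strip=True)
#           self.date = ''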
|
ArmandSyah/AnimeMessengerBot
|
refs/heads/master
|
reddit/message.py
|
2
|
import os
from anime.mal import mal_search_methods
from anime.mal.mal_anime import MalAnime
from anime.anilist import anilist_search_methods
from anime.anidb import anidb_search_methods
from anime.streams import stream_search_methods
def make_message(title):
"""Constructs message to be sent to reddit user"""
anime_info = _set_up(title)
comment = _construct_comment(anime_info)
return comment
def _set_up(title):
mal_url = mal_search_methods.get_mal_links(title)
anilist_url = anilist_search_methods.get_anilist_links(title)
anidb_url = anidb_search_methods.get_anidb_links(title)
anime = MalAnime(mal_url)
crunchyroll_url = (stream_search_methods.search_crunchyroll(anime.english_name)
if anime.english_name is not None
else stream_search_methods.search_crunchyroll(title))
funimation_url = (stream_search_methods.search_funimation(anime.english_name)
if anime.english_name is not None
else stream_search_methods.search_funimation(title))
animelab_url = (stream_search_methods.search_animelab(anime.english_name)
if anime.english_name is not None
else stream_search_methods.search_animelab(title))
comment_info_dict = {'mal_url': mal_url,
'anilist_url': anilist_url,
'anidb_url': anidb_url,
'crunchy': crunchyroll_url,
'funi': funimation_url,
'animelab': animelab_url,
'anime': anime}
return comment_info_dict
def _construct_comment(anime_info):
comment = []
anime = anime_info['anime']
comment.append(f'# {anime.main_name} \n')
comment.append('***** \n')
comment.append(f'**Names:**\n')
if anime.english_name is not None:
comment.append(f'* English: {anime.english_name}')
if anime.japanese_name is not None:
comment.append(f'* Japanese: {anime.japanese_name}')
if anime.synonyms is not None:
comment.append(f'* Synonyms: {" , ".join(anime.synonyms)}')
comment.append(f'\n**Show Information:**\n')
if anime_info["anilist_url"] is not None:
comment.append(f'* [Anilist]({anime_info["anilist_url"]})')
if anime_info["mal_url"] is not None:
comment.append(f'* [MyAnimeList]({anime_info["mal_url"]})')
if anime_info["anidb_url"] is not None:
comment.append(f'* [AniDB]({anime_info["anidb_url"]})')
comment.append(f'\n**Streams:**\n')
if anime_info["crunchy"] is not None:
comment.append(f'* [Crunchyroll]({anime_info["crunchy"]})')
if anime_info["funi"] is not None:
comment.append(f'* [Funimation]({anime_info["funi"]})')
if anime_info["animelab"] is not None:
comment.append(f'* [Animelab (for Aus and NZ)]({anime_info["animelab"]})')
comment.append('\n***** \n')
comment.append(f'## Synopsis:\n')
comment.append(f'{anime.synopsis}\n')
comment.append('\n***** \n')
comment.append(f'**Episodes:** {anime.episodes} | **Source:** {anime.source} | **Airdate:** {anime.airdate} | '
f'**Duration:** {anime.duration} | **Status:** {anime.status} | **Type:** {anime.anime_type} | '
f'**Rating:** {anime.rating}/10 | **Genres:** {", ".join(anime.genres)}')
return '\n'.join(comment)
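# Editor's note: the joined comment is reddit markdown of the form
#   "# <main name>", bullet lists under **Names:**, **Show Information:** and
#   **Streams:**, a "## Synopsis:" section, and a final pipe-separated line of
#   episode/source/airdate/duration/status/type/rating/genre stats.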
def main():
print('Re Zero:\n {}'.format(make_message('Re Zero')))
if __name__ == '__main__':
os.chdir('\\'.join(os.getcwd().split('\\')[:-1]))
main()
|
kaffeel/oppia
|
refs/heads/develop
|
core/controllers/cron.py
|
15
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the cron jobs."""
import logging
from core import jobs
from core.controllers import base
from core.platform import models
email_services = models.Registry.import_email_services()
(job_models,) = models.Registry.import_models([models.NAMES.job])
import feconf
import utils
from pipeline import pipeline
# The default retention time is 2 days.
MAX_MAPREDUCE_METADATA_RETENTION_MSECS = 2 * 24 * 60 * 60 * 1000
def require_cron_or_superadmin(handler):
"""Decorator to ensure that the handler is being called by cron or by a
superadmin of the application.
"""
def _require_cron_or_superadmin(self, *args, **kwargs):
if (self.request.headers.get('X-AppEngine-Cron') is None
and not self.is_super_admin):
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
else:
return handler(self, *args, **kwargs)
return _require_cron_or_superadmin
class JobStatusMailerHandler(base.BaseHandler):
"""Handler for mailing admin about job failures."""
@require_cron_or_superadmin
def get(self):
"""Handles GET requests."""
TWENTY_FIVE_HOURS_IN_MSECS = 25 * 60 * 60 * 1000
MAX_JOBS_TO_REPORT_ON = 50
# TODO(sll): Get the 50 most recent failed shards, not all of them.
failed_jobs = jobs.get_stuck_jobs(TWENTY_FIVE_HOURS_IN_MSECS)
if failed_jobs:
email_subject = 'MapReduce failure alert'
email_message = (
'%s jobs have failed in the past 25 hours. More information '
'(about at most %s jobs; to see more, please check the logs):'
) % (len(failed_jobs), MAX_JOBS_TO_REPORT_ON)
for job in failed_jobs[:MAX_JOBS_TO_REPORT_ON]:
email_message += '\n'
email_message += '-----------------------------------'
email_message += '\n'
email_message += (
'Job with mapreduce ID %s (key name %s) failed. '
'More info:\n\n'
' counters_map: %s\n'
' shard_retries: %s\n'
' slice_retries: %s\n'
' last_update_time: %s\n'
' last_work_item: %s\n'
) % (
job.mapreduce_id, job.key().name(), job.counters_map,
job.retries, job.slice_retries, job.update_time,
job.last_work_item
)
else:
email_subject = 'MapReduce status report'
email_message = 'All MapReduce jobs are running fine.'
email_services.send_mail_to_admin(email_subject, email_message)
class CronMapreduceCleanupHandler(base.BaseHandler):
def get(self):
"""Clean up intermediate data items for completed M/R jobs that
started more than MAX_MAPREDUCE_METADATA_RETENTION_MSECS milliseconds
ago.
Map/reduce runs leave around a large number of rows in several
tables. This data is useful to have around for a while:
- it helps diagnose any problems with jobs that may be occurring
- it shows where resource usage is occurring
However, after a few days, this information is less relevant, and
should be cleaned up.
"""
recency_msec = MAX_MAPREDUCE_METADATA_RETENTION_MSECS
num_cleaned = 0
min_age_msec = recency_msec
# Only consider jobs that started at most 1 week before recency_msec.
max_age_msec = recency_msec + 7 * 24 * 60 * 60 * 1000
# The latest start time that a job scheduled for cleanup may have.
max_start_time_msec = (
utils.get_current_time_in_millisecs() - min_age_msec)
# Get all pipeline ids from jobs that started between min_age_msec and
# max_age_msec ago (i.e. within the week before the retention cutoff).
pipeline_id_to_job_instance = {}
job_instances = job_models.JobModel.get_recent_jobs(1000, max_age_msec)
for job_instance in job_instances:
if (job_instance.time_started_msec < max_start_time_msec and not
job_instance.has_been_cleaned_up):
if 'root_pipeline_id' in job_instance.metadata:
pipeline_id = job_instance.metadata['root_pipeline_id']
pipeline_id_to_job_instance[pipeline_id] = job_instance
# Clean up pipelines.
for pline in pipeline.get_root_list()['pipelines']:
pipeline_id = pline['pipelineId']
job_definitely_terminated = (
pline['status'] == 'done' or
pline['status'] == 'aborted' or
pline['currentAttempt'] > pline['maxAttempts'])
have_start_time = 'startTimeMs' in pline
job_started_too_long_ago = (
have_start_time and
pline['startTimeMs'] < max_start_time_msec)
if (job_started_too_long_ago or
(not have_start_time and job_definitely_terminated)):
# At this point, the map/reduce pipeline is either in a
# terminal state, or has taken so long that there's no
# realistic possibility that there might be a race condition
# between this and the job actually completing.
if pipeline_id in pipeline_id_to_job_instance:
job_instance = pipeline_id_to_job_instance[pipeline_id]
job_instance.has_been_cleaned_up = True
job_instance.put()
# This enqueues a deferred cleanup item.
p = pipeline.Pipeline.from_id(pipeline_id)
if p:
p.cleanup()
num_cleaned += 1
logging.warning('%s MR jobs cleaned up.' % num_cleaned)
if job_models.JobModel.do_unfinished_jobs_exist(
jobs.JobCleanupManager.__name__):
logging.warning('A previous cleanup job is still running.')
else:
jobs.JobCleanupManager.enqueue(
jobs.JobCleanupManager.create_new(), additional_job_params={
jobs.MAPPER_PARAM_MAX_START_TIME_MSEC: max_start_time_msec
})
logging.warning('Deletion jobs for auxiliary entities kicked off.')
|
DorRosenblum/tf_flstm_f-lm
|
refs/heads/master
|
language_model.py
|
1
|
import tensorflow as tf
from model_utils import sharded_variable, getdtype, variable_summaries
from common import assign_to_gpu, average_grads, find_trainable_variables,print_debug
from hparams import HParams
from tensorflow.contrib.rnn import LSTMCell
from factorized_lstm_cells import GLSTMCell, ResidualWrapper, FLSTMCell
class LM(object):
def __init__(self, hps, mode="train", ps_device="/gpu:0"):
self.hps = hps
data_size = hps.batch_size * hps.num_gpus
self.x = tf.placeholder(tf.int32, [data_size, hps.num_steps])
self.y = tf.placeholder(tf.int32, [data_size, hps.num_steps])
#self.w = tf.placeholder(tf.int32, [data_size, hps.num_steps])
losses = []
tower_grads = []
#xs = tf.split(0, hps.num_gpus, self.x)
print_debug("creating LM model with %d GPUs" % (hps.num_gpus))
xs = tf.split(self.x, hps.num_gpus, 0)
#ys = tf.split(0, hps.num_gpus, self.y)
ys = tf.split(self.y, hps.num_gpus, 0)
#ws = tf.split(0, hps.num_gpus, self.w)
for i in range(hps.num_gpus):
with tf.device(assign_to_gpu(i, ps_device)), tf.variable_scope(tf.get_variable_scope(),
reuse=True if i > 0 else None):
#loss = self._forward(i, xs[i], ys[i], ws[i])
loss = self._forward(i, xs[i], ys[i])
losses += [loss]
if mode == "train":
cur_grads = self._backward(loss, summaries=((i == hps.num_gpus - 1) and hps.do_summaries))
tower_grads += [cur_grads]
self.loss = tf.add_n(losses) / len(losses)
tf.summary.scalar("model/loss", self.loss)
self.global_step = tf.get_variable("global_step", [], tf.int32, trainable=False)
if mode == "train":
grads = average_grads(tower_grads)
if hps.optimizer == 1:
optimizer = tf.train.MomentumOptimizer(hps.learning_rate, 0.9)
elif hps.optimizer == 2:
optimizer = tf.train.AdamOptimizer(hps.learning_rate)
elif hps.optimizer == 3:
optimizer = tf.train.RMSPropOptimizer(learning_rate=hps.learning_rate)
elif hps.optimizer == 4:
optimizer = tf.train.GradientDescentOptimizer(hps.learning_rate)
else:
optimizer = tf.train.AdagradOptimizer(hps.learning_rate, initial_accumulator_value=1.0*float(hps.loss_scale)*float(hps.loss_scale))
self.train_op = optimizer.apply_gradients(grads, global_step=self.global_step)
self.summary_op = tf.summary.merge_all()
else:
self.train_op = tf.no_op()
if mode in ["train", "eval"] and hps.average_params:
with tf.name_scope(None): # This is needed due to EMA implementation silliness.
# Keep track of moving average of LSTM variables.
ema = tf.train.ExponentialMovingAverage(decay=0.999)
variables_to_average = find_trainable_variables("lstm")
self.train_op = tf.group(*[self.train_op, ema.apply(variables_to_average)])
self.avg_dict = ema.variables_to_restore(variables_to_average)
def _forward(self, gpu, x, y):
print("Setting up forward pass on GPU:%d" %gpu)
hps = self.hps
self.initial_states = []
for i in range(hps.num_layers):
with tf.device("/gpu:%d" % gpu):
state = (tf.Variable(tf.zeros([hps.batch_size, hps.state_size],
dtype=getdtype(hps, True)),
trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="state_c_%d_%d" % (gpu, i), dtype=getdtype(hps, True)),
tf.Variable(tf.zeros([hps.batch_size, hps.projected_size],
dtype=getdtype(hps, True)),
trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="state_h_%d_%d" % (gpu, i), dtype=getdtype(hps, True)),
)
self.initial_states += [state]
emb_vars = sharded_variable("emb", [hps.vocab_size, hps.emb_size],
hps.num_shards, dtype=getdtype(hps, False))
x = tf.nn.embedding_lookup(emb_vars, x) # [bs, steps, emb_size]
if hps.keep_prob < 1.0:
x = tf.nn.dropout(x, hps.keep_prob)
inputs = [tf.squeeze(input=tf.cast(v, getdtype(hps, True)), axis=[1]) for v in tf.split(value=x,
num_or_size_splits=hps.num_steps,
axis=1)]
for i in range(hps.num_layers):
with tf.variable_scope("lstm_%d" % i) as scope:
if hps.num_of_groups > 1:
assert(hps.fact_size is None)
print("Using G-LSTM")
print("Using %d groups" % hps.num_of_groups)
cell = GLSTMCell(num_units=hps.state_size,
num_proj=hps.projected_size,
number_of_groups=hps.num_of_groups)
else:
if hps.fact_size:
print("Using F-LSTM")
print("Using factorization: %d x %d x %d" %(2*hps.projected_size, int(hps.fact_size), 4*hps.state_size))
cell = FLSTMCell(num_units=hps.state_size,
num_proj=hps.projected_size,
factor_size=int(hps.fact_size))
else:
print("Using LSTMP")
cell = LSTMCell(num_units=hps.state_size,
num_proj=hps.projected_size)
state = tf.contrib.rnn.LSTMStateTuple(self.initial_states[i][0],
self.initial_states[i][1])
if hps.use_residual:
cell = ResidualWrapper(cell=cell)
for t in range(hps.num_steps):
if t > 0:
scope.reuse_variables()
inputs[t], state = cell(inputs[t], state)
if hps.keep_prob < 1.0:
inputs[t] = tf.nn.dropout(inputs[t], hps.keep_prob)
with tf.control_dependencies([self.initial_states[i][0].assign(state[0]),
self.initial_states[i][1].assign(state[1])]):
inputs[t] = tf.identity(inputs[t])
# inputs = tf.reshape(tf.concat(1, inputs), [-1, hps.projected_size])
inputs = tf.reshape(tf.concat(inputs, 1), [-1, hps.projected_size])
# Initialization ignores the fact that softmax_w is transposed. That worked slightly better.
softmax_w = sharded_variable("softmax_w", [hps.vocab_size, hps.projected_size], hps.num_shards)
softmax_b = tf.get_variable("softmax_b", [hps.vocab_size])
if hps.num_sampled == 0:
full_softmax_w = tf.reshape(tf.concat(softmax_w, 1), [-1, hps.projected_size])
full_softmax_w = full_softmax_w[:hps.vocab_size, :]
logits = tf.matmul(tf.to_float(inputs), full_softmax_w, transpose_b=True) + softmax_b
targets = tf.reshape(y, [-1])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)
else:
targets = tf.reshape(y, [-1, 1])
loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, targets, tf.to_float(inputs),
hps.num_sampled, hps.vocab_size)
#loss = tf.reduce_mean(loss * tf.reshape(w, [-1]))
loss = tf.reduce_mean(loss)
return loss
def _backward(self, loss, summaries=False):
hps = self.hps
loss = float(hps.loss_scale) * loss * hps.num_steps #??????? why?
emb_vars = find_trainable_variables("emb")
lstm_vars = find_trainable_variables("lstm")
softmax_vars = find_trainable_variables("softmax")
all_vars = emb_vars + lstm_vars + softmax_vars
grads = tf.gradients(loss, all_vars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
orig_grads = grads[:]
emb_grads = grads[:len(emb_vars)]
grads = grads[len(emb_vars):]
for i in range(len(emb_grads)):
assert isinstance(emb_grads[i], tf.IndexedSlices)
emb_grads[i] = tf.IndexedSlices(emb_grads[i].values * hps.batch_size, emb_grads[i].indices,
emb_grads[i].dense_shape)
lstm_grads = grads[:len(lstm_vars)]
softmax_grads = grads[len(lstm_vars):]
lstm_grads, lstm_norm = tf.clip_by_global_norm(lstm_grads, float(hps.loss_scale) * hps.max_grad_norm)
clipped_grads = emb_grads + lstm_grads + softmax_grads
assert len(clipped_grads) == len(orig_grads)
if summaries:
with tf.device("/cpu:0"):
tf.summary.scalar("model/lstm_grad_norm", lstm_norm)
tf.summary.scalar("model/lstm_grad_scale", tf.minimum(float(hps.loss_scale) * hps.max_grad_norm / lstm_norm, 1.0))
tf.summary.scalar("model/lstm_weight_norm", tf.global_norm(lstm_vars))
#embeding vars and grads
for v, g in zip(emb_vars, emb_grads):
name = v.name[6:]
gname = 'dLoss_by_' + name
variable_summaries(v, "Embedding_weights", name)
variable_summaries(g, "Embedding_gradients", gname)
#LSTM vars and gradients
for v, g in zip(lstm_vars, lstm_grads):
name = v.name[6:]
gname = 'dLoss_by_' + name
variable_summaries(v, "LSTM_weights", name)
variable_summaries(g, "LSTM_gradients", gname)
#softmax vars and gradients
for v, g in zip(softmax_vars, softmax_grads):
name = v.name[6:]
gname = 'dLoss_by_' + name
variable_summaries(v, "Softmax_weights", name)
variable_summaries(g, "Softmax_gradients", gname)
return list(zip(clipped_grads, all_vars))
@staticmethod
def get_default_hparams():
return HParams(
batch_size=128,
num_steps=20,
num_shards=8,
num_layers=1,
learning_rate=0.2,
max_grad_norm=10.0,
num_delayed_steps=150,
keep_prob=0.9,
optimizer=0,
vocab_size=10000,
emb_size=128,
state_size=2048,
projected_size=128,
num_sampled=128,
num_gpus=2,
float16_rnn=False,
float16_non_rnn=False,
average_params=True,
run_profiler=False,
do_summaries=False,
max_time=604800,
fact_size=None,
fnon_linearity="none",
num_of_groups=0,
save_model_every_min=30,
save_summary_every_min=16,
do_sharing=False,
use_residual=False,
loss_scale=1.0
)
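# Illustrative usage (editor's note; assumes the surrounding training script builds
# the TensorFlow session and feeds model.x / model.y):
#   hps = LM.get_default_hparams()
#   model = LM(hps, mode="train")  # exposes model.loss, model.train_op, model.global_step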
|
Soya93/Extract-Refactoring
|
refs/heads/master
|
python/testData/refactoring/changeSignature/switchPositionalParam.after.py
|
83
|
def bar(b, a):
pass
bar(2, 1)
|
ImaginationForPeople/sockjs-tornado
|
refs/heads/master
|
sockjs/tornado/transports/jsonp.py
|
9
|
# -*- coding: utf-8 -*-
"""
sockjs.tornado.transports.jsonp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
JSONP transport implementation.
"""
import logging
from tornado.web import asynchronous
from sockjs.tornado import proto
from sockjs.tornado.transports import pollingbase
from sockjs.tornado.util import bytes_to_str, unquote_plus
LOG = logging.getLogger("tornado.general")
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id, False):
return
# Might get already detached because connection was closed in on_open
if not self.session:
return
if not self.session.send_queue:
self.session.start_heartbeat()
else:
self.session.flush()
def send_pack(self, message, binary=False):
if binary:
raise Exception('binary not supported for JSONPTransport')
self.active = False
try:
# TODO: Just escape
msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type', 'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# TODO: Fix me
self.set_header('Etag', 'dummy')
self.write(msg)
self.flush(callback=self.send_complete)
except IOError:
# If connection dropped, make sure we close offending session instead
# of propagating error all way up.
self.session.delayed_close()
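# Editor's note: the body written above is the callback applied to the
# JSON-encoded frame, e.g. a callback of "cb" and frame "o" produce:  cb("o");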
class JSONPSendHandler(pollingbase.PollingTransportBase):
def post(self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None or session.is_closed:
self.set_status(404)
return
data = bytes_to_str(self.request.body)
ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
if not data.startswith('d='):
LOG.exception('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = unquote_plus(data[2:])
if not data:
LOG.debug('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
LOG.debug('jsonp_send: Invalid json encoding')
self.write("Broken JSON encoding.")
self.set_status(500)
return
try:
session.on_messages(messages)
except Exception:
LOG.exception('jsonp_send: on_message() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200)
|
AltSchool/django
|
refs/heads/master
|
tests/auth_tests/test_management.py
|
4
|
from __future__ import unicode_literals
import locale
import sys
from datetime import date
from django.apps import apps
from django.contrib.auth import management
from django.contrib.auth.checks import check_user_model
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import (
changepassword, createsuperuser,
)
from django.contrib.auth.models import (
AbstractBaseUser, Group, Permission, User,
)
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, exceptions
from django.core.management import call_command
from django.core.management.base import CommandError
from django.db import models
from django.test import (
SimpleTestCase, TestCase, override_settings, override_system_checks,
)
from django.test.utils import isolate_apps
from django.utils import six
from django.utils.encoding import force_str
from django.utils.translation import ugettext_lazy as _
from .models import CustomUserNonUniqueUsername, CustomUserWithFK, Email
def mock_inputs(inputs):
"""
Decorator to temporarily replace input/getpass to allow interactive
createsuperuser.
"""
def inner(test_func):
def wrapped(*args):
class mock_getpass:
@staticmethod
def getpass(prompt=b'Password: ', stream=None):
if six.PY2:
# getpass on Windows only supports prompt as bytestring (#19807)
assert isinstance(prompt, six.binary_type)
if callable(inputs['password']):
return inputs['password']()
return inputs['password']
def mock_input(prompt):
# prompt should be encoded in Python 2. This line will raise an
# Exception if prompt contains unencoded non-ASCII on Python 2.
prompt = str(prompt)
assert str('__proxy__') not in prompt
response = ''
for key, val in inputs.items():
if force_str(key) in prompt.lower():
response = val
break
return response
old_getpass = createsuperuser.getpass
old_input = createsuperuser.input
createsuperuser.getpass = mock_getpass
createsuperuser.input = mock_input
try:
test_func(*args)
finally:
createsuperuser.getpass = old_getpass
createsuperuser.input = old_input
return wrapped
return inner
class MockTTY(object):
"""
A fake stdin object that pretends to be a TTY to be used in conjunction
with mock_inputs.
"""
def isatty(self):
return True
class GetDefaultUsernameTestCase(TestCase):
def setUp(self):
self.old_get_system_username = management.get_system_username
def tearDown(self):
management.get_system_username = self.old_get_system_username
def test_actual_implementation(self):
self.assertIsInstance(management.get_system_username(), six.text_type)
def test_simple(self):
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), 'joe')
def test_existing(self):
User.objects.create(username='joe')
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), '')
self.assertEqual(
management.get_default_username(check_db=False), 'joe')
def test_i18n(self):
# 'Julia' with accented 'u':
management.get_system_username = lambda: 'J\xfalia'
self.assertEqual(management.get_default_username(), 'julia')
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
])
class ChangepasswordManagementCommandTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='joe', password='qwerty')
self.stdout = six.StringIO()
self.stderr = six.StringIO()
def tearDown(self):
self.stdout.close()
self.stderr.close()
def test_that_changepassword_command_changes_joes_password(self):
"Executing the changepassword management command should change joe's password"
self.assertTrue(self.user.check_password('qwerty'))
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute(username="joe", stdout=self.stdout)
command_output = self.stdout.getvalue().strip()
self.assertEqual(
command_output,
"Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
)
self.assertTrue(User.objects.get(username="joe").check_password("not qwerty"))
def test_that_max_tries_exits_1(self):
"""
A CommandError should be thrown by handle() if the user enters in
mismatched passwords three times.
"""
command = changepassword.Command()
command._get_pass = lambda *args: str(args) or 'foo'
with self.assertRaises(CommandError):
command.execute(username="joe", stdout=self.stdout, stderr=self.stderr)
def test_password_validation(self):
"""
A CommandError should be raised if the user enters in passwords which
fail validation three times.
"""
command = changepassword.Command()
command._get_pass = lambda *args: '1234567890'
abort_msg = "Aborting password change for user 'joe' after 3 attempts"
with self.assertRaisesMessage(CommandError, abort_msg):
command.execute(username="joe", stdout=self.stdout, stderr=self.stderr)
self.assertIn('This password is entirely numeric.', self.stderr.getvalue())
def test_that_changepassword_command_works_with_nonascii_output(self):
"""
#21627 -- Executing the changepassword management command should allow
non-ASCII characters from the User object representation.
"""
# 'Julia' with accented 'u':
User.objects.create_user(username='J\xfalia', password='qwerty')
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute(username="J\xfalia", stdout=self.stdout)
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
AUTH_PASSWORD_VALIDATORS=[{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}],
)
class CreatesuperuserManagementCommandTestCase(TestCase):
def test_basic_usage(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe",
email="joe@somewhere.org",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
self.assertEqual(u.email, 'joe@somewhere.org')
# created password should be unusable
self.assertFalse(u.has_usable_password())
@mock_inputs({'password': "nopasswd"})
def test_nolocale(self):
"""
Check that createsuperuser does not break when no locale is set. See
ticket #16017.
"""
old_getdefaultlocale = locale.getdefaultlocale
try:
# Temporarily remove locale information
locale.getdefaultlocale = lambda: (None, None)
# Call the command in this new environment
call_command(
"createsuperuser",
interactive=True,
username="nolocale@somewhere.org",
email="nolocale@somewhere.org",
verbosity=0,
stdin=MockTTY(),
)
except TypeError:
self.fail("createsuperuser fails if the OS provides no information about the current locale")
finally:
# Re-apply locale information
locale.getdefaultlocale = old_getdefaultlocale
# If we were successful, a user should have been created
u = User.objects.get(username="nolocale@somewhere.org")
self.assertEqual(u.email, 'nolocale@somewhere.org')
@mock_inputs({
'password': "nopasswd",
'u\u017eivatel': 'foo', # username (cz)
'email': 'nolocale@somewhere.org'})
def test_non_ascii_verbose_name(self):
username_field = User._meta.get_field('username')
old_verbose_name = username_field.verbose_name
username_field.verbose_name = _('u\u017eivatel')
new_io = six.StringIO()
try:
call_command(
"createsuperuser",
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
finally:
username_field.verbose_name = old_verbose_name
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
def test_verbosity_zero(self):
# We can suppress output on the management command
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe2",
email="joe2@somewhere.org",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
self.assertEqual(u.email, 'joe2@somewhere.org')
self.assertFalse(u.has_usable_password())
def test_email_in_username(self):
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe+admin@somewhere.org",
email="joe@somewhere.org",
stdout=new_io
)
u = User._default_manager.get(username="joe+admin@somewhere.org")
self.assertEqual(u.email, 'joe@somewhere.org')
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"A superuser can be created when a custom User model is in use"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
email="joe@somewhere.org",
date_of_birth="1976-04-01",
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUser._default_manager.get(email="joe@somewhere.org")
self.assertEqual(u.date_of_birth, date(1976, 4, 1))
# created password should be unusable
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user_missing_required_field(self):
"A Custom superuser won't be created when a required field isn't provided"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = six.StringIO()
with self.assertRaises(CommandError):
call_command(
"createsuperuser",
interactive=False,
username="joe@somewhere.org",
stdout=new_io,
stderr=new_io,
)
self.assertEqual(CustomUser._default_manager.count(), 0)
@override_settings(
AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername',
AUTHENTICATION_BACKENDS=['my.custom.backend'],
)
def test_swappable_user_username_non_unique(self):
@mock_inputs({
'username': 'joe',
'password': 'nopasswd',
})
def createsuperuser():
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=True,
email="joe@somewhere.org",
stdout=new_io,
stdin=MockTTY(),
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
for i in range(2):
createsuperuser()
users = CustomUserNonUniqueUsername.objects.filter(username="joe")
self.assertEqual(users.count(), 2)
def test_skip_if_not_in_TTY(self):
"""
If the command is not called from a TTY, it should be skipped and a
message should be displayed (#7423).
"""
class FakeStdin(object):
"""A fake stdin object that has isatty() return False."""
def isatty(self):
return False
out = six.StringIO()
call_command(
"createsuperuser",
stdin=FakeStdin(),
stdout=out,
interactive=True,
)
self.assertEqual(User._default_manager.count(), 0)
self.assertIn("Superuser creation skipped", out.getvalue())
def test_passing_stdin(self):
"""
You can pass a stdin object as an option and it should be
available on self.stdin.
If no such option is passed, it defaults to sys.stdin.
"""
sentinel = object()
command = createsuperuser.Command()
command.check = lambda: []
command.execute(
stdin=sentinel,
stdout=six.StringIO(),
stderr=six.StringIO(),
interactive=False,
verbosity=0,
username='janet',
email='janet@example.com',
)
self.assertIs(command.stdin, sentinel)
command = createsuperuser.Command()
command.check = lambda: []
command.execute(
stdout=six.StringIO(),
stderr=six.StringIO(),
interactive=False,
verbosity=0,
username='joe',
email='joe@example.com',
)
self.assertIs(command.stdin, sys.stdin)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK')
def test_fields_with_fk(self):
new_io = six.StringIO()
group = Group.objects.create(name='mygroup')
email = Email.objects.create(email='mymail@gmail.com')
call_command(
'createsuperuser',
interactive=False,
username=email.pk,
email=email.email,
group=group.pk,
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUserWithFK._default_manager.get(email=email)
self.assertEqual(u.username, email)
self.assertEqual(u.group, group)
non_existent_email = 'mymail2@gmail.com'
with self.assertRaisesMessage(CommandError,
'email instance with email %r does not exist.' % non_existent_email):
call_command(
'createsuperuser',
interactive=False,
username=email.pk,
email=non_existent_email,
stdout=new_io,
)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK')
def test_fields_with_fk_interactive(self):
new_io = six.StringIO()
group = Group.objects.create(name='mygroup')
email = Email.objects.create(email='mymail@gmail.com')
@mock_inputs({
'password': 'nopasswd',
'username (email.id)': email.pk,
'email (email.email)': email.email,
'group (group.id)': group.pk,
})
def test(self):
call_command(
'createsuperuser',
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUserWithFK._default_manager.get(email=email)
self.assertEqual(u.username, email)
self.assertEqual(u.group, group)
test(self)
def test_password_validation(self):
"""
Creation should fail if the password fails validation.
"""
new_io = six.StringIO()
# Returns '1234567890' the first two times it is called, then
# 'password' subsequently.
def bad_then_good_password(index=[0]):
index[0] += 1
if index[0] <= 2:
return '1234567890'
return 'password'
@mock_inputs({
'password': bad_then_good_password,
'username': 'joe1234567890',
})
def test(self):
call_command(
"createsuperuser",
interactive=True,
stdin=MockTTY(),
stdout=new_io,
stderr=new_io,
)
self.assertEqual(
new_io.getvalue().strip(),
"This password is entirely numeric.\n"
"Superuser created successfully."
)
test(self)
def test_validation_mismatched_passwords(self):
"""
Creation should fail if the user enters mismatched passwords.
"""
new_io = six.StringIO()
# The first two passwords do not match, but the second two do match and
# are valid.
entered_passwords = ["password", "not password", "password2", "password2"]
def mismatched_passwords_then_matched():
return entered_passwords.pop(0)
@mock_inputs({
'password': mismatched_passwords_then_matched,
'username': 'joe1234567890',
})
def test(self):
call_command(
"createsuperuser",
interactive=True,
stdin=MockTTY(),
stdout=new_io,
stderr=new_io,
)
self.assertEqual(
new_io.getvalue().strip(),
"Error: Your passwords didn't match.\n"
"Superuser created successfully."
)
test(self)
def test_validation_blank_password_entered(self):
"""
Creation should fail if the user enters blank passwords.
"""
new_io = six.StringIO()
# The first two passwords are empty strings, but the second two are
# valid.
entered_passwords = ["", "", "password2", "password2"]
def blank_passwords_then_valid():
return entered_passwords.pop(0)
@mock_inputs({
'password': blank_passwords_then_valid,
'username': 'joe1234567890',
})
def test(self):
call_command(
"createsuperuser",
interactive=True,
stdin=MockTTY(),
stdout=new_io,
stderr=new_io,
)
self.assertEqual(
new_io.getvalue().strip(),
"Error: Blank passwords aren't allowed.\n"
"Superuser created successfully."
)
test(self)
class CustomUserModelValidationTestCase(SimpleTestCase):
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserNonListRequiredFields')
@override_system_checks([check_user_model])
@isolate_apps('auth_tests', kwarg_name='apps')
def test_required_fields_is_list(self, apps):
"""REQUIRED_FIELDS should be a list."""
class CustomUserNonListRequiredFields(AbstractBaseUser):
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = 'date_of_birth'
errors = checks.run_checks(app_configs=apps.get_app_configs())
expected = [
checks.Error(
"'REQUIRED_FIELDS' must be a list or tuple.",
hint=None,
obj=CustomUserNonListRequiredFields,
id='auth.E001',
),
]
self.assertEqual(errors, expected)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserBadRequiredFields')
@override_system_checks([check_user_model])
@isolate_apps('auth_tests', kwarg_name='apps')
def test_username_not_in_required_fields(self, apps):
"""USERNAME_FIELD should not appear in REQUIRED_FIELDS."""
class CustomUserBadRequiredFields(AbstractBaseUser):
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'date_of_birth']
errors = checks.run_checks(apps.get_app_configs())
expected = [
checks.Error(
("The field named as the 'USERNAME_FIELD' for a custom user model "
"must not be included in 'REQUIRED_FIELDS'."),
hint=None,
obj=CustomUserBadRequiredFields,
id='auth.E002',
),
]
self.assertEqual(errors, expected)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername')
@override_system_checks([check_user_model])
def test_username_non_unique(self):
"""
A non-unique USERNAME_FIELD should raise an error only if we use the
default authentication backend. Otherwise, a warning should be raised.
"""
errors = checks.run_checks()
expected = [
checks.Error(
("'CustomUserNonUniqueUsername.username' must be "
"unique because it is named as the 'USERNAME_FIELD'."),
hint=None,
obj=CustomUserNonUniqueUsername,
id='auth.E003',
),
]
self.assertEqual(errors, expected)
with self.settings(AUTHENTICATION_BACKENDS=['my.custom.backend']):
errors = checks.run_checks()
expected = [
checks.Warning(
("'CustomUserNonUniqueUsername.username' is named as "
"the 'USERNAME_FIELD', but it is not unique."),
hint=('Ensure that your authentication backend(s) can handle '
'non-unique usernames.'),
obj=CustomUserNonUniqueUsername,
id='auth.W004',
)
]
self.assertEqual(errors, expected)
class PermissionTestCase(TestCase):
def setUp(self):
self._original_permissions = Permission._meta.permissions[:]
self._original_default_permissions = Permission._meta.default_permissions
self._original_verbose_name = Permission._meta.verbose_name
def tearDown(self):
Permission._meta.permissions = self._original_permissions
Permission._meta.default_permissions = self._original_default_permissions
Permission._meta.verbose_name = self._original_verbose_name
ContentType.objects.clear_cache()
def test_duplicated_permissions(self):
"""
Test that a proper error message is shown when trying to create
duplicate permissions.
"""
auth_app_config = apps.get_app_config('auth')
# check duplicated default permission
Permission._meta.permissions = [
('change_permission', 'Can edit permission (duplicate)')]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'change_permission' clashes with a "
"builtin permission for model 'auth.Permission'.",
create_permissions, auth_app_config, verbosity=0)
# check duplicated custom permissions
Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
('my_custom_permission', 'Some permission with duplicate permission code'),
]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'my_custom_permission' is duplicated for model "
"'auth.Permission'.",
create_permissions, auth_app_config, verbosity=0)
# should not raise anything
Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
]
create_permissions(auth_app_config, verbosity=0)
def test_default_permissions(self):
auth_app_config = apps.get_app_config('auth')
permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
]
create_permissions(auth_app_config, verbosity=0)
# add/change/delete permission by default + custom permission
self.assertEqual(Permission.objects.filter(
content_type=permission_content_type,
).count(), 4)
Permission.objects.filter(content_type=permission_content_type).delete()
Permission._meta.default_permissions = []
create_permissions(auth_app_config, verbosity=0)
# custom permission only since default permissions is empty
self.assertEqual(Permission.objects.filter(
content_type=permission_content_type,
).count(), 1)
def test_verbose_name_length(self):
auth_app_config = apps.get_app_config('auth')
permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
Permission.objects.filter(content_type=permission_content_type).delete()
Permission._meta.verbose_name = "some ridiculously long verbose name that is out of control" * 5
six.assertRaisesRegex(self, exceptions.ValidationError,
"The verbose_name of auth.permission is longer than 244 characters",
create_permissions, auth_app_config, verbosity=0)
def test_custom_permission_name_length(self):
auth_app_config = apps.get_app_config('auth')
ContentType.objects.get_by_natural_key('auth', 'permission')
custom_perm_name = 'a' * 256
Permission._meta.permissions = [
('my_custom_permission', custom_perm_name),
]
try:
msg = (
"The permission name %s of auth.permission is longer than "
"255 characters" % custom_perm_name
)
with self.assertRaisesMessage(exceptions.ValidationError, msg):
create_permissions(auth_app_config, verbosity=0)
finally:
Permission._meta.permissions = []
|
jamesblunt/nodeenv
|
refs/heads/master
|
nodeenv.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
nodeenv
~~~~~~~
Node.js virtual environment
:copyright: (c) 2014 by Eugene Kalinin
:license: BSD, see LICENSE for more details.
"""
import contextlib
import io
import sys
import os
import re
import stat
import logging
import operator
import optparse
import subprocess
import tarfile
import pipes
try: # pragma: no cover (py2 only)
from ConfigParser import SafeConfigParser as ConfigParser
from HTMLParser import HTMLParser
import urllib2
iteritems = operator.methodcaller('iteritems')
except ImportError: # pragma: no cover (py3 only)
from configparser import ConfigParser
from html.parser import HTMLParser
import urllib.request as urllib2
iteritems = operator.methodcaller('items')
from pkg_resources import parse_version
nodeenv_version = '0.13.6'
join = os.path.join
abspath = os.path.abspath
src_domain = "nodejs.org"
is_PY3 = sys.version_info[0] == 3
if is_PY3:
from functools import cmp_to_key
# ---------------------------------------------------------
# Utils
# https://github.com/jhermann/waif/blob/master/python/to_uft8.py
def to_utf8(text):
"""Convert given text to UTF-8 encoding (as far as possible)."""
if not text or is_PY3:
return text
try: # unicode or pure ascii
return text.encode("utf8")
except UnicodeDecodeError:
try: # successful UTF-8 decode means it's pretty sure UTF-8
text.decode("utf8")
return text
except UnicodeDecodeError:
try: # get desperate; and yes,
# this has a western hemisphere bias
return text.decode("cp1252").encode("utf8")
except UnicodeDecodeError:
pass
return text # return unchanged, hope for the best
class Config(object):
"""
Configuration namespace.
"""
# Defaults
node = 'latest'
npm = 'latest'
with_npm = False
jobs = '2'
without_ssl = False
debug = False
profile = False
make = 'make'
prebuilt = False
@classmethod
def _load(cls, configfiles, verbose=False):
"""
Load configuration from the given files in reverse order,
if they exist and have a [nodeenv] section.
"""
for configfile in reversed(configfiles):
configfile = os.path.expanduser(configfile)
if not os.path.exists(configfile):
continue
ini_file = ConfigParser()
ini_file.read(configfile)
section = "nodeenv"
if not ini_file.has_section(section):
continue
for attr, val in iteritems(vars(cls)):
if attr.startswith('_') or not \
ini_file.has_option(section, attr):
continue
if isinstance(val, bool):
val = ini_file.getboolean(section, attr)
else:
val = ini_file.get(section, attr)
if verbose:
print('CONFIG {0}: {1} = {2}'.format(
os.path.basename(configfile), attr, val))
setattr(cls, attr, val)
@classmethod
def _dump(cls):
"""
Print defaults for the README.
"""
print(" [nodeenv]")
print(" " + "\n ".join(
"%s = %s" % (k, v) for k, v in sorted(iteritems(vars(cls)))
if not k.startswith('_')))
Config._default = dict(
(attr, val) for attr, val in iteritems(vars(Config))
if not attr.startswith('_')
)
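# A minimal sketch of the kind of [nodeenv] section Config._load() picks up
# from ./setup.cfg or ~/.nodeenvrc (the values below are illustrative only):
#
#   [nodeenv]
#   node = 0.12.7
#   jobs = 4
#   prebuilt = True
#
# boolean attributes are read with getboolean(), everything else as strings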
def clear_output(out):
"""
Remove new-lines and
"""
return out.decode('utf-8').replace('\n', '')
def remove_env_bin_from_path(env, env_bin_dir):
"""
Remove bin directory of the current environment from PATH
"""
return env.replace(env_bin_dir + ':', '')
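# Illustrative example (assumed paths):
#   remove_env_bin_from_path('/opt/env/bin:/usr/bin', '/opt/env/bin')
#   returns '/usr/bin'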
def node_version_from_opt(opt):
"""
Parse the node version from the optparse options
"""
if opt.node == 'system':
out, err = subprocess.Popen(
["node", "--version"], stdout=subprocess.PIPE).communicate()
return parse_version(clear_output(out).replace('v', ''))
return parse_version(opt.node)
def create_logger():
"""
Create a logger for diagnostic output
"""
# create logger
logger = logging.getLogger("nodeenv")
logger.setLevel(logging.INFO)
# monkey patch
def emit(self, record):
msg = self.format(record)
fs = "%s" if getattr(record, "continued", False) else "%s\n"
self.stream.write(fs % to_utf8(msg))
self.flush()
logging.StreamHandler.emit = emit
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(fmt="%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
return logger
logger = create_logger()
def parse_args(check=True):
"""
Parses command line arguments.
Set `check` to False to skip validation checks.
"""
parser = optparse.OptionParser(
version=nodeenv_version,
usage="%prog [OPTIONS] ENV_DIR")
parser.add_option(
'-n', '--node', dest='node', metavar='NODE_VER', default=Config.node,
help='The node.js version to use, e.g., '
'--node=0.4.3 will use node-v0.4.3 '
'to create the new environment. '
'The default is the last stable version (`latest`). '
'Use `system` to use the system-wide node.')
parser.add_option(
'-i', '--iojs',
action='store_true', dest='io', default=False,
help='Use iojs instead of nodejs.')
parser.add_option(
'-j', '--jobs', dest='jobs', default=Config.jobs,
help='Sets number of parallel commands at node.js compilation. '
'The default is 2 jobs.')
parser.add_option(
'--load-average', dest='load_average',
help='Sets maximum load average for executing parallel commands '
'at node.js compilation.')
parser.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help="Verbose mode")
parser.add_option(
'-q', '--quiet',
action='store_true', dest='quiet', default=False,
help="Quiet mode")
parser.add_option(
'-C', '--config-file', dest='config_file', default=None,
help="Load a different file than '~/.nodeenvrc'. "
"Pass an empty string for no config (use built-in defaults).")
parser.add_option(
'-r', '--requirements',
dest='requirements', default='', metavar='FILENAME',
help='Install all the packages listed in the given requirements file.')
parser.add_option(
'--prompt', dest='prompt',
help='Provides an alternative prompt prefix for this environment')
parser.add_option(
'-l', '--list', dest='list',
action='store_true', default=False,
help='Lists available node.js versions')
parser.add_option(
'--update', dest='update',
action='store_true', default=False,
help='Install npm packages from file without node')
parser.add_option(
'--without-ssl', dest='without_ssl',
action='store_true', default=Config.without_ssl,
help='Build node.js without SSL support')
parser.add_option(
'--debug', dest='debug',
action='store_true', default=Config.debug,
help='Build debug variant of the node.js')
parser.add_option(
'--profile', dest='profile',
action='store_true', default=Config.profile,
help='Enable profiling for node.js')
parser.add_option(
'--with-npm', dest='with_npm',
action='store_true', default=Config.with_npm,
help='Install npm from its own install script into the new virtual '
'environment. Required for node.js < 0.6.3. By default, the npm '
'bundled with node.js is used.')
parser.add_option(
'--npm', dest='npm',
metavar='NPM_VER', default=Config.npm,
help='The npm version to use, e.g., '
'--npm=0.3.18 will use the npm-0.3.18.tgz '
'tarball to install. '
'The default is the last available version (`latest`).')
parser.add_option(
'--no-npm-clean', dest='no_npm_clean',
action='store_true', default=False,
help='Skip the npm 0.x cleanup. Cleanup is enabled by default.')
parser.add_option(
'--python-virtualenv', '-p', dest='python_virtualenv',
action='store_true', default=False,
help='Use current python virtualenv')
parser.add_option(
'--clean-src', '-c', dest='clean_src',
action='store_true', default=False,
help='Remove "src" directory after installation')
parser.add_option(
'--force', dest='force',
action='store_true', default=False,
help='Force installation in a pre-existing directory')
parser.add_option(
'--make', '-m', dest='make_path',
metavar='MAKE_PATH',
help='Path to make command',
default=Config.make)
parser.add_option(
'--prebuilt', dest='prebuilt',
action='store_true', default=Config.prebuilt,
help='Install node.js from prebuilt package')
options, args = parser.parse_args()
if options.config_file is None:
options.config_file = ["./setup.cfg", "~/.nodeenvrc"]
elif not options.config_file:
options.config_file = []
else:
# Make sure that explicitly provided files exist
if not os.path.exists(options.config_file):
parser.error("Config file '{0}' doesn't exist!".format(
options.config_file))
options.config_file = [options.config_file]
if not check:
return options, args
if not options.list and not options.python_virtualenv:
if not args:
parser.error('You must provide a DEST_DIR or '
'use current python virtualenv')
if len(args) > 1:
parser.error('There must be only one argument: DEST_DIR '
'(you gave: {0})'.format(' '.join(args)))
return options, args
def mkdir(path):
"""
Create directory
"""
if not os.path.exists(path):
logger.debug(' * Creating: %s ... ', path, extra=dict(continued=True))
os.makedirs(path)
logger.debug('done.')
else:
logger.debug(' * Directory %s already exists', path)
def writefile(dest, content, overwrite=True, append=False):
"""
Create a file and write content to it
"""
content = to_utf8(content)
if is_PY3:
content = bytes(content, 'utf-8')
if not os.path.exists(dest):
logger.debug(' * Writing %s ... ', dest, extra=dict(continued=True))
with open(dest, 'wb') as f:
f.write(content)
logger.debug('done.')
return
else:
with open(dest, 'rb') as f:
c = f.read()
if c == content:
logger.debug(' * Content %s already in place', dest)
return
if not overwrite:
logger.info(' * File %s exists with different content; '
' not overwriting', dest)
return
if append:
logger.info(' * Appending data to %s', dest)
with open(dest, 'ab') as f:
f.write(DISABLE_PROMPT.encode('utf-8'))
f.write(content)
f.write(ENABLE_PROMPT.encode('utf-8'))
return
logger.info(' * Overwriting %s with new content', dest)
with open(dest, 'wb') as f:
f.write(content)
def callit(cmd, show_stdout=True, in_shell=False,
cwd=None, extra_env=None):
"""
Execute a command line in a sub-shell
"""
all_output = []
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20] + "..." + part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
logger.debug(" ** Running command %s" % cmd_desc)
if in_shell:
cmd = ' '.join(cmd)
# output
stdout = subprocess.PIPE
# env
if extra_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
else:
env = None
# execute
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env, shell=in_shell)
except Exception:
e = sys.exc_info()[1]
logger.error("Error %s while executing command %s" % (e, cmd_desc))
raise
stdout = proc.stdout
while stdout:
line = stdout.readline()
if not line:
break
line = line.decode('utf-8').rstrip()
all_output.append(line)
if show_stdout:
logger.info(line)
proc.wait()
# error handler
if proc.returncode:
if show_stdout:
for s in all_output:
logger.critical(s)
raise OSError("Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
return proc.returncode, all_output
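# Illustrative usage (assumed command):
#   returncode, output = callit(['ls', '-la'], show_stdout=False)
# returns the exit code and the captured output lines.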
def get_node_src_url(version, postfix=''):
node_name = '%s-v%s%s' % (get_binary_prefix(), version, postfix)
tar_name = '%s.tar.gz' % (node_name)
if parse_version(version) > parse_version("0.5.0"):
node_url = 'https://%s/dist/v%s/%s' % (src_domain, version, tar_name)
else:
node_url = 'https://%s/dist/%s' % (src_domain, tar_name)
return node_url
@contextlib.contextmanager
def tarfile_open(*args, **kwargs):
"""Compatibility layer because py26."""
tf = tarfile.open(*args, **kwargs)
try:
yield tf
finally:
tf.close()
def download_node(node_url, src_dir, env_dir, opt):
"""
Download the source tarball and extract it into src_dir
"""
tar_contents = io.BytesIO(urlopen(node_url).read())
with tarfile_open(fileobj=tar_contents) as tarfile_obj:
tarfile_obj.extractall(src_dir)
logger.info(')', extra=dict(continued=True))
def get_node_src_url_postfix(opt):
if not opt.prebuilt:
return ''
import platform
postfix_system = platform.system().lower()
arches = {'x86_64': 'x64', 'i686': 'x86'}
postfix_arch = arches[platform.machine()]
return '-{0}-{1}'.format(postfix_system, postfix_arch)
def urlopen(url):
home_url = "https://github.com/ekalinin/nodeenv/"
headers = {'User-Agent': 'nodeenv/%s (%s)' % (nodeenv_version, home_url)}
req = urllib2.Request(url, None, headers)
return urllib2.urlopen(req)
# ---------------------------------------------------------
# Virtual environment functions
def copy_node_from_prebuilt(env_dir, src_dir):
"""
Copy prebuilt binaries into environment
"""
logger.info('.', extra=dict(continued=True))
prefix = get_binary_prefix()
callit(['cp', '-a', src_dir + '/%s-v*/*' % prefix, env_dir], True, env_dir)
logger.info('.', extra=dict(continued=True))
def build_node_from_src(env_dir, src_dir, node_src_dir, opt):
env = {}
make_param_names = ['load-average', 'jobs']
make_param_values = map(
lambda x: getattr(opt, x.replace('-', '_')),
make_param_names)
make_opts = [
'--{0}={1}'.format(name, value)
if len(value) > 0 else '--{0}'.format(name)
for name, value in zip(make_param_names, make_param_values)
if value is not None
]
if getattr(sys.version_info, 'major', sys.version_info[0]) > 2:
# Currently, the node.js build scripts are using python2.*,
# therefore we need to temporarily point python exec to the
# python 2.* version in this case.
try:
_, which_python2_output = callit(
['which', 'python2'], opt.verbose, True, node_src_dir, env
)
python2_path = which_python2_output[0]
except (OSError, IndexError):
raise OSError(
'Python >=3.0 virtualenv detected, but no python2 '
'command (required for building node.js) was found'
)
logger.debug(' * Temporarily pointing python to %s', python2_path)
node_tmpbin_dir = join(src_dir, 'tmpbin')
node_tmpbin_link = join(node_tmpbin_dir, 'python')
mkdir(node_tmpbin_dir)
if not os.path.exists(node_tmpbin_link):
callit(['ln', '-s', python2_path, node_tmpbin_link])
env['PATH'] = '{}:{}'.format(node_tmpbin_dir,
os.environ.get('PATH', ''))
conf_cmd = []
conf_cmd.append('./configure')
conf_cmd.append('--prefix=%s' % pipes.quote(env_dir))
if opt.without_ssl:
conf_cmd.append('--without-ssl')
if opt.debug:
conf_cmd.append('--debug')
if opt.profile:
conf_cmd.append('--profile')
make_cmd = opt.make_path
callit(conf_cmd, opt.verbose, True, node_src_dir, env)
logger.info('.', extra=dict(continued=True))
callit([make_cmd] + make_opts, opt.verbose, True, node_src_dir, env)
logger.info('.', extra=dict(continued=True))
callit([make_cmd + ' install'], opt.verbose, True, node_src_dir, env)
def get_binary_prefix():
return to_utf8('node' if src_domain == 'nodejs.org' else 'iojs')
def install_node(env_dir, src_dir, opt):
"""
Download source code for node.js, unpack it
and install it in virtual environment.
"""
prefix = get_binary_prefix()
logger.info(' * Install %s (%s' % (prefix, opt.node),
extra=dict(continued=True))
node_url = get_node_src_url(opt.node, get_node_src_url_postfix(opt))
node_src_dir = join(src_dir, to_utf8('%s-v%s' % (prefix, opt.node)))
env_dir = abspath(env_dir)
# get src if not downloaded yet
if not os.path.exists(node_src_dir):
download_node(node_url, src_dir, env_dir, opt)
logger.info('.', extra=dict(continued=True))
if opt.prebuilt:
copy_node_from_prebuilt(env_dir, src_dir)
else:
build_node_from_src(env_dir, src_dir, node_src_dir, opt)
logger.info(' done.')
def install_npm(env_dir, src_dir, opt):
"""
Download the npm install script and run it
inside the virtual environment.
"""
logger.info(' * Install npm.js (%s) ... ' % opt.npm,
extra=dict(continued=True))
npm_contents = urlopen('https://www.npmjs.org/install.sh').read()
env = dict(
os.environ,
clean='no' if opt.no_npm_clean else 'yes',
npm_install=opt.npm,
)
proc = subprocess.Popen(
(
'bash', '-c',
'. {0} && exec bash'.format(
pipes.quote(join(env_dir, 'bin', 'activate')),
)
),
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = proc.communicate(npm_contents)
if opt.verbose:
logger.info(out)
logger.info('done.')
def install_packages(env_dir, opt):
"""
Install node.js packages via npm
"""
logger.info(' * Install node.js packages ... ',
extra=dict(continued=True))
packages = [package.strip() for package in
open(opt.requirements).readlines()]
activate_path = join(env_dir, 'bin', 'activate')
real_npm_ver = opt.npm if opt.npm.count(".") == 2 else opt.npm + ".0"
if opt.npm == "latest" or real_npm_ver >= "1.0.0":
cmd = '. ' + pipes.quote(activate_path) + \
' && npm install -g %(pack)s'
else:
cmd = '. ' + pipes.quote(activate_path) + \
' && npm install %(pack)s' + \
' && npm activate %(pack)s'
for package in packages:
if not package:
continue
callit(cmd=[
cmd % {"pack": package}], show_stdout=opt.verbose, in_shell=True)
logger.info('done.')
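# A hypothetical requirements file passed via --requirements; each non-empty
# line is handed to `npm install -g <pack>` (or `npm install`/`npm activate`
# for npm < 1.0.0):
#
#   coffee-script
#   express@4.13.3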
def install_activate(env_dir, opt):
"""
Install virtual environment activation script
"""
files = {'activate': ACTIVATE_SH, 'shim': SHIM}
if opt.node == "system":
files["node"] = SHIM
bin_dir = join(env_dir, 'bin')
mod_dir = join('lib', 'node_modules')
prompt = opt.prompt or '(%s)' % os.path.basename(os.path.abspath(env_dir))
mode_0755 = (stat.S_IRWXU | stat.S_IXGRP |
stat.S_IRGRP | stat.S_IROTH | stat.S_IXOTH)
shim_node = join(bin_dir, "node")
shim_nodejs = join(bin_dir, "nodejs")
if opt.node == "system":
env = os.environ.copy()
env.update({'PATH': remove_env_bin_from_path(env['PATH'], bin_dir)})
for candidate in ("nodejs", "node"):
which_node_output, _ = subprocess.Popen(
["which", candidate],
stdout=subprocess.PIPE, env=env).communicate()
shim_node = clear_output(which_node_output)
if shim_node:
break
assert shim_node, "Did not find nodejs or node system executable"
for name, content in files.items():
file_path = join(bin_dir, name)
content = content.replace('__NODE_VIRTUAL_PROMPT__', prompt)
content = content.replace('__NODE_VIRTUAL_ENV__',
os.path.abspath(env_dir))
content = content.replace('__SHIM_NODE__', shim_node)
content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
content = content.replace('__MOD_NAME__', mod_dir)
# if we call nodeenv twice in the same environment:
#   $ nodeenv -p --prebuilt
#   $ nodeenv -p --node=system
# `bin/node` must stay a real binary and not have the shim text appended.
# `bin/activate` should only be appended to when we are inside an
# existing python virtual environment
need_append = 0 if name in ('node', 'shim') else opt.python_virtualenv
writefile(file_path, content, append=need_append)
os.chmod(file_path, mode_0755)
if not os.path.exists(shim_nodejs):
os.symlink("node", shim_nodejs)
def create_environment(env_dir, opt):
"""
Creates a new environment in ``env_dir``.
"""
if os.path.exists(env_dir) and not opt.python_virtualenv:
logger.info(' * Environment already exists: %s', env_dir)
if not opt.force:
sys.exit(2)
src_dir = to_utf8(abspath(join(env_dir, 'src')))
mkdir(src_dir)
if opt.node != "system":
install_node(env_dir, src_dir, opt)
else:
mkdir(join(env_dir, 'bin'))
mkdir(join(env_dir, 'lib'))
mkdir(join(env_dir, 'lib', 'node_modules'))
# the activate script must be installed before npm,
# because the npm install step sources bin/activate
install_activate(env_dir, opt)
if node_version_from_opt(opt) < parse_version("0.6.3") or opt.with_npm:
install_npm(env_dir, src_dir, opt)
if opt.requirements:
install_packages(env_dir, opt)
# Cleanup
if opt.clean_src:
callit(['rm -rf', pipes.quote(src_dir)], opt.verbose, True, env_dir)
class GetsAHrefs(HTMLParser):
def __init__(self):
# Old style class in py2 :(
HTMLParser.__init__(self)
self.hrefs = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
self.hrefs.append(dict(attrs).get('href', ''))
VERSION_RE = re.compile(r'\d+\.\d+\.\d+')
def _py2_cmp(a, b):
# -1 = a < b, 0 = eq, 1 = a > b
return (a > b) - (a < b)
def compare_versions(version, other_version):
version_tuple = version.split('.')
other_tuple = other_version.split('.')
version_length = len(version_tuple)
other_length = len(other_tuple)
version_dots = min(version_length, other_length)
for i in range(version_dots):
a = int(version_tuple[i])
b = int(other_tuple[i])
cmp_value = _py2_cmp(a, b)
if cmp_value != 0:
return cmp_value
return _py2_cmp(version_length, other_length)
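# Assumed/illustrative behaviour of compare_versions():
#   compare_versions('0.10.30', '0.8.28') -> 1   (first argument is newer)
#   compare_versions('0.10.30', '0.10.30') -> 0
#   compare_versions('0.10', '0.10.1') -> -1     (shorter version sorts first)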
def get_node_versions():
response = urlopen('https://{0}/dist'.format(src_domain))
href_parser = GetsAHrefs()
href_parser.feed(response.read().decode('UTF-8'))
versions = set(
VERSION_RE.search(href).group()
for href in href_parser.hrefs
if VERSION_RE.search(href)
)
if is_PY3:
key_compare = cmp_to_key(compare_versions)
versions = sorted(versions, key=key_compare)
else:
versions = sorted(versions, cmp=compare_versions)
return versions
def print_node_versions():
"""
Prints all available node.js versions to stdout
"""
versions = get_node_versions()
chunks_of_8 = [
versions[pos:pos + 8] for pos in range(0, len(versions), 8)
]
for chunk in chunks_of_8:
logger.info('\t'.join(chunk))
def get_last_stable_node_version():
"""
Return last stable node.js version
"""
response = urlopen('https://%s/dist/latest/' % (src_domain))
href_parser = GetsAHrefs()
href_parser.feed(response.read().decode('UTF-8'))
links = []
pattern = re.compile(r'''%s-v([0-9]+)\.([0-9]+)\.([0-9]+)\.tar\.gz''' % (
get_binary_prefix()))
for href in href_parser.hrefs:
match = pattern.match(href)
if match:
version = u'.'.join(match.groups())
major, minor, revision = map(int, match.groups())
links.append((version, major, minor, revision))
break
return links[-1][0]
def get_env_dir(opt, args):
if opt.python_virtualenv:
if hasattr(sys, 'real_prefix'):
res = sys.prefix
elif hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
res = sys.prefix
else:
logger.error('No python virtualenv is available')
sys.exit(2)
else:
res = args[0]
return to_utf8(res)
def is_installed(name):
try:
devnull = open(os.devnull)
subprocess.Popen([name], stdout=devnull, stderr=devnull)
except OSError as e:
if e.errno == os.errno.ENOENT:
return False
return True
def main():
"""
Entry point
"""
# quick&dirty way to help update the README
if "--dump-config-defaults" in sys.argv:
Config._dump()
return
opt, args = parse_args(check=False)
Config._load(opt.config_file, opt.verbose)
opt, args = parse_args()
if opt.io:
global src_domain
src_domain = "iojs.org"
if not opt.node or opt.node.lower() == "latest":
opt.node = get_last_stable_node_version()
if opt.list:
print_node_versions()
elif opt.update:
env_dir = get_env_dir(opt, args)
install_packages(env_dir, opt)
else:
env_dir = get_env_dir(opt, args)
create_environment(env_dir, opt)
# ---------------------------------------------------------
# Shell scripts content
DISABLE_PROMPT = """
# disable nodeenv's prompt
# (prompt already changed by original virtualenv's script)
# https://github.com/ekalinin/nodeenv/issues/26
NODE_VIRTUAL_ENV_DISABLE_PROMPT=1
"""
ENABLE_PROMPT = """
unset NODE_VIRTUAL_ENV_DISABLE_PROMPT
"""
SHIM = """#!/usr/bin/env bash
export NODE_PATH=__NODE_VIRTUAL_ENV__/lib/node_modules
export NPM_CONFIG_PREFIX=__NODE_VIRTUAL_ENV__
export npm_config_prefix=__NODE_VIRTUAL_ENV__
exec __SHIM_NODE__ "$@"
"""
ACTIVATE_SH = """
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate_node () {
# reset old environment variables
if [ -n "$_OLD_NODE_VIRTUAL_PATH" ] ; then
PATH="$_OLD_NODE_VIRTUAL_PATH"
export PATH
unset _OLD_NODE_VIRTUAL_PATH
NODE_PATH="$_OLD_NODE_PATH"
export NODE_PATH
unset _OLD_NODE_PATH
NPM_CONFIG_PREFIX="$_OLD_NPM_CONFIG_PREFIX"
npm_config_prefix="$_OLD_npm_config_prefix"
export NPM_CONFIG_PREFIX
export npm_config_prefix
unset _OLD_NPM_CONFIG_PREFIX
unset _OLD_npm_config_prefix
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
if [ -n "$_OLD_NODE_VIRTUAL_PS1" ] ; then
PS1="$_OLD_NODE_VIRTUAL_PS1"
export PS1
unset _OLD_NODE_VIRTUAL_PS1
fi
unset NODE_VIRTUAL_ENV
if [ ! "$1" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate_node
fi
}
freeze () {
local NPM_VER=`npm -v | cut -d '.' -f 1`
local re="[a-zA-Z0-9\.\-]+@[0-9]+\.[0-9]+\.[0-9]+([\+\-][a-zA-Z0-9\.\-]+)*"
if [ "$NPM_VER" = '0' ]; then
NPM_LIST=`npm list installed active 2>/dev/null | \
cut -d ' ' -f 1 | grep -v npm`
else
local npmls="npm ls -g"
if [ "$1" = "-l" ]; then
npmls="npm ls"
shift
fi
NPM_LIST=$(eval ${npmls} | grep -E '^.{4}\w{1}'| \
grep -o -E "$re"| grep -v npm)
fi
if [ -z "$@" ]; then
echo "$NPM_LIST"
else
echo "$NPM_LIST" > $@
fi
}
# unset irrelevant variables
deactivate_node nondestructive
# find the directory of this script
# http://stackoverflow.com/a/246128
if [ "${BASH_SOURCE}" ] ; then
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
NODE_VIRTUAL_ENV="$(dirname "$DIR")"
else
# dash has no BASH_SOURCE, so the script location cannot be resolved.
# fall back to the hard-coded path for the use case:
# dash -c " . node-env/bin/activate && node -v"
NODE_VIRTUAL_ENV="__NODE_VIRTUAL_ENV__"
fi
# NODE_VIRTUAL_ENV is the parent of the directory where this script is
export NODE_VIRTUAL_ENV
_OLD_NODE_VIRTUAL_PATH="$PATH"
PATH="$NODE_VIRTUAL_ENV/__BIN_NAME__:$PATH"
export PATH
_OLD_NODE_PATH="$NODE_PATH"
NODE_PATH="$NODE_VIRTUAL_ENV/__MOD_NAME__"
export NODE_PATH
_OLD_NPM_CONFIG_PREFIX="$NPM_CONFIG_PREFIX"
_OLD_npm_config_prefix="$npm_config_prefix"
NPM_CONFIG_PREFIX="$NODE_VIRTUAL_ENV"
npm_config_prefix="$NODE_VIRTUAL_ENV"
export NPM_CONFIG_PREFIX
export npm_config_prefix
if [ -z "$NODE_VIRTUAL_ENV_DISABLE_PROMPT" ] ; then
_OLD_NODE_VIRTUAL_PS1="$PS1"
if [ "x__NODE_VIRTUAL_PROMPT__" != x ] ; then
PS1="__NODE_VIRTUAL_PROMPT__$PS1"
else
if [ "`basename \"$NODE_VIRTUAL_ENV\"`" = "__" ] ; then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
PS1="[`basename \`dirname \"$NODE_VIRTUAL_ENV\"\``] $PS1"
else
PS1="(`basename \"$NODE_VIRTUAL_ENV\"`)$PS1"
fi
fi
export PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
"""
if __name__ == '__main__':
main()
|
ford-prefect/cerbero
|
refs/heads/openwebrtc
|
cerbero/errors.py
|
16
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from gettext import gettext as _
class CerberoException(Exception):
header = ''
msg = ''
def __init__(self, msg=''):
self.msg = msg
Exception.__init__(self, self.header + msg)
class ConfigurationError(CerberoException):
header = 'Configuration Error: '
class UsageError(CerberoException):
header = 'Usage Error: '
class FatalError(CerberoException):
header = 'Fatal Error: '
class CommandError(CerberoException):
header = 'Command Error: '
class BuildStepError(CerberoException):
recipe = ''
step = ''
def __init__(self, recipe, step, trace=''):
self.recipe = recipe
self.step = step
CerberoException.__init__(self, _("Recipe '%s' failed at the build "
"step '%s'\n%s") % (recipe, step, trace))
class RecipeNotFoundError(CerberoException):
def __init__(self, recipe):
CerberoException.__init__(self, _("Recipe '%s' not found") % recipe)
class PackageNotFoundError(CerberoException):
def __init__(self, package):
CerberoException.__init__(self, _("Package '%s' not found") % package)
class EmptyPackageError(CerberoException):
def __init__(self, package):
CerberoException.__init__(self, _("Package '%s' is empty") % package)
class MissingPackageFilesError(CerberoException):
def __init__(self, files):
CerberoException.__init__(self, _("The following files required by "
"this package are missing:\n %s") % '\n'.join(files))
class InvalidRecipeError(CerberoException):
def __init__(self, recipe, message=''):
CerberoException.__init__(self,
_("Recipe %s is invalid:\n%s") % (recipe, message))
class AbortedError(Exception):
pass
|
Aasmi/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/__init__.py
|
306
|
"""
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest']
|
guedou/scapy-codecov
|
refs/heads/master
|
scapy/arch/__init__.py
|
2
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Operating system specific functionality.
"""
import socket
from scapy.arch.consts import LINUX, OPENBSD, FREEBSD, NETBSD, DARWIN, \
SOLARIS, WINDOWS, BSD, X86_64, ARM_64, LOOPBACK_NAME
from scapy.error import *
import scapy.config
from scapy.pton_ntop import inet_pton
try:
from matplotlib import get_backend as matplotlib_get_backend
import matplotlib.pyplot as plt
MATPLOTLIB = 1
if "inline" in matplotlib_get_backend():
MATPLOTLIB_INLINED = 1
else:
MATPLOTLIB_INLINED = 0
MATPLOTLIB_DEFAULT_PLOT_KARGS = {"marker": "+"}
# RuntimeError to catch gtk "Cannot open display" error
except (ImportError, RuntimeError):
plt = None
MATPLOTLIB = 0
MATPLOTLIB_INLINED = 0
MATPLOTLIB_DEFAULT_PLOT_KARGS = dict()
log_loading.info("Can't import matplotlib. Won't be able to plot.")
try:
import pyx
PYX=1
except ImportError:
log_loading.info("Can't import PyX. Won't be able to use psdump() or pdfdump().")
PYX=0
def str2mac(s):
return ("%02x:"*6)[:-1] % tuple(map(ord, s))
if not WINDOWS:
if not scapy.config.conf.use_pcap and not scapy.config.conf.use_dnet:
from scapy.arch.bpf.core import get_if_raw_addr
def get_if_addr(iff):
return socket.inet_ntoa(get_if_raw_addr(iff))
def get_if_hwaddr(iff):
addrfamily, mac = get_if_raw_hwaddr(iff)
if addrfamily in [ARPHDR_ETHER,ARPHDR_LOOPBACK]:
return str2mac(mac)
else:
raise Scapy_Exception("Unsupported address family (%i) for interface [%s]" % (addrfamily,iff))
# Next step is to import the following architecture-specific functions:
# def get_if_raw_hwaddr(iff)
# def get_if_raw_addr(iff):
# def get_if_list():
# def get_working_if():
# def attach_filter(s, filter, iface):
# def set_promisc(s,iff,val=1):
# def read_routes():
# def read_routes6():
# def get_if(iff,cmd):
# def get_if_index(iff):
if LINUX:
from scapy.arch.linux import *
if scapy.config.conf.use_pcap or scapy.config.conf.use_dnet:
from scapy.arch.pcapdnet import *
elif BSD:
from scapy.arch.unix import read_routes, read_routes6, in6_getifaddr
if scapy.config.conf.use_pcap or scapy.config.conf.use_dnet:
from scapy.arch.pcapdnet import *
else:
from scapy.arch.bpf.supersocket import L2bpfListenSocket, L2bpfSocket, L3bpfSocket
from scapy.arch.bpf.core import *
scapy.config.conf.use_bpf = True
scapy.config.conf.L2listen = L2bpfListenSocket
scapy.config.conf.L2socket = L2bpfSocket
scapy.config.conf.L3socket = L3bpfSocket
elif SOLARIS:
from scapy.arch.solaris import *
elif WINDOWS:
from scapy.arch.windows import *
from scapy.arch.windows.compatibility import *
if scapy.config.conf.iface is None:
scapy.config.conf.iface = LOOPBACK_NAME
def get_if_addr6(iff):
"""
Returns the main global unicast address associated with provided
interface, in human readable form. If no global address is found,
None is returned.
"""
for x in in6_getifaddr():
if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL:
return x[0]
return None
def get_if_raw_addr6(iff):
"""
Returns the main global unicast address associated with provided
interface, in network format. If no global address is found, None
is returned.
"""
ip6 = get_if_addr6(iff)
if ip6 is not None:
return inet_pton(socket.AF_INET6, ip6)
return None
|
basnijholt/holoviews
|
refs/heads/master
|
holoviews/tests/plotting/plotly/testscatterplot.py
|
2
|
import numpy as np
from holoviews.element import Scatter
from .testplot import TestPlotlyPlot
class TestScatterPlot(TestPlotlyPlot):
def test_scatter_state(self):
scatter = Scatter([3, 2, 1])
state = self._get_plot_state(scatter)
self.assertEqual(state['data'][0]['y'], np.array([3, 2, 1]))
self.assertEqual(state['data'][0]['mode'], 'markers')
self.assertEqual(state['layout']['yaxis']['range'], [1, 3])
def test_scatter_inverted(self):
scatter = Scatter([1, 2, 3]).options(invert_axes=True)
state = self._get_plot_state(scatter)
self.assertEqual(state['data'][0]['x'], np.array([1, 2, 3]))
self.assertEqual(state['data'][0]['y'], np.array([0, 1, 2]))
self.assertEqual(state['data'][0]['mode'], 'markers')
self.assertEqual(state['layout']['xaxis']['range'], [1, 3])
self.assertEqual(state['layout']['yaxis']['range'], [0, 2])
self.assertEqual(state['layout']['xaxis']['title'], 'y')
self.assertEqual(state['layout']['yaxis']['title'], 'x')
def test_scatter_color_mapped(self):
scatter = Scatter([3, 2, 1]).options(color='x')
state = self._get_plot_state(scatter)
self.assertEqual(state['data'][0]['marker']['color'], np.array([0, 1, 2]))
self.assertEqual(state['data'][0]['marker']['cmin'], 0)
self.assertEqual(state['data'][0]['marker']['cmax'], 2)
def test_scatter_size(self):
scatter = Scatter([3, 2, 1]).options(size='y')
state = self._get_plot_state(scatter)
self.assertEqual(state['data'][0]['marker']['size'], np.array([3, 2, 1]))
def test_scatter_colors(self):
scatter = Scatter([
(0, 1, 'red'), (1, 2, 'green'), (2, 3, 'blue')
], vdims=['y', 'color']).options(color='color')
state = self._get_plot_state(scatter)
self.assertEqual(state['data'][0]['marker']['color'],
np.array(['red', 'green', 'blue']))
def test_scatter_markers(self):
scatter = Scatter([
(0, 1, 'square'), (1, 2, 'circle'), (2, 3, 'triangle-up')
], vdims=['y', 'marker']).options(marker='marker')
state = self._get_plot_state(scatter)
self.assertEqual(state['data'][0]['marker']['symbol'],
np.array(['square', 'circle', 'triangle-up']))
|
markeTIC/OCB
|
refs/heads/8.0
|
addons/website_sale/__openerp__.py
|
299
|
{
'name': 'eCommerce',
'category': 'Website',
'summary': 'Sell Your Products Online',
'website': 'https://www.odoo.com/page/e-commerce',
'version': '1.0',
'description': """
OpenERP E-Commerce
==================
""",
'author': 'OpenERP SA',
'depends': ['website', 'sale', 'payment'],
'data': [
'data/data.xml',
'views/views.xml',
'views/templates.xml',
'views/payment.xml',
'views/sale_order.xml',
'security/ir.model.access.csv',
'security/website_sale.xml',
],
'demo': [
'data/demo.xml',
],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
'application': True,
}
|
jirutka/ssh-ldap-pubkey
|
refs/heads/master
|
ssh_ldap_pubkey/exceptions.py
|
1
|
# -*- coding: utf-8 -*-
class Error(Exception):
def __init__(self, msg, code=1):
self.msg = msg
self.code = code
def __str__(self):
return self.msg
class ConfigError(Error): pass
class InsufficientAccessError(Error): pass
class InvalidCredentialsError(Error): pass
class InvalidPubKeyError(Error): pass
class LDAPConnectionError(Error): pass
class NoPubKeyFoundError(Error): pass
class PubKeyAlreadyExistsError(Error): pass
class UserEntryNotFoundError(Error): pass
|
globz-eu/infrastructure
|
refs/heads/master
|
chef-repo/cookbooks/install_scripts/files/scripts/tests/test_commandfileutils.py
|
1
|
from unittest import TestCase
from unittest.mock import call
from unittest import mock
import os
import re
import shutil
import stat
import datetime
from utilities.commandfileutils import CommandFileUtils
from tests.conf_tests import DIST_VERSION, LOG_FILE, TEST_DIR, LOG_LEVEL
from tests.runandlogtest import RunAndLogTest
from tests.mocks.commandfileutils_mocks import own_app_mock
from tests.helpers import remove_test_dir
__author__ = 'Stefan Dieterle'
class RunCommandTest(TestCase):
"""
tests RunCommand methods
"""
def setUp(self):
self.dist_version = DIST_VERSION
self.log_file = LOG_FILE
self.log_level = LOG_LEVEL
if os.path.exists(self.log_file):
os.remove(self.log_file)
remove_test_dir()
def log(self, message, test=True):
"""
tests the presence or absence of a message or regex in the log file
:param message: message to test
:param test: tests presence (True) or absence (False)
"""
with open(self.log_file) as log:
log_list = [l[20:] for l in log]
if test:
self.assertTrue('%s\n' % message in log_list, log_list)
else:
self.assertFalse('%s\n' % message in log_list, log_list)
def test_run_command(self):
"""
tests that run_command runs the given command and writes to the log
"""
cmd = ['ls', '-la']
msg = 'successfully ran command'
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
args = (cmd, msg)
runlog.run_command(*args)
self.log('INFO: %s' % msg)
def test_run_command_exits_on_error(self):
"""
tests that run_command exits on error and writes to the log
"""
cmd = ['ls', 'fjlskhgtioeb.bla']
msg = 'successfully ran command'
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
args = (cmd, msg)
try:
runlog.run_command(*args)
self.fail('command did not raise error')
except SystemExit:
self.log('ERROR: ls fjlskhgtioeb.bla exited with exit code 2')
def test_run_command_exits_on_error_and_does_not_log_when_log_error_is_false(self):
"""
tests that run_command exits on error and does not write to the log when log_error is false
"""
cmd = ['ls', 'fjlskhgtioeb.bla']
msg = 'successfully ran command'
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
args = (cmd, msg)
try:
runlog.run_command(*args, log_error=False)
self.fail('command did not raise error')
except SystemExit:
with open(self.log_file) as log:
log_content = log.readlines()
self.assertFalse('ERROR: ls fjlskhgtioeb.bla exited with exit code 2' in log_content, log_content)
class CommandFileUtilsTest(RunAndLogTest):
"""
Tests CommandFileUtils
"""
def setUp(self):
RunAndLogTest.setUp(self)
self.test_dir = TEST_DIR
if self.dist_version == '14.04':
self.python_version = 'python3.4'
elif self.dist_version == '16.04':
self.python_version = 'python3.5'
def test_commandfileutils_exits_on_unknown_dist_version(self):
try:
CommandFileUtils('Invalid_dist_version', self.log_file, self.log_level)
except SystemExit as error:
self.assertEqual(1, error.code, 'CommandFileUtils exited with: %s' % str(error))
self.log('CRITICAL: distribution not supported')
def test_write_to_log(self):
"""
tests that write_to_log writes messages to log
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, 'DEBUG')
msgs = [
['debug message', 'DEBUG'],
['info message', 'INFO'],
['warning message', 'WARNING'],
['error message', 'ERROR'],
['critical message', 'CRITICAL']
]
for m, ll in msgs:
runlog.write_to_log(m, ll)
self.log('%s: %s' % (ll, m))
def test_write_sequencially_to_log(self):
"""
tests that write_to_log writes all messages to log
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, 'DEBUG')
msgs = [
['debug message', 'DEBUG'],
['info message', 'INFO'],
['warning message', 'WARNING'],
['error message', 'ERROR'],
['critical message', 'CRITICAL']
]
for m, ll in msgs:
runlog.write_to_log(m, ll)
for m, ll in msgs:
self.log('%s: %s' % (ll, m))
def test_write_to_log_adds_timestamp_to_message(self):
"""
tests that write_to_log adds the current time to messages
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, 'DEBUG')
msgs = [
['debug message', 'DEBUG'],
['info message', 'INFO'],
['warning message', 'WARNING'],
['error message', 'ERROR'],
['critical message', 'CRITICAL']
]
now = datetime.datetime.utcnow()
for m, ll in msgs:
runlog.write_to_log(m, ll)
with open(self.log_file) as log:
log_list = [l[:19] for l in log][-5:]
for l in log_list:
self.assertEqual(now.strftime('%Y-%m-%d %H:%M:%S'), l, l)
def test_write_to_log_exits_when_log_level_is_not_specified(self):
"""
tests that write_to_log uses default log level (DEBUG) when level is not specified and exits when log level is
invalid
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, 'DEBUG')
msgs = [
['warning message', 'WARNING'],
['other warning message', 'WARNING'],
['debug message', 'IMPORTANT'],
['info message', 'INFO'],
['default info message', False],
]
for m, ll in msgs:
try:
runlog.write_to_log(m, ll)
self.log('%s: %s' % (ll, m))
except SystemExit as error:
self.assertEqual(1, error.code, '%s exited with: %s' % ('write_to_log', str(error)))
self.log('ERROR: log level "%s" is not specified or not valid' % ll)
def test_write_to_log_only_logs_messages_of_appropriate_log_level(self):
"""
tests that write_to_log only writes messages with appropriate log level to log
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, 'ERROR')
msgs_log = [
['error message', 'ERROR'],
['CRITICAL message', 'CRITICAL']
]
msgs_no_log = [
['debug message', 'DEBUG'],
['info message', 'INFO'],
]
for m, ll in msgs_log:
runlog.write_to_log(m, ll)
self.log('%s: %s' % (ll, m))
for m, ll in msgs_no_log:
runlog.write_to_log(m, ll)
self.log('%s: %s' % (ll, m), test=False)
def test_run_command(self):
"""
tests that run_command runs the given command, exits on error and writes to the log
"""
cmd = ['ls', '-la']
msg = 'successfully ran command'
func = 'run_and_log'
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
args = (cmd, msg)
self.run_success([cmd], ['%s' % msg], func, runlog.run_command, args)
self.run_error(cmd, func, runlog.run_command, args)
def test_walktree(self):
app_home_nested_file = os.path.join(self.test_dir, 'dir', 'file')
os.makedirs(os.path.join(self.test_dir, 'dir'))
with open(app_home_nested_file, 'w') as file:
file.write('some text')
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
runlog.walktree(self.test_dir, runlog.write_to_log, ('INFO', ), runlog.write_to_log, ('INFO', ))
paths = ['/tmp/scripts_test/dir',
'/tmp/scripts_test/dir/file']
for p in paths:
self.log('INFO: %s' % p)
def test_walktree_with_no_file_function(self):
app_home_nested_file = os.path.join(self.test_dir, 'dir', 'file')
os.makedirs(os.path.join(self.test_dir, 'dir'))
with open(app_home_nested_file, 'w') as file:
file.write('some text')
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
runlog.walktree(self.test_dir, d_callback=runlog.write_to_log, d_args=('INFO', ))
paths = ['/tmp/scripts_test/dir']
for p in paths:
self.log('INFO: %s' % p)
self.log('ERROR:', test=False, regex=True)
def test_walktree_with_no_file_function_args(self):
app_home_nested_file = os.path.join(self.test_dir, 'dir', 'file')
os.makedirs(os.path.join(self.test_dir, 'dir'))
with open(app_home_nested_file, 'w') as file:
file.write('some text')
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
runlog.walktree(self.test_dir, f_callback=runlog.write_to_log)
paths = ['/tmp/scripts_test/dir/file']
for p in paths:
self.log('DEBUG: %s' % p)
def test_walktree_with_no_directory_function(self):
app_home_nested_file = os.path.join(self.test_dir, 'dir', 'file')
os.makedirs(os.path.join(self.test_dir, 'dir'))
with open(app_home_nested_file, 'w') as file:
file.write('some text')
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
runlog.walktree(self.test_dir, f_callback=runlog.write_to_log, f_args=('INFO', ))
paths = ['/tmp/scripts_test/dir/file']
for p in paths:
self.log('INFO: %s' % p)
self.log('ERROR:', test=False, regex=True)
def test_walktree_with_no_directory_function_args(self):
app_home_nested_file = os.path.join(self.test_dir, 'dir', 'file')
os.makedirs(os.path.join(self.test_dir, 'dir'))
with open(app_home_nested_file, 'w') as file:
file.write('some text')
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
runlog.walktree(self.test_dir, d_callback=runlog.write_to_log)
paths = ['/tmp/scripts_test/dir']
for p in paths:
self.log('DEBUG: %s' % p)
def test_walktree_exits_when_it_encounters_permission_error(self):
"""
tests that walktree exits when it encounters a permission error while walking
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
try:
runlog.walktree('/etc', os.path.isfile, (), os.listdir, ())
except SystemExit as error:
self.assertEqual(1, error.code, '%s exited with: %s' % ('walktree', str(error)))
self.log('ERROR: Permission denied on: /etc/cups/ssl')
def test_permissions_recursive(self):
"""
tests permissions assigns permissions recursively and writes to log
"""
test_permissions = [
['500', '700', '-r-x------', 'drwx------'],
['400', '500', '-r--------', 'dr-x------'],
['550', '770', '-r-xr-x---', 'drwxrwx---'],
['440', '550', '-r--r-----', 'dr-xr-x---'],
['644', '755', '-rw-r--r--', 'drwxr-xr-x'],
['755', '755', '-rwxr-xr-x', 'drwxr-xr-x']
]
app_home_nested_file = os.path.join(self.test_dir, 'dir', 'file')
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
for i in test_permissions:
os.makedirs(os.path.join(self.test_dir, 'dir'))
with open(app_home_nested_file, 'w') as file:
file.write('some text')
runlog.permissions(self.test_dir, i[0], i[1], recursive=True)
app_home_files = []
app_home_dirs = []
for root, dirs, files in os.walk(self.test_dir):
for name in files:
app_home_files.append(os.path.join(root, name))
for name in dirs:
app_home_dirs.append(os.path.join(root, name))
app_home_dirs.append(self.test_dir)
for a in app_home_files:
self.assertEqual(i[2], stat.filemode(os.stat(a).st_mode), stat.filemode(os.stat(a).st_mode))
for a in app_home_dirs:
self.assertEqual(i[3], stat.filemode(os.stat(a).st_mode), stat.filemode(os.stat(a).st_mode))
self.log('INFO: changed permissions of %s files to %s and directories to %s' % (
self.test_dir, i[0], i[1]
))
os.chmod(self.test_dir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
for root, dirs, files in os.walk(self.test_dir):
for name in dirs:
os.chmod(os.path.join(root, name), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
shutil.rmtree(self.test_dir)
def test_permissions_non_recursive(self):
"""
tests that permissions assigns permissions non-recursively and writes to the log
"""
test_permissions = [
[{'path': '/tmp/scripts_test', 'dir_permissions': '500'}, 'dr-x------'],
[{'path': '/tmp/scripts_test/dir', 'dir_permissions': '770'},
'drwxrwx---'],
[{'path': '/tmp/scripts_test/dir/file', 'file_permissions': '400'},
'-r--------'],
]
app_home_nested_file = os.path.join(self.test_dir, 'dir', 'file')
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
for i in test_permissions:
os.makedirs(os.path.join(self.test_dir, 'dir'))
with open(app_home_nested_file, 'w') as file:
file.write('some text')
runlog.permissions(**i[0])
self.assertEqual(i[1], stat.filemode(os.stat(i[0]['path']).st_mode), stat.filemode(os.stat(i[0]['path']).st_mode))
if os.path.isdir(i[0]['path']):
self.log('INFO: changed permissions of %s to %s' % (i[0]['path'], i[0]['dir_permissions']))
elif os.path.isfile(i[0]['path']):
self.log('INFO: changed permissions of %s to %s' % (i[0]['path'], i[0]['file_permissions']))
os.chmod(self.test_dir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
for root, dirs, files in os.walk(self.test_dir):
for name in dirs:
os.chmod(os.path.join(root, name), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
shutil.rmtree(self.test_dir)
def test_check_pending_returns_correct_list(self):
"""
tests that check_pending only returns directories for pending or acceptance tests
"""
sample_dirs = ['/bla/pending_tests', '/bli/blo/acceptance_tests', '/bla/blup/some_other_dir']
expected_dirs = ['/bla/pending_tests', '/bli/blo/acceptance_tests']
cfu = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
for s in sample_dirs:
cfu.check_pending(s)
self.assertEqual(expected_dirs, cfu.pending_dirs, cfu.pending_dirs)
def test_get_pending_dirs_returns_dirs_with_pending_tests(self):
"""
tests that get_pending_dirs returns a list of directory paths for pending tests
"""
os.makedirs(os.path.join(self.test_dir, 'dir', 'acceptance_tests'), exist_ok=True)
os.makedirs(os.path.join(self.test_dir, 'dir', 'dir', 'functional_tests', 'pending_tests'), exist_ok=True)
os.makedirs(os.path.join(self.test_dir, 'dir', 'base', 'unit_tests', 'pending_tests'), exist_ok=True)
cfu = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
pending_dirs = cfu.get_pending_dirs(self.test_dir, 'dir')
expected_pending_dirs = [
os.path.join('.', 'acceptance_tests'),
os.path.join('.', 'base', 'unit_tests', 'pending_tests'),
os.path.join('.', 'dir', 'functional_tests', 'pending_tests'),
]
self.assertEqual(expected_pending_dirs, pending_dirs, pending_dirs)
@mock.patch.object(CommandFileUtils, 'own', side_effect=own_app_mock)
def test_own_manages_ownership(self, own_app_mock):
"""
tests that own manages ownership and writes to log.
"""
app_home_nested_file = os.path.join(self.test_dir, 'dir', 'file')
os.makedirs(os.path.join(self.test_dir, 'dir'))
with open(app_home_nested_file, 'w') as file:
file.write('some text')
user = 'app_user'
group = 'app_user'
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
runlog.own(self.test_dir, user, group)
self.assertEqual([call(self.test_dir, user, group)], own_app_mock.mock_calls, own_app_mock.mock_calls)
self.log('INFO: changed ownership of %s to %s:%s' % (self.test_dir, user, user))
def test_check_process(self):
"""
tests that check_process returns True when process is running and False otherwise
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, self.log_level)
proc = runlog.check_process('python')
self.assertTrue(proc, '%s process is running: %s' % ('python', proc))
proc = runlog.check_process('SomeVeryUnlikelyProcessName')
self.assertFalse(proc, proc)
class WriteToLogTest(TestCase):
"""
Tests write_to_log functionality in TestCase situation (when log file is deleted between tests)
"""
def setUp(self):
self.dist_version = DIST_VERSION
self.log_file = LOG_FILE
if os.path.isfile(self.log_file):
os.remove(self.log_file)
def log(self, message, test=True, regex=False):
"""
tests the presence or absence of a message or regex in the log file
:param message: message to test
:param test: tests presence (True) or absence (False)
:param regex: tests using regex if True
"""
with open(self.log_file) as log:
log_list = [l[20:] for l in log]
if test:
if regex:
matches = [l for l in log_list if re.match(message, l)]
self.assertTrue(matches, '%s not found' % message)
else:
self.assertTrue('%s\n' % message in log_list, 'message: \'%s\', log_list: %s' % (message, log_list))
else:
if regex:
matches = [l for l in log_list if re.match(message, l)]
self.assertFalse(matches, '"%s" found in %s' % (message, matches))
else:
self.assertFalse('%s\n' % message in log_list, log_list)
def test_basic_functionality(self):
"""
tests basic logging functionality
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, 'DEBUG')
msgs = [
['debug message', 'DEBUG'],
['info message', 'INFO'],
['warning message', 'WARNING'],
['error message', 'ERROR'],
['critical message', 'CRITICAL']
]
for m, ll in msgs:
runlog.write_to_log(m, ll)
for m, ll in msgs:
self.log('%s: %s' % (ll, m))
def test_level_functionality(self):
"""
tests logging functionality when log level is higher than DEBUG
"""
runlog = CommandFileUtils(self.dist_version, self.log_file, 'INFO')
msgs = [
['debug message', 'DEBUG'],
['info message', 'INFO'],
['warning message', 'WARNING'],
['error message', 'ERROR'],
['critical message', 'CRITICAL']
]
for m, ll in msgs:
runlog.write_to_log(m, ll)
for m, ll in msgs[1:]:
self.log('%s: %s' % (ll, m))
self.log('%s: %s' % (msgs[0][1], msgs[0][0]), test=False)
|
ceache/treadmill
|
refs/heads/master
|
lib/python/treadmill/runtime/__init__.py
|
2
|
"""Treadmill runtime framework.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import itertools
import logging
import os
import random
import socket
import tarfile
import six
if os.name == 'posix':
import stat
from treadmill import appcfg
from treadmill import exc
from treadmill import fs
from treadmill import utils
from treadmill import plugin_manager
from treadmill.appcfg import abort as app_abort
from treadmill.appcfg import manifest as app_manifest
STATE_JSON = 'state.json'
_LOGGER = logging.getLogger(__name__)
_ARCHIVE_LIMIT = utils.size_to_bytes('1G')
_RUNTIME_NAMESPACE = 'treadmill.runtime'
if os.name == 'posix':
# Disable C0413: should be placed at the top of the module.
from treadmill import iptables # pylint: disable=c0413
PORT_SPAN = iptables.PORT_SPAN
PROD_PORT_LOW = iptables.PROD_PORT_LOW
PROD_PORT_HIGH = iptables.PROD_PORT_HIGH
NONPROD_PORT_LOW = iptables.NONPROD_PORT_LOW
NONPROD_PORT_HIGH = iptables.NONPROD_PORT_HIGH
else:
PORT_SPAN = 8192
PROD_PORT_LOW = 32768
PROD_PORT_HIGH = PROD_PORT_LOW + PORT_SPAN - 1
NONPROD_PORT_LOW = PROD_PORT_LOW + PORT_SPAN
NONPROD_PORT_HIGH = NONPROD_PORT_LOW + PORT_SPAN - 1
def get_runtime_cls(runtime_name):
"""Get runtime classs
Raise Key exception if runtime class does not exist
"""
try:
runtime_cls = plugin_manager.load(_RUNTIME_NAMESPACE, runtime_name)
return runtime_cls
except KeyError:
_LOGGER.error('Runtime not supported: %s', runtime_name)
raise
def get_runtime(runtime_name, tm_env, container_dir, param=None):
"""Gets the runtime implementation with the given name."""
runtime_cls = get_runtime_cls(runtime_name)
return runtime_cls(tm_env, container_dir, param)
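# Illustrative usage sketch (not part of the module): 'linux' is an assumed
# plugin name registered under the 'treadmill.runtime' entry-point namespace,
# and run() is the method a runtime implementation is expected to expose.
#
#     runtime = get_runtime('linux', tm_env, container_dir)
#     runtime.run()
#
# The available runtime names depend on which plugins are installed.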
def load_app_safe(container, container_dir, app_json=STATE_JSON):
"""Load app manifest as object.
If the app manifest is corrupted or invalid, return an object with the key attributes.
"""
try:
return load_app(container_dir, app_json=app_json)
except ValueError as err:
_LOGGER.error('Manifest file is corrupted or invalid: %s', err)
appname = appcfg.app_name(container)
return utils.to_obj({
'name': appname,
'app': appcfg.appname_basename(appname),
'task': appcfg.appname_task_id(appname),
'uniqueid': appcfg.app_unique_id(container),
})
def load_app(container_dir, app_json=STATE_JSON):
"""Load app manifest as object."""
manifest_file = os.path.join(container_dir, app_json)
try:
manifest = app_manifest.read(manifest_file)
_LOGGER.debug('Manifest: %r', manifest)
return utils.to_obj(manifest)
except IOError as err:
if err.errno != errno.ENOENT:
raise
_LOGGER.info('Manifest file does not exist: %s', manifest_file)
return None
def save_app(manifest, container_dir, app_json=STATE_JSON):
"""Saves app manifest and freezes to object."""
# Save the manifest with allocated vip and ports in the state
#
state_file = os.path.join(container_dir, app_json)
fs.write_safe(
state_file,
lambda f: f.writelines(
utils.json_genencode(manifest)
),
mode='w',
# chmod for the file to be world readable.
permission=0o644
)
# Freeze the app data into a namedtuple object
return utils.to_obj(manifest)
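# Illustrative round trip (sketch only, the container path is hypothetical):
#
#     app = save_app(manifest, '/var/tmp/treadmill/apps/proid.app-001')
#     ...
#     app = load_app('/var/tmp/treadmill/apps/proid.app-001')
#
# save_app() writes state.json under the container directory and returns the
# manifest frozen as an object; load_app() reads the same file back and
# returns None if it does not exist.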
def _allocate_sockets(environment, host_ip, sock_type, count):
"""Return a list of `count` socket bound to an ephemeral port.
"""
# TODO: this should probably be abstracted away
if environment in ('uat', 'prod'):
port_pool = six.moves.range(PROD_PORT_LOW, PROD_PORT_HIGH + 1)
else:
port_pool = six.moves.range(NONPROD_PORT_LOW, NONPROD_PORT_HIGH + 1)
port_pool = random.sample(port_pool, PORT_SPAN)
# socket objects are closed on GC so we need to return
# them and expect the caller to keep them around while needed
sockets = []
for real_port in port_pool:
if len(sockets) == count:
break
socket_ = socket.socket(socket.AF_INET, sock_type)
try:
socket_.bind((host_ip, real_port))
if sock_type == socket.SOCK_STREAM:
socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socket_.listen(0)
except socket.error as err:
if err.errno == errno.EADDRINUSE:
continue
raise
if six.PY3:
# We want the sockets to survive an execv
socket_.set_inheritable(True)
sockets.append(socket_)
else:
raise exc.ContainerSetupError('{0} < {1}'.format(len(sockets), count),
app_abort.AbortedReason.PORTS)
return sockets
def _allocate_network_ports_proto(host_ip, manifest, proto, so_type):
"""Allocate ports for named and unnamed endpoints given protocol."""
ephemeral_count = manifest['ephemeral_ports'].get(proto, 0)
endpoints = [ep for ep in manifest['endpoints']
if ep.get('proto', 'tcp') == proto]
endpoints_count = len(endpoints)
sockets = _allocate_sockets(
manifest['environment'],
host_ip,
so_type,
endpoints_count + ephemeral_count
)
for idx, endpoint in enumerate(endpoints):
sock = sockets[idx]
endpoint['real_port'] = sock.getsockname()[1]
# Specifying port 0 tells appmgr that the application wants to
# have the same numeric port value in the container and in
# the public interface.
#
# This is needed for applications that advertise ports they
# listen on to other members of the app/cluster.
if endpoint['port'] == 0:
endpoint['port'] = endpoint['real_port']
# Ephemeral ports are the rest of the ports
manifest['ephemeral_ports'][proto] = [
sock.getsockname()[1]
for sock in sockets[endpoints_count:]
]
return sockets
def allocate_network_ports(host_ip, manifest):
"""Allocate ports for named and unnamed endpoints.
:returns:
``list`` of bound sockets
"""
tcp_sockets = _allocate_network_ports_proto(host_ip,
manifest,
'tcp',
socket.SOCK_STREAM)
udp_sockets = _allocate_network_ports_proto(host_ip,
manifest,
'udp',
socket.SOCK_DGRAM)
return tcp_sockets + udp_sockets
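# Usage sketch (illustrative only, the host IP is a made-up value): the
# returned sockets must stay referenced until the container has inherited
# the ports, since they are closed as soon as they are garbage collected.
#
#     sockets = allocate_network_ports('192.168.0.1', manifest)
#     ...  # start the container, then release the placeholder sockets
#     for sock in sockets:
#         sock.close()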
def _cleanup_archive_dir(tm_env):
"""Delete old files from archive directory if space exceeds the threshold.
"""
archives = glob.glob(os.path.join(tm_env.archives_dir, '*'))
infos = []
dir_size = 0
for archive in archives:
try:
archive_stat = os.stat(archive)
except OSError as err:
if err.errno == errno.ENOENT:
continue
raise
dir_size += archive_stat.st_size
infos.append((archive_stat.st_mtime, archive_stat.st_size, archive))
if dir_size <= _ARCHIVE_LIMIT:
_LOGGER.info('Archive directory below threshold: %s', dir_size)
return
_LOGGER.info('Archive directory above threshold: %s gt %s',
dir_size, _ARCHIVE_LIMIT)
infos.sort()
while dir_size > _ARCHIVE_LIMIT:
ctime, size, archive = infos.pop(0)
dir_size -= size
_LOGGER.info('Unlink old archive %s: ctime: %s, size: %s',
archive, ctime, size)
fs.rm_safe(archive)
def archive_logs(tm_env, name, container_dir):
"""Archive latest sys and services logs."""
_cleanup_archive_dir(tm_env)
sys_archive_name = os.path.join(tm_env.archives_dir, name + '.sys.tar.gz')
app_archive_name = os.path.join(tm_env.archives_dir, name + '.app.tar.gz')
def _add(archive, filename):
"""Safely add file to archive."""
try:
archive.add(filename, filename[len(container_dir) + 1:])
except OSError as err:
if err.errno == errno.ENOENT:
_LOGGER.warning('File not found: %s', filename)
else:
raise
with tarfile.open(sys_archive_name, 'w:gz') as f:
logs = glob.glob(
os.path.join(container_dir, 'sys', '*', 'data', 'log', 'current'))
for log in logs:
_add(f, log)
metrics = glob.glob(os.path.join(container_dir, '*.rrd'))
for metric in metrics:
_add(f, metric)
yml_cfgs = glob.glob(os.path.join(container_dir, '*.yml'))
json_cfgs = glob.glob(os.path.join(container_dir, '*.json'))
for cfg in yml_cfgs + json_cfgs:
_add(f, cfg)
_add(f, os.path.join(container_dir, 'log', 'current'))
with tarfile.open(app_archive_name, 'w:gz') as f:
log_dirs = os.path.join(container_dir, 'services', '*', 'data', 'log')
current = glob.glob(os.path.join(log_dirs, 'current'))
rotated = glob.glob(os.path.join(log_dirs, '@*.[su]'))
# include only the most recently rotated log file and the current one
to_archive = sum([
sorted(files)[-1:]
for _, files in itertools.groupby(rotated, os.path.dirname)
], current)
for file_ in to_archive:
_add(f, file_)
|
nuuuboo/odoo
|
refs/heads/8.0
|
addons/account/wizard/account_state_open.py
|
341
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_state_open(osv.osv_memory):
_name = 'account.state.open'
_description = 'Account State Open'
def change_inv_state(self, cr, uid, ids, context=None):
proxy = self.pool.get('account.invoice')
if context is None:
context = {}
active_ids = context.get('active_ids')
if isinstance(active_ids, list):
invoice = proxy.browse(cr, uid, active_ids[0], context=context)
if invoice.reconciled:
raise osv.except_osv(_('Warning!'), _('Invoice is already reconciled.'))
invoice.signal_workflow('open_test')
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Nukesor/Pueuew
|
refs/heads/master
|
pueue/client/factories.py
|
1
|
import pickle
from pueue.client.socket import connect_socket, receive_data, process_response
def command_factory(command):
"""A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and returns the unpickled object which is returned by the daemon.
Args:
command (string): The type of payload this should be. This determines
how the instruction will be interpreted by the daemon.
Returns:
function: The created function.
"""
def communicate(body={}, root_dir=None):
"""Communicate with the daemon.
This function sends a payload to the daemon and returns the unpickled
object sent by the daemon.
Args:
body (dict): Any other arguments that should be put into the payload.
root_dir (str): The root directory in which we expect the daemon.
We need this to connect to the daemon's socket.
Returns:
object: The unpickled response payload returned by the daemon.
"""
client = connect_socket(root_dir)
body['mode'] = command
# Delete the func entry we use to call the correct function with argparse,
# as functions can't be pickled and this shouldn't be sent to the daemon.
if 'func' in body:
del body['func']
data_string = pickle.dumps(body, -1)
client.send(data_string)
# Receive message, unpickle and return it
response = receive_data(client)
return response
return communicate
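# Illustrative usage (not part of the library): 'status' is an assumed daemon
# instruction name used only for demonstration, and the root directory below
# is hypothetical.
#
#     get_status = command_factory('status')
#     response = get_status({}, root_dir='/tmp/pueue')
#
# `response` is whatever unpickled object the daemon sent back.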
def print_command_factory(command):
"""A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and prints the response of the daemon. If the daemon sends a
`response['status'] == 'error'`, the pueue client will exit with `1`.
Args:
command (string): The type of payload this should be. This determines
how the instruction will be interpreted by the daemon.
Returns:
function: The created function.
"""
def communicate(body={}, root_dir=None):
client = connect_socket(root_dir)
body['mode'] = command
# Delete the func entry we use to call the correct function with argparse,
# as functions can't be pickled and this shouldn't be sent to the daemon.
if 'func' in body:
del body['func']
data_string = pickle.dumps(body, -1)
client.send(data_string)
# Receive message and print it. Exit with 1 if an error has been sent.
response = receive_data(client)
process_response(response)
return communicate
|
40223136/2015cdag1man
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/xml/dom/xmlbuilder.py
|
873
|
"""Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
import xml.dom
from xml.dom.NodeFilter import NodeFilter
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
class Options:
"""Features object that has variables set for each DOMBuilder feature.
The DOMBuilder class uses an instance of this class to pass settings to
the ExpatBuilder class.
"""
# Note that the DOMBuilder class in LoadSave constrains which of these
# values can be set using the DOM Level 3 LoadSave feature.
namespaces = 1
namespace_declarations = True
validation = False
external_parameter_entities = True
external_general_entities = True
external_dtd_subset = True
validate_if_schema = False
validate = False
datatype_normalization = False
create_entity_ref_nodes = True
entities = True
whitespace_in_element_content = True
cdata_sections = True
comments = True
charset_overrides_xml_encoding = True
infoset = False
supported_mediatypes_only = False
errorHandler = None
filter = None
class DOMBuilder:
entityResolver = None
errorHandler = None
filter = None
ACTION_REPLACE = 1
ACTION_APPEND_AS_CHILDREN = 2
ACTION_INSERT_AFTER = 3
ACTION_INSERT_BEFORE = 4
_legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)
def __init__(self):
self._options = Options()
def _get_entityResolver(self):
return self.entityResolver
def _set_entityResolver(self, entityResolver):
self.entityResolver = entityResolver
def _get_errorHandler(self):
return self.errorHandler
def _set_errorHandler(self, errorHandler):
self.errorHandler = errorHandler
def _get_filter(self):
return self.filter
def _set_filter(self, filter):
self.filter = filter
def setFeature(self, name, state):
if self.supportsFeature(name):
state = state and 1 or 0
try:
settings = self._settings[(_name_xform(name), state)]
except KeyError:
raise xml.dom.NotSupportedErr(
"unsupported feature: %r" % (name,))
else:
for name, value in settings:
setattr(self._options, name, value)
else:
raise xml.dom.NotFoundErr("unknown feature: " + repr(name))
def supportsFeature(self, name):
return hasattr(self._options, _name_xform(name))
def canSetFeature(self, name, state):
key = (_name_xform(name), state and 1 or 0)
return key in self._settings
# This dictionary maps from (feature,value) to a list of
# (option,value) pairs that should be set on the Options object.
# If a (feature,value) setting is not in this dictionary, it is
# not supported by the DOMBuilder.
#
_settings = {
("namespace_declarations", 0): [
("namespace_declarations", 0)],
("namespace_declarations", 1): [
("namespace_declarations", 1)],
("validation", 0): [
("validation", 0)],
("external_general_entities", 0): [
("external_general_entities", 0)],
("external_general_entities", 1): [
("external_general_entities", 1)],
("external_parameter_entities", 0): [
("external_parameter_entities", 0)],
("external_parameter_entities", 1): [
("external_parameter_entities", 1)],
("validate_if_schema", 0): [
("validate_if_schema", 0)],
("create_entity_ref_nodes", 0): [
("create_entity_ref_nodes", 0)],
("create_entity_ref_nodes", 1): [
("create_entity_ref_nodes", 1)],
("entities", 0): [
("create_entity_ref_nodes", 0),
("entities", 0)],
("entities", 1): [
("entities", 1)],
("whitespace_in_element_content", 0): [
("whitespace_in_element_content", 0)],
("whitespace_in_element_content", 1): [
("whitespace_in_element_content", 1)],
("cdata_sections", 0): [
("cdata_sections", 0)],
("cdata_sections", 1): [
("cdata_sections", 1)],
("comments", 0): [
("comments", 0)],
("comments", 1): [
("comments", 1)],
("charset_overrides_xml_encoding", 0): [
("charset_overrides_xml_encoding", 0)],
("charset_overrides_xml_encoding", 1): [
("charset_overrides_xml_encoding", 1)],
("infoset", 0): [],
("infoset", 1): [
("namespace_declarations", 0),
("validate_if_schema", 0),
("create_entity_ref_nodes", 0),
("entities", 0),
("cdata_sections", 0),
("datatype_normalization", 1),
("whitespace_in_element_content", 1),
("comments", 1),
("charset_overrides_xml_encoding", 1)],
("supported_mediatypes_only", 0): [
("supported_mediatypes_only", 0)],
("namespaces", 0): [
("namespaces", 0)],
("namespaces", 1): [
("namespaces", 1)],
}
def getFeature(self, name):
xname = _name_xform(name)
try:
return getattr(self._options, xname)
except AttributeError:
if name == "infoset":
options = self._options
return (options.datatype_normalization
and options.whitespace_in_element_content
and options.comments
and options.charset_overrides_xml_encoding
and not (options.namespace_declarations
or options.validate_if_schema
or options.create_entity_ref_nodes
or options.entities
or options.cdata_sections))
raise xml.dom.NotFoundErr("feature %s not known" % repr(name))
def parseURI(self, uri):
if self.entityResolver:
input = self.entityResolver.resolveEntity(None, uri)
else:
input = DOMEntityResolver().resolveEntity(None, uri)
return self.parse(input)
def parse(self, input):
options = copy.copy(self._options)
options.filter = self.filter
options.errorHandler = self.errorHandler
fp = input.byteStream
if fp is None and options.systemId:
import urllib.request
fp = urllib.request.urlopen(input.systemId)
return self._parse_bytestream(fp, options)
def parseWithContext(self, input, cnode, action):
if action not in self._legal_actions:
raise ValueError("not a legal action")
raise NotImplementedError("Haven't written this yet...")
def _parse_bytestream(self, stream, options):
import xml.dom.expatbuilder
builder = xml.dom.expatbuilder.makeBuilder(options)
return builder.parseFile(stream)
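# Minimal usage sketch (illustrative only): features are translated through
# the _settings table above onto Options attributes, and the URI below is a
# placeholder.
#
#     builder = DOMBuilder()
#     if builder.canSetFeature("entities", 0):
#         builder.setFeature("entities", 0)
#     doc = builder.parseURI("http://example.com/doc.xml")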
def _name_xform(name):
return name.lower().replace('-', '_')
class DOMEntityResolver(object):
__slots__ = '_opener',
def resolveEntity(self, publicId, systemId):
assert systemId is not None
source = DOMInputSource()
source.publicId = publicId
source.systemId = systemId
source.byteStream = self._get_opener().open(systemId)
# determine the encoding if the transport provided it
source.encoding = self._guess_media_encoding(source)
# determine the base URI if we can
import posixpath, urllib.parse
parts = urllib.parse.urlparse(systemId)
scheme, netloc, path, params, query, fragment = parts
# XXX should we check the scheme here as well?
if path and not path.endswith("/"):
path = posixpath.dirname(path) + "/"
parts = scheme, netloc, path, params, query, fragment
source.baseURI = urllib.parse.urlunparse(parts)
return source
def _get_opener(self):
try:
return self._opener
except AttributeError:
self._opener = self._create_opener()
return self._opener
def _create_opener(self):
import urllib.request
return urllib.request.build_opener()
def _guess_media_encoding(self, source):
info = source.byteStream.info()
if "Content-Type" in info:
for param in info.getplist():
if param.startswith("charset="):
return param.split("=", 1)[1].lower()
class DOMInputSource(object):
__slots__ = ('byteStream', 'characterStream', 'stringData',
'encoding', 'publicId', 'systemId', 'baseURI')
def __init__(self):
self.byteStream = None
self.characterStream = None
self.stringData = None
self.encoding = None
self.publicId = None
self.systemId = None
self.baseURI = None
def _get_byteStream(self):
return self.byteStream
def _set_byteStream(self, byteStream):
self.byteStream = byteStream
def _get_characterStream(self):
return self.characterStream
def _set_characterStream(self, characterStream):
self.characterStream = characterStream
def _get_stringData(self):
return self.stringData
def _set_stringData(self, data):
self.stringData = data
def _get_encoding(self):
return self.encoding
def _set_encoding(self, encoding):
self.encoding = encoding
def _get_publicId(self):
return self.publicId
def _set_publicId(self, publicId):
self.publicId = publicId
def _get_systemId(self):
return self.systemId
def _set_systemId(self, systemId):
self.systemId = systemId
def _get_baseURI(self):
return self.baseURI
def _set_baseURI(self, uri):
self.baseURI = uri
class DOMBuilderFilter:
"""Element filter which can be used to tailor construction of
a DOM instance.
"""
# There's really no need for this class; concrete implementations
# should just implement the endElement() and startElement()
# methods as appropriate. Using this makes it easy to only
# implement one of them.
FILTER_ACCEPT = 1
FILTER_REJECT = 2
FILTER_SKIP = 3
FILTER_INTERRUPT = 4
whatToShow = NodeFilter.SHOW_ALL
def _get_whatToShow(self):
return self.whatToShow
def acceptNode(self, element):
return self.FILTER_ACCEPT
def startContainer(self, element):
return self.FILTER_ACCEPT
del NodeFilter
class DocumentLS:
"""Mixin to create documents that conform to the load/save spec."""
async = False
def _get_async(self):
return False
def _set_async(self, async):
if async:
raise xml.dom.NotSupportedErr(
"asynchronous document loading is not supported")
def abort(self):
# What does it mean to "clear" a document? Does the
# documentElement disappear?
raise NotImplementedError(
"haven't figured out what this means yet")
def load(self, uri):
raise NotImplementedError("haven't written this yet")
def loadXML(self, source):
raise NotImplementedError("haven't written this yet")
def saveXML(self, snode):
if snode is None:
snode = self
elif snode.ownerDocument is not self:
raise xml.dom.WrongDocumentErr()
return snode.toxml()
class DOMImplementationLS:
MODE_SYNCHRONOUS = 1
MODE_ASYNCHRONOUS = 2
def createDOMBuilder(self, mode, schemaType):
if schemaType is not None:
raise xml.dom.NotSupportedErr(
"schemaType not yet supported")
if mode == self.MODE_SYNCHRONOUS:
return DOMBuilder()
if mode == self.MODE_ASYNCHRONOUS:
raise xml.dom.NotSupportedErr(
"asynchronous builders are not supported")
raise ValueError("unknown value for mode")
def createDOMWriter(self):
raise NotImplementedError(
"the writer interface hasn't been written yet!")
def createDOMInputSource(self):
return DOMInputSource()
|
anielsen001/scipy
|
refs/heads/master
|
scipy/sparse/dia.py
|
34
|
"""Sparse DIAgonal format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['dia_matrix', 'isspmatrix_dia']
import numpy as np
from .base import isspmatrix, _formats, spmatrix
from .data import _data_matrix
from .sputils import (isshape, upcast_char, getdtype, get_index_dtype,
get_sum_dtype, validateaxis)
from ._sparsetools import dia_matvec
class dia_matrix(_data_matrix):
"""Sparse matrix with DIAgonal storage
This can be instantiated in several ways:
dia_matrix(D)
with a dense matrix
dia_matrix(S)
with another sparse matrix S (equivalent to S.todia())
dia_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N),
dtype is optional, defaulting to dtype='d'.
dia_matrix((data, offsets), shape=(M, N))
where the ``data[k,:]`` stores the diagonal entries for
diagonal ``offsets[k]`` (See example below)
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
DIA format data array of the matrix
offsets
DIA format offset array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import dia_matrix
>>> dia_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
>>> offsets = np.array([0, -1, 2])
>>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
format = 'dia'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix_dia(arg1):
if copy:
arg1 = arg1.copy()
self.data = arg1.data
self.offsets = arg1.offsets
self.shape = arg1.shape
elif isspmatrix(arg1):
if isspmatrix_dia(arg1) and copy:
A = arg1.copy()
else:
A = arg1.todia()
self.data = A.data
self.offsets = A.offsets
self.shape = A.shape
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self.shape = arg1 # spmatrix checks for errors here
self.data = np.zeros((0,0), getdtype(dtype, default=float))
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.offsets = np.zeros((0), dtype=idx_dtype)
else:
try:
# Try interpreting it as (data, offsets)
data, offsets = arg1
except:
raise ValueError('unrecognized form for dia_matrix constructor')
else:
if shape is None:
raise ValueError('expected a shape argument')
self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy))
self.offsets = np.atleast_1d(np.array(arg1[1],
dtype=get_index_dtype(maxval=max(shape)),
copy=copy))
self.shape = shape
else:
#must be dense, convert to COO first, then to DIA
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
A = coo_matrix(arg1, dtype=dtype, shape=shape).todia()
self.data = A.data
self.offsets = A.offsets
self.shape = A.shape
if dtype is not None:
self.data = self.data.astype(dtype)
#check format
if self.offsets.ndim != 1:
raise ValueError('offsets array must have rank 1')
if self.data.ndim != 2:
raise ValueError('data array must have rank 2')
if self.data.shape[0] != len(self.offsets):
raise ValueError('number of diagonals (%d) '
'does not match the number of offsets (%d)'
% (self.data.shape[0], len(self.offsets)))
if len(np.unique(self.offsets)) != len(self.offsets):
raise ValueError('offset array contains duplicate values')
def __repr__(self):
format = _formats[self.getformat()][1]
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements (%d diagonals) in %s format>" % \
(self.shape + (self.dtype.type, self.nnz, self.data.shape[0],
format))
def _data_mask(self):
"""Returns a mask of the same shape as self.data, where
mask[i,j] is True when data[i,j] corresponds to a stored element."""
num_rows, num_cols = self.shape
offset_inds = np.arange(self.data.shape[1])
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
return mask
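# Worked example (matching the data/offsets layout in the class docstring,
# i.e. three stored diagonals with offsets [0, -1, 2] and shape (4, 4)):
# row = offset_inds - offsets[:, None] evaluates to
#   [[ 0,  1,  2,  3],   -> all four main-diagonal entries are inside the matrix
#    [ 1,  2,  3,  4],   -> the last entry falls below the matrix (row 4)
#    [-2, -1,  0,  1]]   -> the first two entries fall above the matrix
# so the mask keeps 4, 3 and 2 entries per diagonal, matching the nonzeros
# of the 4x4 example matrix.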
def count_nonzero(self):
mask = self._data_mask()
return np.count_nonzero(self.data[mask])
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for DIA format")
M,N = self.shape
nnz = 0
for k in self.offsets:
if k > 0:
nnz += min(M,N-k)
else:
nnz += min(M+k,N)
return int(nnz)
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def sum(self, axis=None, dtype=None, out=None):
validateaxis(axis)
if axis is not None and axis < 0:
axis += 2
res_dtype = get_sum_dtype(self.dtype)
num_rows, num_cols = self.shape
ret = None
if axis == 0:
mask = self._data_mask()
x = (self.data * mask).sum(axis=0)
if x.shape[0] == num_cols:
res = x
else:
res = np.zeros(num_cols, dtype=x.dtype)
res[:x.shape[0]] = x
ret = np.matrix(res, dtype=res_dtype)
else:
row_sums = np.zeros(num_rows, dtype=res_dtype)
one = np.ones(num_cols, dtype=res_dtype)
dia_matvec(num_rows, num_cols, len(self.offsets),
self.data.shape[1], self.offsets, self.data, one, row_sums)
row_sums = np.matrix(row_sums)
if axis is None:
return row_sums.sum(dtype=dtype, out=out)
if axis is not None:
row_sums = row_sums.T
ret = np.matrix(row_sums.sum(axis=axis))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
def _mul_vector(self, other):
x = other
y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
x.dtype.char))
L = self.data.shape[1]
M,N = self.shape
dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel())
return y
def _mul_multimatrix(self, other):
return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T])
def _setdiag(self, values, k=0):
M, N = self.shape
if values.ndim == 0:
# broadcast
values_n = np.inf
else:
values_n = len(values)
if k < 0:
n = min(M + k, N, values_n)
min_index = 0
max_index = n
else:
n = min(M, N - k, values_n)
min_index = k
max_index = k + n
if values.ndim != 0:
# allow also longer sequences
values = values[:n]
if k in self.offsets:
self.data[self.offsets == k, min_index:max_index] = values
else:
self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
m = max(max_index, self.data.shape[1])
data = np.zeros((self.data.shape[0]+1, m), dtype=self.data.dtype)
data[:-1,:self.data.shape[1]] = self.data
data[-1, min_index:max_index] = values
self.data = data
def todia(self, copy=False):
if copy:
return self.copy()
else:
return self
todia.__doc__ = spmatrix.todia.__doc__
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
num_rows, num_cols = self.shape
max_dim = max(self.shape)
# flip diagonal offsets
offsets = -self.offsets
# re-align the data matrix
r = np.arange(len(offsets), dtype=np.intc)[:, None]
c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None]
pad_amount = max(0, max_dim-self.data.shape[1])
data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
dtype=self.data.dtype)))
data = data[r, c]
return dia_matrix((data, offsets), shape=(
num_cols, num_rows), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def diagonal(self):
idx, = np.where(self.offsets == 0)
n = min(self.shape)
if idx.size == 0:
return np.zeros(n, dtype=self.data.dtype)
return self.data[idx[0],:n]
diagonal.__doc__ = spmatrix.diagonal.__doc__
def tocsc(self, copy=False):
from .csc import csc_matrix
if self.nnz == 0:
return csc_matrix(self.shape, dtype=self.dtype)
num_rows, num_cols = self.shape
num_offsets, offset_len = self.data.shape
offset_inds = np.arange(offset_len)
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
mask &= (self.data != 0)
idx_dtype = get_index_dtype(maxval=max(self.shape))
indptr = np.zeros(num_cols + 1, dtype=idx_dtype)
indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0))
indptr[offset_len+1:] = indptr[offset_len]
indices = row.T[mask.T].astype(idx_dtype, copy=False)
data = self.data.T[mask.T]
return csc_matrix((data, indices, indptr), shape=self.shape,
dtype=self.dtype)
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocoo(self, copy=False):
num_rows, num_cols = self.shape
num_offsets, offset_len = self.data.shape
offset_inds = np.arange(offset_len)
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
mask &= (self.data != 0)
row = row[mask]
col = np.tile(offset_inds, num_offsets)[mask.ravel()]
data = self.data[mask]
from .coo import coo_matrix
A = coo_matrix((data,(row,col)), shape=self.shape, dtype=self.dtype)
A.has_canonical_format = True
return A
tocoo.__doc__ = spmatrix.tocoo.__doc__
# needed by _data_matrix
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays are copied.
"""
if copy:
return dia_matrix((data, self.offsets.copy()), shape=self.shape)
else:
return dia_matrix((data,self.offsets), shape=self.shape)
def isspmatrix_dia(x):
return isinstance(x, dia_matrix)
|
IRI-Research/django
|
refs/heads/master
|
tests/utils_tests/test_dateparse.py
|
25
|
from __future__ import unicode_literals
from datetime import date, time, datetime
import unittest
from django.utils.dateparse import parse_date, parse_time, parse_datetime
from django.utils.timezone import get_fixed_timezone
class DateParseTests(unittest.TestCase):
def test_parse_date(self):
# Valid inputs
self.assertEqual(parse_date('2012-04-23'), date(2012, 4, 23))
self.assertEqual(parse_date('2012-4-9'), date(2012, 4, 9))
# Invalid inputs
self.assertEqual(parse_date('20120423'), None)
self.assertRaises(ValueError, parse_date, '2012-04-56')
def test_parse_time(self):
# Valid inputs
self.assertEqual(parse_time('09:15:00'), time(9, 15))
self.assertEqual(parse_time('10:10'), time(10, 10))
self.assertEqual(parse_time('10:20:30.400'), time(10, 20, 30, 400000))
self.assertEqual(parse_time('4:8:16'), time(4, 8, 16))
# Invalid inputs
self.assertEqual(parse_time('091500'), None)
self.assertRaises(ValueError, parse_time, '09:15:90')
def test_parse_datetime(self):
# Valid inputs
self.assertEqual(parse_datetime('2012-04-23T09:15:00'),
datetime(2012, 4, 23, 9, 15))
self.assertEqual(parse_datetime('2012-4-9 4:8:16'),
datetime(2012, 4, 9, 4, 8, 16))
self.assertEqual(parse_datetime('2012-04-23T09:15:00Z'),
datetime(2012, 4, 23, 9, 15, 0, 0, get_fixed_timezone(0)))
self.assertEqual(parse_datetime('2012-4-9 4:8:16-0320'),
datetime(2012, 4, 9, 4, 8, 16, 0, get_fixed_timezone(-200)))
self.assertEqual(parse_datetime('2012-04-23T10:20:30.400+02:30'),
datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(150)))
# Invalid inputs
self.assertEqual(parse_datetime('20120423091500'), None)
self.assertRaises(ValueError, parse_datetime, '2012-04-56T09:15:90')
|
saumishr/django
|
refs/heads/master
|
django/contrib/gis/tests/layermap/models.py
|
239
|
from django.contrib.gis.db import models
class State(models.Model):
name = models.CharField(max_length=20)
objects = models.GeoManager()
class County(models.Model):
name = models.CharField(max_length=25)
state = models.ForeignKey(State)
mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
objects = models.GeoManager()
class CountyFeat(models.Model):
name = models.CharField(max_length=25)
poly = models.PolygonField(srid=4269)
objects = models.GeoManager()
class City(models.Model):
name = models.CharField(max_length=25)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
objects = models.GeoManager()
class Interstate(models.Model):
name = models.CharField(max_length=20)
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
objects = models.GeoManager()
# Same as `City` above, but for testing model inheritance.
class CityBase(models.Model):
name = models.CharField(max_length=25)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
point = models.PointField()
objects = models.GeoManager()
class ICity1(CityBase):
dt = models.DateField()
class ICity2(ICity1):
dt_time = models.DateTimeField(auto_now=True)
class Invalid(models.Model):
point = models.PointField()
# Mapping dictionaries for the models above.
co_mapping = {'name' : 'Name',
'state' : {'name' : 'State'}, # ForeignKeys use another mapping dictionary for the _related_ Model (State in this case).
'mpoly' : 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {'name' : 'Name',
'poly' : 'POLYGON',
}
city_mapping = {'name' : 'Name',
'population' : 'Population',
'density' : 'Density',
'dt' : 'Created',
'point' : 'POINT',
}
inter_mapping = {'name' : 'Name',
'length' : 'Length',
'path' : 'LINESTRING',
}
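# Illustrative sketch only: these mapping dictionaries are meant to be fed to
# django.contrib.gis.utils.LayerMapping together with a shapefile; the
# shapefile path below is hypothetical.
#
#     from django.contrib.gis.utils import LayerMapping
#     lm = LayerMapping(County, '/path/to/counties.shp', co_mapping,
#                       transform=True)
#     lm.save(strict=True, verbose=True)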
|
JioCloud/oslo.log
|
refs/heads/master
|
oslo_log/tests/unit/test_log.py
|
3
|
# Copyright (c) 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
import tempfile
import mock
from oslo_config import cfg
from oslo_config import fixture as fixture_config # noqa
from oslo_context import context
from oslo_context import fixture as fixture_context
from oslo_i18n import fixture as fixture_trans
from oslo_serialization import jsonutils
from oslotest import base as test_base
import six
from oslo_log import _options
from oslo_log import formatters
from oslo_log import handlers
from oslo_log import log
def _fake_context():
return context.RequestContext(1, 1, overwrite=True)
class CommonLoggerTestsMixIn(object):
"""These tests are shared between LoggerTestCase and
LazyLoggerTestCase.
"""
def setUp(self):
super(CommonLoggerTestsMixIn, self).setUp()
# common context has different fields from the defaults in log.py
self.config_fixture = self.useFixture(
fixture_config.Config(cfg.ConfigOpts()))
self.config = self.config_fixture.config
self.CONF = self.config_fixture.conf
log.register_options(self.config_fixture.conf)
self.config(logging_context_format_string='%(asctime)s %(levelname)s '
'%(name)s [%(request_id)s '
'%(user)s %(tenant)s] '
'%(message)s')
self.log = None
log._setup_logging_from_conf(self.config_fixture.conf, 'test', 'test')
def test_handlers_have_context_formatter(self):
formatters_list = []
for h in self.log.logger.handlers:
f = h.formatter
if isinstance(f, formatters.ContextFormatter):
formatters_list.append(f)
self.assertTrue(formatters_list)
self.assertEqual(len(formatters_list), len(self.log.logger.handlers))
def test_handles_context_kwarg(self):
self.log.info("foo", context=_fake_context())
self.assertTrue(True) # didn't raise exception
def test_will_be_verbose_if_verbose_flag_set(self):
self.config(verbose=True)
log.setup(self.CONF, "test_is_verbose")
logger = logging.getLogger("test_is_verbose")
self.assertEqual(logging.INFO, logger.getEffectiveLevel())
def test_will_be_debug_if_debug_flag_set(self):
self.config(debug=True)
log.setup(self.CONF, "test_is_debug")
logger = logging.getLogger("test_is_debug")
self.assertEqual(logging.DEBUG, logger.getEffectiveLevel())
def test_will_not_be_verbose_if_verbose_flag_not_set(self):
self.config(verbose=False)
log.setup(self.CONF, "test_is_not_verbose")
logger = logging.getLogger("test_is_not_verbose")
self.assertEqual(logging.WARNING, logger.getEffectiveLevel())
def test_no_logging_via_module(self):
for func in ('critical', 'error', 'exception', 'warning', 'warn',
'info', 'debug', 'log'):
self.assertRaises(AttributeError, getattr, log, func)
class LoggerTestCase(CommonLoggerTestsMixIn, test_base.BaseTestCase):
def setUp(self):
super(LoggerTestCase, self).setUp()
self.log = log.getLogger(None)
class BaseTestCase(test_base.BaseTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.context_fixture = self.useFixture(
fixture_context.ClearRequestContext())
self.config_fixture = self.useFixture(
fixture_config.Config(cfg.ConfigOpts()))
self.config = self.config_fixture.config
self.CONF = self.config_fixture.conf
log.register_options(self.CONF)
class LogTestBase(BaseTestCase):
"""Base test class that provides some convenience functions."""
def _add_handler_with_cleanup(self, log_instance, handler=None,
formatter=None):
"""Add a log handler to a log instance.
This function should be used to add handlers to loggers in test cases
instead of directly adding them to ensure that the handler is
correctly removed at the end of the test. Otherwise the handler may
be left on the logger and interfere with subsequent tests.
:param log_instance: The log instance to which the handler will be
added.
:param handler: The handler class to be added. Must be the class
itself, not an instance.
:param formatter: The formatter class to set on the handler. Must be
the class itself, not an instance.
"""
self.stream = six.StringIO()
if handler is None:
handler = logging.StreamHandler
self.handler = handler(self.stream)
if formatter is None:
formatter = formatters.ContextFormatter
self.handler.setFormatter(formatter())
log_instance.logger.addHandler(self.handler)
self.addCleanup(log_instance.logger.removeHandler, self.handler)
def _set_log_level_with_cleanup(self, log_instance, level):
"""Set the log level of a logger for the duration of a test.
Use this function to set the log level of a logger and add the
necessary cleanup to reset it back to default at the end of the test.
:param log_instance: The logger whose level will be changed.
:param level: The new log level to use.
"""
self.level = log_instance.logger.getEffectiveLevel()
log_instance.logger.setLevel(level)
self.addCleanup(log_instance.logger.setLevel, self.level)
class LogHandlerTestCase(BaseTestCase):
def test_log_path_logdir(self):
self.config(log_dir='/some/path', log_file=None)
self.assertEqual(log._get_log_file_path(self.config_fixture.conf,
binary='foo-bar'),
'/some/path/foo-bar.log')
def test_log_path_logfile(self):
self.config(log_file='/some/path/foo-bar.log')
self.assertEqual(log._get_log_file_path(self.config_fixture.conf,
binary='foo-bar'),
'/some/path/foo-bar.log')
def test_log_path_none(self):
self.config(log_dir=None, log_file=None)
self.assertIsNone(log._get_log_file_path(self.config_fixture.conf,
binary='foo-bar'))
def test_log_path_logfile_overrides_logdir(self):
self.config(log_dir='/some/other/path',
log_file='/some/path/foo-bar.log')
self.assertEqual(log._get_log_file_path(self.config_fixture.conf,
binary='foo-bar'),
'/some/path/foo-bar.log')
class SysLogHandlersTestCase(BaseTestCase):
"""Test for standard and RFC compliant Syslog handlers."""
def setUp(self):
super(SysLogHandlersTestCase, self).setUp()
self.facility = logging.handlers.SysLogHandler.LOG_USER
self.rfclogger = handlers.RFCSysLogHandler(facility=self.facility)
self.rfclogger.binary_name = 'Foo_application'
self.logger = logging.handlers.SysLogHandler(facility=self.facility)
self.logger.binary_name = 'Foo_application'
def test_rfc_format(self):
"""Ensure syslog msg contains APP-NAME for RFC wrapped handler."""
logrecord = logging.LogRecord('name', 'WARN', '/tmp', 1,
'Message', None, None)
expected = logging.LogRecord('name', 'WARN', '/tmp', 1,
'Foo_application Message', None, None)
self.assertEqual(self.rfclogger.format(logrecord),
expected.getMessage())
def test_standard_format(self):
"""Ensure syslog msg isn't modified for standard handler."""
logrecord = logging.LogRecord('name', 'WARN', '/tmp', 1,
'Message', None, None)
expected = logrecord
self.assertEqual(self.logger.format(logrecord),
expected.getMessage())
class LogLevelTestCase(BaseTestCase):
def setUp(self):
super(LogLevelTestCase, self).setUp()
levels = self.CONF.default_log_levels
levels.append("nova-test=INFO")
levels.append("nova-not-debug=WARN")
self.config(default_log_levels=levels,
verbose=True)
log.setup(self.CONF, 'testing')
self.log = log.getLogger('nova-test')
self.log_no_debug = log.getLogger('nova-not-debug')
def test_is_enabled_for(self):
self.assertTrue(self.log.isEnabledFor(logging.INFO))
self.assertFalse(self.log_no_debug.isEnabledFor(logging.DEBUG))
def test_has_level_from_flags(self):
self.assertEqual(logging.INFO, self.log.logger.getEffectiveLevel())
def test_child_log_has_level_of_parent_flag(self):
l = log.getLogger('nova-test.foo')
self.assertEqual(logging.INFO, l.logger.getEffectiveLevel())
class JSONFormatterTestCase(LogTestBase):
def setUp(self):
super(JSONFormatterTestCase, self).setUp()
self.log = log.getLogger('test-json')
self._add_handler_with_cleanup(self.log,
formatter=formatters.JSONFormatter)
self._set_log_level_with_cleanup(self.log, logging.DEBUG)
def test_json(self):
test_msg = 'This is a %(test)s line'
test_data = {'test': 'log'}
local_context = _fake_context()
self.log.debug(test_msg, test_data, key='value', context=local_context)
data = jsonutils.loads(self.stream.getvalue())
self.assertTrue(data)
self.assertTrue('extra' in data)
extra = data['extra']
self.assertEqual('value', extra['key'])
self.assertEqual(local_context.auth_token, extra['auth_token'])
self.assertEqual(local_context.user, extra['user'])
self.assertEqual('test-json', data['name'])
self.assertEqual(test_msg % test_data, data['message'])
self.assertEqual(test_msg, data['msg'])
self.assertEqual(test_data, data['args'])
self.assertEqual('test_log.py', data['filename'])
self.assertEqual('test_json', data['funcname'])
self.assertEqual('DEBUG', data['levelname'])
self.assertEqual(logging.DEBUG, data['levelno'])
self.assertFalse(data['traceback'])
def test_json_exception(self):
test_msg = 'This is %s'
test_data = 'exceptional'
try:
raise Exception('This is exceptional')
except Exception:
self.log.exception(test_msg, test_data)
data = jsonutils.loads(self.stream.getvalue())
self.assertTrue(data)
self.assertTrue('extra' in data)
self.assertEqual('test-json', data['name'])
self.assertEqual(test_msg % test_data, data['message'])
self.assertEqual(test_msg, data['msg'])
self.assertEqual([test_data], data['args'])
self.assertEqual('ERROR', data['levelname'])
self.assertEqual(logging.ERROR, data['levelno'])
self.assertTrue(data['traceback'])
class ContextFormatterTestCase(LogTestBase):
def setUp(self):
super(ContextFormatterTestCase, self).setUp()
self.config(logging_context_format_string="HAS CONTEXT "
"[%(request_id)s]: "
"%(message)s",
logging_default_format_string="NOCTXT: %(message)s",
logging_debug_format_suffix="--DBG")
self.log = log.getLogger('') # obtain root logger instead of 'unknown'
self._add_handler_with_cleanup(self.log)
self._set_log_level_with_cleanup(self.log, logging.DEBUG)
self.trans_fixture = self.useFixture(fixture_trans.Translation())
def test_uncontextualized_log(self):
self.log.info("foo")
self.assertEqual("NOCTXT: foo\n", self.stream.getvalue())
def test_contextualized_log(self):
ctxt = _fake_context()
self.log.info("bar", context=ctxt)
expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id
self.assertEqual(expected, self.stream.getvalue())
def test_context_is_taken_from_tls_variable(self):
ctxt = _fake_context()
self.log.info("bar")
expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id
self.assertEqual(expected, self.stream.getvalue())
def test_contextual_information_is_imparted_to_3rd_party_log_records(self):
ctxt = _fake_context()
sa_log = logging.getLogger('sqlalchemy.engine')
sa_log.setLevel(logging.INFO)
sa_log.info('emulate logging within sqlalchemy')
expected = ("HAS CONTEXT [%s]: emulate logging within "
"sqlalchemy\n" % ctxt.request_id)
self.assertEqual(expected, self.stream.getvalue())
def test_message_logging_3rd_party_log_records(self):
ctxt = _fake_context()
ctxt.request_id = six.text_type('99')
sa_log = logging.getLogger('sqlalchemy.engine')
sa_log.setLevel(logging.INFO)
message = self.trans_fixture.lazy('test ' + six.unichr(128))
sa_log.info(message)
expected = ("HAS CONTEXT [%s]: %s\n" % (ctxt.request_id,
six.text_type(message)))
self.assertEqual(expected, self.stream.getvalue())
def test_debugging_log(self):
self.log.debug("baz")
self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue())
def test_message_logging(self):
        # NOTE(luisg): Logging Message objects that contain unicode can
        # cause trouble when the logging machinery tries to coerce the
        # Message object with the wrong encoding. This test case checks
        # that this problem does not occur.
ctxt = _fake_context()
ctxt.request_id = six.text_type('99')
message = self.trans_fixture.lazy('test ' + six.unichr(128))
self.log.info(message, context=ctxt)
expected = "HAS CONTEXT [%s]: %s\n" % (ctxt.request_id,
six.text_type(message))
self.assertEqual(expected, self.stream.getvalue())
def test_unicode_conversion_in_adapter(self):
ctxt = _fake_context()
ctxt.request_id = six.text_type('99')
message = "Exception is (%s)"
ex = Exception(self.trans_fixture.lazy('test' + six.unichr(128)))
self.log.debug(message, ex, context=ctxt)
message = six.text_type(message) % ex
expected = "HAS CONTEXT [%s]: %s --DBG\n" % (ctxt.request_id,
message)
self.assertEqual(expected, self.stream.getvalue())
def test_unicode_conversion_in_formatter(self):
ctxt = _fake_context()
ctxt.request_id = six.text_type('99')
no_adapt_log = logging.getLogger('no_adapt')
no_adapt_log.setLevel(logging.INFO)
message = "Exception is (%s)"
ex = Exception(self.trans_fixture.lazy('test' + six.unichr(128)))
no_adapt_log.info(message, ex)
message = six.text_type(message) % ex
expected = "HAS CONTEXT [%s]: %s\n" % (ctxt.request_id,
message)
self.assertEqual(expected, self.stream.getvalue())
class ExceptionLoggingTestCase(LogTestBase):
"""Test that Exceptions are logged."""
def test_excepthook_logs_exception(self):
product_name = 'somename'
exc_log = log.getLogger(product_name)
self._add_handler_with_cleanup(exc_log)
excepthook = log._create_logging_excepthook(product_name)
try:
raise Exception('Some error happened')
except Exception:
excepthook(*sys.exc_info())
expected_string = ("CRITICAL somename [-] "
"Exception: Some error happened")
self.assertTrue(expected_string in self.stream.getvalue(),
msg="Exception is not logged")
def test_excepthook_installed(self):
log.setup(self.CONF, "test_excepthook_installed")
self.assertTrue(sys.excepthook != sys.__excepthook__)
class FancyRecordTestCase(LogTestBase):
"""Test how we handle fancy record keys that are not in the
base python logging.
"""
def setUp(self):
super(FancyRecordTestCase, self).setUp()
        # NOTE(sdague): use the two format strings to demonstrate a
        # format string with valid fancy keys and one without. Slightly
        # hacky, but given the way log objects layer up, this seemed to
        # be the most concise approach.
self.config(logging_context_format_string="%(color)s "
"[%(request_id)s]: "
"%(instance)s"
"%(resource)s"
"%(message)s",
logging_default_format_string="%(missing)s: %(message)s")
self.colorlog = log.getLogger()
self._add_handler_with_cleanup(self.colorlog, handlers.ColorHandler)
self._set_log_level_with_cleanup(self.colorlog, logging.DEBUG)
def test_unsupported_key_in_log_msg(self):
        # NOTE(sdague): exception logging bypasses the main stream
        # and goes to stderr. Suggestions on a better way to do this
        # are welcome.
error = sys.stderr
sys.stderr = six.StringIO()
self.colorlog.info("foo")
self.assertNotEqual(sys.stderr.getvalue().find("KeyError: 'missing'"),
-1)
sys.stderr = error
def _validate_keys(self, ctxt, keyed_log_string):
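        # \033[00;36m and \033[01;33m are the ANSI escapes for cyan and
        # bold yellow, the colors the ColorHandler is expected to prepend
        # to INFO and WARN records respectively.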
infocolor = '\033[00;36m'
warncolor = '\033[01;33m'
infoexpected = "%s %s info\n" % (infocolor, keyed_log_string)
warnexpected = "%s %s warn\n" % (warncolor, keyed_log_string)
self.colorlog.info("info", context=ctxt)
self.assertEqual(infoexpected, self.stream.getvalue())
self.colorlog.warn("warn", context=ctxt)
self.assertEqual(infoexpected + warnexpected, self.stream.getvalue())
def test_fancy_key_in_log_msg(self):
ctxt = _fake_context()
self._validate_keys(ctxt, '[%s]:' % ctxt.request_id)
def test_instance_key_in_log_msg(self):
ctxt = _fake_context()
ctxt.resource_uuid = '1234'
self._validate_keys(ctxt, ('[%s]: [instance: %s]' %
(ctxt.request_id, ctxt.resource_uuid)))
def test_resource_key_in_log_msg(self):
infocolor = '\033[00;36m'
ctxt = _fake_context()
fake_resource = {'name': 'resource-202260f9-1224-'
'490d-afaf-6a744c13141f'}
self.colorlog.info("info", context=ctxt, resource=fake_resource)
infoexpected = "%s [%s]: [resource-202260f9-1224-490d-"\
"afaf-6a744c13141f] info\n" % (infocolor,
ctxt.request_id)
self.assertEqual(infoexpected, self.stream.getvalue())
def test_resource_key_dict_in_log_msg(self):
infocolor = '\033[00;36m'
ctxt = _fake_context()
fake_resource = {'type': 'fake_resource',
'id': '202260f9-1224-490d-afaf-6a744c13141f'}
self.colorlog.info("info", context=ctxt, resource=fake_resource)
infoexpected = "%s [%s]: [fake_resource-202260f9-1224-490d-"\
"afaf-6a744c13141f] info\n" % (infocolor,
ctxt.request_id)
self.assertEqual(infoexpected, self.stream.getvalue())
class DomainTestCase(LogTestBase):
def setUp(self):
super(DomainTestCase, self).setUp()
self.config(logging_context_format_string="[%(request_id)s]: "
"%(user_identity)s "
"%(message)s")
self.mylog = log.getLogger()
self._add_handler_with_cleanup(self.mylog)
self._set_log_level_with_cleanup(self.mylog, logging.DEBUG)
def _validate_keys(self, ctxt, keyed_log_string):
infoexpected = "%s info\n" % (keyed_log_string)
warnexpected = "%s warn\n" % (keyed_log_string)
self.mylog.info("info", context=ctxt)
self.assertEqual(infoexpected, self.stream.getvalue())
self.mylog.warn("warn", context=ctxt)
self.assertEqual(infoexpected + warnexpected, self.stream.getvalue())
def test_domain_in_log_msg(self):
ctxt = _fake_context()
ctxt.domain = 'mydomain'
ctxt.project_domain = 'myprojectdomain'
ctxt.user_domain = 'myuserdomain'
user_identity = ctxt.to_dict()['user_identity']
self.assertTrue(ctxt.domain in user_identity)
self.assertTrue(ctxt.project_domain in user_identity)
self.assertTrue(ctxt.user_domain in user_identity)
self._validate_keys(ctxt, ('[%s]: %s' %
(ctxt.request_id, user_identity)))
class SetDefaultsTestCase(BaseTestCase):
class TestConfigOpts(cfg.ConfigOpts):
def __call__(self, args=None):
return cfg.ConfigOpts.__call__(self,
args=args,
prog='test',
version='1.0',
usage='%(prog)s FOO BAR',
default_config_files=[])
def setUp(self):
super(SetDefaultsTestCase, self).setUp()
self.conf = self.TestConfigOpts()
self.conf.register_opts(_options.log_opts)
self._orig_defaults = dict([(o.dest, o.default)
for o in _options.log_opts])
self.addCleanup(self._restore_log_defaults)
def _restore_log_defaults(self):
for opt in _options.log_opts:
opt.default = self._orig_defaults[opt.dest]
def test_default_log_level_to_none(self):
log.set_defaults(logging_context_format_string=None,
default_log_levels=None)
self.conf([])
self.assertEqual(_options.DEFAULT_LOG_LEVELS,
self.conf.default_log_levels)
def test_change_default(self):
my_default = '%(asctime)s %(levelname)s %(name)s [%(request_id)s '\
'%(user_id)s %(project)s] %(instance)s'\
'%(message)s'
log.set_defaults(logging_context_format_string=my_default)
self.conf([])
self.assertEqual(self.conf.logging_context_format_string, my_default)
def test_change_default_log_level(self):
log.set_defaults(default_log_levels=['foo=bar'])
self.conf([])
self.assertEqual(['foo=bar'], self.conf.default_log_levels)
self.assertIsNotNone(self.conf.logging_context_format_string)
class LogConfigOptsTestCase(BaseTestCase):
def setUp(self):
super(LogConfigOptsTestCase, self).setUp()
def test_print_help(self):
f = six.StringIO()
self.CONF([])
self.CONF.print_help(file=f)
self.assertTrue('debug' in f.getvalue())
self.assertTrue('verbose' in f.getvalue())
self.assertTrue('log-config' in f.getvalue())
self.assertTrue('log-format' in f.getvalue())
def test_debug_verbose(self):
self.CONF(['--debug', '--verbose'])
self.assertEqual(self.CONF.debug, True)
self.assertEqual(self.CONF.verbose, True)
def test_logging_opts(self):
self.CONF([])
self.assertIsNone(self.CONF.log_config_append)
self.assertIsNone(self.CONF.log_file)
self.assertIsNone(self.CONF.log_dir)
self.assertIsNone(self.CONF.log_format)
self.assertEqual(self.CONF.log_date_format,
_options._DEFAULT_LOG_DATE_FORMAT)
self.assertEqual(self.CONF.use_syslog, False)
self.assertEqual(self.CONF.use_syslog_rfc_format, False)
def test_log_file(self):
log_file = '/some/path/foo-bar.log'
self.CONF(['--log-file', log_file])
self.assertEqual(self.CONF.log_file, log_file)
def test_log_dir_handlers(self):
log_dir = tempfile.mkdtemp()
self.CONF(['--log-dir', log_dir])
self.CONF.set_default('use_stderr', False)
log._setup_logging_from_conf(self.CONF, 'test', 'test')
logger = log._loggers[None].logger
self.assertEqual(1, len(logger.handlers))
self.assertIsInstance(logger.handlers[0],
logging.handlers.WatchedFileHandler)
def test_logfile_deprecated(self):
logfile = '/some/other/path/foo-bar.log'
self.CONF(['--logfile', logfile])
self.assertEqual(self.CONF.log_file, logfile)
def test_log_dir(self):
log_dir = '/some/path/'
self.CONF(['--log-dir', log_dir])
self.assertEqual(self.CONF.log_dir, log_dir)
def test_logdir_deprecated(self):
logdir = '/some/other/path/'
self.CONF(['--logdir', logdir])
self.assertEqual(self.CONF.log_dir, logdir)
def test_log_format_overrides_formatter(self):
self.CONF(['--log-format', '[Any format]'])
log._setup_logging_from_conf(self.CONF, 'test', 'test')
logger = log._loggers[None].logger
for handler in logger.handlers:
formatter = handler.formatter
self.assertTrue(isinstance(formatter, logging.Formatter))
def test_default_formatter(self):
log._setup_logging_from_conf(self.CONF, 'test', 'test')
logger = log._loggers[None].logger
for handler in logger.handlers:
formatter = handler.formatter
self.assertTrue(isinstance(formatter,
formatters.ContextFormatter))
class LogConfigTestCase(BaseTestCase):
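    # Minimal fileConfig()-style document: only the root logger is
    # declared, with no handlers or formatters attached.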
minimal_config = b"""[loggers]
keys=root
[formatters]
keys=
[handlers]
keys=
[logger_root]
handlers=
"""
def setUp(self):
super(LogConfigTestCase, self).setUp()
names = self.create_tempfiles([('logging', self.minimal_config)])
self.log_config_append = names[0]
def test_log_config_append_ok(self):
self.config(log_config_append=self.log_config_append)
log.setup(self.CONF, 'test_log_config_append')
def test_log_config_append_not_exist(self):
os.remove(self.log_config_append)
self.config(log_config_append=self.log_config_append)
self.assertRaises(log.LogConfigError, log.setup,
self.CONF,
'test_log_config_append')
def test_log_config_append_invalid(self):
names = self.create_tempfiles([('logging', self.minimal_config[5:])])
self.log_config_append = names[0]
self.config(log_config_append=self.log_config_append)
self.assertRaises(log.LogConfigError, log.setup,
self.CONF,
'test_log_config_append')
def test_log_config_append_unreadable(self):
os.chmod(self.log_config_append, 0)
self.config(log_config_append=self.log_config_append)
self.assertRaises(log.LogConfigError, log.setup,
self.CONF,
'test_log_config_append')
def test_log_config_append_disable_existing_loggers(self):
self.config(log_config_append=self.log_config_append)
with mock.patch('logging.config.fileConfig') as fileConfig:
log.setup(self.CONF, 'test_log_config_append')
fileConfig.assert_called_once_with(self.log_config_append,
disable_existing_loggers=False)
class KeywordArgumentAdapterTestCase(BaseTestCase):
def setUp(self):
super(KeywordArgumentAdapterTestCase, self).setUp()
# Construct a mock that will look like a Logger configured to
# emit messages at DEBUG or higher.
self.mock_log = mock.Mock()
self.mock_log.manager.disable = logging.NOTSET
self.mock_log.isEnabledFor.return_value = True
self.mock_log.getEffectiveLevel.return_value = logging.DEBUG
def test_empty_kwargs(self):
a = log.KeywordArgumentAdapter(self.mock_log, {})
msg, kwargs = a.process('message', {})
self.assertEqual(kwargs, {'extra': {'extra_keys': []}})
def test_include_constructor_extras(self):
a = log.KeywordArgumentAdapter(self.mock_log, {'foo': 'blah'})
msg, kwargs = a.process('message', {})
self.assertEqual(kwargs,
{'extra': {'foo': 'blah', 'extra_keys': ['foo']}})
def test_pass_through_exc_info(self):
a = log.KeywordArgumentAdapter(self.mock_log, {})
msg, kwargs = a.process('message', {'exc_info': 'the info'})
self.assertEqual(
kwargs,
{'extra': {'extra_keys': []},
'exc_info': 'the info'},
)
def test_update_extras(self):
a = log.KeywordArgumentAdapter(self.mock_log, {})
msg, kwargs = a.process(
'message', {'context': 'some context object',
'instance': 'instance identifier',
'resource_uuid': 'UUID for instance',
'anything': 'goes'}
)
self.assertEqual(
kwargs,
            {'extra': {'anything': 'goes',
                       'context': 'some context object',
                       'extra_keys': ['anything', 'context',
                                      'instance', 'resource_uuid'],
                       'instance': 'instance identifier',
                       'resource_uuid': 'UUID for instance'}},
)
def test_pass_args_to_log(self):
a = log.KeywordArgumentAdapter(self.mock_log, {})
a.log(logging.DEBUG, 'message', name='value', exc_info='exception')
if six.PY3:
self.mock_log._log.assert_called_once_with(
logging.DEBUG,
'message',
(),
extra={'name': 'value',
'extra_keys': ['name']},
exc_info='exception',
)
else:
self.mock_log.log.assert_called_once_with(
logging.DEBUG,
'message',
extra={'name': 'value',
'extra_keys': ['name']},
exc_info='exception',
)
def test_pass_args_via_debug(self):
a = log.KeywordArgumentAdapter(self.mock_log, {})
a.debug('message', name='value', exc_info='exception')
        # The adapter implementation for debug() is different under
        # Python 3, so we expect a different method to be called
        # internally.
if six.PY3:
self.mock_log._log.assert_called_once_with(
logging.DEBUG,
'message',
(),
extra={'name': 'value',
'extra_keys': ['name']},
exc_info='exception',
)
else:
self.mock_log.debug.assert_called_once_with(
'message',
extra={'name': 'value',
'extra_keys': ['name']},
exc_info='exception',
)
|
cccfran/sympy
|
refs/heads/master
|
examples/intermediate/coupled_cluster.py
|
23
|
#!/usr/bin/env python
"""
Calculates the Coupled-Cluster energy- and amplitude equations
See 'An Introduction to Coupled Cluster Theory' by
T. Daniel Crawford and Henry F. Schaefer III.
http://www.ccc.uga.edu/lec_top/cc/html/review.html
"""
from sympy.physics.secondquant import (AntiSymmetricTensor, wicks,
F, Fd, NO, evaluate_deltas, substitute_dummies, Commutator,
simplify_index_permutations, PermutationOperator)
from sympy import (
symbols, expand, pprint, Rational, latex, Dummy
)
pretty_dummies_dict = {
'above': 'cdefgh',
'below': 'klmno',
'general': 'pqrstu'
}
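# In get_CC_operators() below, T1 and T2 are the standard singles and
# doubles cluster operators,
#   T1 = sum_{ia} t^a_i {a+ i},   T2 = 1/4 sum_{ijab} t^{ab}_{ij} {a+ b+ j i},
# built from antisymmetric amplitude tensors and normal-ordered strings.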
def get_CC_operators():
"""
Returns a tuple (T1,T2) of unique operators.
"""
i = symbols('i', below_fermi=True, cls=Dummy)
a = symbols('a', above_fermi=True, cls=Dummy)
t_ai = AntiSymmetricTensor('t', (a,), (i,))
ai = NO(Fd(a)*F(i))
i, j = symbols('i,j', below_fermi=True, cls=Dummy)
a, b = symbols('a,b', above_fermi=True, cls=Dummy)
t_abij = AntiSymmetricTensor('t', (a, b), (i, j))
abji = NO(Fd(a)*Fd(b)*F(j)*F(i))
T1 = t_ai*ai
T2 = Rational(1, 4)*t_abij*abji
return (T1, T2)
def main():
print()
print("Calculates the Coupled-Cluster energy- and amplitude equations")
print("See 'An Introduction to Coupled Cluster Theory' by")
print("T. Daniel Crawford and Henry F. Schaefer III")
print("http://www.ccc.uga.edu/lec_top/cc/html/review.html")
print()
# setup hamiltonian
p, q, r, s = symbols('p,q,r,s', cls=Dummy)
f = AntiSymmetricTensor('f', (p,), (q,))
pr = NO((Fd(p)*F(q)))
v = AntiSymmetricTensor('v', (p, q), (r, s))
pqsr = NO(Fd(p)*Fd(q)*F(s)*F(r))
H = f*pr + Rational(1, 4)*v*pqsr
print("Using the hamiltonian:", latex(H))
print("Calculating 4 nested commutators")
C = Commutator
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 1...")
comm1 = wicks(C(H, T))
comm1 = evaluate_deltas(comm1)
comm1 = substitute_dummies(comm1)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 2...")
comm2 = wicks(C(comm1, T))
comm2 = evaluate_deltas(comm2)
comm2 = substitute_dummies(comm2)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 3...")
comm3 = wicks(C(comm2, T))
comm3 = evaluate_deltas(comm3)
comm3 = substitute_dummies(comm3)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 4...")
comm4 = wicks(C(comm3, T))
comm4 = evaluate_deltas(comm4)
comm4 = substitute_dummies(comm4)
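    # Assemble the truncated Baker-Campbell-Hausdorff series
    #   Hbar = e^{-T} H e^{T} = H + [H,T] + [[H,T],T]/2! + .../3! + .../4!
    # which terminates exactly after four nested commutators because the
    # Hamiltonian contains at most two-body interactions.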
    print("construct Hausdorff expansion...")
eq = H + comm1 + comm2/2 + comm3/6 + comm4/24
eq = eq.expand()
eq = evaluate_deltas(eq)
eq = substitute_dummies(eq, new_indices=True,
pretty_indices=pretty_dummies_dict)
print("*********************")
print()
print("extracting CC equations from full Hbar")
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
print()
print("CC Energy:")
print(latex(wicks(eq, simplify_dummies=True,
keep_only_fully_contracted=True)))
print()
print("CC T1:")
eqT1 = wicks(NO(Fd(i)*F(a))*eq, simplify_kronecker_deltas=True, keep_only_fully_contracted=True)
eqT1 = substitute_dummies(eqT1)
print(latex(eqT1))
print()
print("CC T2:")
eqT2 = wicks(NO(Fd(i)*Fd(j)*F(b)*F(a))*eq, simplify_dummies=True, keep_only_fully_contracted=True, simplify_kronecker_deltas=True)
P = PermutationOperator
eqT2 = simplify_index_permutations(eqT2, [P(a, b), P(i, j)])
print(latex(eqT2))
if __name__ == "__main__":
main()
|
sourcefabric/airtime
|
refs/heads/2.5.x
|
python_apps/media-monitor/mm2/tests/test_listeners.py
|
12
|
import os, shutil
import time
import pyinotify
import unittest
from pydispatch import dispatcher
from media.monitor.listeners import OrganizeListener
from media.monitor.events import OrganizeFile
from os.path import join, normpath, abspath
def create_file(p):
with open(p, 'w') as f: f.write(" ")
class TestOrganizeListener(unittest.TestCase):
def setUp(self):
self.organize_path = 'test_o'
self.sig = 'org'
def my_abs_path(x):
return normpath(join(os.getcwd(), x))
self.sample_files = [ my_abs_path(join(self.organize_path, f))
for f in [ "gogi.mp3",
"gio.mp3",
"mimino.ogg" ] ]
os.mkdir(self.organize_path)
def test_flush_events(self):
org = self.create_org()
self.create_sample_files()
received = [0]
def pass_event(sender, event):
if isinstance(event, OrganizeFile):
received[0] += 1
self.assertTrue( abspath(event.path) in self.sample_files )
dispatcher.connect(pass_event, signal=self.sig, sender=dispatcher.Any,
weak=True)
org.flush_events( self.organize_path )
self.assertEqual( received[0], len(self.sample_files) )
self.delete_sample_files()
def test_process(self):
org = self.create_org()
received = [0]
def pass_event(sender, event):
if isinstance(event, OrganizeFile):
self.assertTrue( event.path in self.sample_files )
received[0] += 1
dispatcher.connect(pass_event, signal=self.sig, sender=dispatcher.Any,
weak=True)
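        # Watch the organize directory with pyinotify, using the
        # OrganizeListener as the notifier's processing function; every
        # sample file created below should surface as an OrganizeFile
        # signal that pass_event counts.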
wm = pyinotify.WatchManager()
def stopper(notifier):
return received[0] == len(self.sample_files)
tn = pyinotify.ThreadedNotifier(wm, default_proc_fun=org)
tn.daemon = True
tn.start()
wm.add_watch(self.organize_path, pyinotify.ALL_EVENTS, rec=True,
auto_add=True)
time.sleep(0.5)
self.create_sample_files()
time.sleep(1)
self.assertEqual( len(self.sample_files), received[0] )
self.delete_sample_files()
def tearDown(self):
shutil.rmtree(self.organize_path)
def create_sample_files(self):
for f in self.sample_files: create_file(f)
def delete_sample_files(self):
for f in self.sample_files: os.remove(f)
def create_org(self):
return OrganizeListener( signal=self.sig )
if __name__ == '__main__': unittest.main()
|