| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
import os
import re
import codecs
from setuptools import setup, find_packages
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
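# Illustrative example (hypothetical file contents): if constance/__init__.py
# contains a line `__version__ = '1.0.1'`, then
# find_version("constance", "__init__.py") returns '1.0.1'.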
setup(
name='django-constance',
version=find_version("constance", "__init__.py"),
url="http://github.com/jezdez/django-constance",
description='Django live settings with pluggable backends, including Redis.',
long_description=read('README.rst'),
author='Jannis Leidel',
author_email='jannis@leidel.info',
license='BSD',
keywords='django libraries settings redis'.split(),
platforms='any',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
],
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
zip_safe=False,
extras_require={
'database': ['django-picklefield'],
'redis': ['redis'],
}
)
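# The extras above enable optional installs, e.g. (illustrative):
#   pip install django-constance[redis]
# which pulls in the redis client in addition to the base package.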
| metalpriest/django-constance | setup.py | Python | bsd-3-clause | 1,931 |
from fabric.api import *
from fabric.contrib.files import *
from cloudbio.flavor import Flavor
from cloudbio.custom.shared import (_fetch_and_unpack)
class BioTestFlavor(Flavor):
"""A Flavor for cross Bio* tests
"""
def __init__(self, env):
Flavor.__init__(self, env)
self.name = "Bio* cross-lang flavor"
def rewrite_config_items(self, name, items):
if name == "packages":
# list.remove('screen')
# list.append('test')
return items
elif name == "python":
return [ 'biopython' ]
elif name == "perl":
return [ 'bioperl' ]
elif name == "ruby":
return [ 'bio' ]
elif name == "custom":
return []
else:
return items
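# Illustrative behavior of rewrite_config_items (names from the code above):
#   flavor.rewrite_config_items("python", ["numpy"])  # -> ['biopython']
#   flavor.rewrite_config_items("perl", [])           # -> ['bioperl']
#   flavor.rewrite_config_items("other", items)       # -> items (unchanged)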
def post_install(self):
env.logger.info("Starting post-install")
env.logger.info("Load Scalability tests")
if exists('Scalability'):
with cd('Scalability'):
run('git pull')
else:
_fetch_and_unpack("git clone git://github.com/pjotrp/Scalability.git")
# Now run a post installation routine (for the heck of it)
run('./Scalability/scripts/hello.sh')
env.logger.info("Load Cross-language tests")
if exists('Cross-language-interfacing'):
with cd('Cross-language-interfacing'):
run('git pull')
else:
_fetch_and_unpack("git clone git://github.com/pjotrp/Cross-language-interfacing.git")
# Special installs for the tests
with cd('Cross-language-interfacing'):
sudo('./scripts/install-packages-root.sh ')
run('./scripts/install-packages.sh')
run('./scripts/create_test_files.rb')
env.flavor = BioTestFlavor(env)
| heuermh/cloudbiolinux | contrib/flavor/pjotrp/biotest/biotestflavor.py | Python | mit | 1,800 |
"""Provide access to Python's configuration information.
This is actually PyPy's minimal configuration information.
The specific configuration variables available depend heavily on the
platform and configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
"""
__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $"
import sys
import os
import shlex
from distutils.errors import DistutilsPlatformError
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
project_base = os.path.dirname(os.path.abspath(sys.executable))
python_build = False
def get_python_inc(plat_specific=0, prefix=None):
from os.path import join as j
return j(sys.prefix, 'include')
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = PREFIX
if standard_lib:
return os.path.join(prefix, "lib-python", get_python_version())
return os.path.join(prefix, 'site-packages')
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
g = {}
g['EXE'] = ""
g['SO'] = ".so"
g['SOABI'] = g['SO'].rsplit('.')[0]
g['LIBDIR'] = os.path.join(sys.prefix, 'lib')
g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check
global _config_vars
_config_vars = g
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
g['EXE'] = ".exe"
g['SO'] = ".pyd"
g['SOABI'] = g['SO'].rsplit('.')[0]
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
return get_config_vars().get(name)
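# Illustrative usage (values taken from the _init_posix/_init_nt tables above):
#   get_config_var('SO')          # -> '.so' on POSIX, '.pyd' on NT
#   get_config_vars('EXE', 'SO')  # -> ['', '.so'] on POSIX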
def customize_compiler(compiler):
"""Dummy method to let some easy_install packages that have
optional C speedup components.
"""
if compiler.compiler_type == "unix":
compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit'])
compiler.shared_lib_extension = get_config_var('SO')
if "CPPFLAGS" in os.environ:
cppflags = shlex.split(os.environ["CPPFLAGS"])
compiler.compiler.extend(cppflags)
compiler.compiler_so.extend(cppflags)
compiler.linker_so.extend(cppflags)
if "CFLAGS" in os.environ:
cflags = shlex.split(os.environ["CFLAGS"])
compiler.compiler.extend(cflags)
compiler.compiler_so.extend(cflags)
compiler.linker_so.extend(cflags)
if "LDFLAGS" in os.environ:
ldflags = shlex.split(os.environ["LDFLAGS"])
compiler.linker_so.extend(ldflags)
from sysconfig_cpython import (
parse_makefile, _variable_rx, expand_makefile_vars)
| jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/distutils/sysconfig_pypy.py | Python | mit | 4,713 |
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
import argparse
import collections
import sys
import os
import subprocess
import io
try:
from lxml import etree
except ModuleNotFoundError as e:
etree = e
try:
from shlex import join as shlex_join
except ImportError as e:
shlex_join = e
try:
from shlex import quote as shlex_quote
except ImportError as e:
shlex_quote = e
class NoCommand(Exception):
pass
BORING_INTERFACES = [
'org.freedesktop.DBus.Peer',
'org.freedesktop.DBus.Introspectable',
'org.freedesktop.DBus.Properties',
]
RED = '\x1b[31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
RESET = '\x1b[39m'
def xml_parser():
return etree.XMLParser(no_network=True,
remove_comments=False,
strip_cdata=False,
resolve_entities=False)
def print_method(declarations, elem, *, prefix, file, is_signal=False):
name = elem.get('name')
klass = 'signal' if is_signal else 'method'
declarations[klass].append(name)
# @org.freedesktop.systemd1.Privileged("true")
# SetShowStatus(in s mode);
for anno in elem.findall('./annotation'):
anno_name = anno.get('name')
anno_value = anno.get('value')
print(f'''{prefix}@{anno_name}("{anno_value}")''', file=file)
print(f'''{prefix}{name}(''', file=file, end='')
lead = ',\n' + prefix + ' ' * len(name) + ' '
for num, arg in enumerate(elem.findall('./arg')):
argname = arg.get('name')
if argname is None:
if opts.print_errors:
print(f'method {name}: argument {num+1} has no name', file=sys.stderr)
argname = 'UNNAMED'
type = arg.get('type')
if not is_signal:
direction = arg.get('direction')
print(f'''{lead if num > 0 else ''}{direction:3} {type} {argname}''', file=file, end='')
else:
print(f'''{lead if num > 0 else ''}{type} {argname}''', file=file, end='')
print(');', file=file)
ACCESS_MAP = {
'read' : 'readonly',
'write' : 'readwrite',
}
def value_ellipsis(type):
if type == 's':
return "'...'";
if type[0] == 'a':
inner = value_ellipsis(type[1:])
return f"[{inner}{', ...' if inner != '...' else ''}]";
return '...'
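# Examples for value_ellipsis (derived from the cases above):
#   value_ellipsis('s')   # -> "'...'"
#   value_ellipsis('ai')  # -> '[...]'
#   value_ellipsis('as')  # -> "['...', ...]"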
def print_property(declarations, elem, *, prefix, file):
name = elem.get('name')
type = elem.get('type')
access = elem.get('access')
declarations['property'].append(name)
# @org.freedesktop.DBus.Property.EmitsChangedSignal("false")
# @org.freedesktop.systemd1.Privileged("true")
# readwrite b EnableWallMessages = false;
for anno in elem.findall('./annotation'):
anno_name = anno.get('name')
anno_value = anno.get('value')
print(f'''{prefix}@{anno_name}("{anno_value}")''', file=file)
access = ACCESS_MAP.get(access, access)
print(f'''{prefix}{access} {type} {name} = {value_ellipsis(type)};''', file=file)
def print_interface(iface, *, prefix, file, print_boring, only_interface, declarations):
name = iface.get('name')
is_boring = (name in BORING_INTERFACES or
only_interface is not None and name != only_interface)
if is_boring and print_boring:
print(f'''{prefix}interface {name} {{ ... }};''', file=file)
elif not is_boring and not print_boring:
print(f'''{prefix}interface {name} {{''', file=file)
prefix2 = prefix + ' '
for num, elem in enumerate(iface.findall('./method')):
if num == 0:
print(f'''{prefix2}methods:''', file=file)
print_method(declarations, elem, prefix=prefix2 + ' ', file=file)
for num, elem in enumerate(iface.findall('./signal')):
if num == 0:
print(f'''{prefix2}signals:''', file=file)
print_method(declarations, elem, prefix=prefix2 + ' ', file=file, is_signal=True)
for num, elem in enumerate(iface.findall('./property')):
if num == 0:
print(f'''{prefix2}properties:''', file=file)
print_property(declarations, elem, prefix=prefix2 + ' ', file=file)
print(f'''{prefix}}};''', file=file)
def document_has_elem_with_text(document, elem, item_repr):
predicate = f".//{elem}" # [text() = 'foo'] doesn't seem supported :(
for loc in document.findall(predicate):
if loc.text == item_repr:
return True
return False
def check_documented(document, declarations, stats):
missing = []
for klass, items in declarations.items():
stats['total'] += len(items)
for item in items:
if klass == 'method':
elem = 'function'
item_repr = f'{item}()'
elif klass == 'signal':
elem = 'function'
item_repr = item
elif klass == 'property':
elem = 'varname'
item_repr = item
else:
assert False, (klass, item)
if not document_has_elem_with_text(document, elem, item_repr):
if opts.print_errors:
print(f'{klass} {item} is not documented :(')
missing.append((klass, item))
stats['missing'] += len(missing)
return missing
def xml_to_text(destination, xml, *, only_interface=None):
file = io.StringIO()
declarations = collections.defaultdict(list)
interfaces = []
print(f'''node {destination} {{''', file=file)
for print_boring in [False, True]:
for iface in xml.findall('./interface'):
print_interface(iface, prefix=' ', file=file,
print_boring=print_boring,
only_interface=only_interface,
declarations=declarations)
name = iface.get('name')
if not name in BORING_INTERFACES:
interfaces.append(name)
print('};', file=file)
return file.getvalue(), declarations, interfaces
def subst_output(document, programlisting, stats):
executable = programlisting.get('executable', None)
if executable is None:
# Not our thing
return
node = programlisting.get('node')
interface = programlisting.get('interface')
argv = [f'{opts.build_dir}/{executable}', f'--bus-introspect={interface}']
if isinstance(shlex_join, Exception):
print(f'COMMAND: {" ".join(shlex_quote(arg) for arg in argv)}')
else:
print(f'COMMAND: {shlex_join(argv)}')
try:
out = subprocess.check_output(argv, universal_newlines=True)
except FileNotFoundError:
print(f'{executable} not found, ignoring', file=sys.stderr)
return
xml = etree.fromstring(out, parser=xml_parser())
new_text, declarations, interfaces = xml_to_text(node, xml, only_interface=interface)
programlisting.text = '\n' + new_text + ' '
if declarations:
missing = check_documented(document, declarations, stats)
parent = programlisting.getparent()
# delete old comments
for child in parent:
if (child.tag == etree.Comment
and 'Autogenerated' in child.text):
parent.remove(child)
if (child.tag == etree.Comment
and 'not documented' in child.text):
parent.remove(child)
if (child.tag == "variablelist"
and child.attrib.get("generated",False) == "True"):
parent.remove(child)
# insert pointer for systemd-directives generation
the_tail = programlisting.tail  # tail is erased by addnext, so save it here.
prev_element = etree.Comment("Autogenerated cross-references for systemd.directives, do not edit")
programlisting.addnext(prev_element)
programlisting.tail = the_tail
for interface in interfaces:
variablelist = etree.Element("variablelist")
variablelist.attrib['class'] = 'dbus-interface'
variablelist.attrib['generated'] = 'True'
variablelist.attrib['extra-ref'] = interface
prev_element.addnext(variablelist)
prev_element.tail = the_tail
prev_element = variablelist
for decl_type, decl_list in declarations.items():
for declaration in decl_list:
variablelist = etree.Element("variablelist")
variablelist.attrib['class'] = 'dbus-'+decl_type
variablelist.attrib['generated'] = 'True'
if decl_type == 'method':
variablelist.attrib['extra-ref'] = declaration + '()'
else:
variablelist.attrib['extra-ref'] = declaration
prev_element.addnext(variablelist)
prev_element.tail = the_tail
prev_element = variablelist
last_element = etree.Comment("End of Autogenerated section")
prev_element.addnext(last_element)
prev_element.tail = the_tail
last_element.tail = the_tail
# insert comments for undocumented items
for item in reversed(missing):
comment = etree.Comment(f'{item[0]} {item[1]} is not documented!')
comment.tail = programlisting.tail
parent.insert(parent.index(programlisting) + 1, comment)
def process(page):
src = open(page).read()
xml = etree.fromstring(src, parser=xml_parser())
# print('parsing {}'.format(name), file=sys.stderr)
if xml.tag != 'refentry':
return
stats = collections.Counter()
pls = xml.findall('.//programlisting')
for pl in pls:
subst_output(xml, pl, stats)
out_text = etree.tostring(xml, encoding='unicode')
# massage format to avoid some lxml whitespace handling idiosyncrasies
# https://bugs.launchpad.net/lxml/+bug/526799
out_text = (src[:src.find('<refentryinfo')] +
out_text[out_text.find('<refentryinfo'):] +
'\n')
if not opts.test:
with open(page, 'w') as out:
out.write(out_text)
return dict(stats=stats, modified=(out_text != src))
def parse_args():
p = argparse.ArgumentParser()
p.add_argument('--test', action='store_true',
help='only verify that everything is up2date')
p.add_argument('--build-dir', default='build')
p.add_argument('pages', nargs='+')
opts = p.parse_args()
opts.print_errors = not opts.test
return opts
if __name__ == '__main__':
opts = parse_args()
for item in (etree, shlex_quote):
if isinstance(item, Exception):
print(item, file=sys.stderr)
exit(77 if opts.test else 1)
if not os.path.exists(f'{opts.build_dir}/systemd'):
exit(f"{opts.build_dir}/systemd doesn't exist. Use --build-dir=.")
stats = {page.split('/')[-1] : process(page) for page in opts.pages}
# Let's print all statistics at the end
mlen = max(len(page) for page in stats)
total = sum((item['stats'] for item in stats.values()), collections.Counter())
total = 'total', dict(stats=total, modified=False)
modified = []
classification = 'OUTDATED' if opts.test else 'MODIFIED'
for page, info in sorted(stats.items()) + [total]:
m = info['stats']['missing']
t = info['stats']['total']
p = page + ':'
c = classification if info['modified'] else ''
if c:
modified.append(page)
color = RED if m > t/2 else (YELLOW if m else GREEN)
print(f'{color}{p:{mlen + 1}} {t - m}/{t} {c}{RESET}')
if opts.test and modified:
exit(f'Outdated pages: {", ".join(modified)}\n'
f'Hint: ninja -C {opts.build_dir} update-dbus-docs')
| msekletar/systemd | tools/update-dbus-docs.py | Python | gpl-2.0 | 11,862 |
# -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2013 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Authentication management.
"""
import sys
from .. import config, log
def load():
"""Load list of available authentication managers."""
auth_type = config.get("auth", "type")
log.LOGGER.debug("Authentication type is %s" % auth_type)
if auth_type == "None":
return None
elif auth_type == 'custom':
auth_module = config.get("auth", "custom_handler")
__import__(auth_module)
module = sys.modules[auth_module]
else:
root_module = __import__(
"auth.%s" % auth_type, globals=globals(), level=2)
module = getattr(root_module, auth_type)
# Override auth.is_authenticated
sys.modules[__name__].is_authenticated = module.is_authenticated
return module
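# Illustrative configuration (Radicale INI config; module name hypothetical):
#   [auth]
#   type = custom
#   custom_handler = my_auth_module
# load() then imports my_auth_module and uses its is_authenticated().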
def is_authenticated(user, password):
"""Check if the user is authenticated.
This method is overridden if an auth module is loaded.
"""
return True # Default is always True: no authentication
| wohnsinn2/Radicale | radicale/auth/__init__.py | Python | gpl-3.0 | 1,784 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gflags as flags # NOQA
import gflags_validators as flags_validators # NOQA
| askdaddy/PerfKitBenchmarker | perfkitbenchmarker/__init__.py | Python | apache-2.0 | 679 |
from django import http
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
def shortcut(request, content_type_id, object_id):
"Redirect to an object's page based on a content-type ID and an object ID."
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
raise http.Http404("Content type %s object %s doesn't exist" % (content_type_id, object_id))
try:
absurl = obj.get_absolute_url()
except AttributeError:
raise http.Http404("%s objects don't have get_absolute_url() methods" % content_type.name)
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith('http://') or absurl.startswith('https://'):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
opts = obj._meta
# First, look for a many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = Site.objects.get_current().domain
except Site.DoesNotExist:
pass
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.is_secure() and 'https' or 'http'
return http.HttpResponseRedirect('%s://%s%s' % (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
| CollabQ/CollabQ | vendor/django/contrib/contenttypes/views.py | Python | apache-2.0 | 2,734 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import glob
import os
import subprocess
import sys
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
#
# IDLDiff
#
# IDLDiff is a tool for comparing sets of IDL generated header files
# with the standard checked in headers. It does this by capturing the
# output of the standard diff tool, parsing it into separate changes, then
# ignoring changes that are known to be safe, such as adding or removing
# blank lines, etc...
#
Option('gen', 'IDL generated files', default='hdir')
Option('src', 'Original ".h" files', default='../c')
Option('halt', 'Stop if a difference is found')
Option('diff', 'Directory holding acceptable diffs', default='diff')
Option('ok', 'Write out the diff file.')
# Change
#
# A Change object contains the previous lines, the new lines, and the change type.
#
class Change(object):
def __init__(self, mode, was, now):
self.mode = mode
self.was = was
self.now = now
def Dump(self):
if not self.was:
print('Adding %s' % self.mode)
elif not self.now:
print('Missing %s' % self.mode)
else:
print('Modifying %s' % self.mode)
for line in self.was:
print('src: >>%s<<' % line)
for line in self.now:
print('gen: >>%s<<' % line)
print()
#
# IsCopyright
#
# Return True if this change is only a one line change in the copyright notice
# such as non-matching years.
#
def IsCopyright(change):
if len(change.now) != 1 or len(change.was) != 1: return False
if 'Copyright (c)' not in change.now[0]: return False
if 'Copyright (c)' not in change.was[0]: return False
return True
#
# IsBlankComment
#
# Return True if this change only removes a blank line from a comment
#
def IsBlankComment(change):
if change.now: return False
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
return True
#
# IsBlank
#
# Return True if this change only adds or removes blank lines
#
def IsBlank(change):
for line in change.now:
if line: return False
for line in change.was:
if line: return False
return True
#
# IsCppComment
#
# Return True if this change only going from C++ to C style
#
def IsToCppComment(change):
if not len(change.now) or len(change.now) != len(change.was):
return False
for index in range(len(change.now)):
was = change.was[index].strip()
if was[:2] != '//':
return False
was = was[2:].strip()
now = change.now[index].strip()
if now[:2] != '/*':
return False
now = now[2:-2].strip()
if now != was:
return False
return True
def IsMergeComment(change):
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
for line in change.now:
stripped = line.strip()
if stripped != '*' and stripped[:2] != '/*' and stripped[-2:] != '*/':
return False
return True
#
# IsSpacing
#
# Return True if this change is only different in the way 'words' are spaced
# such as in an enum:
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
# vs
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
#
def IsSpacing(change):
if len(change.now) != len(change.was): return False
for i in range(len(change.now)):
# Also ignore right side comments
line = change.was[i]
offs = line.find('//')
if offs == -1:
offs = line.find('/*')
if offs > -1:
line = line[:offs-1]
words1 = change.now[i].split()
words2 = line.split()
if words1 != words2: return False
return True
#
# IsInclude
#
# Return True if change has extra includes
#
def IsInclude(change):
for line in change.was:
if line.strip().find('struct'): return False
for line in change.now:
if line and '#include' not in line: return False
return True
#
# IsCppComment
#
# Return True if the change is only missing C++ comments
#
def IsCppComment(change):
if len(change.now): return False
for line in change.was:
line = line.strip()
if line[:2] != '//': return False
return True
#
# ValidChange
#
# Return True if the change does not match any of the "bogus" patterns above.
#
def ValidChange(change):
if IsToCppComment(change): return False
if IsCopyright(change): return False
if IsBlankComment(change): return False
if IsMergeComment(change): return False
if IsBlank(change): return False
if IsSpacing(change): return False
if IsInclude(change): return False
if IsCppComment(change): return False
return True
#
# Swapped
#
# Check if the combination of last + next change signals they are both
# invalid, such as a swap of lines around an invalid block.
#
def Swapped(last, next):
if not last.now and not next.was and len(last.was) == len(next.now):
cnt = len(last.was)
for i in range(cnt):
match = True
for j in range(cnt):
if last.was[j] != next.now[(i + j) % cnt]:
match = False
break
if match: return True
if not last.was and not next.now and len(last.now) == len(next.was):
cnt = len(last.now)
for i in range(cnt):
match = True
for j in range(cnt):
if last.now[i] != next.was[(i + j) % cnt]:
match = False
break
if match: return True
return False
def FilterLinesIn(output):
was = []
now = []
filter = []
for index in range(len(output)):
filter.append(False)
line = output[index]
if len(line) < 2: continue
if line[0] == '<':
if line[2:].strip() == '': continue
was.append((index, line[2:]))
elif line[0] == '>':
if line[2:].strip() == '': continue
now.append((index, line[2:]))
for windex, wline in was:
for nindex, nline in now:
if filter[nindex]: continue
if filter[windex]: continue
if wline == nline:
filter[nindex] = True
filter[windex] = True
if GetOption('verbose'):
print("Found %d, %d >>%s<<" % (windex + 1, nindex + 1, wline))
out = []
for index in range(len(output)):
if not filter[index]:
out.append(output[index])
return out
#
# GetChanges
#
# Parse the output into discrete change blocks.
#
def GetChanges(output):
# Split into lines, adding an END marker to simplify the logic
lines = output.split('\n')
lines = FilterLinesIn(lines)
lines.append('END')
changes = []
was = []
now = []
mode = ''
last = None
for line in lines:
#print("LINE=%s" % line)
if not line: continue
elif line[0] == '<':
if line[2:].strip() == '': continue
# Ignore prototypes
if len(line) > 10:
words = line[2:].split()
if len(words) == 2 and words[1][-1] == ';':
if words[0] == 'struct' or words[0] == 'union':
continue
was.append(line[2:])
elif line[0] == '>':
if line[2:].strip() == '': continue
if line[2:10] == '#include': continue
now.append(line[2:])
elif line[0] == '-':
continue
else:
change = Change(line, was, now)
was = []
now = []
if ValidChange(change):
changes.append(change)
if line == 'END':
break
return FilterChanges(changes)
def FilterChanges(changes):
if len(changes) < 2: return changes
out = []
filter = [False for change in changes]
for cur in range(len(changes)):
for cmp in range(cur+1, len(changes)):
if filter[cmp]:
continue
if Swapped(changes[cur], changes[cmp]):
filter[cur] = True
filter[cmp] = True
for cur in range(len(changes)):
if filter[cur]: continue
out.append(changes[cur])
return out
def Main(args):
filenames = ParseOptions(args)
if not filenames:
gendir = os.path.join(GetOption('gen'), '*.h')
filenames = sorted(glob.glob(gendir))
srcdir = os.path.join(GetOption('src'), '*.h')
srcs = sorted(glob.glob(srcdir))
for name in srcs:
name = os.path.split(name)[1]
name = os.path.join(GetOption('gen'), name)
if name not in filenames:
print('Missing: %s' % name)
for filename in filenames:
gen = filename
filename = filename[len(GetOption('gen')) + 1:]
src = os.path.join(GetOption('src'), filename)
diff = os.path.join(GetOption('diff'), filename)
p = subprocess.Popen(['diff', src, gen], stdout=subprocess.PIPE)
output, errors = p.communicate()
try:
input = open(diff, 'rt').read()
except:
input = ''
if input != output:
changes = GetChanges(output)
else:
changes = []
if changes:
print("\n\nDelta between:\n src=%s\n gen=%s\n" % (src, gen))
for change in changes:
change.Dump()
print('Done with %s\n\n' % src)
if GetOption('ok'):
open(diff, 'wt').write(output)
if GetOption('halt'):
return 1
else:
print("\nSAME:\n src=%s\n gen=%s" % (src, gen))
if input:
print(' ** Matched expected diff. **')
print('\n')
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| chromium/chromium | ppapi/generators/idl_diff.py | Python | bsd-3-clause | 9,131 |
#
# Copyright (C) 2013 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import utils
def all_free_variables(edges):
return utils.unions(edges, lambda edge: edge.get_free_vars())
def get_variable_sources(inscriptions):
sources = {}
for inscription in inscriptions:
if not inscription.is_expr_variable():
continue
if sources.get(inscription.expr):
continue
if inscription.is_bulk():
sources[inscription.expr] = None
else:
sources[inscription.expr] = inscription.uid
return sources
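# Illustrative sketch: given a plain inscription with expr 'x' and uid 1 and a
# bulk inscription with expr 'y' and uid 2, the result is {'x': 1, 'y': None}.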
def is_dependant(inscription1, inscription2):
if inscription1.edge is inscription2.edge and \
inscription2.index < inscription1.index:
return True
if not inscription2.is_expr_variable():
return False
return inscription2.expr in inscription1.get_foreign_variables()
def analyze_transition(tr):
variable_sources = {} # string -> uid - which inscriptions carry input variables
reuse_tokens = {} # uid -> uid - identification number of token for output inscription
fresh_tokens = [] # (uid, type) - which tokens have to be created for output
used_tokens = [] # [uid] - Tokens from input inscriptions that are reused on output
variable_sources_out = {} # string -> uid or None
bulk_overtake = [] # [uid]
overtaken_variables = set()
def inscription_out_weight(inscription):
# Reorder edges, bulk edges first, because we want to send them first.
# Otherwise it can cause problems, e.g. when sending results in the "workers" example
s = inscription.config.get("seq")
if s is None:
seq = 0
else:
seq = int(s) * 3
if inscription.is_bulk():
return seq
# Unconditional edges have higher priority
if inscription.is_conditioned():
return seq + 2
else:
return seq + 1
def inscription_in_weight(inscription):
if inscription.is_conditioned():
return 1
else:
return 0
inscriptions_in = sum((edge.inscriptions for edge in tr.edges_in), [])
inscriptions_in.sort(key=inscription_in_weight)
inscriptions_out = sum((edge.inscriptions for edge in tr.edges_out), [])
inscriptions_out.sort(key=inscription_out_weight)
variable_sources = get_variable_sources(inscriptions_in)
# Order input inscriptions by variable dependency
inscriptions_in = utils.topological_ordering(inscriptions_in, is_dependant)
if inscriptions_in is None:
raise utils.PtpException("Circle variable dependancy", tr.get_source())
# Try reuse tokens
for inscription in inscriptions_out:
if inscription.is_bulk() or not inscription.is_local():
continue # Bulk and nonlocal edges cannot use token reuse
if not inscription.is_expr_variable():
continue # Current implementation reuses tokens only for variable expressions
if inscription.is_collective():
continue # Collective operations cannot use token reusage
token_uid = variable_sources.get(inscription.expr)
if token_uid is None or token_uid in used_tokens:
# Variable is not taken from input as a token,
# or the token is already reused --> reuse not possible
continue
reuse_tokens[inscription.uid] = token_uid
used_tokens.append(token_uid)
# Setup fresh variables where token was not reused
for inscription in inscriptions_out:
if not inscription.is_expr_variable():
continue # We are interested only in variables
variable = inscription.expr
if variable in variable_sources:
# Variable taken from input, so we do not have to deal with it here
continue
if variable in variable_sources_out:
# Variable already prepared for output
continue
if inscription.is_bulk():
# No token, just build variable
variable_sources_out[variable] = None
continue
if inscription.is_local():
# Local send, we prepare token
fresh_tokens.append((inscription.uid, inscription.edge.place.type))
variable_sources_out[variable] = inscription.uid
reuse_tokens[inscription.uid] = inscription.uid # Use this fresh new token
else:
# Just create variable
variable_sources_out[variable] = None
for inscription in reversed(inscriptions_out):
# Now we are checking overtake. It has to be in reversed order
# because overtake has to be the last operation on a variable
if not inscription.is_bulk() or not inscription.is_expr_variable():
continue # We are interested only in variables and bulk inscriptions
if inscription.expr not in overtaken_variables:
overtaken_variables.add(inscription.expr)
bulk_overtake.append(inscription.uid)
for inscription in inscriptions_out:
for variable in inscription.get_other_variables():
if variable not in variable_sources and \
variable not in variable_sources_out:
variable_sources_out[variable] = None
tr.inscriptions_in = inscriptions_in
tr.inscriptions_out = inscriptions_out
tr.variable_sources = variable_sources
tr.reuse_tokens = reuse_tokens
tr.variable_sources_out = variable_sources_out
tr.fresh_tokens = fresh_tokens
tr.bulk_overtake = bulk_overtake
| MrPablozOne/kaira | ptp/base/analysis.py | Python | gpl-3.0 | 6,178 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
Value to clip the 'cs' value to. Disable by setting to negative value.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
return gen_lstm_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
cell_size = cell_size4 / 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pylint: enable=protected-access
# pylint: enable=invalid-name
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for LSTMBlockCell."""
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2)[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2)[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = gen_lstm_ops.lstm_block_cell_grad(
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
# Backprop from dicfo to xh.
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
# Backprop from dicfo to b.
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
b_grad) = gen_lstm_ops.block_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
return [
None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad
]
class LSTMBlockCell(rnn_cell_impl.RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add `forget_bias` (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much
faster. The weight and bias matrices should be compatible as long as the
variable scope matches.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: Whether to use peephole connections or not.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMBlockCell instead.
"""
super(LSTMBlockCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
self._cell_clip = cell_clip if cell_clip is not None else -1
self._names = {
"W": "kernel",
"b": "bias",
"wci": "w_i_diag",
"wcf": "w_f_diag",
"wco": "w_o_diag",
"scope": "lstm_cell"
}
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, x, states_prev, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or self._names["scope"]):
x_shape = x.get_shape().with_rank(2)
if not x_shape[1].value:
raise ValueError("Expecting x_shape[1] to be set: %s" % str(x_shape))
if len(states_prev) != 2:
raise ValueError("Expecting states_prev to be a tuple with length 2.")
input_size = x_shape[1].value
w = vs.get_variable(self._names["W"], [input_size + self._num_units,
self._num_units * 4])
b = vs.get_variable(
self._names["b"], [w.get_shape().with_rank(2)[1].value],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
wci = vs.get_variable(self._names["wci"], [self._num_units])
wcf = vs.get_variable(self._names["wcf"], [self._num_units])
wco = vs.get_variable(self._names["wco"], [self._num_units])
else:
wci = wcf = wco = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = states_prev
(_, cs, _, _, _, _, h) = _lstm_block_cell(
x,
cs_prev,
h_prev,
w,
b,
wci=wci,
wcf=wcf,
wco=wco,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
return h, new_state
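# Minimal usage sketch (TF 1.x API assumed; names and shapes illustrative):
#   cell = LSTMBlockCell(num_units=128)
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# where `inputs` has shape [batch_size, time_len, input_size].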
class LSTMBlockWrapper(fused_rnn_cell.FusedRNNCell):
"""This is a helper class that provides housekeeping for LSTM cells.
This may be useful for alternative LSTM and similar type of cells.
The subclasses must implement `_call_cell` method and `num_units` property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
or a list of `time_len` tensors of shape `[batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len).`
Defaults to `time_len` for each element.
scope: `VariableScope` for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
with vs.variable_scope(scope or "lstm_block_wrapper"):
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" %
inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError(
"Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(inputs, initial_cell_state,
initial_output, dtype,
sequence_length)
if sequence_length is not None:
# Mask out the part beyond sequence_length
mask = array_ops.transpose(
array_ops.sequence_mask(
sequence_length, time_len, dtype=dtype), [1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
# Prepend initial states to cell_states and outputs for indexing to work
# correctly, since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = array_ops.unstack(outputs)
final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
mod_indices = indices * batch_size + math_ops.range(batch_size)
return array_ops.gather(
array_ops.reshape(data, [-1, self.num_units]), mod_indices)
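# e.g. with batch_size=2 and indices=[3, 5], mod_indices=[6, 11], which selects
# data[3, 0, :] and data[5, 1, :] from the time-major [time, batch, units] data.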
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
This is an extremely efficient LSTM implementation, that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Default is no cell clipping.
use_peephole: Whether to use peephole connections or not.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip if cell_clip is not None else -1
self._use_peephole = use_peephole
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
input_size = inputs_shape[2].value
w = vs.get_variable(
"kernel",
[input_size + self._num_units, self._num_units * 4], dtype=dtype)
b = vs.get_variable(
"bias", [w.get_shape().with_rank(2)[1]],
initializer=init_ops.constant_initializer(0.0),
dtype=dtype)
if self._use_peephole:
wci = vs.get_variable("w_i_diag", [self._num_units], dtype=dtype)
wcf = vs.get_variable("w_f_diag", [self._num_units], dtype=dtype)
wco = vs.get_variable("w_o_diag", [self._num_units], dtype=dtype)
else:
wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
_, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
| mdrumond/tensorflow | tensorflow/contrib/rnn/python/ops/lstm_ops.py | Python | apache-2.0 | 24,027 |
"""
sentry.web.frontend.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import itertools
from django.contrib import messages
from django.contrib.auth import login as login_user, authenticate
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.utils import timezone
from sudo.decorators import sudo_required
from sentry import features
from sentry.models import (
AuthProvider, LostPasswordHash, Organization, OrganizationMemberType,
Project, Team, UserOption
)
from sentry.plugins import plugins
from sentry.web.decorators import login_required
from sentry.web.forms.accounts import (
AccountSettingsForm, NotificationSettingsForm, AppearanceSettingsForm,
RegistrationForm, RecoverPasswordForm, ChangePasswordRecoverForm,
ProjectEmailOptionsForm)
from sentry.web.helpers import render_to_response
from sentry.utils.auth import get_auth_providers, get_login_redirect
from sentry.utils.safe import safe_execute
@csrf_protect
@never_cache
@transaction.atomic
def register(request):
from django.conf import settings
if not (features.has('auth:register') or request.session.get('can_register')):
return HttpResponseRedirect(reverse('sentry'))
form = RegistrationForm(request.POST or None,
captcha=bool(request.session.get('needs_captcha')))
if form.is_valid():
user = form.save()
# TODO(dcramer): ideally this would be handled by a special view
# specifically for organization registration
if settings.SENTRY_SINGLE_ORGANIZATION:
org = Organization.get_default()
defaults = {
'has_global_access': True,
'type': OrganizationMemberType.MEMBER,
}
try:
auth_provider = AuthProvider.objects.get(
organization=org.id,
)
except AuthProvider.DoesNotExist:
pass
else:
defaults.update({
'has_global_access': auth_provider.default_global_access,
'type': auth_provider.default_role,
})
org.member_set.create(
user=user,
**defaults
)
# can_register should only allow a single registration
request.session.pop('can_register', None)
# HACK: grab whatever the first backend is and assume it works
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login_user(request, user)
request.session.pop('needs_captcha', None)
return login_redirect(request)
elif request.POST and not request.session.get('needs_captcha'):
request.session['needs_captcha'] = 1
form = RegistrationForm(request.POST or None, captcha=True)
form.errors.pop('captcha', None)
return render_to_response('sentry/register.html', {
'form': form,
}, request)
@login_required
def login_redirect(request):
login_url = get_login_redirect(request)
return HttpResponseRedirect(login_url)
def recover(request):
form = RecoverPasswordForm(request.POST or None,
captcha=bool(request.session.get('needs_captcha')))
if form.is_valid():
password_hash, created = LostPasswordHash.objects.get_or_create(
user=form.cleaned_data['user']
)
if not password_hash.is_valid():
password_hash.date_added = timezone.now()
password_hash.set_hash()
password_hash.save()
password_hash.send_recover_mail()
request.session.pop('needs_captcha', None)
return render_to_response('sentry/account/recover/sent.html', {
'email': password_hash.user.email,
}, request)
elif request.POST and not request.session.get('needs_captcha'):
request.session['needs_captcha'] = 1
form = RecoverPasswordForm(request.POST or None, captcha=True)
form.errors.pop('captcha', None)
context = {
'form': form,
}
return render_to_response('sentry/account/recover/index.html', context, request)
def recover_confirm(request, user_id, hash):
try:
password_hash = LostPasswordHash.objects.get(user=user_id, hash=hash)
if not password_hash.is_valid():
password_hash.delete()
raise LostPasswordHash.DoesNotExist
user = password_hash.user
except LostPasswordHash.DoesNotExist:
context = {}
tpl = 'sentry/account/recover/failure.html'
else:
tpl = 'sentry/account/recover/confirm.html'
if request.method == 'POST':
form = ChangePasswordRecoverForm(request.POST)
if form.is_valid():
user.set_password(form.cleaned_data['password'])
user.save()
# Ugly way of doing this, but Django requires the backend be set
user = authenticate(
username=user.username,
password=form.cleaned_data['password'],
)
login_user(request, user)
password_hash.delete()
return login_redirect(request)
else:
form = ChangePasswordRecoverForm()
context = {
'form': form,
}
return render_to_response(tpl, context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def settings(request):
form = AccountSettingsForm(request.user, request.POST or None, initial={
'email': request.user.email,
'username': request.user.username,
'first_name': request.user.first_name,
})
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({
'form': form,
'page': 'settings',
'AUTH_PROVIDERS': get_auth_providers(),
})
return render_to_response('sentry/account/settings.html', context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def appearance_settings(request):
from django.conf import settings
options = UserOption.objects.get_all_values(user=request.user, project=None)
form = AppearanceSettingsForm(request.user, request.POST or None, initial={
'language': options.get('language') or request.LANGUAGE_CODE,
'stacktrace_order': int(options.get('stacktrace_order', -1) or -1),
'timezone': options.get('timezone') or settings.SENTRY_DEFAULT_TIME_ZONE,
})
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({
'form': form,
'page': 'appearance',
'AUTH_PROVIDERS': get_auth_providers(),
})
return render_to_response('sentry/account/appearance.html', context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def notification_settings(request):
settings_form = NotificationSettingsForm(request.user, request.POST or None)
# TODO(dcramer): this is an extremely bad pattern and we need a more optimal
# solution for rendering this (that ideally plays well with the org data)
project_list = []
organization_list = Organization.objects.get_for_user(
user=request.user,
)
for organization in organization_list:
team_list = Team.objects.get_for_user(
user=request.user,
organization=organization,
)
for team in team_list:
project_list.extend(
Project.objects.get_for_user(
user=request.user,
team=team,
)
)
project_forms = [
(project, ProjectEmailOptionsForm(
project, request.user,
request.POST or None,
prefix='project-%s' % (project.id,)
))
for project in sorted(project_list, key=lambda x: (x.team.name, x.name))
]
ext_forms = []
for plugin in plugins.all():
for form in safe_execute(plugin.get_notification_forms) or ():
form = safe_execute(form, plugin, request.user, request.POST or None, prefix=plugin.slug)
if not form:
continue
ext_forms.append(form)
if request.POST:
all_forms = list(itertools.chain(
[settings_form], ext_forms, (f for _, f in project_forms)
))
if all(f.is_valid() for f in all_forms):
for form in all_forms:
form.save()
messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({
'settings_form': settings_form,
'project_forms': project_forms,
'ext_forms': ext_forms,
'page': 'notifications',
'AUTH_PROVIDERS': get_auth_providers(),
})
return render_to_response('sentry/account/notifications.html', context, request)
@csrf_protect
@never_cache
@login_required
def list_identities(request):
from social_auth.models import UserSocialAuth
identity_list = list(UserSocialAuth.objects.filter(user=request.user))
AUTH_PROVIDERS = get_auth_providers()
context = csrf(request)
context.update({
'identity_list': identity_list,
'page': 'identities',
'AUTH_PROVIDERS': AUTH_PROVIDERS,
})
return render_to_response('sentry/account/identities.html', context, request)
| boneyao/sentry | src/sentry/web/frontend/accounts.py | Python | bsd-3-clause | 10,117 |
#!/usr/bin/env python
from distutils.core import setup, Extension
import sys
if sys.version_info >= (3,):
BOOST_LIB = 'boost_python3'
else:
BOOST_LIB = 'boost_python'
module_RF24Network = Extension('RF24Network',
libraries = ['rf24network', BOOST_LIB],
sources = ['pyRF24Network.cpp'])
setup(name='RF24Network',
version='1.0',
ext_modules=[module_RF24Network]
)
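# Typical build/install flow for this extension (an assumed example, not part
# of the original file; it requires librf24network and the Boost.Python
# variant selected above to be installed first):
#
#   python setup.py build
#   sudo python setup.py install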
| hvos234/raspberrypi.home | vendor/home/Arduino/libraries/RF24Network/RPi/pyRF24Network/setup.py | Python | bsd-3-clause | 416 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from infra_libs.infra_types.infra_types import freeze
from infra_libs.infra_types.infra_types import thaw
from infra_libs.infra_types.infra_types import FrozenDict
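# Illustrative example (an assumption based on the conventional semantics of
# these helpers: freeze() recursively converts mutable containers to
# immutable ones, thaw() reverses the process):
#
#   config = freeze({'tags': ['a', 'b']})   # -> FrozenDict with a tuple value
#   mutable = thaw(config)                  # -> {'tags': ['a', 'b']}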
| endlessm/chromium-browser | tools/swarming_client/third_party/infra_libs/infra_types/__init__.py | Python | bsd-3-clause | 327 |
# sql/types_api.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base types API.
"""
from .. import exc, util
from . import operators
from .visitors import Visitable, VisitableType
from .base import SchemaEventTarget
# these are back-assigned by sqltypes.
BOOLEANTYPE = None
INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
MATCHTYPE = None
INDEXABLE = None
_resolve_value_to_type = None
class TypeEngine(Visitable):
"""The ultimate base class for all SQL datatypes.
Common subclasses of :class:`.TypeEngine` include
:class:`.String`, :class:`.Integer`, and :class:`.Boolean`.
For an overview of the SQLAlchemy typing system, see
:ref:`types_toplevel`.
.. seealso::
:ref:`types_toplevel`
"""
_sqla_type = True
_isnull = False
class Comparator(operators.ColumnOperators):
"""Base class for custom comparison operations defined at the
type level. See :attr:`.TypeEngine.comparator_factory`.
"""
__slots__ = 'expr', 'type'
default_comparator = None
def __init__(self, expr):
self.expr = expr
self.type = expr.type
@util.dependencies('sqlalchemy.sql.default_comparator')
def operate(self, default_comparator, op, *other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, *(other + o[1:]), **kwargs)
@util.dependencies('sqlalchemy.sql.default_comparator')
def reverse_operate(self, default_comparator, op, other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, other,
reverse=True, *o[1:], **kwargs)
def _adapt_expression(self, op, other_comparator):
"""evaluate the return type of <self> <op> <othertype>,
and apply any adaptations to the given operator.
This method determines the type of a resulting binary expression
given two source types and an operator. For example, two
:class:`.Column` objects, both of the type :class:`.Integer`, will
produce a :class:`.BinaryExpression` that also has the type
:class:`.Integer` when compared via the addition (``+``) operator.
However, using the addition operator with an :class:`.Integer`
and a :class:`.Date` object will produce a :class:`.Date`, assuming
"days delta" behavior by the database (in reality, most databases
other than PostgreSQL don't accept this particular operation).
The method returns a tuple of the form <operator>, <type>.
The resulting operator and type will be those applied to the
resulting :class:`.BinaryExpression` as the final operator and the
right-hand side of the expression.
Note that only a subset of operators make usage of
:meth:`._adapt_expression`,
including math operators and user-defined operators, but not
boolean comparison or special SQL keywords like MATCH or BETWEEN.
"""
return op, self.type
def __reduce__(self):
return _reconstitute_comparator, (self.expr, )
hashable = True
"""Flag, if False, means values from this type aren't hashable.
Used by the ORM when uniquing result lists.
"""
comparator_factory = Comparator
"""A :class:`.TypeEngine.Comparator` class which will apply
to operations performed by owning :class:`.ColumnElement` objects.
The :attr:`.comparator_factory` attribute is a hook consulted by
the core expression system when column and SQL expression operations
are performed. When a :class:`.TypeEngine.Comparator` class is
associated with this attribute, it allows custom re-definition of
all existing operators, as well as definition of new operators.
Existing operators include those provided by Python operator overloading
such as :meth:`.operators.ColumnOperators.__add__` and
:meth:`.operators.ColumnOperators.__eq__`,
those provided as standard
attributes of :class:`.operators.ColumnOperators` such as
:meth:`.operators.ColumnOperators.like`
and :meth:`.operators.ColumnOperators.in_`.
Rudimentary usage of this hook is allowed through simple subclassing
of existing types, or alternatively by using :class:`.TypeDecorator`.
See the documentation section :ref:`types_operators` for examples.
.. versionadded:: 0.8 The expression system was enhanced to support
customization of operators on a per-type level.
"""
should_evaluate_none = False
"""If True, the Python constant ``None`` is considered to be handled
explicitly by this type.
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows types which have special
behavior for Python None, such as a JSON type, to indicate that
they'd like to handle the None value explicitly.
To set this flag on an existing type, use the
:meth:`.TypeEngine.evaluates_none` method.
.. seealso::
:meth:`.TypeEngine.evaluates_none`
.. versionadded:: 1.1
"""
def evaluates_none(self):
"""Return a copy of this type which has the :attr:`.should_evaluate_none`
flag set to True.
E.g.::
Table(
'some_table', metadata,
Column(
String(50).evaluates_none(),
nullable=True,
server_default='no value')
)
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows for types which have
special behavior associated with the Python None value to indicate
that the value doesn't necessarily translate into SQL NULL; a
prime example of this is a JSON type which may wish to persist the
JSON value ``'null'``.
In all cases, the actual NULL SQL value can be always be
persisted in any column by using
the :obj:`~.expression.null` SQL construct in an INSERT statement
or associated with an ORM-mapped attribute.
.. note::
The "evaulates none" flag does **not** apply to a value
of ``None`` passed to :paramref:`.Column.default` or
:paramref:`.Column.server_default`; in these cases, ``None``
still means "no default".
.. versionadded:: 1.1
.. seealso::
:ref:`session_forcing_null` - in the ORM documentation
:paramref:`.postgresql.JSON.none_as_null` - PostgreSQL JSON
interaction with this flag.
:attr:`.TypeEngine.should_evaluate_none` - class-level flag
"""
typ = self.copy()
typ.should_evaluate_none = True
return typ
def copy(self, **kw):
return self.adapt(self.__class__)
def compare_against_backend(self, dialect, conn_type):
"""Compare this type against the given backend type.
This function is currently not implemented for SQLAlchemy
types, and for all built in types will return ``None``. However,
it can be implemented by a user-defined type
where it can be consumed by schema comparison tools such as
Alembic autogenerate.
        A future release of SQLAlchemy will potentially implement this method
for builtin types as well.
The function should return True if this type is equivalent to the
given type; the type is typically reflected from the database
so should be database specific. The dialect in use is also
passed. It can also return False to assert that the type is
not equivalent.
:param dialect: a :class:`.Dialect` that is involved in the comparison.
:param conn_type: the type object reflected from the backend.
.. versionadded:: 1.0.3
"""
return None
def copy_value(self, value):
return value
def literal_processor(self, dialect):
"""Return a conversion function for processing literal values that are
to be rendered directly without using binds.
This function is used when the compiler makes use of the
"literal_binds" flag, typically used in DDL generation as well
as in certain scenarios where backends don't accept bound parameters.
.. versionadded:: 0.9.0
"""
return None
def bind_processor(self, dialect):
"""Return a conversion function for processing bind values.
Returns a callable which will receive a bind parameter value
as the sole positional argument and will return a value to
send to the DB-API.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
"""
return None
def result_processor(self, dialect, coltype):
"""Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
"""
return None
def column_expression(self, colexpr):
"""Given a SELECT column expression, return a wrapping SQL expression.
This is typically a SQL function that wraps a column expression
as rendered in the columns clause of a SELECT statement.
It is used for special data types that require
columns to be wrapped in some special database function in order
to coerce the value before being sent back to the application.
It is the SQL analogue of the :meth:`.TypeEngine.result_processor`
method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
See also:
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.column_expression.__code__ \
is not TypeEngine.column_expression.__code__
def bind_expression(self, bindvalue):
""""Given a bind value (i.e. a :class:`.BindParameter` instance),
return a SQL expression in its place.
This is typically a SQL function that wraps the existing bound
parameter within the statement. It is used for special data types
that require literals being wrapped in some special database function
in order to coerce an application-level value into a database-specific
format. It is the SQL analogue of the
:meth:`.TypeEngine.bind_processor` method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
Note that this method, when implemented, should always return
the exact same structure, without any conditional logic, as it
may be used in an executemany() call against an arbitrary number
of bound parameter sets.
See also:
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_bind_expression(self):
"""memoized boolean, check if bind_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.bind_expression.__code__ \
is not TypeEngine.bind_expression.__code__
def compare_values(self, x, y):
"""Compare two values for equality."""
return x == y
def get_dbapi_type(self, dbapi):
"""Return the corresponding type object from the underlying DB-API, if
any.
This can be useful for calling ``setinputsizes()``, for example.
"""
return None
@property
def python_type(self):
"""Return the Python type object expected to be returned
by instances of this type, if known.
Basically, for those types which enforce a return type,
or are known across the board to do such for all common
DBAPIs (like ``int`` for example), will return that type.
If a return type is not defined, raises
``NotImplementedError``.
Note that any type also accommodates NULL in SQL which
means you can also get back ``None`` from any type
in practice.
"""
raise NotImplementedError()
def with_variant(self, type_, dialect_name):
"""Produce a new type object that will utilize the given
type when applied to the dialect of the given name.
e.g.::
from sqlalchemy.types import String
from sqlalchemy.dialects import mysql
s = String()
s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')
The construction of :meth:`.TypeEngine.with_variant` is always
from the "fallback" type to that which is dialect specific.
The returned type is an instance of :class:`.Variant`, which
itself provides a :meth:`.Variant.with_variant`
that can be called repeatedly.
:param type_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
.. versionadded:: 0.7.2
"""
return Variant(self, {dialect_name: to_instance(type_)})
@util.memoized_property
def _type_affinity(self):
"""Return a rudimental 'affinity' value expressing the general class
of type."""
typ = None
for t in self.__class__.__mro__:
if t in (TypeEngine, UserDefinedType):
return typ
elif issubclass(t, (TypeEngine, UserDefinedType)):
typ = t
else:
return self.__class__
def dialect_impl(self, dialect):
"""Return a dialect-specific implementation for this
:class:`.TypeEngine`.
"""
try:
return dialect._type_memos[self]['impl']
except KeyError:
return self._dialect_info(dialect)['impl']
def _cached_literal_processor(self, dialect):
"""Return a dialect-specific literal processor for this type."""
try:
return dialect._type_memos[self]['literal']
except KeyError:
d = self._dialect_info(dialect)
d['literal'] = lp = d['impl'].literal_processor(dialect)
return lp
def _cached_bind_processor(self, dialect):
"""Return a dialect-specific bind processor for this type."""
try:
return dialect._type_memos[self]['bind']
except KeyError:
d = self._dialect_info(dialect)
d['bind'] = bp = d['impl'].bind_processor(dialect)
return bp
def _cached_result_processor(self, dialect, coltype):
"""Return a dialect-specific result processor for this type."""
try:
return dialect._type_memos[self][coltype]
except KeyError:
d = self._dialect_info(dialect)
# key assumption: DBAPI type codes are
# constants. Else this dictionary would
# grow unbounded.
d[coltype] = rp = d['impl'].result_processor(dialect, coltype)
return rp
def _dialect_info(self, dialect):
"""Return a dialect-specific registry which
caches a dialect-specific implementation, bind processing
function, and one or more result processing functions."""
if self in dialect._type_memos:
return dialect._type_memos[self]
else:
impl = self._gen_dialect_impl(dialect)
if impl is self:
impl = self.adapt(type(self))
# this can't be self, else we create a cycle
assert impl is not self
dialect._type_memos[self] = d = {'impl': impl}
return d
def _gen_dialect_impl(self, dialect):
return dialect.type_descriptor(self)
def adapt(self, cls, **kw):
"""Produce an "adapted" form of this type, given an "impl" class
to work with.
This method is used internally to associate generic
types with "implementation" types that are specific to a particular
dialect.
"""
return util.constructor_copy(self, cls, **kw)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Given an operator and value, gives the type a chance
to return a type which the value should be coerced into.
The default behavior here is conservative; if the right-hand
side is already coerced into a SQL type based on its
Python type, it is usually left alone.
End-user functionality extension here should generally be via
:class:`.TypeDecorator`, which provides more liberal behavior in that
it defaults to coercing the other side of the expression into this
type, thus applying special Python conversions above and beyond those
        needed by the DBAPI to both sides. It also provides the public method
:meth:`.TypeDecorator.coerce_compared_value` which is intended for
end-user customization of this behavior.
"""
_coerced_type = _resolve_value_to_type(value)
if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
is self._type_affinity:
return self
else:
return _coerced_type
def _compare_type_affinity(self, other):
return self._type_affinity is other._type_affinity
def compile(self, dialect=None):
"""Produce a string-compiled form of this :class:`.TypeEngine`.
When called with no arguments, uses a "default" dialect
to produce a string result.
:param dialect: a :class:`.Dialect` instance.
"""
# arg, return value is inconsistent with
# ClauseElement.compile()....this is a mistake.
if not dialect:
dialect = self._default_dialect()
return dialect.type_compiler.process(self)
@util.dependencies("sqlalchemy.engine.default")
def _default_dialect(self, default):
if self.__class__.__module__.startswith("sqlalchemy.dialects"):
tokens = self.__class__.__module__.split(".")[0:3]
mod = ".".join(tokens)
return getattr(__import__(mod).dialects, tokens[-1]).dialect()
else:
return default.DefaultDialect()
def __str__(self):
if util.py2k:
return unicode(self.compile()).\
encode('ascii', 'backslashreplace')
else:
return str(self.compile())
def __repr__(self):
return util.generic_repr(self)
class VisitableCheckKWArg(util.EnsureKWArgType, VisitableType):
pass
class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)):
"""Base for user defined types.
This should be the base of new types. Note that
for most cases, :class:`.TypeDecorator` is probably
more appropriate::
import sqlalchemy.types as types
class MyType(types.UserDefinedType):
def __init__(self, precision = 8):
self.precision = precision
def get_col_spec(self, **kw):
return "MYTYPE(%s)" % self.precision
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
Once the type is made, it's immediately usable::
table = Table('foo', meta,
Column('id', Integer, primary_key=True),
Column('data', MyType(16))
)
The ``get_col_spec()`` method will in most cases receive a keyword
argument ``type_expression`` which refers to the owning expression
of the type as being compiled, such as a :class:`.Column` or
:func:`.cast` construct. This keyword is only sent if the method
accepts keyword arguments (e.g. ``**kw``) in its argument signature;
introspection is used to check for this in order to support legacy
forms of this function.
.. versionadded:: 1.0.0 the owning expression is passed to
the ``get_col_spec()`` method via the keyword argument
``type_expression``, if it receives ``**kw`` in its signature.
"""
__visit_name__ = "user_defined"
ensure_kwarg = 'get_col_spec'
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def _adapt_expression(self, op, other_comparator):
if hasattr(self.type, 'adapt_operator'):
util.warn_deprecated(
"UserDefinedType.adapt_operator is deprecated. Create "
"a UserDefinedType.Comparator subclass instead which "
"generates the desired expression constructs, given a "
"particular operator."
)
return self.type.adapt_operator(op), self.type
else:
return op, self.type
comparator_factory = Comparator
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Default behavior for :class:`.UserDefinedType` is the
same as that of :class:`.TypeDecorator`; by default it returns
``self``, assuming the compared value should be coerced into
the same type as this one. See
:meth:`.TypeDecorator.coerce_compared_value` for more detail.
.. versionchanged:: 0.8 :meth:`.UserDefinedType.coerce_compared_value`
now returns ``self`` by default, rather than falling onto the
more fundamental behavior of
:meth:`.TypeEngine.coerce_compared_value`.
"""
return self
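# A minimal sketch (an illustration, not part of this module) of the
# Comparator approach recommended by the deprecation warning above, in place
# of the legacy ``adapt_operator`` hook; ``MyGeometry`` and the ``&&``
# operator are hypothetical:
#
#   class MyGeometry(UserDefinedType):
#       class Comparator(UserDefinedType.Comparator):
#           def intersects(self, other):
#               return self.op('&&')(other)
#
#       comparator_factory = Comparator
#
#       def get_col_spec(self, **kw):
#           return "GEOMETRY"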
class TypeDecorator(SchemaEventTarget, TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
This method is preferred to direct subclassing of SQLAlchemy's
built-in types as it ensures that all required functionality of
the underlying type is kept in place.
Typical usage::
import sqlalchemy.types as types
class MyType(types.TypeDecorator):
'''Prefixes Unicode values with "PREFIX:" on the way in and
strips it off on the way out.
'''
impl = types.Unicode
def process_bind_param(self, value, dialect):
return "PREFIX:" + value
def process_result_value(self, value, dialect):
return value[7:]
def copy(self, **kw):
return MyType(self.impl.length)
The class-level "impl" attribute is required, and can reference any
TypeEngine class. Alternatively, the load_dialect_impl() method
can be used to provide different type classes based on the dialect
given; in this case, the "impl" variable can reference
``TypeEngine`` as a placeholder.
Types that receive a Python type that isn't similar to the ultimate type
used may want to define the :meth:`TypeDecorator.coerce_compared_value`
method. This is used to give the expression system a hint when coercing
Python objects into bind parameters within expressions. Consider this
expression::
mytable.c.somecol + datetime.date(2009, 5, 15)
Above, if "somecol" is an ``Integer`` variant, it makes sense that
we're doing date arithmetic, where above is usually interpreted
by databases as adding a number of days to the given date.
The expression system does the right thing by not attempting to
coerce the "date()" value into an integer-oriented bind parameter.
However, in the case of ``TypeDecorator``, we are usually changing an
incoming Python type to something new - ``TypeDecorator`` by default will
"coerce" the non-typed side to be the same type as itself. Such as below,
we define an "epoch" type that stores a date value as an integer::
class MyEpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.date(1970, 1, 1)
def process_bind_param(self, value, dialect):
return (value - self.epoch).days
def process_result_value(self, value, dialect):
return self.epoch + timedelta(days=value)
Our expression of ``somecol + date`` with the above type will coerce the
"date" on the right side to also be treated as ``MyEpochType``.
This behavior can be overridden via the
:meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
that should be used for the value of the expression. Below we set it such
that an integer value will be treated as an ``Integer``, and any other
value is assumed to be a date and will be treated as a ``MyEpochType``::
def coerce_compared_value(self, op, value):
if isinstance(value, int):
return Integer()
else:
return self
.. warning::
Note that the **behavior of coerce_compared_value is not inherited
by default from that of the base type**.
If the :class:`.TypeDecorator` is augmenting a
type that requires special logic for certain types of operators,
this method **must** be overridden. A key example is when decorating
the :class:`.postgresql.JSON` and :class:`.postgresql.JSONB` types;
the default rules of :meth:`.TypeEngine.coerce_compared_value` should
be used in order to deal with operators like index operations::
class MyJsonType(TypeDecorator):
impl = postgresql.JSON
def coerce_compared_value(self, op, value):
return self.impl.coerce_compared_value(op, value)
Without the above step, index operations such as ``mycol['foo']``
will cause the index value ``'foo'`` to be JSON encoded.
"""
__visit_name__ = "type_decorator"
def __init__(self, *args, **kwargs):
"""Construct a :class:`.TypeDecorator`.
Arguments sent here are passed to the constructor
of the class assigned to the ``impl`` class level attribute,
assuming the ``impl`` is a callable, and the resulting
object is assigned to the ``self.impl`` instance attribute
(thus overriding the class attribute of the same name).
If the class level ``impl`` is not a callable (the unusual case),
it will be assigned to the same instance attribute 'as-is',
ignoring those arguments passed to the constructor.
Subclasses can override this to customize the generation
of ``self.impl`` entirely.
"""
if not hasattr(self.__class__, 'impl'):
raise AssertionError("TypeDecorator implementations "
"require a class-level variable "
"'impl' which refers to the class of "
"type being decorated")
self.impl = to_instance(self.__class__.impl, *args, **kwargs)
coerce_to_is_types = (util.NoneType, )
"""Specify those Python types which should be coerced at the expression
level to "IS <constant>" when compared using ``==`` (and same for
``IS NOT`` in conjunction with ``!=``.
For most SQLAlchemy types, this includes ``NoneType``, as well as
``bool``.
:class:`.TypeDecorator` modifies this list to only include ``NoneType``,
as typedecorator implementations that deal with boolean types are common.
Custom :class:`.TypeDecorator` classes can override this attribute to
return an empty tuple, in which case no values will be coerced to
constants.
.. versionadded:: 0.8.2
Added :attr:`.TypeDecorator.coerce_to_is_types` to allow for easier
control of ``__eq__()`` ``__ne__()`` operations.
"""
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def operate(self, op, *other, **kwargs):
kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).operate(
op, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).reverse_operate(
op, other, **kwargs)
@property
def comparator_factory(self):
if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__:
return self.impl.comparator_factory
else:
return type("TDComparator",
(TypeDecorator.Comparator,
self.impl.comparator_factory),
{})
def _gen_dialect_impl(self, dialect):
"""
#todo
"""
adapted = dialect.type_descriptor(self)
if adapted is not self:
return adapted
# otherwise adapt the impl type, link
# to a copy of this TypeDecorator and return
# that.
typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError('Type object %s does not properly '
'implement the copy() method, it must '
'return an object of type %s' %
(self, self.__class__))
tt.impl = typedesc
return tt
@property
def _type_affinity(self):
"""
#todo
"""
return self.impl._type_affinity
def _set_parent(self, column):
"""Support SchemaEentTarget"""
super(TypeDecorator, self)._set_parent(column)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEentTarget"""
super(TypeDecorator, self)._set_parent_with_dispatch(parent)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent_with_dispatch(parent)
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance
for this :class:`.TypeDecorator`.
In most cases this returns a dialect-adapted form of
the :class:`.TypeEngine` type represented by ``self.impl``.
Makes usage of :meth:`dialect_impl` but also traverses
into wrapped :class:`.TypeDecorator` instances.
Behavior can be customized here by overriding
:meth:`load_dialect_impl`.
"""
adapted = dialect.type_descriptor(self)
if not isinstance(adapted, type(self)):
return adapted
elif isinstance(self.impl, TypeDecorator):
return self.impl.type_engine(dialect)
else:
return self.load_dialect_impl(dialect)
def load_dialect_impl(self, dialect):
"""Return a :class:`.TypeEngine` object corresponding to a dialect.
This is an end-user override hook that can be used to provide
differing types depending on the given dialect. It is used
by the :class:`.TypeDecorator` implementation of :meth:`type_engine`
to help determine what type should ultimately be returned
for a given :class:`.TypeDecorator`.
By default returns ``self.impl``.
"""
return self.impl
def __getattr__(self, key):
"""Proxy all other undefined accessors to the underlying
implementation."""
return getattr(self.impl, key)
def process_literal_param(self, value, dialect):
"""Receive a literal parameter value to be rendered inline within
a statement.
This method is used when the compiler renders a
literal value without using binds, typically within DDL
such as in the "server default" of a column or an expression
within a CHECK constraint.
The returned string will be rendered into the output string.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def process_bind_param(self, value, dialect):
"""Receive a bound parameter value to be converted.
Subclasses override this method to return the
value that should be passed along to the underlying
:class:`.TypeEngine` object, and from there to the
DBAPI ``execute()`` method.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
This operation should be designed with the reverse operation
in mind, which would be the process_result_value method of
this class.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
"""
raise NotImplementedError()
def process_result_value(self, value, dialect):
"""Receive a result-row column value to be converted.
Subclasses should implement this method to operate on data
fetched from the database.
Subclasses override this method to return the
value that should be passed back to the application,
given a value that is already processed by
the underlying :class:`.TypeEngine` object, originally
from the DBAPI cursor method ``fetchone()`` or similar.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
This operation should be designed to be reversible by
the "process_bind_param" method of this class.
"""
raise NotImplementedError()
@util.memoized_property
def _has_bind_processor(self):
"""memoized boolean, check if process_bind_param is implemented.
Allows the base process_bind_param to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_bind_param.__code__ \
is not TypeDecorator.process_bind_param.__code__
@util.memoized_property
def _has_literal_processor(self):
"""memoized boolean, check if process_literal_param is implemented.
"""
return self.__class__.process_literal_param.__code__ \
is not TypeDecorator.process_literal_param.__code__
def literal_processor(self, dialect):
"""Provide a literal processing function for the given
:class:`.Dialect`.
Subclasses here will typically override
:meth:`.TypeDecorator.process_literal_param` instead of this method
directly.
By default, this method makes use of
:meth:`.TypeDecorator.process_bind_param` if that method is
implemented, where :meth:`.TypeDecorator.process_literal_param` is
not. The rationale here is that :class:`.TypeDecorator` typically
deals with Python conversions of data that are above the layer of
database presentation. With the value converted by
:meth:`.TypeDecorator.process_bind_param`, the underlying type will
then handle whether it needs to be presented to the DBAPI as a bound
parameter or to the database as an inline SQL value.
.. versionadded:: 0.9.0
"""
if self._has_literal_processor:
process_param = self.process_literal_param
elif self._has_bind_processor:
# the bind processor should normally be OK
# for TypeDecorator since it isn't doing DB-level
# handling, the handling here won't be different for bound vs.
# literals.
process_param = self.process_bind_param
else:
process_param = None
if process_param:
impl_processor = self.impl.literal_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.literal_processor(dialect)
def bind_processor(self, dialect):
"""Provide a bound value processing function for the
given :class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for bound value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_bind_param` here.
User-defined code can override this method directly,
        though it's likely best to use :meth:`process_bind_param` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
This method is the reverse counterpart to the
:meth:`result_processor` method of this class.
"""
if self._has_bind_processor:
process_param = self.process_bind_param
impl_processor = self.impl.bind_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.bind_processor(dialect)
@util.memoized_property
def _has_result_processor(self):
"""memoized boolean, check if process_result_value is implemented.
Allows the base process_result_value to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_result_value.__code__ \
is not TypeDecorator.process_result_value.__code__
def result_processor(self, dialect, coltype):
"""Provide a result value processing function for the given
:class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for result value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_result_value` here.
User-defined code can override this method directly,
        though it's likely best to use :meth:`process_result_value` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
        :param coltype: DBAPI coltype argument received in cursor.description.
This method is the reverse counterpart to the
:meth:`bind_processor` method of this class.
"""
if self._has_result_processor:
process_value = self.process_result_value
impl_processor = self.impl.result_processor(dialect,
coltype)
if impl_processor:
def process(value):
return process_value(impl_processor(value), dialect)
else:
def process(value):
return process_value(value, dialect)
return process
else:
return self.impl.result_processor(dialect, coltype)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
By default, returns self. This method is called by
the expression system when an object using this type is
on the left or right side of an expression against a plain Python
object which does not yet have a SQLAlchemy type assigned::
expr = table.c.somecolumn + 35
Where above, if ``somecolumn`` uses this type, this method will
be called with the value ``operator.add``
and ``35``. The return value is whatever SQLAlchemy type should
be used for ``35`` for this particular operation.
"""
return self
def copy(self, **kw):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
the :class:`.TypeEngine` contract. It usually does not
need to be overridden unless the user-defined :class:`.TypeDecorator`
has local state that should be deep-copied.
"""
instance = self.__class__.__new__(self.__class__)
instance.__dict__.update(self.__dict__)
return instance
def get_dbapi_type(self, dbapi):
"""Return the DBAPI type object represented by this
:class:`.TypeDecorator`.
By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the
underlying "impl".
"""
return self.impl.get_dbapi_type(dbapi)
def compare_values(self, x, y):
"""Given two values, compare them for equality.
By default this calls upon :meth:`.TypeEngine.compare_values`
of the underlying "impl", which in turn usually
uses the Python equals operator ``==``.
This function is used by the ORM to compare
an original-loaded value with an intercepted
"changed" value, to determine if a net change
has occurred.
"""
return self.impl.compare_values(x, y)
def __repr__(self):
return util.generic_repr(self, to_inspect=self.impl)
class Variant(TypeDecorator):
"""A wrapping type that selects among a variety of
implementations based on dialect in use.
The :class:`.Variant` type is typically constructed
using the :meth:`.TypeEngine.with_variant` method.
.. versionadded:: 0.7.2
.. seealso:: :meth:`.TypeEngine.with_variant` for an example of use.
"""
def __init__(self, base, mapping):
"""Construct a new :class:`.Variant`.
:param base: the base 'fallback' type
:param mapping: dictionary of string dialect names to
:class:`.TypeEngine` instances.
"""
self.impl = base
self.mapping = mapping
def load_dialect_impl(self, dialect):
if dialect.name in self.mapping:
return self.mapping[dialect.name]
else:
return self.impl
def with_variant(self, type_, dialect_name):
"""Return a new :class:`.Variant` which adds the given
type + dialect name to the mapping, in addition to the
mapping present in this :class:`.Variant`.
:param type_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
"""
if dialect_name in self.mapping:
raise exc.ArgumentError(
"Dialect '%s' is already present in "
"the mapping for this Variant" % dialect_name)
mapping = self.mapping.copy()
mapping[dialect_name] = type_
return Variant(self.impl, mapping)
@property
def comparator_factory(self):
"""express comparison behavior in terms of the base type"""
return self.impl.comparator_factory
def _reconstitute_comparator(expression):
return expression.comparator
def to_instance(typeobj, *arg, **kw):
if typeobj is None:
return NULLTYPE
if util.callable(typeobj):
return typeobj(*arg, **kw)
else:
return typeobj
def adapt_type(typeobj, colspecs):
if isinstance(typeobj, type):
typeobj = typeobj()
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]
break
except KeyError:
pass
else:
# couldn't adapt - so just return the type itself
# (it may be a user-defined type)
return typeobj
# if we adapted the given generic type to a database-specific type,
# but it turns out the originally given "generic" type
# is actually a subclass of our resulting type, then we were already
# given a more specific type than that required; so use that.
    if issubclass(typeobj.__class__, impltype):
return typeobj
return typeobj.adapt(impltype)
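# Illustrative sketch (an assumption, not part of this module) of how
# adapt_type() resolves a generic type against a dialect colspecs mapping;
# ``SomeDialectVARCHAR`` and ``MyUserType`` are hypothetical stand-ins:
#
#   colspecs = {String: SomeDialectVARCHAR}
#   adapt_type(String(30), colspecs)   # -> SomeDialectVARCHAR(length=30)
#   adapt_type(MyUserType(), {})       # -> MyUserType() returned unchanged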
| pcu4dros/pandora-core | workspace/lib/python3.5/site-packages/sqlalchemy/sql/type_api.py | Python | mit | 46,121 |
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_vmutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutilsv2
class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
"""Unit tests for the Hyper-V VMUtilsV2 class."""
_DEFINE_SYSTEM = 'DefineSystem'
_DESTROY_SYSTEM = 'DestroySystem'
_DESTROY_SNAPSHOT = 'DestroySnapshot'
_ADD_RESOURCE = 'AddResourceSettings'
_REMOVE_RESOURCE = 'RemoveResourceSettings'
_SETTING_TYPE = 'VirtualSystemType'
_VM_GEN = constants.VM_GEN_2
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
def setUp(self):
super(VMUtilsV2TestCase, self).setUp()
self._vmutils = vmutilsv2.VMUtilsV2()
self._vmutils._conn = mock.MagicMock()
def test_create_vm(self):
super(VMUtilsV2TestCase, self).test_create_vm()
mock_vssd = self._vmutils._conn.Msvm_VirtualSystemSettingData.new()
self.assertEqual(self._vmutils._VIRTUAL_SYSTEM_SUBTYPE_GEN2,
mock_vssd.VirtualSystemSubType)
self.assertFalse(mock_vssd.SecureBootEnabled)
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyResourceSettings.assert_called_with(
ResourceSettings=[self._FAKE_RES_DATA])
@mock.patch.object(vmutilsv2, 'wmi', create=True)
@mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
mock_svc = self._get_snapshot_service()
mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateSnapshot.assert_called_with(
AffectedSystem=self._FAKE_VM_PATH,
SnapshotType=self._vmutils._SNAPSHOT_FULL)
mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
self._FAKE_JOB_PATH)
@mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
mock_add_virt_res):
self._lookup_vm()
fake_eth_port = mock_get_new_sd.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
metric_def = mock.MagicMock()
mock_disk = mock.MagicMock()
mock_disk.path_.return_value = self._FAKE_RES_PATH
mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
fake_metric_def_paths = ['fake_0', 'fake_0', None]
fake_metric_resource_paths = [self._FAKE_VM_PATH,
self._FAKE_VM_PATH,
self._FAKE_RES_PATH]
metric_def.path_.side_effect = fake_metric_def_paths
self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
metric_def]
self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
calls = [mock.call(Name=def_name)
for def_name in [self._vmutils._METRIC_AGGR_CPU_AVG,
self._vmutils._METRIC_AGGR_MEMORY_AVG]]
self._vmutils._conn.CIM_BaseMetricDefinition.assert_has_calls(calls)
calls = []
for i in range(len(fake_metric_def_paths)):
calls.append(mock.call(
Subject=fake_metric_resource_paths[i],
Definition=fake_metric_def_paths[i],
MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
def _get_snapshot_service(self):
return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
def _assert_add_resources(self, mock_svc):
getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def _assert_remove_resources(self, mock_svc):
getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
[self._FAKE_RES_PATH])
def test_list_instance_notes(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name',
'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
vs.configure_mock(**attrs)
vs2 = mock.MagicMock(ElementName='fake_name2', Notes=None)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs,
vs2]
response = self._vmutils.list_instance_notes()
self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName', 'Notes'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
def _get_fake_instance_notes(self):
return [self._FAKE_VM_UUID]
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
vm_path, dynamic_memory_ratio=1.0):
mock_vs_man_svc = mock.MagicMock()
mock_vs_data = mock.MagicMock()
mock_job = mock.MagicMock()
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
fake_vm_name = 'fake_vm_name'
_conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
mock_check_ret_val.return_value = mock_job
_conn.new.return_value = mock_vs_data
mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
vm_path,
fake_ret_val)
mock_job.associators.return_value = ['fake vm path']
response = self._vmutils._create_vm_obj(
vs_man_svc=mock_vs_man_svc,
vm_name=fake_vm_name,
vm_gen='fake vm gen',
notes='fake notes',
dynamic_memory_ratio=dynamic_memory_ratio,
instance_path=mock.sentinel.instance_path)
if not vm_path:
mock_job.associators.assert_called_once_with(
self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
_conn.new.assert_called_once_with()
self.assertEqual(mock_vs_data.ElementName, fake_vm_name)
mock_vs_man_svc.DefineSystem.assert_called_once_with(
ResourceSettings=[], ReferenceConfiguration=None,
SystemSettings=mock_vs_data.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
if dynamic_memory_ratio > 1:
self.assertFalse(mock_vs_data.VirtualNumaEnabled)
mock_get_wmi_obj.assert_called_with('fake vm path')
self.assertEqual(mock_vs_data.Notes, 'fake notes')
self.assertEqual(mock.sentinel.instance_path,
mock_vs_data.ConfigurationDataRoot)
self.assertEqual(mock.sentinel.instance_path, mock_vs_data.LogDataRoot)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_data.SnapshotDataRoot)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_data.SuspendDataRoot)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_data.SwapFileDataRoot)
self.assertEqual(response, mock_get_wmi_obj())
def test_create_vm_obj(self):
self._test_create_vm_obj(vm_path='fake vm path')
def test_create_vm_obj_no_vm_path(self):
self._test_create_vm_obj(vm_path=None)
def test_create_vm_obj_dynamic_memory(self):
self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
def test_list_instances(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instances()
self.assertEqual([(attrs['ElementName'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
def test_get_attached_disks(self):
mock_scsi_ctrl_path = mock.MagicMock()
expected_query = ("SELECT * FROM %(class_name)s "
"WHERE (ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s' OR "
"ResourceSubType='%(res_sub_type_dvd)s') AND "
"Parent = '%(parent)s'" %
{"class_name":
self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._vmutils._PHYS_DISK_RES_SUB_TYPE,
"res_sub_type_virt":
self._vmutils._DISK_DRIVE_RES_SUB_TYPE,
"res_sub_type_dvd":
self._vmutils._DVD_DRIVE_RES_SUB_TYPE,
"parent": mock_scsi_ctrl_path.replace("'", "''")})
expected_disks = self._vmutils._conn.query.return_value
ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path)
self._vmutils._conn.query.assert_called_once_with(expected_query)
self.assertEqual(expected_disks, ret_disks)
def test_get_vm_dvd_disk_paths(self):
mock_vm = self._lookup_vm()
mock_sasd1 = mock.MagicMock(
ResourceSubType=self._vmutils._DVD_DISK_RES_SUB_TYPE,
HostResource=[mock.sentinel.FAKE_DVD_PATH1])
mock_settings = mock.MagicMock()
mock_settings.associators.return_value = [mock_sasd1]
mock_vm.associators.return_value = [mock_settings]
ret_val = self._vmutils.get_vm_dvd_disk_paths(self._FAKE_VM_NAME)
self.assertEqual(mock.sentinel.FAKE_DVD_PATH1, ret_val[0])
| takeshineshiro/nova | nova/tests/unit/virt/hyperv/test_vmutilsv2.py | Python | apache-2.0 | 11,758 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Segment Hook
which allows you to connect to your Segment account,
retrieve data from it, or write data to it.
NOTE: this hook also relies on the Segment analytics package:
https://github.com/segmentio/analytics-python
"""
import analytics
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
class SegmentHook(BaseHook, LoggingMixin):
def __init__(
self,
segment_conn_id='segment_default',
segment_debug_mode=False,
*args,
**kwargs
):
"""
        Creates a new connection to Segment
        and allows you to pull data out of Segment or write to it.
        You can then use this connection with other
        Airflow operators to move the data around or interact with Segment.
:param segment_conn_id: the name of the connection that has the parameters
we need to connect to Segment.
The connection should be type `json` and include a
write_key security token in the `Extras` field.
:type segment_conn_id: str
:param segment_debug_mode: Determines whether Segment should run in debug mode.
Defaults to False
:type segment_debug_mode: boolean
.. note::
You must include a JSON structure in the `Extras` field.
We need a user's security token to connect to Segment.
So we define it in the `Extras` field as:
`{"write_key":"YOUR_SECURITY_TOKEN"}`
"""
self.segment_conn_id = segment_conn_id
self.segment_debug_mode = segment_debug_mode
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(self.segment_conn_id)
self.extras = self.connection.extra_dejson
self.write_key = self.extras.get('write_key')
if self.write_key is None:
raise AirflowException('No Segment write key provided')
def get_conn(self):
self.log.info('Setting write key for Segment analytics connection')
analytics.debug = self.segment_debug_mode
if self.segment_debug_mode:
self.log.info('Setting Segment analytics connection to debug mode')
analytics.on_error = self.on_error
analytics.write_key = self.write_key
return analytics
def on_error(self, error, items):
"""
Handles error callbacks when using Segment with segment_debug_mode set to True
"""
self.log.error('Encountered Segment error: {segment_error} with '
'items: {with_items}'.format(segment_error=error,
with_items=items))
raise AirflowException('Segment error: {}'.format(error))
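# Usage sketch (illustrative only; the connection name and event values are
# hypothetical, not taken from this module). Assumes an Airflow connection
# named 'segment_default' whose Extras field holds
# {"write_key": "YOUR_SECURITY_TOKEN"}:
#
#   hook = SegmentHook(segment_conn_id='segment_default',
#                      segment_debug_mode=True)
#   client = hook.get_conn()
#   client.track(user_id='some-user-id', event='Order Completed')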
| subodhchhabra/airflow | airflow/contrib/hooks/segment_hook.py | Python | apache-2.0 | 3,748 |
import subprocess
import sys
import django.conf
import django.utils.encoding
import matplotlib.pyplot as plt
subprocess.Popen
sys.argv
plt.func() | smmribeiro/intellij-community | python/testData/completion/notImportedQualifiedName/UseImportPriorityWhenAddingImport/main.after.py | Python | apache-2.0 | 149 |
##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class LayeredDictTest( unittest.TestCase ) :
def testDict( self ) :
dict1 = {
"a" : 10,
"b" : {
"c" : 20,
"d" : 30,
},
"e" : 40,
}
dict2 = {
"a" : 20,
"b" : {
"c" : 100,
"f" : {
"g" : 1000,
},
"h" : 1
},
}
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assertEqual( d["a"], 10 )
self.assertEqual( d["b"]["c"], 20 )
self.assertEqual( d["b"]["d"], 30 )
self.assertEqual( d["b"]["f"]["g"], 1000 )
self.assertEqual( d["e"], 40 )
self.assertEqual( d["b"]["h"], 1 )
self.assertRaises( KeyError, d.__getitem__, "z" )
def testCompoundObject( self ) :
dict1 = IECore.CompoundObject(
{
"a" : IECore.IntData( 10 ),
"b" : {
"c" : IECore.IntData( 20 ),
"d" : IECore.IntData( 30 ),
},
"e" : IECore.IntData( 40 ),
}
)
dict2 = IECore.CompoundObject(
{
"a" : IECore.IntData( 20 ),
"b" : {
"c" : IECore.IntData( 100 ),
"f" : {
"g" : IECore.IntData( 1000 ),
},
"h" : IECore.IntData( 1 )
},
}
)
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assertEqual( d["a"], IECore.IntData( 10 ) )
self.assertEqual( d["b"]["c"], IECore.IntData( 20 ) )
self.assertEqual( d["b"]["d"], IECore.IntData( 30 ) )
self.assertEqual( d["b"]["f"]["g"], IECore.IntData( 1000 ) )
self.assertEqual( d["e"], IECore.IntData( 40 ) )
self.assertEqual( d["b"]["h"], IECore.IntData( 1 ) )
self.assertRaises( KeyError, d.__getitem__, "z" )
def testKeys( self ) :
dict1 = {
"a" : 10,
"b" : {
"c" : 20,
"d" : 30,
},
"e" : 40,
}
dict2 = IECore.CompoundObject(
{
"a" : IECore.IntData( 20 ),
"b" : {
"c" : IECore.IntData( 100 ),
"f" : {
"g" : IECore.IntData( 1000 ),
},
"h" : IECore.IntData( 1 )
},
"i" : IECore.IntData( 1 )
}
)
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assertEqual( set( d.keys() ), set( [ "a", "b", "e", "i" ] ) )
self.assertEqual( set( d["b"].keys() ), set( [ "c", "d", "f", "h" ] ) )
def testContains( self ) :
dict1 = {
"a" : 10,
"b" : {
},
"e" : 40,
}
dict2 = IECore.CompoundObject(
{
"b" : IECore.CompoundObject(),
"i" : IECore.IntData( 1 )
}
)
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assert_( "a" in d )
self.assert_( "b" in d )
self.assert_( "e" in d )
self.assert_( "i" in d )
self.assert_( not "x" in d )
def testGet( self ) :
dict1 = {
"a" : 10,
"e" : 40,
}
dict2 = IECore.CompoundObject(
{
"a" : IECore.StringData( "hello" ),
"b" : IECore.FloatData( 10 ),
"i" : IECore.IntData( 1 )
}
)
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assertEqual( d.get( "a", None ), 10 )
self.assertEqual( d.get( "b", None ), IECore.FloatData( 10 ) )
self.assertEqual( d.get( "i", None ), IECore.IntData( 1 ) )
self.assertEqual( d.get( "e", None ), 40 )
self.assertEqual( d.get( "x", 11 ), 11 )
def testLayerEditing( self ) :
dict1 = {
"a" : 10,
"e" : 40,
}
dict2 = IECore.CompoundObject(
{
"a" : IECore.StringData( "hello" ),
"b" : IECore.FloatData( 10 ),
"i" : IECore.IntData( 1 )
}
)
layers = [ dict1, dict2 ]
d = IECore.LayeredDict( layers )
self.failUnless( d.layers is layers )
self.assertEqual( d["a"], 10 )
layers.insert( 0, { "a" : 100 } )
self.assertEqual( d["a"], 100 )
if __name__ == "__main__":
unittest.main()
| lento/cortex | test/IECore/LayeredDictTest.py | Python | bsd-3-clause | 5,259 |
from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from crits.events.event import Event
from crits.events.handlers import add_new_event
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
from crits.vocabulary.events import EventTypes
class EventResource(CRITsAPIResource):
"""
Class to handle everything related to the Event API.
    Currently supports GET, POST, and PATCH.
"""
class Meta:
object_class = Event
allowed_methods = ('get', 'post', 'patch')
resource_name = "events"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(EventResource, self).get_object_list(request, Event)
def obj_create(self, bundle, **kwargs):
"""
Handles creating Events through the API.
:param bundle: Bundle containing the information to create the Event.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
title = bundle.data.get('title', None)
description = bundle.data.get('description', None)
event_type = bundle.data.get('event_type', None)
source = bundle.data.get('source', None)
method = bundle.data.get('method', None)
reference = bundle.data.get('reference', None)
date = bundle.data.get('date', None)
bucket_list = bundle.data.get('bucket_list', None)
ticket = bundle.data.get('ticket', None)
content = {'return_code': 0,
'type': 'Event'}
if not title or not event_type or not source or not description:
content['message'] = 'Must provide a title, event_type, source, and description.'
self.crits_response(content)
if event_type not in EventTypes.values():
content['message'] = 'Not a valid Event Type.'
self.crits_response(content)
result = add_new_event(title,
description,
event_type,
source,
method,
reference,
date,
analyst,
bucket_list,
ticket)
if result.get('message'):
content['message'] = result.get('message')
content['id'] = result.get('id', '')
if result.get('id'):
url = reverse('api_dispatch_detail',
kwargs={'resource_name': 'events',
'api_name': 'v1',
'pk': result.get('id')})
content['url'] = url
if result['success']:
content['return_code'] = 0
self.crits_response(content)
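# Illustrative POST payload for obj_create (all field values hypothetical;
# event_type must be one of EventTypes.values()):
#
#   {"title": "Suspicious campaign", "event_type": "Phishing",
#    "source": "example-source", "description": "Initial triage notes"}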
| cfossace/crits | crits/events/api.py | Python | mit | 3,519 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv) | saeschdivara/myclothing | website/manage.py | Python | mit | 241 |
# Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unittest for the twisted-style reactor.
"""
from __future__ import absolute_import, division, print_function, with_statement
import logging
import os
import shutil
import signal
import sys
import tempfile
import threading
import warnings
try:
import fcntl
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from twisted.internet.interfaces import IReadDescriptor, IWriteDescriptor
from twisted.internet.protocol import Protocol
from twisted.python import log
from tornado.platform.twisted import TornadoReactor, TwistedIOLoop
from zope.interface import implementer
have_twisted = True
except ImportError:
have_twisted = False
# The core of Twisted 12.3.0 is available on python 3, but twisted.web is not
# so test for it separately.
try:
from twisted.web.client import Agent, readBody
from twisted.web.resource import Resource
from twisted.web.server import Site
# As of Twisted 15.0.0, twisted.web is present but fails our
# tests due to internal str/bytes errors.
have_twisted_web = sys.version_info < (3,)
except ImportError:
have_twisted_web = False
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.escape import utf8
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.platform.select import SelectIOLoop
from tornado.testing import bind_unused_port
from tornado.test.util import unittest
from tornado.util import import_object
from tornado.web import RequestHandler, Application
skipIfNoTwisted = unittest.skipUnless(have_twisted,
"twisted module not present")
skipIfPy26 = unittest.skipIf(sys.version_info < (2, 7),
"twisted incompatible with singledispatch in py26")
def save_signal_handlers():
saved = {}
for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGCHLD]:
saved[sig] = signal.getsignal(sig)
if "twisted" in repr(saved):
if not issubclass(IOLoop.configured_class(), TwistedIOLoop):
# when the global ioloop is twisted, we expect the signal
# handlers to be installed. Otherwise, it means we're not
# cleaning up after twisted properly.
raise Exception("twisted signal handlers already installed")
return saved
def restore_signal_handlers(saved):
for sig, handler in saved.items():
signal.signal(sig, handler)
class ReactorTestCase(unittest.TestCase):
def setUp(self):
self._saved_signals = save_signal_handlers()
self._io_loop = IOLoop()
self._reactor = TornadoReactor(self._io_loop)
def tearDown(self):
self._io_loop.close(all_fds=True)
restore_signal_handlers(self._saved_signals)
@skipIfNoTwisted
class ReactorWhenRunningTest(ReactorTestCase):
def test_whenRunning(self):
self._whenRunningCalled = False
self._anotherWhenRunningCalled = False
self._reactor.callWhenRunning(self.whenRunningCallback)
self._reactor.run()
self.assertTrue(self._whenRunningCalled)
self.assertTrue(self._anotherWhenRunningCalled)
def whenRunningCallback(self):
self._whenRunningCalled = True
self._reactor.callWhenRunning(self.anotherWhenRunningCallback)
self._reactor.stop()
def anotherWhenRunningCallback(self):
self._anotherWhenRunningCalled = True
@skipIfNoTwisted
class ReactorCallLaterTest(ReactorTestCase):
def test_callLater(self):
self._laterCalled = False
self._now = self._reactor.seconds()
self._timeout = 0.001
dc = self._reactor.callLater(self._timeout, self.callLaterCallback)
self.assertEqual(self._reactor.getDelayedCalls(), [dc])
self._reactor.run()
self.assertTrue(self._laterCalled)
self.assertTrue(self._called - self._now > self._timeout)
self.assertEqual(self._reactor.getDelayedCalls(), [])
def callLaterCallback(self):
self._laterCalled = True
self._called = self._reactor.seconds()
self._reactor.stop()
@skipIfNoTwisted
class ReactorTwoCallLaterTest(ReactorTestCase):
def test_callLater(self):
self._later1Called = False
self._later2Called = False
self._now = self._reactor.seconds()
self._timeout1 = 0.0005
dc1 = self._reactor.callLater(self._timeout1, self.callLaterCallback1)
self._timeout2 = 0.001
dc2 = self._reactor.callLater(self._timeout2, self.callLaterCallback2)
self.assertTrue(self._reactor.getDelayedCalls() == [dc1, dc2] or
self._reactor.getDelayedCalls() == [dc2, dc1])
self._reactor.run()
self.assertTrue(self._later1Called)
self.assertTrue(self._later2Called)
self.assertTrue(self._called1 - self._now > self._timeout1)
self.assertTrue(self._called2 - self._now > self._timeout2)
self.assertEqual(self._reactor.getDelayedCalls(), [])
def callLaterCallback1(self):
self._later1Called = True
self._called1 = self._reactor.seconds()
def callLaterCallback2(self):
self._later2Called = True
self._called2 = self._reactor.seconds()
self._reactor.stop()
@skipIfNoTwisted
class ReactorCallFromThreadTest(ReactorTestCase):
def setUp(self):
super(ReactorCallFromThreadTest, self).setUp()
self._mainThread = thread.get_ident()
def tearDown(self):
self._thread.join()
super(ReactorCallFromThreadTest, self).tearDown()
def _newThreadRun(self):
self.assertNotEqual(self._mainThread, thread.get_ident())
if hasattr(self._thread, 'ident'): # new in python 2.6
self.assertEqual(self._thread.ident, thread.get_ident())
self._reactor.callFromThread(self._fnCalledFromThread)
def _fnCalledFromThread(self):
self.assertEqual(self._mainThread, thread.get_ident())
self._reactor.stop()
def _whenRunningCallback(self):
self._thread = threading.Thread(target=self._newThreadRun)
self._thread.start()
def testCallFromThread(self):
self._reactor.callWhenRunning(self._whenRunningCallback)
self._reactor.run()
@skipIfNoTwisted
class ReactorCallInThread(ReactorTestCase):
def setUp(self):
super(ReactorCallInThread, self).setUp()
self._mainThread = thread.get_ident()
def _fnCalledInThread(self, *args, **kwargs):
self.assertNotEqual(thread.get_ident(), self._mainThread)
self._reactor.callFromThread(lambda: self._reactor.stop())
def _whenRunningCallback(self):
self._reactor.callInThread(self._fnCalledInThread)
def testCallInThread(self):
self._reactor.callWhenRunning(self._whenRunningCallback)
self._reactor.run()
class Reader(object):
def __init__(self, fd, callback):
self._fd = fd
self._callback = callback
def logPrefix(self):
return "Reader"
def close(self):
self._fd.close()
def fileno(self):
return self._fd.fileno()
def readConnectionLost(self, reason):
self.close()
def connectionLost(self, reason):
self.close()
def doRead(self):
self._callback(self._fd)
if have_twisted:
Reader = implementer(IReadDescriptor)(Reader)
class Writer(object):
def __init__(self, fd, callback):
self._fd = fd
self._callback = callback
def logPrefix(self):
return "Writer"
def close(self):
self._fd.close()
def fileno(self):
return self._fd.fileno()
def connectionLost(self, reason):
self.close()
def doWrite(self):
self._callback(self._fd)
if have_twisted:
Writer = implementer(IWriteDescriptor)(Writer)
@skipIfNoTwisted
class ReactorReaderWriterTest(ReactorTestCase):
def _set_nonblocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def setUp(self):
super(ReactorReaderWriterTest, self).setUp()
r, w = os.pipe()
self._set_nonblocking(r)
self._set_nonblocking(w)
set_close_exec(r)
set_close_exec(w)
self._p1 = os.fdopen(r, "rb", 0)
self._p2 = os.fdopen(w, "wb", 0)
def tearDown(self):
super(ReactorReaderWriterTest, self).tearDown()
self._p1.close()
self._p2.close()
def _testReadWrite(self):
"""
In this test the writer writes an 'x' to its fd. The reader
        reads it, checks the value, and ends the test.
"""
self.shouldWrite = True
def checkReadInput(fd):
self.assertEquals(fd.read(1), b'x')
self._reactor.stop()
def writeOnce(fd):
if self.shouldWrite:
self.shouldWrite = False
fd.write(b'x')
self._reader = Reader(self._p1, checkReadInput)
self._writer = Writer(self._p2, writeOnce)
self._reactor.addWriter(self._writer)
# Test that adding the reader twice adds it only once to
# IOLoop.
self._reactor.addReader(self._reader)
self._reactor.addReader(self._reader)
def testReadWrite(self):
self._reactor.callWhenRunning(self._testReadWrite)
self._reactor.run()
def _testNoWriter(self):
"""
In this test we have no writer. Make sure the reader doesn't
read anything.
"""
def checkReadInput(fd):
self.fail("Must not be called.")
def stopTest():
# Close the writer here since the IOLoop doesn't know
# about it.
self._writer.close()
self._reactor.stop()
self._reader = Reader(self._p1, checkReadInput)
# We create a writer, but it should never be invoked.
self._writer = Writer(self._p2, lambda fd: fd.write('x'))
# Test that adding and removing the writer leaves us with no writer.
self._reactor.addWriter(self._writer)
self._reactor.removeWriter(self._writer)
# Test that adding and removing the reader doesn't cause
# unintended effects.
self._reactor.addReader(self._reader)
# Wake up after a moment and stop the test
self._reactor.callLater(0.001, stopTest)
def testNoWriter(self):
self._reactor.callWhenRunning(self._testNoWriter)
self._reactor.run()
# Test various combinations of twisted and tornado http servers,
# http clients, and event loop interfaces.
@skipIfNoTwisted
@unittest.skipIf(not have_twisted_web, 'twisted web not present')
class CompatibilityTests(unittest.TestCase):
def setUp(self):
self.saved_signals = save_signal_handlers()
self.io_loop = IOLoop()
self.io_loop.make_current()
self.reactor = TornadoReactor(self.io_loop)
def tearDown(self):
self.reactor.disconnectAll()
self.io_loop.clear_current()
self.io_loop.close(all_fds=True)
restore_signal_handlers(self.saved_signals)
def start_twisted_server(self):
class HelloResource(Resource):
isLeaf = True
def render_GET(self, request):
return "Hello from twisted!"
site = Site(HelloResource())
port = self.reactor.listenTCP(0, site, interface='127.0.0.1')
self.twisted_port = port.getHost().port
def start_tornado_server(self):
class HelloHandler(RequestHandler):
def get(self):
self.write("Hello from tornado!")
app = Application([('/', HelloHandler)],
log_function=lambda x: None)
server = HTTPServer(app, io_loop=self.io_loop)
sock, self.tornado_port = bind_unused_port()
server.add_sockets([sock])
def run_ioloop(self):
self.stop_loop = self.io_loop.stop
self.io_loop.start()
self.reactor.fireSystemEvent('shutdown')
def run_reactor(self):
self.stop_loop = self.reactor.stop
self.stop = self.reactor.stop
self.reactor.run()
def tornado_fetch(self, url, runner):
responses = []
client = AsyncHTTPClient(self.io_loop)
def callback(response):
responses.append(response)
self.stop_loop()
client.fetch(url, callback=callback)
runner()
self.assertEqual(len(responses), 1)
responses[0].rethrow()
return responses[0]
def twisted_fetch(self, url, runner):
# http://twistedmatrix.com/documents/current/web/howto/client.html
chunks = []
client = Agent(self.reactor)
d = client.request(b'GET', utf8(url))
class Accumulator(Protocol):
def __init__(self, finished):
self.finished = finished
def dataReceived(self, data):
chunks.append(data)
def connectionLost(self, reason):
self.finished.callback(None)
def callback(response):
finished = Deferred()
response.deliverBody(Accumulator(finished))
return finished
d.addCallback(callback)
def shutdown(failure):
if hasattr(self, 'stop_loop'):
self.stop_loop()
elif failure is not None:
# loop hasn't been initialized yet; try our best to
# get an error message out. (the runner() interaction
# should probably be refactored).
try:
failure.raiseException()
except:
logging.error('exception before starting loop', exc_info=True)
d.addBoth(shutdown)
runner()
self.assertTrue(chunks)
return ''.join(chunks)
def twisted_coroutine_fetch(self, url, runner):
body = [None]
@gen.coroutine
def f():
# This is simpler than the non-coroutine version, but it cheats
# by reading the body in one blob instead of streaming it with
# a Protocol.
client = Agent(self.reactor)
response = yield client.request(b'GET', utf8(url))
with warnings.catch_warnings():
# readBody has a buggy DeprecationWarning in Twisted 15.0:
# https://twistedmatrix.com/trac/changeset/43379
warnings.simplefilter('ignore', category=DeprecationWarning)
body[0] = yield readBody(response)
self.stop_loop()
self.io_loop.add_callback(f)
runner()
return body[0]
def testTwistedServerTornadoClientIOLoop(self):
self.start_twisted_server()
response = self.tornado_fetch(
'http://127.0.0.1:%d' % self.twisted_port, self.run_ioloop)
self.assertEqual(response.body, 'Hello from twisted!')
def testTwistedServerTornadoClientReactor(self):
self.start_twisted_server()
response = self.tornado_fetch(
'http://127.0.0.1:%d' % self.twisted_port, self.run_reactor)
self.assertEqual(response.body, 'Hello from twisted!')
def testTornadoServerTwistedClientIOLoop(self):
self.start_tornado_server()
response = self.twisted_fetch(
'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
self.assertEqual(response, 'Hello from tornado!')
def testTornadoServerTwistedClientReactor(self):
self.start_tornado_server()
response = self.twisted_fetch(
'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
self.assertEqual(response, 'Hello from tornado!')
@skipIfPy26
def testTornadoServerTwistedCoroutineClientIOLoop(self):
self.start_tornado_server()
response = self.twisted_coroutine_fetch(
'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
self.assertEqual(response, 'Hello from tornado!')
@skipIfNoTwisted
@skipIfPy26
class ConvertDeferredTest(unittest.TestCase):
def test_success(self):
@inlineCallbacks
def fn():
if False:
# inlineCallbacks doesn't work with regular functions;
# must have a yield even if it's unreachable.
yield
returnValue(42)
f = gen.convert_yielded(fn())
self.assertEqual(f.result(), 42)
def test_failure(self):
@inlineCallbacks
def fn():
if False:
yield
1 / 0
f = gen.convert_yielded(fn())
with self.assertRaises(ZeroDivisionError):
f.result()
if have_twisted:
# Import and run as much of twisted's test suite as possible.
# This is unfortunately rather dependent on implementation details,
# but there doesn't appear to be a clean all-in-one conformance test
# suite for reactors.
#
# This is a list of all test suites using the ReactorBuilder
# available in Twisted 11.0.0 and 11.1.0 (and a blacklist of
# specific test methods to be disabled).
twisted_tests = {
'twisted.internet.test.test_core.ObjectModelIntegrationTest': [],
'twisted.internet.test.test_core.SystemEventTestsBuilder': [
'test_iterate', # deliberately not supported
# Fails on TwistedIOLoop and AsyncIOLoop.
'test_runAfterCrash',
],
'twisted.internet.test.test_fdset.ReactorFDSetTestsBuilder': [
"test_lostFileDescriptor", # incompatible with epoll and kqueue
],
'twisted.internet.test.test_process.ProcessTestsBuilder': [
# Only work as root. Twisted's "skip" functionality works
# with py27+, but not unittest2 on py26.
'test_changeGID',
'test_changeUID',
# This test sometimes fails with EPIPE on a call to
# kqueue.control. Happens consistently for me with
# trollius but not asyncio or other IOLoops.
'test_childConnectionLost',
],
# Process tests appear to work on OSX 10.7, but not 10.6
# 'twisted.internet.test.test_process.PTYProcessTestsBuilder': [
# 'test_systemCallUninterruptedByChildExit',
# ],
'twisted.internet.test.test_tcp.TCPClientTestsBuilder': [
'test_badContext', # ssl-related; see also SSLClientTestsMixin
],
'twisted.internet.test.test_tcp.TCPPortTestsBuilder': [
# These use link-local addresses and cause firewall prompts on mac
'test_buildProtocolIPv6AddressScopeID',
'test_portGetHostOnIPv6ScopeID',
'test_serverGetHostOnIPv6ScopeID',
'test_serverGetPeerOnIPv6ScopeID',
],
'twisted.internet.test.test_tcp.TCPConnectionTestsBuilder': [],
'twisted.internet.test.test_tcp.WriteSequenceTests': [],
'twisted.internet.test.test_tcp.AbortConnectionTestCase': [],
'twisted.internet.test.test_threads.ThreadTestsBuilder': [],
'twisted.internet.test.test_time.TimeTestsBuilder': [],
# Extra third-party dependencies (pyOpenSSL)
# 'twisted.internet.test.test_tls.SSLClientTestsMixin': [],
'twisted.internet.test.test_udp.UDPServerTestsBuilder': [],
'twisted.internet.test.test_unix.UNIXTestsBuilder': [
# Platform-specific. These tests would be skipped automatically
# if we were running twisted's own test runner.
'test_connectToLinuxAbstractNamespace',
'test_listenOnLinuxAbstractNamespace',
# These tests use twisted's sendmsg.c extension and sometimes
# fail with what looks like uninitialized memory errors
# (more common on pypy than cpython, but I've seen it on both)
'test_sendFileDescriptor',
'test_sendFileDescriptorTriggersPauseProducing',
'test_descriptorDeliveredBeforeBytes',
'test_avoidLeakingFileDescriptors',
],
'twisted.internet.test.test_unix.UNIXDatagramTestsBuilder': [
'test_listenOnLinuxAbstractNamespace',
],
'twisted.internet.test.test_unix.UNIXPortTestsBuilder': [],
}
if sys.version_info >= (3,):
# In Twisted 15.2.0 on Python 3.4, the process tests will try to run
# but fail, due in part to interactions between Tornado's strict
# warnings-as-errors policy and Twisted's own warning handling
# (it was not obvious how to configure the warnings module to
# reconcile the two), and partly due to what looks like a packaging
# error (process_cli.py missing). For now, just skip it.
del twisted_tests['twisted.internet.test.test_process.ProcessTestsBuilder']
for test_name, blacklist in twisted_tests.items():
try:
test_class = import_object(test_name)
except (ImportError, AttributeError):
continue
for test_func in blacklist:
if hasattr(test_class, test_func):
# The test_func may be defined in a mixin, so clobber
# it instead of delattr()
setattr(test_class, test_func, lambda self: None)
def make_test_subclass(test_class):
class TornadoTest(test_class):
_reactors = ["tornado.platform.twisted._TestReactor"]
def setUp(self):
# Twisted's tests expect to be run from a temporary
# directory; they create files in their working directory
# and don't always clean up after themselves.
self.__curdir = os.getcwd()
self.__tempdir = tempfile.mkdtemp()
os.chdir(self.__tempdir)
super(TornadoTest, self).setUp()
def tearDown(self):
super(TornadoTest, self).tearDown()
os.chdir(self.__curdir)
shutil.rmtree(self.__tempdir)
def flushWarnings(self, *args, **kwargs):
# This is a hack because Twisted and Tornado have
# differing approaches to warnings in tests.
# Tornado sets up a global set of warnings filters
# in runtests.py, while Twisted patches the filter
# list in each test. The net effect is that
# Twisted's tests run with Tornado's increased
# strictness (BytesWarning and ResourceWarning are
# enabled) but without our filter rules to ignore those
# warnings from Twisted code.
filtered = []
for w in super(TornadoTest, self).flushWarnings(
*args, **kwargs):
if w['category'] in (BytesWarning, ResourceWarning):
continue
filtered.append(w)
return filtered
def buildReactor(self):
self.__saved_signals = save_signal_handlers()
return test_class.buildReactor(self)
def unbuildReactor(self, reactor):
test_class.unbuildReactor(self, reactor)
# Clean up file descriptors (especially epoll/kqueue
# objects) eagerly instead of leaving them for the
# GC. Unfortunately we can't do this in reactor.stop
# since twisted expects to be able to unregister
# connections in a post-shutdown hook.
reactor._io_loop.close(all_fds=True)
restore_signal_handlers(self.__saved_signals)
TornadoTest.__name__ = test_class.__name__
return TornadoTest
test_subclass = make_test_subclass(test_class)
globals().update(test_subclass.makeTestCaseClasses())
# Since we're not using twisted's test runner, it's tricky to get
# logging set up well. Most of the time it's easiest to just
# leave it turned off, but while working on these tests you may want
# to uncomment one of the other lines instead.
log.defaultObserver.stop()
# import sys; log.startLogging(sys.stderr, setStdout=0)
# log.startLoggingWithObserver(log.PythonLoggingObserver().emit, setStdout=0)
# import logging; logging.getLogger('twisted').setLevel(logging.WARNING)
# Twisted recently introduced a new logger; disable that one too.
try:
from twisted.logger import globalLogBeginner
except ImportError:
pass
else:
globalLogBeginner.beginLoggingTo([])
if have_twisted:
class LayeredTwistedIOLoop(TwistedIOLoop):
"""Layers a TwistedIOLoop on top of a TornadoReactor on a SelectIOLoop.
This is of course silly, but is useful for testing purposes to make
sure we're implementing both sides of the various interfaces
correctly. In some tests another TornadoReactor is layered on top
of the whole stack.
"""
def initialize(self, **kwargs):
# When configured to use LayeredTwistedIOLoop we can't easily
# get the next-best IOLoop implementation, so use the lowest common
# denominator.
self.real_io_loop = SelectIOLoop(make_current=False)
reactor = TornadoReactor(io_loop=self.real_io_loop)
super(LayeredTwistedIOLoop, self).initialize(reactor=reactor, **kwargs)
self.add_callback(self.make_current)
def close(self, all_fds=False):
super(LayeredTwistedIOLoop, self).close(all_fds=all_fds)
# HACK: This is the same thing that test_class.unbuildReactor does.
for reader in self.reactor._internalReaders:
self.reactor.removeReader(reader)
reader.connectionLost(None)
self.real_io_loop.close(all_fds=all_fds)
def stop(self):
# One of twisted's tests fails if I don't delay crash()
# until the reactor has started, but if I move this to
# TwistedIOLoop then the tests fail when I'm *not* running
# tornado-on-twisted-on-tornado. I'm clearly missing something
# about the startup/crash semantics, but since stop and crash
# are really only used in tests it doesn't really matter.
def f():
self.reactor.crash()
# Become current again on restart. This is needed to
# override real_io_loop's claim to being the current loop.
self.add_callback(self.make_current)
self.reactor.callWhenRunning(f)
if __name__ == "__main__":
unittest.main()
| tao12345666333/tornado-zh | tornado/test/twisted_test.py | Python | mit | 27,525 |
import unittest
from node.openbazaar_daemon import OpenBazaarContext
import mock
from node import transport
def get_mock_open_bazaar_context():
return OpenBazaarContext.create_default_instance()
class TestTransportLayerCallbacks(unittest.TestCase):
"""Test the callback features of the TransportLayer class."""
def setUp(self):
# For testing sections
self.callback1 = mock.Mock()
self.callback2 = mock.Mock()
self.callback3 = mock.Mock()
self.validator1 = mock.Mock()
self.validator2 = mock.Mock()
self.validator3 = mock.Mock()
ob_ctx = get_mock_open_bazaar_context()
ob_ctx.nat_status = {'nat_type': 'Restric NAT'}
guid = 1
nickname = None
self.transport_layer = transport.TransportLayer(ob_ctx, guid, nickname)
self.transport_layer.add_callback('section_one', {'cb': self.callback1, 'validator_cb': self.validator1})
self.transport_layer.add_callback('section_one', {'cb': self.callback2, 'validator_cb': self.validator2})
self.transport_layer.add_callback('all', {'cb': self.callback3, 'validator_cb': self.validator3})
# For testing validators
self.callback4 = mock.Mock()
self.callback5 = mock.Mock()
self.validator4 = mock.Mock(return_value=True)
self.validator5 = mock.Mock(return_value=False)
self.transport_layer.add_callback('section_two', {'cb': self.callback4, 'validator_cb': self.validator4})
self.transport_layer.add_callback('section_two', {'cb': self.callback5, 'validator_cb': self.validator5})
def _assert_called(self, one, two, three):
self.assertEqual(self.callback1.call_count, one)
self.assertEqual(self.callback2.call_count, two)
self.assertEqual(self.callback3.call_count, three)
def test_fixture(self):
self._assert_called(0, 0, 0)
def test_callbacks(self):
self.transport_layer.trigger_callbacks('section_one', None)
self._assert_called(1, 1, 1)
def test_all_callback(self):
self.transport_layer.trigger_callbacks('section_with_no_register', None)
self._assert_called(0, 0, 1)
def test_validators(self):
self.transport_layer.trigger_callbacks('section_two', None)
self.assertEqual(self.validator4.call_count, 1)
self.assertEqual(self.validator5.call_count, 1)
self.assertEqual(self.callback4.call_count, 1)
self.assertEqual(self.callback5.call_count, 0)
if __name__ == "__main__":
unittest.main()
| atsuyim/OpenBazaar | tests/test_transport.py | Python | mit | 2,556 |
# -*- coding: utf-8 -*-
"""
test_settings
~~~~~~~~~~~~~
Test the Settings object.
"""
import pytest
import h2.errors
import h2.exceptions
import h2.settings
from hypothesis import given, assume
from hypothesis.strategies import (
integers, booleans, fixed_dictionaries, builds
)
class TestSettings(object):
"""
Test the Settings object behaves as expected.
"""
def test_settings_defaults_client(self):
"""
The Settings object begins with the appropriate defaults for clients.
"""
s = h2.settings.Settings(client=True)
assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
assert s[h2.settings.SettingCodes.ENABLE_PUSH] == 1
assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 65535
assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16384
assert s[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL] == 0
def test_settings_defaults_server(self):
"""
The Settings object begins with the appropriate defaults for servers.
"""
s = h2.settings.Settings(client=False)
assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
assert s[h2.settings.SettingCodes.ENABLE_PUSH] == 0
assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 65535
assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16384
assert s[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL] == 0
@pytest.mark.parametrize('client', [True, False])
def test_can_set_initial_values(self, client):
"""
The Settings object can be provided initial values that override the
defaults.
"""
overrides = {
h2.settings.SettingCodes.HEADER_TABLE_SIZE: 8080,
h2.settings.SettingCodes.MAX_FRAME_SIZE: 16388,
h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,
h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 2**16,
h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL: 1,
}
s = h2.settings.Settings(client=client, initial_values=overrides)
assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 8080
assert s[h2.settings.SettingCodes.ENABLE_PUSH] == bool(client)
assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 65535
assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16388
assert s[h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS] == 100
assert s[h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE] == 2**16
assert s[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL] == 1
@pytest.mark.parametrize(
'setting,value',
[
(h2.settings.SettingCodes.ENABLE_PUSH, 2),
(h2.settings.SettingCodes.ENABLE_PUSH, -1),
(h2.settings.SettingCodes.INITIAL_WINDOW_SIZE, -1),
(h2.settings.SettingCodes.INITIAL_WINDOW_SIZE, 2**34),
(h2.settings.SettingCodes.MAX_FRAME_SIZE, 1),
(h2.settings.SettingCodes.MAX_FRAME_SIZE, 2**30),
(h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE, -1),
(h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL, -1),
]
)
def test_cannot_set_invalid_initial_values(self, setting, value):
"""
        Attempting to construct a Settings object with invalid initial
        values raises an InvalidSettingsValueError.
"""
overrides = {setting: value}
with pytest.raises(h2.exceptions.InvalidSettingsValueError):
h2.settings.Settings(initial_values=overrides)
def test_applying_value_doesnt_take_effect_immediately(self):
"""
When a value is applied to the settings object, it doesn't immediately
take effect.
"""
s = h2.settings.Settings(client=True)
        s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] = 8000
assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
def test_acknowledging_values(self):
"""
When we acknowledge settings, the values change.
"""
s = h2.settings.Settings(client=True)
old_settings = dict(s)
new_settings = {
h2.settings.SettingCodes.HEADER_TABLE_SIZE: 4000,
h2.settings.SettingCodes.ENABLE_PUSH: 0,
h2.settings.SettingCodes.INITIAL_WINDOW_SIZE: 60,
h2.settings.SettingCodes.MAX_FRAME_SIZE: 16385,
h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL: 1,
}
s.update(new_settings)
assert dict(s) == old_settings
s.acknowledge()
assert dict(s) == new_settings
def test_acknowledging_returns_the_changed_settings(self):
"""
Acknowledging settings returns the changes.
"""
s = h2.settings.Settings(client=True)
s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] = 8000
s[h2.settings.SettingCodes.ENABLE_PUSH] = 0
changes = s.acknowledge()
assert len(changes) == 2
table_size_change = (
changes[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
)
push_change = changes[h2.settings.SettingCodes.ENABLE_PUSH]
assert table_size_change.setting == (
h2.settings.SettingCodes.HEADER_TABLE_SIZE
)
assert table_size_change.original_value == 4096
assert table_size_change.new_value == 8000
assert push_change.setting == h2.settings.SettingCodes.ENABLE_PUSH
assert push_change.original_value == 1
assert push_change.new_value == 0
def test_acknowledging_only_returns_changed_settings(self):
"""
Acknowledging settings does not return unchanged settings.
"""
s = h2.settings.Settings(client=True)
s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] = 70
changes = s.acknowledge()
assert len(changes) == 1
assert list(changes.keys()) == [
h2.settings.SettingCodes.INITIAL_WINDOW_SIZE
]
def test_deleting_values_deletes_all_of_them(self):
"""
When we delete a key we lose all state about it.
"""
s = h2.settings.Settings(client=True)
        s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] = 8000
del s[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
with pytest.raises(KeyError):
s[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
def test_length_correctly_reported(self):
"""
Length is related only to the number of keys.
"""
s = h2.settings.Settings(client=True)
assert len(s) == 5
        s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] = 8000
assert len(s) == 5
s.acknowledge()
assert len(s) == 5
del s[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
assert len(s) == 4
def test_new_values_work(self):
"""
New values initially don't appear
"""
s = h2.settings.Settings(client=True)
s[80] = 81
with pytest.raises(KeyError):
s[80]
def test_new_values_follow_basic_acknowledgement_rules(self):
"""
A new value properly appears when acknowledged.
"""
s = h2.settings.Settings(client=True)
s[80] = 81
changed_settings = s.acknowledge()
assert s[80] == 81
assert len(changed_settings) == 1
changed = changed_settings[80]
assert changed.setting == 80
assert changed.original_value is None
assert changed.new_value == 81
def test_single_values_arent_affected_by_acknowledgement(self):
"""
When acknowledged, unchanged settings remain unchanged.
"""
s = h2.settings.Settings(client=True)
assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
s.acknowledge()
assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 4096
def test_settings_getters(self):
"""
Getters exist for well-known settings.
"""
s = h2.settings.Settings(client=True)
assert s.header_table_size == (
s[h2.settings.SettingCodes.HEADER_TABLE_SIZE]
)
assert s.enable_push == s[h2.settings.SettingCodes.ENABLE_PUSH]
assert s.initial_window_size == (
s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE]
)
assert s.max_frame_size == s[h2.settings.SettingCodes.MAX_FRAME_SIZE]
assert s.max_concurrent_streams == 2**32 + 1 # A sensible default.
assert s.max_header_list_size is None
assert s.enable_connect_protocol == s[
h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL
]
def test_settings_setters(self):
"""
Setters exist for well-known settings.
"""
s = h2.settings.Settings(client=True)
s.header_table_size = 0
s.enable_push = 1
s.initial_window_size = 2
s.max_frame_size = 16385
s.max_concurrent_streams = 4
s.max_header_list_size = 2**16
s.enable_connect_protocol = 1
s.acknowledge()
assert s[h2.settings.SettingCodes.HEADER_TABLE_SIZE] == 0
assert s[h2.settings.SettingCodes.ENABLE_PUSH] == 1
assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 2
assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16385
assert s[h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS] == 4
assert s[h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE] == 2**16
assert s[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL] == 1
@given(integers())
def test_cannot_set_invalid_values_for_enable_push(self, val):
"""
SETTINGS_ENABLE_PUSH only allows two values: 0, 1.
"""
assume(val not in (0, 1))
s = h2.settings.Settings()
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s.enable_push = val
s.acknowledge()
assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
assert s.enable_push == 1
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s[h2.settings.SettingCodes.ENABLE_PUSH] = val
s.acknowledge()
assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
assert s[h2.settings.SettingCodes.ENABLE_PUSH] == 1
@given(integers())
def test_cannot_set_invalid_vals_for_initial_window_size(self, val):
"""
SETTINGS_INITIAL_WINDOW_SIZE only allows values between 0 and 2**32 - 1
inclusive.
"""
s = h2.settings.Settings()
if 0 <= val <= 2**31 - 1:
s.initial_window_size = val
s.acknowledge()
assert s.initial_window_size == val
else:
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s.initial_window_size = val
s.acknowledge()
assert (
e.value.error_code == h2.errors.ErrorCodes.FLOW_CONTROL_ERROR
)
assert s.initial_window_size == 65535
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] = val
s.acknowledge()
assert (
e.value.error_code == h2.errors.ErrorCodes.FLOW_CONTROL_ERROR
)
assert s[h2.settings.SettingCodes.INITIAL_WINDOW_SIZE] == 65535
@given(integers())
def test_cannot_set_invalid_values_for_max_frame_size(self, val):
"""
SETTINGS_MAX_FRAME_SIZE only allows values between 2**14 and 2**24 - 1.
"""
s = h2.settings.Settings()
if 2**14 <= val <= 2**24 - 1:
s.max_frame_size = val
s.acknowledge()
assert s.max_frame_size == val
else:
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s.max_frame_size = val
s.acknowledge()
assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
assert s.max_frame_size == 16384
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s[h2.settings.SettingCodes.MAX_FRAME_SIZE] = val
s.acknowledge()
assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
assert s[h2.settings.SettingCodes.MAX_FRAME_SIZE] == 16384
@given(integers())
def test_cannot_set_invalid_values_for_max_header_list_size(self, val):
"""
SETTINGS_MAX_HEADER_LIST_SIZE only allows non-negative values.
"""
s = h2.settings.Settings()
if val >= 0:
s.max_header_list_size = val
s.acknowledge()
assert s.max_header_list_size == val
else:
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s.max_header_list_size = val
s.acknowledge()
assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
assert s.max_header_list_size is None
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s[h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE] = val
s.acknowledge()
assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
with pytest.raises(KeyError):
s[h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE]
@given(integers())
def test_cannot_set_invalid_values_for_enable_connect_protocol(self, val):
"""
SETTINGS_ENABLE_CONNECT_PROTOCOL only allows two values: 0, 1.
"""
assume(val not in (0, 1))
s = h2.settings.Settings()
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s.enable_connect_protocol = val
s.acknowledge()
assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
assert s.enable_connect_protocol == 0
with pytest.raises(h2.exceptions.InvalidSettingsValueError) as e:
s[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL] = val
s.acknowledge()
assert e.value.error_code == h2.errors.ErrorCodes.PROTOCOL_ERROR
assert s[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL] == 0
class TestSettingsEquality(object):
"""
A class defining tests for the standard implementation of == and != .
"""
SettingsStrategy = builds(
h2.settings.Settings,
client=booleans(),
initial_values=fixed_dictionaries({
h2.settings.SettingCodes.HEADER_TABLE_SIZE:
integers(0, 2**32 - 1),
h2.settings.SettingCodes.ENABLE_PUSH: integers(0, 1),
h2.settings.SettingCodes.INITIAL_WINDOW_SIZE:
integers(0, 2**31 - 1),
h2.settings.SettingCodes.MAX_FRAME_SIZE:
integers(2**14, 2**24 - 1),
h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS:
integers(0, 2**32 - 1),
h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE:
integers(0, 2**32 - 1),
})
)
@given(settings=SettingsStrategy)
def test_equality_reflexive(self, settings):
"""
An object compares equal to itself using the == operator and the !=
operator.
"""
assert (settings == settings)
assert not (settings != settings)
@given(settings=SettingsStrategy, o_settings=SettingsStrategy)
def test_equality_multiple(self, settings, o_settings):
"""
Two objects compare themselves using the == operator and the !=
operator.
"""
if settings == o_settings:
assert settings == o_settings
assert not (settings != o_settings)
else:
assert settings != o_settings
assert not (settings == o_settings)
@given(settings=SettingsStrategy)
def test_another_type_equality(self, settings):
"""
The object does not compare equal to an object of an unrelated type
(which does not implement the comparison) using the == operator.
"""
obj = object()
assert (settings != obj)
assert not (settings == obj)
@given(settings=SettingsStrategy)
def test_delegated_eq(self, settings):
"""
The result of comparison is delegated to the right-hand operand if
it is of an unrelated type.
"""
class Delegate(object):
def __eq__(self, other):
return [self]
def __ne__(self, other):
return [self]
delg = Delegate()
assert (settings == delg) == [delg]
assert (settings != delg) == [delg]
| KiChjang/servo | tests/wpt/web-platform-tests/tools/third_party/h2/test/test_settings.py | Python | mpl-2.0 | 16,680 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012-2013 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import cgi
import logging
import lxml.html
import lxml.html.clean as clean
import openerp.pooler as pooler
import random
import re
import socket
import threading
import time
from email.utils import getaddresses
from openerp.loglevels import ustr
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# HTML Sanitizer
#----------------------------------------------------------
tags_to_kill = ["script", "head", "meta", "title", "link", "style", "frame", "iframe", "base", "object", "embed"]
tags_to_remove = ['html', 'body', 'font']
def html_sanitize(src):
if not src:
return src
src = ustr(src, errors='replace')
# html encode email tags
part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
src = part.sub(lambda m: cgi.escape(m.group(1)), src)
# some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
try:
cleaner = clean.Cleaner(page_structure=True, style=False, safe_attrs_only=False, forms=False, kill_tags=tags_to_kill, remove_tags=tags_to_remove)
cleaned = cleaner.clean_html(src)
except TypeError:
# lxml.clean version < 2.3.1 does not have a kill_tags attribute
# to remove in 2014
cleaner = clean.Cleaner(page_structure=True, style=False, safe_attrs_only=False, forms=False, remove_tags=tags_to_kill + tags_to_remove)
cleaned = cleaner.clean_html(src)
except Exception, e:
if isinstance(e, etree.ParserError) and 'empty' in str(e):
return ""
_logger.warning('html_sanitize failed to parse %s' % (src))
cleaned = '<p>Impossible to parse</p>'
# MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
cleaned = cleaned.replace('%24', '$')
cleaned = cleaned.replace('%7B', '{')
cleaned = cleaned.replace('%7D', '}')
cleaned = cleaned.replace('%20', ' ')
cleaned = cleaned.replace('%5B', '[')
cleaned = cleaned.replace('%5D', ']')
return cleaned
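# Illustrative behaviour (assumed, not part of the original source): script
# tags are killed and bare <email> tokens are HTML-escaped before parsing:
#   html_sanitize('<p>hi <script>x()</script> <foo@bar.com></p>')
# returns roughly '<p>hi &lt;foo@bar.com&gt;</p>'.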
#----------------------------------------------------------
# HTML Cleaner
#----------------------------------------------------------
def html_email_clean(html):
""" html_email_clean: clean the html to display in the web client.
- strip email quotes (remove blockquote nodes)
        - strip signatures (remove the '--\n' marker and the text following
            it), by replacing <br> by \n so that signatures converted into
            html are not ignored
:param string html: sanitized html; tags like html or head should not
be present in the html string. This method therefore takes as input
html code coming from a sanitized source, like fields.html.
"""
def _replace_matching_regex(regex, source, replace=''):
dest = ''
idx = 0
for item in re.finditer(regex, source):
dest += source[idx:item.start()] + replace
idx = item.end()
dest += source[idx:]
return dest
if not html or not isinstance(html, basestring):
return html
html = ustr(html)
# 0. remove encoding attribute inside tags
doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
html = doctype.sub(r"", html)
# 1. <br[ /]> -> \n, because otherwise the tree is obfuscated
br_tags = re.compile(r'([<]\s*[bB][rR]\s*\/?[>])')
html = _replace_matching_regex(br_tags, html, '__BR_TAG__')
    # 2. form a tree, handle pure-text input by enclosing it in a <div>
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
# 2.5 remove quoted text in nodes
quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
for node in root.getiterator():
if not node.text:
continue
node.text = _replace_matching_regex(quote_tags, node.text)
# 3. remove blockquotes
quotes = [el for el in root.getiterator(tag='blockquote')]
for node in quotes:
# copy the node tail into parent text
if node.tail:
parent = node.getparent()
            parent.text = (parent.text or '') + node.tail
# remove the node
node.getparent().remove(node)
# 4. strip signatures
signature = re.compile(r'([-]{2}[\s]?[\r\n]{1,2}[^\z]+)')
for elem in root.getiterator():
if elem.text:
match = re.search(signature, elem.text)
if match:
elem.text = elem.text[:match.start()] + elem.text[match.end():]
if elem.tail:
match = re.search(signature, elem.tail)
if match:
elem.tail = elem.tail[:match.start()] + elem.tail[match.end():]
# 5. \n back to <br/>
html = etree.tostring(root, pretty_print=True)
html = html.replace('__BR_TAG__', '<br />')
# 6. Misc cleaning :
# - ClEditor seems to love using <div><br /><div> -> replace with <br />
br_div_tags = re.compile(r'(<div>\s*<br\s*\/>\s*<\/div>)')
html = _replace_matching_regex(br_div_tags, html, '<br />')
return html
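# Illustrative behaviour (assumed):
#   html_email_clean('<div>my reply<blockquote>older quoted mail</blockquote></div>')
# keeps 'my reply' and drops the <blockquote> node entirely, returning
# roughly '<div>my reply</div>\n'.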
#----------------------------------------------------------
# HTML/Text management
#----------------------------------------------------------
def html2plaintext(html, body_id=None, encoding='utf-8'):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <peter@fry-it.com>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
        source = tree.xpath("//*[@id='%s']" % (body_id,))
else:
source = tree.xpath('//body')
if len(source):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
html = html.replace('<h3>', '*').replace('</h3>', '*')
html = html.replace('<h2>', '**').replace('</h2>', '**')
html = html.replace('<h1>', '**').replace('</h1>', '**')
html = html.replace('<em>', '/').replace('</em>', '/')
html = html.replace('<tr>', '\n')
html = html.replace('</p>', '\n')
html = re.sub('<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
return html
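# Illustrative behaviour (assumed; URL is hypothetical):
#   html2plaintext('<div><b>Hello</b> <a href="http://x.example">link</a></div>')
# returns roughly '*Hello* link [1]\n\n[1] http://x.example\n'.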
def plaintext2html(text, container_tag=False):
""" Convert plaintext into html. Content of the text is escaped to manage
html entities, using cgi.escape().
- all \n,\r are replaced by <br />
- enclose content into <p>
- 2 or more consecutive <br /> are considered as paragraph breaks
:param string container_tag: container of the html; by default the
content is embedded into a <div>
"""
text = cgi.escape(ustr(text))
# 1. replace \n and \r
text = text.replace('\n', '<br/>')
text = text.replace('\r', '<br/>')
# 2-3: form paragraphs
idx = 0
final = '<p>'
br_tags = re.compile(r'(([<]\s*[bB][rR]\s*\/?[>]\s*){2,})')
for item in re.finditer(br_tags, text):
final += text[idx:item.start()] + '</p><p>'
idx = item.end()
final += text[idx:] + '</p>'
# 4. container
if container_tag:
final = '<%s>%s</%s>' % (container_tag, final, container_tag)
return ustr(final)
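# Illustrative behaviour (assumed): two consecutive newlines become a
# paragraph break, and the result is wrapped in the container tag:
#   plaintext2html('a & b\n\nnext', container_tag='div')
# returns u'<div><p>a &amp; b</p><p>next</p></div>'.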
def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False):
""" Append extra content at the end of an HTML snippet, trying
to locate the end of the HTML document (</body>, </html>, or
EOF), and converting the provided content in html unless ``plaintext``
is False.
Content conversion can be done in two ways:
- wrapping it into a pre (preserve=True)
- use plaintext2html (preserve=False, using container_tag to wrap the
whole content)
A side-effect of this method is to coerce all HTML tags to
lowercase in ``html``, and strip enclosing <html> or <body> tags in
content if ``plaintext`` is False.
:param str html: html tagsoup (doesn't have to be XHTML)
:param str content: extra content to append
:param bool plaintext: whether content is plaintext and should
be wrapped in a <pre/> tag.
:param bool preserve: if content is plaintext, wrap it into a <pre>
instead of converting it into html
"""
html = ustr(html)
if plaintext and preserve:
content = u'\n<pre>%s</pre>\n' % ustr(content)
elif plaintext:
content = '\n%s\n' % plaintext2html(content, container_tag)
else:
content = re.sub(r'(?i)(</?html.*>|</?body.*>|<!\W*DOCTYPE.*>)', '', content)
content = u'\n%s\n' % ustr(content)
# Force all tags to lowercase
html = re.sub(r'(</?)\W*(\w+)([ >])',
lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
insert_location = html.find('</body>')
if insert_location == -1:
insert_location = html.find('</html>')
if insert_location == -1:
return '%s%s' % (html, content)
return '%s%s%s' % (html[:insert_location], content, html[insert_location:])
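# Illustrative sketch (hypothetical snippet values):
#
#   append_content_to_html('<html><body>Hi</body></html>', 'Bye', preserve=True)
#   # -> '<html><body>Hi\n<pre>Bye</pre>\n</body></html>'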
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
email_re = re.compile(r"""
([a-zA-Z][\w\.-]*[a-zA-Z0-9] # username part
@ # mandatory @ sign
[a-zA-Z0-9][\w\.-]* # domain must start with a letter ... Ged> why do we include a 0-9 then?
\.
[a-z]{2,3} # TLD
)
""", re.VERBOSE)
res_re = re.compile(r"\[([0-9]+)\]", re.UNICODE)
command_re = re.compile("^Set-([a-z]+) *: *(.+)$", re.I + re.UNICODE)
# Updated in 7.0 to match the model name as well
# Typical form of references is <timestamp-openerp-record_id-model_name@domain>
# group(1) = the record ID ; group(2) = the model (if any) ; group(3) = the domain
reference_re = re.compile("<.*-open(?:object|erp)-(\\d+)(?:-([\w.]+))?.*@(.*)>", re.UNICODE)
def generate_tracking_message_id(res_id):
"""Returns a string that can be used in the Message-ID RFC822 header field
Used to track the replies related to a given object thanks to the "In-Reply-To"
or "References" fields that Mail User Agents will set.
"""
try:
rnd = random.SystemRandom().random()
except NotImplementedError:
rnd = random.random()
rndstr = ("%.15f" % rnd)[2:]
return "<%.15f.%s-openerp-%s@%s>" % (time.time(), rndstr, res_id, socket.gethostname())
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None,
smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None):
"""Low-level function for sending an email (deprecated).
:deprecate: since OpenERP 6.1, please use ir.mail_server.send_email() instead.
:param email_from: A string used to fill the `From` header, if falsy,
config['email_from'] is used instead. Also used for
the `Reply-To` header if `reply_to` is not provided
:param email_to: a sequence of addresses to send the mail to.
"""
# If not cr, get cr from current thread database
local_cr = None
if not cr:
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
local_cr = cr = pooler.get_db(db_name).cursor()
else:
raise Exception("No database cursor found, please pass one explicitly")
# Send Email
try:
mail_server_pool = pooler.get_pool(cr.dbname).get('ir.mail_server')
res = False
# Pack Message into MIME Object
email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to,
attachments, message_id, references, openobject_id, subtype, headers=headers)
res = mail_server_pool.send_email(cr, uid or 1, email_msg, mail_server_id=None,
smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password,
smtp_encryption=('ssl' if ssl else None), smtp_debug=debug)
except Exception:
_logger.exception("tools.email_send failed to deliver email")
return False
finally:
if local_cr:
cr.close()
return res
def email_split(text):
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
return [addr[1] for addr in getaddresses([text])
# getaddresses() returns '' when email parsing fails, and
# sometimes returns emails without at least '@'. The '@'
# is strictly required in RFC2822's `addr-spec`.
if addr[1]
if '@' in addr[1]] | 3dfxmadscientist/CBSS | openerp/tools/mail.py | Python | agpl-3.0 | 14,926 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow.contrib.operators.dataflow_operator import DataFlowPythonOperator, \
DataFlowJavaOperator, DataflowTemplateOperator
from airflow.version import version
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
TASK_ID = 'test-dataflow-operator'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output'
}
PY_FILE = 'gs://my-bucket/my-object.py'
JAR_FILE = 'example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar'}
}
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
EXPECTED_ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
POLL_SLEEP = 30
GCS_HOOK_STRING = 'airflow.contrib.operators.dataflow_operator.{}'
class DataFlowPythonOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataFlowPythonOperator(
task_id=TASK_ID,
py_file=PY_FILE,
py_options=PY_OPTIONS,
dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataFlowPythonOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.py_file, PY_FILE)
self.assertEqual(self.dataflow.py_options, PY_OPTIONS)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_PYTHON)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_python_workflow.
"""
start_python_hook = dataflow_mock.return_value.start_python_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'staging_location': 'gs://test/staging',
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
gcs_download_hook.assert_called_once_with(PY_FILE)
start_python_hook.assert_called_once_with(TASK_ID, expected_options,
mock.ANY, PY_OPTIONS)
self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
class DataFlowJavaOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataFlowJavaOperator(
task_id=TASK_ID,
jar=JAR_FILE,
job_class=JOB_CLASS,
dataflow_default_options=DEFAULT_OPTIONS_JAVA,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_JAVA)
self.assertEqual(self.dataflow.job_class, JOB_CLASS)
self.assertEqual(self.dataflow.jar, JAR_FILE)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_java_workflow.
"""
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_download_hook.assert_called_once_with(JAR_FILE)
start_java_hook.assert_called_once_with(TASK_ID, mock.ANY,
mock.ANY, JOB_CLASS)
class DataFlowTemplateOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowTemplateOperator(
task_id=TASK_ID,
template=TEMPLATE,
parameters=PARAMETERS,
dataflow_default_options=DEFAULT_OPTIONS_TEMPLATE,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.template, TEMPLATE)
self.assertEqual(self.dataflow.parameters, PARAMETERS)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_TEMPLATE)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
def test_exec(self, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_template_workflow.
"""
start_template_hook = dataflow_mock.return_value.start_template_dataflow
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
start_template_hook.assert_called_once_with(TASK_ID, expected_options,
PARAMETERS, TEMPLATE)
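# For context, a sketch of how the operator under test might be wired into a
# DAG (dag id, dates and option values below are hypothetical):
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG('dataflow_example', start_date=datetime(2017, 1, 1)) as dag:
#       DataflowTemplateOperator(
#           task_id='wordcount',
#           template=TEMPLATE,
#           parameters=PARAMETERS,
#           dataflow_default_options={'project': 'my-project'})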
| subodhchhabra/airflow | tests/contrib/operators/test_dataflow_operator.py | Python | apache-2.0 | 7,353 |
# -*- coding: utf-8 -*-
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db import migrations
def clear_message_sent_by_message_type_values(apps, schema_editor):
# type: (StateApps, DatabaseSchemaEditor) -> None
UserCount = apps.get_model('analytics', 'UserCount')
StreamCount = apps.get_model('analytics', 'StreamCount')
RealmCount = apps.get_model('analytics', 'RealmCount')
InstallationCount = apps.get_model('analytics', 'InstallationCount')
FillState = apps.get_model('analytics', 'FillState')
property = 'messages_sent:message_type:day'
UserCount.objects.filter(property=property).delete()
StreamCount.objects.filter(property=property).delete()
RealmCount.objects.filter(property=property).delete()
InstallationCount.objects.filter(property=property).delete()
FillState.objects.filter(property=property).delete()
class Migration(migrations.Migration):
dependencies = [('analytics', '0009_remove_messages_to_stream_stat')]
operations = [
migrations.RunPython(clear_message_sent_by_message_type_values),
]
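# This data migration is one-way. If reversibility were ever needed, a no-op
# reverse could be attached (sketch only, not part of the original migration):
#
#   migrations.RunPython(clear_message_sent_by_message_type_values,
#                        reverse_code=migrations.RunPython.noop)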
| christi3k/zulip | analytics/migrations/0010_clear_messages_sent_values.py | Python | apache-2.0 | 1,174 |
"""
Classes for easy interpolation of trajectories and Curves.
Requires Scipy installed.
"""
import numpy as np
class Interpolator:
""" Poorman's linear interpolator, doesn't require Scipy. """
def __init__(self, tt=None, ss=None, ttss = None, left=None, right=None):
if ttss is not None:
tt, ss = zip(*ttss)
self.tt = 1.0*np.array(tt)
self.ss = 1.0*np.array(ss)
self.left = left
self.right = right
self.tmin, self.tmax = min(tt), max(tt)
def __call__(self, t):
return np.interp(t, self.tt, self.ss, self.left, self.right)
class Trajectory:
def __init__(self, tt, xx, yy):
self.tt = 1.0*np.array(tt)
self.xx = np.array(xx)
self.yy = np.array(yy)
self.update_interpolators()
def __call__(self, t):
return np.array([self.xi(t), self.yi(t)])
def addx(self, x):
return Trajectory(self.tt, self.xx+x, self.yy)
    def addy(self, y):
        return Trajectory(self.tt, self.xx, self.yy+y)
def update_interpolators(self):
self.xi = Interpolator(self.tt, self.xx)
self.yi = Interpolator(self.tt, self.yy)
def txy(self, tms=False):
return zip((1000 if tms else 1)*self.tt, self.xx, self.yy)
def to_file(self, filename):
np.savetxt(filename, np.array(self.txy(tms=True)),
fmt="%d", delimiter='\t')
@staticmethod
def from_file(filename):
arr = np.loadtxt(filename, delimiter='\t')
tt, xx, yy = arr.T
return Trajectory(1.0*tt/1000, xx, yy)
@staticmethod
def save_list(trajs, filename):
N = len(trajs)
arr = np.hstack([np.array(t.txy(tms=True)) for t in trajs])
np.savetxt( filename, arr, fmt="%d", delimiter='\t',
header = "\t".join(N*['t (ms)', 'x', 'y']))
@staticmethod
def load_list(filename):
arr = np.loadtxt(filename, delimiter='\t').T
Nlines = arr.shape[0]
return [Trajectory(tt=1.0*a[0]/1000, xx=a[1], yy=a[2])
for a in np.split(arr, Nlines/3)]
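# A small usage sketch (sample points are hypothetical):
#
#   traj = Trajectory(tt=[0, 1, 2], xx=[0, 10, 20], yy=[5, 5, 0])
#   traj(0.5)           # -> array([ 5.,  5.])  (piecewise-linear interpolation)
#   traj.addx(3)(0.5)   # -> array([ 8.,  5.])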
| kerimlcr/ab2017-dpyo | ornek/moviepy/moviepy-0.2.2.12/moviepy/video/tools/interpolators.py | Python | gpl-3.0 | 2,120 |
from setuptools import setup
version = '1.4'
testing_extras = ['nose', 'coverage']
docs_extras = ['Sphinx']
setup(
name='WebOb',
version=version,
description="WSGI request and response object",
long_description="""\
WebOb provides wrappers around the WSGI request environment, and an
object to help create WSGI responses.
The objects map much of the specified behavior of HTTP, including
header parsing and accessors for other standard parts of the
environment.
You may install the `in-development version of WebOb
<https://github.com/Pylons/webob/zipball/master#egg=WebOb-dev>`_ with
``pip install WebOb==dev`` (or ``easy_install WebOb==dev``).
* `WebOb reference <http://docs.webob.org/en/latest/reference.html>`_
* `Bug tracker <https://github.com/Pylons/webob/issues>`_
* `Browse source code <https://github.com/Pylons/webob>`_
* `Mailing list <http://bit.ly/paste-users>`_
* `Release news <http://docs.webob.org/en/latest/news.html>`_
* `Detailed changelog <https://github.com/Pylons/webob/commits/master>`_
""",
classifiers=[
"Development Status :: 6 - Mature",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
keywords='wsgi request web http',
author='Ian Bicking',
author_email='ianb@colorstudy.com',
maintainer='Pylons Project',
url='http://webob.org/',
license='MIT',
packages=['webob'],
zip_safe=True,
test_suite='nose.collector',
tests_require=['nose'],
extras_require = {
'testing':testing_extras,
'docs':docs_extras,
},
)
| nirmeshk/oh-mainline | vendor/packages/webob/setup.py | Python | agpl-3.0 | 2,150 |
# Copyright 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from nova.cells import rpcapi as cells_rpcapi
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import block_device as block_device_obj
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_objects
class _TestBlockDeviceMappingObject(object):
def fake_bdm(self, instance=None):
instance = instance or {}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': 123,
'instance_uuid': instance.get('uuid') or 'fake-instance',
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'boot_index': -1
})
if instance:
fake_bdm['instance'] = instance
return fake_bdm
def _test_save(self, cell_type=None):
if cell_type:
self.flags(enable=True, cell_type=cell_type, group='cells')
else:
self.flags(enable=False, group='cells')
fake_bdm = self.fake_bdm()
with test.nested(
mock.patch.object(
db, 'block_device_mapping_update', return_value=fake_bdm),
mock.patch.object(
cells_rpcapi.CellsAPI, 'bdm_update_or_create_at_top')
) as (bdm_update_mock, cells_update_mock):
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.id = 123
bdm_object.volume_id = 'fake_volume_id'
bdm_object.save()
bdm_update_mock.assert_called_once_with(
self.context, 123, {'volume_id': 'fake_volume_id'},
legacy=False)
if cell_type != 'compute':
self.assertFalse(cells_update_mock.called)
else:
self.assertEqual(1, cells_update_mock.call_count)
self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
self.assertIsInstance(cells_update_mock.call_args[0][1],
block_device_obj.BlockDeviceMapping)
self.assertEqual(cells_update_mock.call_args[1], {})
def test_save_nocells(self):
self._test_save()
def test_save_apicell(self):
self._test_save(cell_type='api')
def test_save_computecell(self):
self._test_save(cell_type='compute')
def test_save_instance_changed(self):
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.instance = objects.Instance()
self.assertRaises(exception.ObjectActionError,
bdm_object.save)
@mock.patch.object(db, 'block_device_mapping_update', return_value=None)
def test_save_not_found(self, bdm_update):
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.id = 123
self.assertRaises(exception.BDMNotFound, bdm_object.save)
@mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
def test_get_by_volume_id(self, get_by_vol_id):
get_by_vol_id.return_value = self.fake_bdm()
vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
self.context, 'fake-volume-id')
for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
self.assertFalse(vol_bdm.obj_attr_is_set(attr))
@mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
def test_get_by_volume_id_not_found(self, get_by_vol_id):
get_by_vol_id.return_value = None
self.assertRaises(exception.VolumeBDMNotFound,
objects.BlockDeviceMapping.get_by_volume_id,
self.context, 'fake-volume-id')
@mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
    def test_get_by_volume_instance_uuid_mismatch(self, get_by_vol_id):
fake_bdm_vol = self.fake_bdm(instance={'uuid': 'other-fake-instance'})
get_by_vol_id.return_value = fake_bdm_vol
self.assertRaises(exception.InvalidVolume,
objects.BlockDeviceMapping.get_by_volume_id,
self.context, 'fake-volume-id',
instance_uuid='fake-instance')
@mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
def test_get_by_volume_id_with_expected(self, get_by_vol_id):
get_by_vol_id.return_value = self.fake_bdm(
fake_instance.fake_db_instance())
vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
self.context, 'fake-volume-id', expected_attrs=['instance'])
for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
self.assertTrue(vol_bdm.obj_attr_is_set(attr))
get_by_vol_id.assert_called_once_with(self.context, 'fake-volume-id',
['instance'])
def _test_create_mocked(self, cell_type=None, update_or_create=False,
device_name=None):
if cell_type:
self.flags(enable=True, cell_type=cell_type, group='cells')
else:
self.flags(enable=False, group='cells')
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': 'fake-instance'}
if device_name:
values['device_name'] = device_name
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(values)
with test.nested(
mock.patch.object(
db, 'block_device_mapping_create', return_value=fake_bdm),
mock.patch.object(
db, 'block_device_mapping_update_or_create',
return_value=fake_bdm),
mock.patch.object(cells_rpcapi.CellsAPI,
'bdm_update_or_create_at_top')
) as (bdm_create_mock, bdm_update_or_create_mock, cells_update_mock):
bdm = objects.BlockDeviceMapping(context=self.context, **values)
if update_or_create:
method = bdm.update_or_create
else:
method = bdm.create
if cell_type == 'api':
self.assertRaises(exception.ObjectActionError,
method)
else:
method()
if update_or_create:
bdm_update_or_create_mock.assert_called_once_with(
self.context, values, legacy=False)
else:
bdm_create_mock.assert_called_once_with(
self.context, values, legacy=False)
if cell_type == 'compute' and 'device_name' in values:
self.assertEqual(1, cells_update_mock.call_count)
self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
self.assertEqual(cells_update_mock.call_args[0][0],
self.context)
self.assertIsInstance(cells_update_mock.call_args[0][1],
block_device_obj.BlockDeviceMapping)
self.assertEqual(cells_update_mock.call_args[1],
{'create': update_or_create or None})
else:
self.assertFalse(cells_update_mock.called)
def test_create_nocells(self):
self._test_create_mocked()
def test_update_or_create(self):
self._test_create_mocked(update_or_create=True)
def test_create_apicell(self):
self._test_create_mocked(cell_type='api')
def test_update_or_create_apicell(self):
self._test_create_mocked(cell_type='api', update_or_create=True)
def test_create_computecell(self):
self._test_create_mocked(cell_type='compute')
def test_update_or_create_computecell(self):
self._test_create_mocked(cell_type='compute', update_or_create=True)
def test_device_name_compute_cell(self):
self._test_create_mocked(cell_type='compute', device_name='/dev/xvdb')
def test_create(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': 'fake-instance'}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
with mock.patch.object(cells_rpcapi.CellsAPI,
'bdm_update_or_create_at_top'):
bdm.create()
for k, v in six.iteritems(values):
self.assertEqual(v, getattr(bdm, k))
def test_create_fails(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': 'fake-instance'}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
bdm.create()
self.assertRaises(exception.ObjectActionError,
bdm.create)
def test_create_fails_instance(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': 'fake-instance',
'instance': objects.Instance()}
bdm = objects.BlockDeviceMapping(context=self.context, **values)
self.assertRaises(exception.ObjectActionError,
bdm.create)
def _test_destroy_mocked(self, cell_type=None):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume', 'id': 1,
'instance_uuid': 'fake-instance', 'device_name': 'fake'}
if cell_type:
self.flags(enable=True, cell_type=cell_type, group='cells')
else:
self.flags(enable=False, group='cells')
with test.nested(
mock.patch.object(db, 'block_device_mapping_destroy'),
mock.patch.object(cells_rpcapi.CellsAPI, 'bdm_destroy_at_top')
) as (bdm_del, cells_destroy):
bdm = objects.BlockDeviceMapping(context=self.context, **values)
bdm.destroy()
bdm_del.assert_called_once_with(self.context, values['id'])
if cell_type != 'compute':
self.assertFalse(cells_destroy.called)
else:
cells_destroy.assert_called_once_with(
self.context, values['instance_uuid'],
device_name=values['device_name'],
volume_id=values['volume_id'])
def test_destroy_nocells(self):
self._test_destroy_mocked()
def test_destroy_apicell(self):
self._test_destroy_mocked(cell_type='api')
def test_destroy_computecell(self):
self._test_destroy_mocked(cell_type='compute')
def test_is_image_true(self):
bdm = objects.BlockDeviceMapping(context=self.context,
source_type='image')
self.assertTrue(bdm.is_image)
def test_is_image_false(self):
bdm = objects.BlockDeviceMapping(context=self.context,
source_type='snapshot')
self.assertFalse(bdm.is_image)
def test_is_volume_true(self):
bdm = objects.BlockDeviceMapping(context=self.context,
destination_type='volume')
self.assertTrue(bdm.is_volume)
def test_is_volume_false(self):
bdm = objects.BlockDeviceMapping(context=self.context,
destination_type='local')
self.assertFalse(bdm.is_volume)
class TestBlockDeviceMappingObject(test_objects._LocalTest,
_TestBlockDeviceMappingObject):
pass
class TestRemoteBlockDeviceMappingObject(test_objects._RemoteTest,
_TestBlockDeviceMappingObject):
pass
class _TestBlockDeviceMappingListObject(object):
def fake_bdm(self, bdm_id):
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': bdm_id, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'boot_index': -1,
})
return fake_bdm
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
def test_get_by_instance_uuid(self, get_all_by_inst):
fakes = [self.fake_bdm(123), self.fake_bdm(456)]
get_all_by_inst.return_value = fakes
bdm_list = (
objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, 'fake_instance_uuid'))
for faked, got in zip(fakes, bdm_list):
self.assertIsInstance(got, objects.BlockDeviceMapping)
self.assertEqual(faked['id'], got.id)
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
def test_get_by_instance_uuid_no_result(self, get_all_by_inst):
get_all_by_inst.return_value = None
bdm_list = (
objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, 'fake_instance_uuid'))
self.assertEqual(0, len(bdm_list))
def test_root_volume_metadata(self):
fake_volume = {
'volume_image_metadata': {'vol_test_key': 'vol_test_value'}}
class FakeVolumeApi(object):
def get(*args, **kwargs):
return fake_volume
block_device_mapping = block_device_obj.block_device_make_list(None, [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'boot_index': 0,
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake_volume_id',
'delete_on_termination': False})])
volume_meta = block_device_mapping.root_metadata(
self.context, None, FakeVolumeApi())
self.assertEqual(fake_volume['volume_image_metadata'], volume_meta)
def test_root_image_metadata(self):
fake_image = {'properties': {'img_test_key': 'img_test_value'}}
class FakeImageApi(object):
def show(*args, **kwargs):
return fake_image
block_device_mapping = block_device_obj.block_device_make_list(None, [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'boot_index': 0,
'source_type': 'image',
'destination_type': 'local',
'image_id': "fake-image",
'delete_on_termination': True})])
image_meta = block_device_mapping.root_metadata(
self.context, FakeImageApi(), None)
self.assertEqual(fake_image['properties'], image_meta)
class TestBlockDeviceMappingListObject(test_objects._LocalTest,
_TestBlockDeviceMappingListObject):
pass
class TestRemoteBlockDeviceMappingListObject(
test_objects._RemoteTest, _TestBlockDeviceMappingListObject):
pass
class TestBlockDeviceUtils(test.NoDBTestCase):
def test_make_list_from_dicts(self):
ctx = context.get_admin_context()
dicts = [{'id': 1}, {'id': 2}]
objs = block_device_obj.block_device_make_list_from_dicts(ctx,
dicts)
self.assertIsInstance(objs, block_device_obj.BlockDeviceMappingList)
self.assertEqual(2, len(objs))
self.assertEqual(1, objs[0].id)
self.assertEqual(2, objs[1].id)
def test_make_list_from_dicts_empty(self):
ctx = context.get_admin_context()
objs = block_device_obj.block_device_make_list_from_dicts(ctx, [])
self.assertIsInstance(objs, block_device_obj.BlockDeviceMappingList)
self.assertEqual(0, len(objs))
| devendermishrajio/nova_test_latest | nova/tests/unit/objects/test_block_device.py | Python | apache-2.0 | 16,751 |
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2015 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
class core(object):
def __init__(self, zap):
self.zap = zap
def alert(self, id):
"""
        Gets the alert with the given ID; the corresponding HTTP message can be obtained via the 'messageId' field and the 'message' API method
"""
return next(self.zap._request(self.zap.base + 'core/view/alert/', {'id' : id}).itervalues())
def alerts(self, baseurl='', start='', count=''):
"""
Gets the alerts raised by ZAP, optionally filtering by URL and paginating with 'start' position and 'count' of alerts
"""
return next(self.zap._request(self.zap.base + 'core/view/alerts/', {'baseurl' : baseurl, 'start' : start, 'count' : count}).itervalues())
def number_of_alerts(self, baseurl=''):
"""
Gets the number of alerts, optionally filtering by URL
"""
return next(self.zap._request(self.zap.base + 'core/view/numberOfAlerts/', {'baseurl' : baseurl}).itervalues())
@property
def hosts(self):
"""
Gets the name of the hosts accessed through/by ZAP
"""
return next(self.zap._request(self.zap.base + 'core/view/hosts/').itervalues())
@property
def sites(self):
"""
Gets the sites accessed through/by ZAP (scheme and domain)
"""
return next(self.zap._request(self.zap.base + 'core/view/sites/').itervalues())
@property
def urls(self):
"""
Gets the URLs accessed through/by ZAP
"""
return next(self.zap._request(self.zap.base + 'core/view/urls/').itervalues())
def message(self, id):
"""
Gets the HTTP message with the given ID. Returns the ID, request/response headers and bodies, cookies and note.
"""
return next(self.zap._request(self.zap.base + 'core/view/message/', {'id' : id}).itervalues())
def messages(self, baseurl='', start='', count=''):
"""
Gets the HTTP messages sent by ZAP, request and response, optionally filtered by URL and paginated with 'start' position and 'count' of messages
"""
return next(self.zap._request(self.zap.base + 'core/view/messages/', {'baseurl' : baseurl, 'start' : start, 'count' : count}).itervalues())
def number_of_messages(self, baseurl=''):
"""
Gets the number of messages, optionally filtering by URL
"""
return next(self.zap._request(self.zap.base + 'core/view/numberOfMessages/', {'baseurl' : baseurl}).itervalues())
@property
def version(self):
"""
Gets ZAP version
"""
return next(self.zap._request(self.zap.base + 'core/view/version/').itervalues())
@property
def excluded_from_proxy(self):
"""
Gets the regular expressions, applied to URLs, to exclude from the Proxy
"""
return next(self.zap._request(self.zap.base + 'core/view/excludedFromProxy/').itervalues())
@property
def home_directory(self):
return next(self.zap._request(self.zap.base + 'core/view/homeDirectory/').itervalues())
def stats(self, keyprefix=''):
return next(self.zap._request(self.zap.base + 'core/view/stats/', {'keyPrefix' : keyprefix}).itervalues())
@property
def option_http_state_enabled(self):
return next(self.zap._request(self.zap.base + 'core/view/optionHttpStateEnabled/').itervalues())
@property
def option_use_proxy_chain(self):
return next(self.zap._request(self.zap.base + 'core/view/optionUseProxyChain/').itervalues())
@property
def option_proxy_chain_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainName/').itervalues())
@property
def option_proxy_chain_port(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPort/').itervalues())
@property
def option_proxy_chain_skip_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainSkipName/').itervalues())
@property
def option_use_proxy_chain_auth(self):
return next(self.zap._request(self.zap.base + 'core/view/optionUseProxyChainAuth/').itervalues())
@property
def option_proxy_chain_user_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainUserName/').itervalues())
@property
def option_proxy_chain_realm(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainRealm/').itervalues())
@property
def option_proxy_chain_password(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPassword/').itervalues())
@property
def option_proxy_chain_prompt(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPrompt/').itervalues())
@property
def option_http_state(self):
return next(self.zap._request(self.zap.base + 'core/view/optionHttpState/').itervalues())
@property
def option_timeout_in_secs(self):
return next(self.zap._request(self.zap.base + 'core/view/optionTimeoutInSecs/').itervalues())
@property
def option_single_cookie_request_header(self):
return next(self.zap._request(self.zap.base + 'core/view/optionSingleCookieRequestHeader/').itervalues())
@property
def option_proxy_excluded_domains(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyExcludedDomains/').itervalues())
@property
def option_proxy_excluded_domains_enabled(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyExcludedDomainsEnabled/').itervalues())
@property
def option_default_user_agent(self):
return next(self.zap._request(self.zap.base + 'core/view/optionDefaultUserAgent/').itervalues())
def shutdown(self, apikey=''):
"""
Shuts down ZAP
"""
return next(self.zap._request(self.zap.base + 'core/action/shutdown/', {'apikey' : apikey}).itervalues())
def new_session(self, name='', overwrite='', apikey=''):
"""
Creates a new session, optionally overwriting existing files. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/newSession/', {'name' : name, 'overwrite' : overwrite, 'apikey' : apikey}).itervalues())
def load_session(self, name, apikey=''):
"""
Loads the session with the given name. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/loadSession/', {'name' : name, 'apikey' : apikey}).itervalues())
def save_session(self, name, overwrite='', apikey=''):
"""
Saves the session with the name supplied, optionally overwriting existing files. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/saveSession/', {'name' : name, 'overwrite' : overwrite, 'apikey' : apikey}).itervalues())
def snapshot_session(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/snapshotSession/', {'apikey' : apikey}).itervalues())
def clear_excluded_from_proxy(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/clearExcludedFromProxy/', {'apikey' : apikey}).itervalues())
def exclude_from_proxy(self, regex, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/excludeFromProxy/', {'regex' : regex, 'apikey' : apikey}).itervalues())
def set_home_directory(self, dir, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setHomeDirectory/', {'dir' : dir, 'apikey' : apikey}).itervalues())
def generate_root_ca(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/generateRootCA/', {'apikey' : apikey}).itervalues())
def send_request(self, request, followredirects='', apikey=''):
"""
Sends the HTTP request, optionally following redirections. Returns the request sent and response received and followed redirections, if any.
"""
return next(self.zap._request(self.zap.base + 'core/action/sendRequest/', {'request' : request, 'followRedirects' : followredirects, 'apikey' : apikey}).itervalues())
def delete_all_alerts(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/deleteAllAlerts/', {'apikey' : apikey}).itervalues())
def run_garbage_collection(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/runGarbageCollection/', {'apikey' : apikey}).itervalues())
def clear_stats(self, keyprefix, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/clearStats/', {'keyPrefix' : keyprefix, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_name(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainName/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_realm(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainRealm/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_user_name(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainUserName/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_password(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPassword/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_skip_name(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainSkipName/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_default_user_agent(self, string, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionDefaultUserAgent/', {'String' : string, 'apikey' : apikey}).itervalues())
def set_option_http_state_enabled(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionHttpStateEnabled/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_port(self, integer, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPort/', {'Integer' : integer, 'apikey' : apikey}).itervalues())
def set_option_proxy_chain_prompt(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPrompt/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def set_option_timeout_in_secs(self, integer, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionTimeoutInSecs/', {'Integer' : integer, 'apikey' : apikey}).itervalues())
def set_option_use_proxy_chain(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionUseProxyChain/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def set_option_use_proxy_chain_auth(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionUseProxyChainAuth/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def set_option_single_cookie_request_header(self, boolean, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/setOptionSingleCookieRequestHeader/', {'Boolean' : boolean, 'apikey' : apikey}).itervalues())
def proxy_pac(self, apikey=''):
return (self.zap._request_other(self.zap.base_other + 'core/other/proxy.pac/', {'apikey' : apikey}))
def rootcert(self, apikey=''):
return (self.zap._request_other(self.zap.base_other + 'core/other/rootcert/', {'apikey' : apikey}))
def setproxy(self, proxy, apikey=''):
return (self.zap._request_other(self.zap.base_other + 'core/other/setproxy/', {'proxy' : proxy, 'apikey' : apikey}))
def xmlreport(self, apikey=''):
"""
Generates a report in XML format
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/xmlreport/', {'apikey' : apikey}))
def htmlreport(self, apikey=''):
"""
Generates a report in HTML format
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/htmlreport/', {'apikey' : apikey}))
def message_har(self, id, apikey=''):
"""
Gets the message with the given ID in HAR format
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/messageHar/', {'id' : id, 'apikey' : apikey}))
def messages_har(self, baseurl='', start='', count='', apikey=''):
"""
Gets the HTTP messages sent through/by ZAP, in HAR format, optionally filtered by URL and paginated with 'start' position and 'count' of messages
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/messagesHar/', {'baseurl' : baseurl, 'start' : start, 'count' : count, 'apikey' : apikey}))
def send_har_request(self, request, followredirects='', apikey=''):
"""
Sends the first HAR request entry, optionally following redirections. Returns, in HAR format, the request sent and response received and followed redirections, if any.
"""
return (self.zap._request_other(self.zap.base_other + 'core/other/sendHarRequest/', {'request' : request, 'followRedirects' : followredirects, 'apikey' : apikey}))
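# Typical access goes through the ZAPv2 client wrapper, e.g. (proxy address
# and API key below are placeholders; a running ZAP instance is assumed):
#
#   from zapv2 import ZAPv2
#   zap = ZAPv2(proxies={'http': 'http://127.0.0.1:8080'})
#   print zap.core.version
#   zap.core.new_session(name='scan1', overwrite='true', apikey='changeme')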
| 0xkasun/zaproxy | python/api/src/zapv2/core.py | Python | apache-2.0 | 14,644 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ZeroOut op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
_zero_out_module = tf.load_op_library(
os.path.join(tf.resource_loader.get_data_files_path(),
'zero_out_op_kernel_1.so'))
zero_out = _zero_out_module.zero_out
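# Per the "adding an op" guide this file accompanies, the kernel copies the
# first element of the input and zeroes the rest; a quick check (sketch):
#
#   with tf.Session():
#       print(zero_out([[1, 2], [3, 4]]).eval())  # => [[1 0] [0 0]]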
| ivano666/tensorflow | tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_1.py | Python | apache-2.0 | 1,041 |
import sys
import subprocess
MODULES = [
"scipy.cluster",
"scipy.cluster.vq",
"scipy.cluster.hierarchy",
"scipy.constants",
"scipy.fft",
"scipy.fftpack",
"scipy.fftpack.convolve",
"scipy.integrate",
"scipy.interpolate",
"scipy.io",
"scipy.io.arff",
"scipy.io.harwell_boeing",
"scipy.io.idl",
"scipy.io.matlab",
"scipy.io.netcdf",
"scipy.io.wavfile",
"scipy.linalg",
"scipy.linalg.blas",
"scipy.linalg.cython_blas",
"scipy.linalg.lapack",
"scipy.linalg.cython_lapack",
"scipy.linalg.interpolative",
"scipy.misc",
"scipy.ndimage",
"scipy.odr",
"scipy.optimize",
"scipy.signal",
"scipy.signal.windows",
"scipy.sparse",
"scipy.sparse.linalg",
"scipy.sparse.csgraph",
"scipy.spatial",
"scipy.spatial.distance",
"scipy.special",
"scipy.stats",
"scipy.stats.distributions",
"scipy.stats.mstats",
"scipy.stats.contingency"
]
def test_modules_importable():
# Regression test for gh-6793.
# Check that all modules are importable in a new Python process.
# This is not necessarily true if there are import cycles present.
for module in MODULES:
cmd = 'import {}'.format(module)
subprocess.check_call([sys.executable, '-c', cmd])
| WarrenWeckesser/scipy | scipy/_lib/tests/test_import_cycles.py | Python | bsd-3-clause | 1,306 |
keyBindings = { }
from keyids import KEYIDS
from Components.config import config
from Components.RcModel import rc_model
keyDescriptions = [{
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("PVR",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("YELLOW",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_PREVIOUSSONG"]: ("REWIND",),
KEYIDS["KEY_PLAYPAUSE"]: ("PLAYPAUSE",),
KEYIDS["KEY_PLAY"]: ("PLAYPAUSE",),
KEYIDS["KEY_NEXTSONG"]: ("FASTFORWARD",),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("STOP",),
KEYIDS["KEY_RECORD"]: ("RECORD",)
},
{
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("VIDEO",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("AUDIO",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_PREVIOUSSONG"]: ("RED", "SHIFT"),
KEYIDS["KEY_PLAYPAUSE"]: ("YELLOW", "SHIFT"),
KEYIDS["KEY_PLAY"]: ("GREEN", "SHIFT"),
KEYIDS["KEY_NEXTSONG"]: ("BLUE", "SHIFT"),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("TV", "SHIFT"),
KEYIDS["KEY_RECORD"]: ("RADIO", "SHIFT")
},
{
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("PVR",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("AUDIO",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_PREVIOUSSONG"]: ("REWIND",),
KEYIDS["KEY_PLAYPAUSE"]: ("PAUSE",),
KEYIDS["KEY_PLAY"]: ("PLAY",),
KEYIDS["KEY_NEXTSONG"]: ("FASTFORWARD",),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("STOP",),
KEYIDS["KEY_RECORD"]: ("RECORD",),
KEYIDS["KEY_PAGEUP"]: ("PAGEUP",),
KEYIDS["KEY_PAGEDOWN"]: ("PAGEDOWN",)
},
{ # XP1000
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("PVR",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("AUDIO",),
KEYIDS["KEY_SUBTITLE"]: ("SUBTITLE",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_PREVIOUSSONG"]: ("SKIPBACK",),
KEYIDS["KEY_REWIND"]: ("REWIND",),
KEYIDS["KEY_FASTFORWARD"]: ("FASTFORWARD",),
KEYIDS["KEY_NEXTSONG"]: ("SKIPFORWARD",),
KEYIDS["KEY_PLAYPAUSE"]: ("PLAYPAUSE",),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("STOP",),
KEYIDS["KEY_RECORD"]: ("RECORD",),
KEYIDS["KEY_BOOKMARKS"]: ("PORTAL",),
KEYIDS["KEY_VMODE"]: ("VMODE",),
KEYIDS["KEY_PROGRAM"]: ("TIMER",),
KEYIDS["KEY_SLEEP"]: ("SLEEP",),
KEYIDS["KEY_EPG"]: ("EPG",),
},
{ # Formuler F1
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("PVR",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("AUDIO",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_REWIND"]: ("REWIND",),
KEYIDS["KEY_PAUSE"]: ("PAUSE",),
KEYIDS["KEY_PLAY"]: ("PLAY",),
KEYIDS["KEY_FASTFORWARD"]: ("FASTFORWARD",),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("STOP",),
KEYIDS["KEY_RECORD"]: ("RECORD",),
KEYIDS["KEY_F1"]: ("F1",),
KEYIDS["KEY_F2"]: ("F2",),
KEYIDS["KEY_F3"]: ("F3",),
KEYIDS["KEY_BACK"]: ("RECALL",),
KEYIDS["KEY_CONTEXT_MENU"]: ("CONTEXT",),
KEYIDS["KEY_EPG"]: ("EPG",),
KEYIDS["KEY_BOOKMARKS"]: ("PLAYLIST",),
},
]
def addKeyBinding(domain, key, context, action, flags):
keyBindings.setdefault((context, action), []).append((key, domain, flags))
# returns a list of (key, flags) for a specified action
def queryKeyBinding(context, action):
if (context, action) in keyBindings:
return [(x[0], x[2]) for x in keyBindings[(context, action)]]
else:
return [ ]
def getKeyDescription(key):
if rc_model.rcIsDefault():
idx = config.misc.rcused.value
else:
rctype = config.plugins.remotecontroltype.rctype.value
if rctype == 14: # XP1000
idx = 3
elif rctype == 18: # F1
idx = 4
else:
idx = 2
	return keyDescriptions[idx].get(key)
def removeKeyBindings(domain):
# remove all entries of domain 'domain'
for x in keyBindings:
keyBindings[x] = filter(lambda e: e[1] != domain, keyBindings[x])
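# Registry round-trip illustration (domain/context/action values hypothetical):
#
#   addKeyBinding('enigma2', KEYIDS["KEY_OK"], 'OkCancelActions', 'ok', 0)
#   queryKeyBinding('OkCancelActions', 'ok')  # -> [(KEYIDS["KEY_OK"], 0)]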
| ACJTeam/enigma2 | lib/python/Tools/KeyBindings.py | Python | gpl-2.0 | 8,341 |
"""
Turtle RDF graph serializer for RDFLib.
See <http://www.w3.org/TeamSubmission/turtle/> for syntax specification.
"""
from collections import defaultdict
from rdflib.term import BNode, Literal, URIRef
from rdflib.exceptions import Error
from rdflib.serializer import Serializer
from rdflib.namespace import RDF, RDFS
__all__ = ['RecursiveSerializer', 'TurtleSerializer']
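# Typical use goes through Graph.serialize, which looks the serializer up by
# format name; a brief sketch (triple data is illustrative):
#
#   from rdflib import Graph
#   g = Graph()
#   g.parse(data="<http://a> <http://b> <http://c> .", format="nt")
#   print g.serialize(format="turtle")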
class RecursiveSerializer(Serializer):
topClasses = [RDFS.Class]
predicateOrder = [RDF.type, RDFS.label]
maxDepth = 10
indentString = u" "
def __init__(self, store):
super(RecursiveSerializer, self).__init__(store)
self.stream = None
self.reset()
def addNamespace(self, prefix, uri):
self.namespaces[prefix] = uri
def checkSubject(self, subject):
"""Check to see if the subject should be serialized yet"""
if ((self.isDone(subject))
or (subject not in self._subjects)
or ((subject in self._topLevels) and (self.depth > 1))
or (isinstance(subject, URIRef)
and (self.depth >= self.maxDepth))):
return False
return True
def isDone(self, subject):
"""Return true if subject is serialized"""
return subject in self._serialized
def orderSubjects(self):
seen = {}
subjects = []
for classURI in self.topClasses:
members = list(self.store.subjects(RDF.type, classURI))
members.sort()
for member in members:
subjects.append(member)
self._topLevels[member] = True
seen[member] = True
recursable = [
(isinstance(subject, BNode),
self._references[subject], subject)
for subject in self._subjects if subject not in seen]
recursable.sort()
subjects.extend([subject for (isbnode, refs, subject) in recursable])
return subjects
def preprocess(self):
for triple in self.store.triples((None, None, None)):
self.preprocessTriple(triple)
def preprocessTriple(self, (s, p, o)):
        self._references[o] += 1
self._subjects[s] = True
def reset(self):
self.depth = 0
self.lists = {}
self.namespaces = {}
self._references = defaultdict(int)
self._serialized = {}
self._subjects = {}
self._topLevels = {}
for prefix, ns in self.store.namespaces():
self.addNamespace(prefix, ns)
def buildPredicateHash(self, subject):
"""
Build a hash key by predicate to a list of objects for the given
subject
"""
properties = {}
for s, p, o in self.store.triples((subject, None, None)):
oList = properties.get(p, [])
oList.append(o)
properties[p] = oList
return properties
def sortProperties(self, properties):
"""Take a hash from predicate uris to lists of values.
Sort the lists of values. Return a sorted list of properties."""
# Sort object lists
for prop, objects in properties.items():
objects.sort()
# Make sorted list of properties
propList = []
seen = {}
for prop in self.predicateOrder:
if (prop in properties) and (prop not in seen):
propList.append(prop)
seen[prop] = True
props = properties.keys()
props.sort()
for prop in props:
if prop not in seen:
propList.append(prop)
seen[prop] = True
return propList
def subjectDone(self, subject):
"""Mark a subject as done."""
self._serialized[subject] = True
def indent(self, modifier=0):
"""Returns indent string multiplied by the depth"""
return (self.depth + modifier) * self.indentString
def write(self, text):
"""Write text in given encoding."""
self.stream.write(text.encode(self.encoding, 'replace'))
SUBJECT = 0
VERB = 1
OBJECT = 2
_GEN_QNAME_FOR_DT = False
_SPACIOUS_OUTPUT = False
class TurtleSerializer(RecursiveSerializer):
short_name = "turtle"
indentString = ' '
def __init__(self, store):
self._ns_rewrite = {}
super(TurtleSerializer, self).__init__(store)
self.keywords = {
RDF.type: 'a'
}
self.reset()
self.stream = None
self._spacious = _SPACIOUS_OUTPUT
def addNamespace(self, prefix, namespace):
# Turtle does not support prefix that start with _
# if they occur in the graph, rewrite to p_blah
# this is more complicated since we need to make sure p_blah
# does not already exist. And we register namespaces as we go, i.e.
# we may first see a triple with prefix _9 - rewrite it to p_9
# and then later find a triple with a "real" p_9 prefix
# so we need to keep track of ns rewrites we made so far.
if (prefix > '' and prefix[0] == '_') \
or self.namespaces.get(prefix, namespace) != namespace:
if prefix not in self._ns_rewrite:
p = "p" + prefix
while p in self.namespaces:
p = "p" + p
self._ns_rewrite[prefix] = p
prefix = self._ns_rewrite.get(prefix, prefix)
super(TurtleSerializer, self).addNamespace(prefix, namespace)
return prefix
def reset(self):
super(TurtleSerializer, self).reset()
self._shortNames = {}
self._started = False
self._ns_rewrite = {}
def serialize(self, stream, base=None, encoding=None,
spacious=None, **args):
self.reset()
self.stream = stream
self.base = base
if spacious is not None:
self._spacious = spacious
self.preprocess()
subjects_list = self.orderSubjects()
self.startDocument()
firstTime = True
for subject in subjects_list:
if self.isDone(subject):
continue
if firstTime:
firstTime = False
if self.statement(subject) and not firstTime:
self.write('\n')
self.endDocument()
stream.write(u"\n".encode('ascii'))
def preprocessTriple(self, triple):
super(TurtleSerializer, self).preprocessTriple(triple)
for i, node in enumerate(triple):
if node in self.keywords:
continue
# Don't use generated prefixes for subjects and objects
self.getQName(node, gen_prefix=(i == VERB))
if isinstance(node, Literal) and node.datatype:
self.getQName(node.datatype, gen_prefix=_GEN_QNAME_FOR_DT)
p = triple[1]
if isinstance(p, BNode): # hmm - when is P ever a bnode?
            self._references[p] += 1
def getQName(self, uri, gen_prefix=True):
if not isinstance(uri, URIRef):
return None
parts = None
try:
parts = self.store.compute_qname(uri, generate=gen_prefix)
except:
# is the uri a namespace in itself?
pfx = self.store.store.prefix(uri)
if pfx is not None:
parts = (pfx, uri, '')
else:
# nothing worked
return None
prefix, namespace, local = parts
# QName cannot end with .
if local.endswith("."): return None
prefix = self.addNamespace(prefix, namespace)
return u'%s:%s' % (prefix, local)
def startDocument(self):
self._started = True
ns_list = sorted(self.namespaces.items())
for prefix, uri in ns_list:
self.write(self.indent() + '@prefix %s: <%s> .\n' % (prefix, uri))
if ns_list and self._spacious:
self.write('\n')
def endDocument(self):
if self._spacious:
self.write('\n')
def statement(self, subject):
self.subjectDone(subject)
return self.s_squared(subject) or self.s_default(subject)
def s_default(self, subject):
self.write('\n' + self.indent())
self.path(subject, SUBJECT)
self.predicateList(subject)
self.write(' .')
return True
def s_squared(self, subject):
if (self._references[subject] > 0) or not isinstance(subject, BNode):
return False
self.write('\n' + self.indent() + '[]')
self.predicateList(subject)
self.write(' .')
return True
def path(self, node, position, newline=False):
if not (self.p_squared(node, position, newline)
or self.p_default(node, position, newline)):
raise Error("Cannot serialize node '%s'" % (node, ))
def p_default(self, node, position, newline=False):
if position != SUBJECT and not newline:
self.write(' ')
self.write(self.label(node, position))
return True
def label(self, node, position):
if node == RDF.nil:
return '()'
if position is VERB and node in self.keywords:
return self.keywords[node]
if isinstance(node, Literal):
return node._literal_n3(
use_plain=True,
qname_callback=lambda dt: self.getQName(
dt, _GEN_QNAME_FOR_DT))
else:
node = self.relativize(node)
return self.getQName(node, position == VERB) or node.n3()
def p_squared(self, node, position, newline=False):
if (not isinstance(node, BNode)
or node in self._serialized
or self._references[node] > 1
or position == SUBJECT):
return False
if not newline:
self.write(' ')
if self.isValidList(node):
# this is a list
self.write('(')
self.depth += 1 # 2
self.doList(node)
self.depth -= 1 # 2
self.write(' )')
else:
self.subjectDone(node)
self.depth += 2
# self.write('[\n' + self.indent())
self.write('[')
self.depth -= 1
# self.predicateList(node, newline=True)
self.predicateList(node, newline=False)
# self.write('\n' + self.indent() + ']')
self.write(' ]')
self.depth -= 1
return True
def isValidList(self, l):
"""
Checks if l is a valid RDF list, i.e. no nodes have other properties.
"""
try:
if not self.store.value(l, RDF.first):
return False
except:
return False
while l:
if l != RDF.nil and len(
list(self.store.predicate_objects(l))) != 2:
return False
l = self.store.value(l, RDF.rest)
return True
def doList(self, l):
while l:
item = self.store.value(l, RDF.first)
if item is not None:
self.path(item, OBJECT)
self.subjectDone(l)
l = self.store.value(l, RDF.rest)
def predicateList(self, subject, newline=False):
properties = self.buildPredicateHash(subject)
propList = self.sortProperties(properties)
if len(propList) == 0:
return
self.verb(propList[0], newline=newline)
self.objectList(properties[propList[0]])
for predicate in propList[1:]:
self.write(' ;\n' + self.indent(1))
self.verb(predicate, newline=True)
self.objectList(properties[predicate])
def verb(self, node, newline=False):
self.path(node, VERB, newline)
def objectList(self, objects):
count = len(objects)
if count == 0:
return
depthmod = (count == 1) and 0 or 1
self.depth += depthmod
self.path(objects[0], OBJECT)
for obj in objects[1:]:
self.write(',\n' + self.indent(1))
self.path(obj, OBJECT, newline=True)
self.depth -= depthmod
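# --- Illustrative sketch (not part of the original module) ---
# In normal use this serializer is not instantiated directly; it is
# registered as an rdflib plugin and reached through Graph.serialize().
# A minimal, hedged example of that entry point (the exact return type of
# serialize() has varied across rdflib versions):
if __name__ == '__main__':
    from rdflib import Graph, URIRef, Literal
    g = Graph()
    g.add((URIRef('http://example.org/a'),
           URIRef('http://example.org/name'),
           Literal('example')))
    print g.serialize(format='turtle')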
| gloaec/trifle | src/rdflib/plugins/serializers/turtle.py | Python | gpl-3.0 | 12,175 |
from __future__ import unicode_literals
import frappe
import frappe.website.render
def execute():
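	# For every distinct template item referenced by a published variant,
	# re-enable show_in_website, regenerate the route, persist page_name
	# without bumping the modified timestamp, and finally clear the website
	# cache so the refreshed routes take effect.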
for item_code in frappe.db.sql_list("""select distinct variant_of from `tabItem`
where variant_of is not null and variant_of !='' and show_in_website=1"""):
item = frappe.get_doc("Item", item_code)
item.db_set("show_in_website", 1, update_modified=False)
item.get_route()
item.db_set("page_name", item.page_name, update_modified=False)
frappe.website.render.clear_cache()
| mahabuber/erpnext | erpnext/patches/v6_5/show_in_website_for_template_item.py | Python | agpl-3.0 | 486 |
#!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test the 'symlink' rule.
import os
import BoostBuild
if os.name != 'posix':
print "The symlink tests can be run on posix only."
import sys
sys.exit(1)
t = BoostBuild.Tester()
t.write("jamroot.jam", "import gcc ;")
t.write("jamfile.jam", """
exe hello : hello.cpp ;
symlink hello_release : hello/<variant>release ;
symlink hello_debug : hello/<variant>debug ;
symlink links/hello_release : hello/<variant>release ;
""")
t.write("hello.cpp", """
int main() {}
""")
t.run_build_system()
t.expect_addition([
'hello_debug.exe',
'hello_release.exe',
'links/hello_release.exe'])
t.cleanup()
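# A natural extra assertion (not in the original test) would be to verify
# that the produced names really are symbolic links rather than copies;
# expect_addition() above only checks that the paths appeared in the tree.
# With the standard library, against the path of a produced file:
#
#   import os.path
#   assert os.path.islink('hello_release.exe')
#
# Whether a bare relative path resolves inside the Tester's sandbox is an
# assumption here; the helper for locating sandbox files may differ.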
| mxrrow/zaicoin | src/deps/boost/tools/build/v2/test/symlink.py | Python | mit | 845 |
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import itertools
import sys
from django.db import connections, router, transaction, IntegrityError
from django.db.models.fields import AutoField
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models import sql
from django.utils.functional import partition
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None):
self.model = model
# EmptyQuerySet instantiates QuerySet with model as None
self._db = using
self.query = query or sql.Query(self.model)
self._result_cache = None
self._iter = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k,v in self.__dict__.items():
if k in ('_iter','_result_cache'):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict['_iter'] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
return len(self._result_cache)
def __iter__(self):
if self._prefetch_related_lookups and not self._prefetch_done:
# We need all the results in order to be able to do the prefetch
# in one go. To minimize code duplication, we use the __len__
# code path which also forces this, and also does the prefetch
len(self)
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def _result_iter(self):
pos = 0
while 1:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos = pos + 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def __nonzero__(self):
if self._prefetch_related_lookups and not self._prefetch_done:
# We need all the results in order to be able to do the prefetch
# in one go. To minimize code duplication, we use the __len__
# code path which also forces this, and also does the prefetch
len(self)
if self._result_cache is not None:
return bool(self._result_cache)
try:
iter(self).next()
except StopIteration:
return False
return True
def __contains__(self, val):
# The 'in' operator works without this method, due to __iter__. This
# implementation exists only to shortcut the creation of Model
# instances, by bailing out early if we find a matching element.
pos = 0
if self._result_cache is not None:
if val in self._result_cache:
return True
elif self._iter is None:
# iterator is exhausted, so we have our answer
return False
# remember not to check these again:
pos = len(self._result_cache)
else:
# We need to start filling the result cache out. The following
# ensures that self._iter is not None and self._result_cache is not
# None
it = iter(self)
# Carry on, one result at a time.
while True:
if len(self._result_cache) <= pos:
self._fill_cache(num=1)
if self._iter is None:
# we ran out of items
return False
if self._result_cache[pos] == val:
return True
pos += 1
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
try:
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
except self.model.DoesNotExist, e:
raise IndexError(e.args)
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
combined = self._clone()
if isinstance(other, EmptyQuerySet):
return combined
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
fill_cache = False
if connections[self.db].features.supports_select_related:
fill_cache = self.query.select_related
if isinstance(fill_cache, dict):
requested = fill_cache
else:
requested = None
max_depth = self.query.max_depth
extra_select = self.query.extra_select.keys()
aggregate_select = self.query.aggregate_select.keys()
only_load = self.query.get_loaded_field_names()
if not fill_cache:
fields = self.model._meta.fields
load_fields = []
# If only/defer clauses have been specified,
# build the list of fields that are to be loaded.
if only_load:
for field, model in self.model._meta.get_fields_with_model():
if model is None:
model = self.model
try:
if field.name in only_load[model]:
# Add a field that has been explicitly included
load_fields.append(field.name)
except KeyError:
# Model wasn't explicitly listed in the only_load table
# Therefore, we need to load all fields from this model
load_fields.append(field.name)
index_start = len(extra_select)
aggregate_start = index_start + len(load_fields or self.model._meta.fields)
skip = None
if load_fields and not fill_cache:
# Some fields have been deferred, so we have to initialise
# via keyword arguments.
skip = set()
init_list = []
for field in fields:
if field.name not in load_fields:
skip.add(field.attname)
else:
init_list.append(field.attname)
model_cls = deferred_class_factory(self.model, skip)
# Cache db and model outside the loop
db = self.db
model = self.model
compiler = self.query.get_compiler(using=db)
if fill_cache:
klass_info = get_klass_info(model, max_depth=max_depth,
requested=requested, only_load=only_load)
for row in compiler.results_iter():
if fill_cache:
obj, _ = get_cached_row(row, index_start, db, klass_info,
offset=len(aggregate_select))
else:
if skip:
row_data = row[index_start:aggregate_start]
obj = model_cls(**dict(zip(init_list, row_data)))
else:
# Omit aggregates in object creation.
obj = model(*row[index_start:aggregate_start])
# Store the source database of the object
obj._state.db = db
# This object came from the database; it's not being added.
obj._state.adding = False
if extra_select:
for i, k in enumerate(extra_select):
setattr(obj, k, row[i])
# Add the aggregates to the model
if aggregate_select:
for i, aggregate in enumerate(aggregate_select):
setattr(obj, aggregate, row[i+aggregate_start])
yield obj
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_aggregate(aggregate_expr, self.model, alias,
is_summary=True)
return query.get_aggregation(using=self.db)
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist("%s matching query does not exist."
% self.model._meta.object_name)
raise self.model.MultipleObjectsReturned("get() returned more than one %s -- it returned %s! Lookup parameters were %s"
% (self.model._meta.object_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def bulk_create(self, objs):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field.
"""
# So this case is fun. When you bulk insert you don't get the primary
# keys back (if it's an autoincrement), so you can't insert into the
# child tables which references this. There are two workarounds, 1)
# this could be implemented if you didn't have an autoincrement pk,
# and 2) you could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back, and then doing a single bulk
# insert into the childmost table. We're punting on these for now
# because they are relatively rare cases.
if self.model._meta.parents:
raise ValueError("Can't bulk create an inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.local_fields
if not transaction.is_managed(using=self.db):
transaction.enter_transaction_management(using=self.db)
forced_managed = True
else:
forced_managed = False
try:
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field):
self.model._base_manager._insert(objs, fields=fields, using=self.db)
else:
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self.model._base_manager._insert(objs_with_pk, fields=fields, using=self.db)
if objs_without_pk:
self.model._base_manager._insert(objs_without_pk, fields=[f for f in fields if not isinstance(f, AutoField)], using=self.db)
if forced_managed:
transaction.commit(using=self.db)
else:
transaction.commit_unless_managed(using=self.db)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.db)
return objs
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, \
'get_or_create() must be passed at least one keyword argument'
defaults = kwargs.pop('defaults', {})
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
try:
self._for_write = True
return self.get(**lookup), False
except self.model.DoesNotExist:
try:
params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
params.update(defaults)
obj = self.model(**params)
sid = transaction.savepoint(using=self.db)
obj.save(force_insert=True, using=self.db)
transaction.savepoint_commit(sid, using=self.db)
return obj, True
except IntegrityError, e:
transaction.savepoint_rollback(sid, using=self.db)
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
# Re-raise the IntegrityError with its original traceback.
raise exc_info[1], None, exc_info[2]
def latest(self, field_name=None):
"""
Returns the latest object, according to the model's 'get_latest_by'
option or optional given field_name.
"""
latest_by = field_name or self.model._meta.get_latest_by
assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering()
obj.query.add_ordering('-%s' % latest_by)
return obj.get()
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if not id_list:
return {}
qs = self._clone()
qs.query.add_filter(('pk__in', id_list))
qs.query.clear_ordering(force_empty=True)
return dict([(obj._get_pk_val(), obj) for obj in qs])
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering()
collector = Collector(using=del_query.db)
collector.collect(del_query)
collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
if not transaction.is_managed(using=self.db):
transaction.enter_transaction_management(using=self.db)
forced_managed = True
else:
forced_managed = False
try:
rows = query.get_compiler(self.db).execute_sql(None)
if forced_managed:
transaction.commit(using=self.db)
else:
transaction.commit_unless_managed(using=self.db)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.db)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(None)
_update.alters_data = True
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def values(self, *fields):
return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (kwargs.keys(),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of datetime objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("month", "year", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self._clone(klass=DateQuerySet, setup=True,
_field_name=field_name, _kind=kind, _order=order)
def none(self):
"""
Returns an empty QuerySet.
"""
return self._clone(klass=EmptyQuerySet)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, **kwargs):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
# Default to false for nowait
nowait = kwargs.pop('nowait', False)
obj = self._clone()
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields, **kwargs):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
"""
depth = kwargs.pop('depth', 0)
if kwargs:
raise TypeError('Unexpected keyword arguments to select_related: %s'
% (kwargs.keys(),))
obj = self._clone()
if fields:
if depth:
raise TypeError('Cannot pass both "depth" and fields to select_related()')
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
if depth:
obj.query.max_depth = depth
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the
        list is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
def dup_select_related(self, other):
"""
Copies the related selection status from the QuerySet 'other' to the
current QuerySet.
"""
self.query.select_related = other.query.select_related
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with data aggregated from related fields.
"""
for arg in args:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
kwargs[arg.default_alias] = arg
names = getattr(self, '_fields', None)
if names is None:
names = set(self.model._meta.get_all_field_names())
for aggregate in kwargs:
if aggregate in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % aggregate)
obj = self._clone()
obj._setup_aggregate_query(kwargs.keys())
# Add the aggregates to the query
for (alias, aggregate_expr) in kwargs.items():
obj.query.add_aggregate(aggregate_expr, self.model, alias,
is_summary=False)
return obj
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering()
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
        Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.model._meta.ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model)
return self._db or router.db_for_read(self.model)
###################
# PRIVATE METHODS #
###################
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
c = klass(model=self.model, query=query, using=self._db)
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c.__dict__.update(kwargs)
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(self._iter.next())
except StopIteration:
self._iter = None
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes. By default
this does nothing, but see the ValuesQuerySet for an example of where
it's useful.
"""
pass
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
opts = self.model._meta
if self.query.group_by is None:
field_names = [f.attname for f in opts.fields]
self.query.add_fields(field_names, False)
self.query.set_group_by()
def _prepare(self):
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
obj = self.values("pk")
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
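# --- Illustrative sketch (not part of the original module) ---
# Typical use of the public API defined above, for a hypothetical
# ``Author`` model with a ``name`` field:
def _example_queryset_usage(Author):
    # filter()/exclude()/order_by() only clone the query; no SQL runs yet
    qs = Author.objects.filter(name__startswith='A').exclude(name='Anon')
    qs = qs.order_by('-name')[:10]      # slicing sets the query limits
    authors = list(qs)                  # evaluation happens here
    # get_or_create() hits the database immediately and returns a tuple
    obj, created = Author.objects.get_or_create(name='Ada')
    return authors, obj, created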
class ValuesQuerySet(QuerySet):
def __init__(self, *args, **kwargs):
super(ValuesQuerySet, self).__init__(*args, **kwargs)
# select_related isn't supported in values(). (FIXME -#3358)
self.query.select_related = False
# QuerySet.clone() will also set up the _fields attribute with the
# names of the model fields to select.
def iterator(self):
# Purge any extra columns that haven't been explicitly asked for
extra_names = self.query.extra_select.keys()
field_names = self.field_names
aggregate_names = self.query.aggregate_select.keys()
names = extra_names + field_names + aggregate_names
for row in self.query.get_compiler(self.db).results_iter():
yield dict(zip(names, row))
def _setup_query(self):
"""
Constructs the field_names list that the values query will be
retrieving.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query.clear_select_fields()
if self._fields:
self.extra_names = []
self.aggregate_names = []
if not self.query.extra and not self.query.aggregates:
# Short cut - if there are no extra or aggregates, then
# the values() clause must be just field names.
self.field_names = list(self._fields)
else:
self.query.default_cols = False
self.field_names = []
for f in self._fields:
# we inspect the full extra_select list since we might
# be adding back an extra select item that we hadn't
# had selected previously.
if f in self.query.extra:
self.extra_names.append(f)
elif f in self.query.aggregate_select:
self.aggregate_names.append(f)
else:
self.field_names.append(f)
else:
# Default to all fields.
self.extra_names = None
self.field_names = [f.attname for f in self.model._meta.fields]
self.aggregate_names = None
self.query.select = []
if self.extra_names is not None:
self.query.set_extra_mask(self.extra_names)
self.query.add_fields(self.field_names, True)
if self.aggregate_names is not None:
self.query.set_aggregate_mask(self.aggregate_names)
def _clone(self, klass=None, setup=False, **kwargs):
"""
Cloning a ValuesQuerySet preserves the current fields.
"""
c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
if not hasattr(c, '_fields'):
# Only clone self._fields if _fields wasn't passed into the cloning
# call directly.
c._fields = self._fields[:]
c.field_names = self.field_names
c.extra_names = self.extra_names
c.aggregate_names = self.aggregate_names
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _merge_sanity_check(self, other):
super(ValuesQuerySet, self)._merge_sanity_check(other)
if (set(self.extra_names) != set(other.extra_names) or
set(self.field_names) != set(other.field_names) or
self.aggregate_names != other.aggregate_names):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
self.query.set_group_by()
if self.aggregate_names is not None:
self.aggregate_names.extend(aggregates)
self.query.set_aggregate_mask(self.aggregate_names)
super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
def _as_sql(self, connection):
"""
For ValueQuerySet (and subclasses like ValuesListQuerySet), they can
only be used as nested queries if they're already set up to select only
a single field (in which case, that is the field column that is
returned). This differs from QuerySet.as_sql(), where the column to
select is set up by Django.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
obj = self._clone()
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
def _prepare(self):
"""
Validates that we aren't trying to do a query like
value__in=qs.values('value1', 'value2'), which isn't valid.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
return self
class ValuesListQuerySet(ValuesQuerySet):
def iterator(self):
if self.flat and len(self._fields) == 1:
for row in self.query.get_compiler(self.db).results_iter():
yield row[0]
elif not self.query.extra_select and not self.query.aggregate_select:
for row in self.query.get_compiler(self.db).results_iter():
yield tuple(row)
else:
# When extra(select=...) or an annotation is involved, the extra
# cols are always at the start of the row, and we need to reorder
# the fields to match the order in self._fields.
extra_names = self.query.extra_select.keys()
field_names = self.field_names
aggregate_names = self.query.aggregate_select.keys()
names = extra_names + field_names + aggregate_names
# If a field list has been specified, use it. Otherwise, use the
# full list of fields, including extras and aggregates.
if self._fields:
fields = list(self._fields) + filter(lambda f: f not in self._fields, aggregate_names)
else:
fields = names
for row in self.query.get_compiler(self.db).results_iter():
data = dict(zip(names, row))
yield tuple([data[f] for f in fields])
def _clone(self, *args, **kwargs):
clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
if not hasattr(clone, "flat"):
# Only assign flat if the clone didn't already get it from kwargs
clone.flat = self.flat
return clone
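# --- Illustrative sketch (not part of the original module) ---
# What iteration yields for the two subclasses above, for a hypothetical
# ``Author`` model:
#
#   Author.objects.values('id', 'name')          # dicts:   {'id': 1, 'name': u'Ada'}
#   Author.objects.values_list('id', 'name')     # tuples:  (1, u'Ada')
#   Author.objects.values_list('id', flat=True)  # scalars: 1
#
# flat=True is only legal with a single field, as enforced in
# QuerySet.values_list() above.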
class DateQuerySet(QuerySet):
def iterator(self):
return self.query.get_compiler(self.db).results_iter()
def _setup_query(self):
"""
Sets up any special features of the query attribute.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query = self.query.clone(klass=sql.DateQuery, setup=True)
self.query.select = []
self.query.add_date_select(self._field_name, self._kind, self._order)
def _clone(self, klass=None, setup=False, **kwargs):
c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
c._field_name = self._field_name
c._kind = self._kind
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
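# --- Illustrative sketch (not part of the original module) ---
# DateQuerySet is reached through QuerySet.dates(); for a hypothetical
# ``Entry`` model with a ``pub_date`` field it yields one datetime per
# distinct period:
#
#   Entry.objects.dates('pub_date', 'year', order='DESC')
#
# 'kind' must be one of 'year', 'month' or 'day', as asserted in dates()
# above.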
class EmptyQuerySet(QuerySet):
def __init__(self, model=None, query=None, using=None):
super(EmptyQuerySet, self).__init__(model, query, using)
self._result_cache = []
def __and__(self, other):
return self._clone()
def __or__(self, other):
return other._clone()
def count(self):
return 0
def delete(self):
pass
def _clone(self, klass=None, setup=False, **kwargs):
c = super(EmptyQuerySet, self)._clone(klass, setup=setup, **kwargs)
c._result_cache = []
return c
def iterator(self):
# This slightly odd construction is because we need an empty generator
# (it raises StopIteration immediately).
yield iter([]).next()
def all(self):
"""
Always returns EmptyQuerySet.
"""
return self
def filter(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def exclude(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def complex_filter(self, filter_obj):
"""
Always returns EmptyQuerySet.
"""
return self
def select_related(self, *fields, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def annotate(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def order_by(self, *field_names):
"""
Always returns EmptyQuerySet.
"""
return self
def distinct(self, fields=None):
"""
Always returns EmptyQuerySet.
"""
return self
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Always returns EmptyQuerySet.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
return self
def reverse(self):
"""
Always returns EmptyQuerySet.
"""
return self
def defer(self, *fields):
"""
Always returns EmptyQuerySet.
"""
return self
def only(self, *fields):
"""
Always returns EmptyQuerySet.
"""
return self
def update(self, **kwargs):
"""
Don't update anything.
"""
return 0
def aggregate(self, *args, **kwargs):
"""
Return a dict mapping the aggregate names to None
"""
for arg in args:
kwargs[arg.default_alias] = arg
return dict([(key, None) for key in kwargs])
# EmptyQuerySet is always an empty result in where-clauses (and similar
# situations).
value_annotation = False
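# --- Illustrative sketch (not part of the original module) ---
# EmptyQuerySet is what QuerySet.none() returns; every refinement keeps it
# empty, and no SQL is ever issued (``Author`` is hypothetical):
def _example_none(Author):
    qs = Author.objects.none()
    return qs.filter(name='Ada').count()    # 0, without touching the DB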
def get_klass_info(klass, max_depth=0, cur_depth=0, requested=None,
only_load=None, local_only=False):
"""
    Helper function that recursively returns information for a klass, to be
    used in get_cached_row. It exists just to compute this information only
    once for the entire queryset. Otherwise it would be computed for each
    row, which leads to poor performance on large querysets.
Arguments:
* klass - the class to retrieve (and instantiate)
* max_depth - the maximum depth to which a select_related()
relationship should be explored.
* cur_depth - the current depth in the select_related() tree.
       Used in recursive calls to determine if we should dig deeper.
* requested - A dictionary describing the select_related() tree
that is to be retrieved. keys are field names; values are
dictionaries describing the keys on that related object that
are themselves to be select_related().
* only_load - if the query has had only() or defer() applied,
this is the list of field names that will be returned. If None,
the full field list for `klass` can be assumed.
* local_only - Only populate local fields. This is used when
following reverse select-related relations
"""
if max_depth and requested is None and cur_depth > max_depth:
# We've recursed deeply enough; stop now.
return None
if only_load:
load_fields = only_load.get(klass) or set()
        # When we create the object, we will also be populating
# all the parent classes, so traverse the parent classes looking
# for fields that must be included on load.
for parent in klass._meta.get_parent_list():
fields = only_load.get(parent)
if fields:
load_fields.update(fields)
else:
load_fields = None
if load_fields:
# Handle deferred fields.
skip = set()
init_list = []
# Build the list of fields that *haven't* been requested
for field, model in klass._meta.get_fields_with_model():
if field.name not in load_fields:
skip.add(field.name)
elif local_only and model is not None:
continue
else:
init_list.append(field.attname)
# Retrieve all the requested fields
field_count = len(init_list)
if skip:
klass = deferred_class_factory(klass, skip)
field_names = init_list
else:
field_names = ()
else:
# Load all fields on klass
        # We try not to populate the field_names variable, for performance
        # reasons. If field_names is set, it is used to instantiate the
        # desired fields by passing **dict(zip(field_names, fields)) as
        # kwargs to the Model.__init__ method. But the kwargs version of
        # Model.__init__ is slower, so we should avoid using it when it is
        # not really necessary.
if local_only and len(klass._meta.local_fields) != len(klass._meta.fields):
field_count = len(klass._meta.local_fields)
field_names = [f.attname for f in klass._meta.local_fields]
else:
field_count = len(klass._meta.fields)
field_names = ()
restricted = requested is not None
related_fields = []
for f in klass._meta.fields:
if select_related_descend(f, restricted, requested):
if restricted:
next = requested[f.name]
else:
next = None
klass_info = get_klass_info(f.rel.to, max_depth=max_depth, cur_depth=cur_depth+1,
requested=next, only_load=only_load)
related_fields.append((f, klass_info))
reverse_related_fields = []
if restricted:
for o in klass._meta.get_all_related_objects():
if o.field.unique and select_related_descend(o.field, restricted, requested, reverse=True):
next = requested[o.field.related_query_name()]
klass_info = get_klass_info(o.model, max_depth=max_depth, cur_depth=cur_depth+1,
requested=next, only_load=only_load, local_only=True)
reverse_related_fields.append((o.field, klass_info))
return klass, field_names, field_count, related_fields, reverse_related_fields
def get_cached_row(row, index_start, using, klass_info, offset=0):
"""
Helper function that recursively returns an object with the specified
related attributes already populated.
This method may be called recursively to populate deep select_related()
clauses.
Arguments:
* row - the row of data returned by the database cursor
* index_start - the index of the row at which data for this
object is known to start
* offset - the number of additional fields that are known to
exist in row for `klass`. This usually means the number of
annotated results on `klass`.
* using - the database alias on which the query is being executed.
* klass_info - result of the get_klass_info function
"""
if klass_info is None:
return None
klass, field_names, field_count, related_fields, reverse_related_fields = klass_info
fields = row[index_start : index_start + field_count]
# If all the select_related columns are None, then the related
# object must be non-existent - set the relation to None.
# Otherwise, construct the related object.
if fields == (None,) * field_count:
obj = None
else:
if field_names:
obj = klass(**dict(zip(field_names, fields)))
else:
obj = klass(*fields)
# If an object was retrieved, set the database state.
if obj:
obj._state.db = using
obj._state.adding = False
# Instantiate related fields
index_end = index_start + field_count + offset
# Iterate over each related object, populating any
# select_related() fields
for f, klass_info in related_fields:
# Recursively retrieve the data for the related object
cached_row = get_cached_row(row, index_end, using, klass_info)
# If the recursive descent found an object, populate the
# descriptor caches relevant to the object
if cached_row:
rel_obj, index_end = cached_row
if obj is not None:
# If the base object exists, populate the
# descriptor cache
setattr(obj, f.get_cache_name(), rel_obj)
if f.unique and rel_obj is not None:
# If the field is unique, populate the
# reverse descriptor cache on the related object
setattr(rel_obj, f.related.get_cache_name(), obj)
# Now do the same, but for reverse related objects.
# Only handle the restricted case - i.e., don't do a depth
# descent into reverse relations unless explicitly requested
for f, klass_info in reverse_related_fields:
# Recursively retrieve the data for the related object
cached_row = get_cached_row(row, index_end, using, klass_info)
# If the recursive descent found an object, populate the
# descriptor caches relevant to the object
if cached_row:
rel_obj, index_end = cached_row
if obj is not None:
# If the field is unique, populate the
# reverse descriptor cache
setattr(obj, f.related.get_cache_name(), rel_obj)
if rel_obj is not None:
# If the related object exists, populate
# the descriptor cache.
setattr(rel_obj, f.get_cache_name(), obj)
# Now populate all the non-local field values
# on the related object
for rel_field, rel_model in rel_obj._meta.get_fields_with_model():
if rel_model is not None:
setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
# populate the field cache for any related object
# that has already been retrieved
if rel_field.rel:
try:
cached_obj = getattr(obj, rel_field.get_cache_name())
setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
except AttributeError:
# Related object hasn't been cached yet
pass
return obj, index_end
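# --- Illustrative sketch (not part of the original module) ---
# get_klass_info() and get_cached_row() above are what make
# select_related() work: related instances are built from the same result
# row and stashed in the descriptor caches, so later attribute access does
# no extra query. For a hypothetical ``Book`` with a ForeignKey ``author``:
def _example_select_related(Book):
    book = Book.objects.select_related('author')[0]
    return book.author.name     # served from the populated cache, no query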
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None):
self.raw_query = raw_query
self.model = model
self._db = using
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def __iter__(self):
# Mapping of attrnames to row column positions. Used for constructing
        # the model using kwargs, needed when not all of the model's fields
        # are present in the query.
model_init_field_names = {}
# A list of tuples of (column name, column position). Used for
# annotation fields.
annotation_fields = []
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
need_resolv_columns = hasattr(compiler, 'resolve_columns')
query = iter(self.query)
# Find out which columns are model's fields, and which ones should be
# annotated to the model.
for pos, column in enumerate(self.columns):
if column in self.model_fields:
model_init_field_names[self.model_fields[column].attname] = pos
else:
annotation_fields.append((column, pos))
        # Find out which of the model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_field_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
            # All of the model's fields are present in the query, so it is
            # possible to use *args based model instantiation. For each field
            # of the model, record the query column position matching that
            # field.
model_init_field_pos = []
for field in self.model._meta.fields:
model_init_field_pos.append(model_init_field_names[field.attname])
if need_resolv_columns:
fields = [self.model_fields.get(c, None) for c in self.columns]
# Begin looping through the query values.
for values in query:
if need_resolv_columns:
values = compiler.resolve_columns(values, fields)
# Associate fields to values
if skip:
model_init_kwargs = {}
for attname, pos in model_init_field_names.iteritems():
model_init_kwargs[attname] = values[pos]
instance = model_cls(**model_init_kwargs)
else:
model_init_args = [values[pos] for pos in model_init_field_pos]
instance = model_cls(*model_init_args)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
instance._state.db = db
instance._state.adding = False
yield instance
def __repr__(self):
return "<RawQuerySet: %r>" % (self.raw_query % tuple(self.params))
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model)
def using(self, alias):
"""
        Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
                # Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
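# --- Illustrative sketch (not part of the original module) ---
# RawQuerySet is normally reached through Manager.raw(). The
# ``translations`` mapping consumed by the ``columns`` property above
# renames query columns that don't match field names; the table and field
# names here are hypothetical:
#
#   Person.objects.raw('SELECT * FROM myapp_person')
#   Person.objects.raw('SELECT * FROM legacy_people',
#                      translations={'first': 'first_name'})
#
# The raw query must include the primary key, as enforced in __iter__().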
def insert_query(model, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented. It is not
part of the public API.
"""
query = sql.InsertQuery(model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
def prefetch_related_objects(result_cache, related_lookups):
"""
Helper function for prefetch_related functionality
Populates prefetched objects caches for a list of results
from a QuerySet
"""
from django.db.models.sql.constants import LOOKUP_SEP
if len(result_cache) == 0:
return # nothing to do
model = result_cache[0].__class__
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_lookups = set() # list of lookups like foo__bar__baz
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = [] # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = itertools.chain(related_lookups, auto_lookups)
for lookup in all_lookups:
if lookup in done_lookups:
# We've done exactly this already, skip the whole thing
continue
done_lookups.add(lookup)
        # Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = result_cache
attrs = lookup.split(LOOKUP_SEP)
for level, attr in enumerate(attrs):
# Prepare main instances
if len(obj_list) == 0:
break
good_objects = True
for obj in obj_list:
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except AttributeError:
# Must be in a QuerySet subclass that is not returning
# Model instances, either in Django or 3rd
# party. prefetch_related() doesn't make sense, so quit
# now.
good_objects = False
break
else:
# We already did this list
break
if not good_objects:
break
# Descend down tree
            # We assume that the objects retrieved are homogeneous (which is
            # the premise of prefetch_related), so what applies to the first
            # object applies to all.
first_obj = obj_list[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(attr, first_obj.__class__.__name__, lookup))
if level == len(attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to a item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup)
if prefetcher is not None and not is_fetched:
# Check we didn't do this already
current_lookup = LOOKUP_SEP.join(attrs[0:level+1])
if current_lookup in done_queries:
obj_list = done_queries[current_lookup]
else:
obj_list, additional_prl = prefetch_one_level(obj_list, prefetcher, attr)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and
descriptor in followed_descriptors):
for f in additional_prl:
new_prl = LOOKUP_SEP.join([current_lookup, f])
auto_lookups.append(new_prl)
done_queries[current_lookup] = obj_list
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with that list.
obj_list = [getattr(obj, attr) for obj in obj_list]
# Filter out 'None' so that we can continue with nullable
# relations.
obj_list = [obj for obj in obj_list if obj is not None]
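# --- Illustrative sketch (not part of the original module) ---
# The machinery above backs QuerySet.prefetch_related(). For hypothetical
# ``Pizza`` objects with a many-to-many ``toppings`` relation:
#
#   pizzas = list(Pizza.objects.prefetch_related('toppings'))
#   pizzas[0].toppings.all()    # served from _prefetched_objects_cache
#
# Nested lookups join attributes with LOOKUP_SEP ('__'), e.g.
# 'toppings__suppliers', and prefetch_related(None) clears the pending
# lookups, per the docstring in QuerySet.prefetch_related() above.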
def get_prefetcher(instance, attr):
"""
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_query_set().
Returns a 4-tuple containing:
(the object with get_prefetch_query_set (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
attr_found = False
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, attr, None)
if rel_obj_descriptor is None:
try:
rel_obj = getattr(instance, attr)
attr_found = True
except AttributeError:
pass
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_query_set() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_query_set'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, attr)
if hasattr(rel_obj, 'get_prefetch_query_set'):
prefetcher = rel_obj
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, attname):
"""
Helper function for prefetch_related_objects
Runs prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_query_set() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name =\
prefetcher.get_prefetch_query_set(instances)
# We have to handle the possibility that the default manager itself added
# prefetch_related lookups to the QuerySet we just got back. We don't want to
# trigger the prefetch_related functionality by evaluating the query.
# Rather, we need to merge in the prefetch_related lookups.
additional_prl = getattr(rel_qs, '_prefetch_related_lookups', [])
if additional_prl:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
if rel_attr_val not in rel_obj_cache:
rel_obj_cache[rel_attr_val] = []
rel_obj_cache[rel_attr_val].append(rel_obj)
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
# Need to assign to single cache on instance
if vals:
setattr(obj, cache_name, vals[0])
else:
# Multi, attribute represents a manager with an .all() method that
# returns a QuerySet
qs = getattr(obj, attname).all()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now, since we
# have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_prl
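# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the public API that drives the machinery above;
# ``Author``/``Book`` are hypothetical models where ``Book`` has a
# ForeignKey to ``Author`` with related_name='books'.
#
# authors = list(Author.objects.prefetch_related('books'))
# # Two queries in total: one for the authors, one for all related books.
# for author in authors:
#     # Hits the prefetched cache; no additional queries are issued.
#     print [book.title for book in author.books.all()]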
| uiri/pxqz | venv/lib/python2.7/site-packages/django/db/models/query.py | Python | gpl-3.0 | 69,558 |
from __future__ import division
import numpy as np
from bokeh.plotting import figure, HBox, output_file, show, VBox
from bokeh.models import Range1d
# create some data using python lists
x1 = [1, 2, 5, 7, -8, 5, 2, 7, 1, -3, -5, 1.7, 5.4, -5]
y1 = [5, 6, -3, 1.5, 2, 1, 1, 9, 2.4, -3, 6, 8, 2, 4]
# create some data using numpy arrays
x2 = np.random.random(size=100) * 20 - 10
y2 = np.random.random(size=100) * 20 - 10
# EXERCISE: create some data for x3 and y3 however you like
# EXERCISE: output static HTML file
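# One possible solution to the two exercises above (values are arbitrary):
x3 = np.random.normal(0, 4, size=50)
y3 = np.random.normal(0, 4, size=50)
output_file("scatter.html", title="Scatter exercise")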
TOOLS="pan,wheel_zoom,box_zoom,reset,save"
# EXERCISE: create two Range1d objects to reuse in the plots. Use the [-10, 10]
# for the bounds. Note: Range1d's are initialized like: Range1d(start=0, end=1)
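# One possible solution:
xr = Range1d(start=-10, end=10)
yr = Range1d(start=-10, end=10)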
# EXERCISE: Plot all the sets of points on different plots p1, p2, p3. Use the
# ranges above for `x_range` and `y_range` for each figure. Set different colors
# as well. Try setting line_color and fill_color instead of just color. You can
# also set alpha, line_alpha, and fill_alpha if you like. Set tools to TOOLS on
# the figures. Change the value of the 'marker' parameter, "circle", "square",
# "triangle", etc. One example is given
p1 = figure(x_range=xr, y_range=yr, tools=TOOLS, plot_width=300, plot_height=300)
p1.scatter(x1, y1, size=12, color="red", alpha=0.5)
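# One possible solution for the remaining two plots (the markers and
# colors are arbitrary; any combination works):
p2 = figure(x_range=xr, y_range=yr, tools=TOOLS, plot_width=300, plot_height=300)
p2.scatter(x2, y2, size=12, marker="square", line_color="green", fill_color="orange", fill_alpha=0.5)
p3 = figure(x_range=xr, y_range=yr, tools=TOOLS, plot_width=300, plot_height=300)
p3.scatter(x3, y3, size=12, marker="triangle", color="navy", alpha=0.5)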
# EXERCISE: Try panning and zooming one of the plots with another one visible!
# Set the plot_width and plot_height to smaller if necessary
# EXERCISE: create a new figure p4
# Lets plot 4000 circles, you can play around with this if you like
N = 4000
# Create a bunch of random points, radii and colors for plotting
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = [
"#%02x%02x%02x" % (r, g, 150) for r, g in zip(np.floor(50+2*x), np.floor(30+2*y))
]
# EXERCISE: use the `circle` renderer to scatter all the circles. Set the
# 'fill_color' to the colors above, the 'line_color' to None, and the 'radius'
# to the radii. Also try setting the fill_alpha to something less than one.
# Use TOOLS from above to set a tools parameter.
# NOTE: since we are passing 'radius' as a parameter, the size of the circles
# is computed in **data** space, not in pixels. If you'd like to specify
# radii in pixels, use: radius_units="screen"
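# One possible solution (radius is in data units, as noted above):
p4 = figure(tools=TOOLS, plot_width=300, plot_height=300)
p4.circle(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)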
# show the plots arrayed in a layout
show(VBox(HBox(p1, p2, p3), p4))
| almarklein/bokeh | sphinx/source/tutorial/exercises/scatter.py | Python | bsd-3-clause | 2,389 |
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Wherever a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if any(not e.author for e in self.entries):
self.author = ({'name': u'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url, True))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version, True))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Required.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Wherever a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author')
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
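# --- Illustrative usage (not part of Werkzeug itself) ---
# A minimal, self-contained sketch of building a feed outside a request
# context; all values below are made up.
#
# feed = AtomFeed(u'Example Feed', feed_url=u'http://example.com/feed.atom',
#                 url=u'http://example.com/', author=u'Jane Doe')
# feed.add(u'First post', u'<p>Hello world</p>', content_type='html',
#          url=u'http://example.com/posts/1',
#          updated=datetime(2010, 1, 1, 12, 0))
# print feed.to_string()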
| gwwfps/boxrps | werkzeug/contrib/atom.py | Python | mit | 14,976 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import inspect
import os
import re
import urlparse
_next_page_id = 0
class Page(object):
def __init__(self, url, page_set=None, base_dir=None, name=''):
self._url = url
self._page_set = page_set
# Default value of base_dir is the directory of the file that defines the
# class of this page instance.
if base_dir is None:
base_dir = os.path.dirname(inspect.getfile(self.__class__))
self._base_dir = base_dir
self._name = name
global _next_page_id
self._id = _next_page_id
_next_page_id += 1
# These attributes can be set dynamically by the page.
self.synthetic_delays = dict()
self.startup_url = page_set.startup_url if page_set else ''
self.credentials = None
self.disabled = False
self.skip_waits = False
self.script_to_evaluate_on_commit = None
self._SchemeErrorCheck()
def _SchemeErrorCheck(self):
if not self._scheme:
raise ValueError('Must prepend the URL with scheme (e.g. file://)')
if self.startup_url:
startup_url_scheme = urlparse.urlparse(self.startup_url).scheme
if not startup_url_scheme:
raise ValueError('Must prepend the URL with scheme (e.g. http://)')
if startup_url_scheme == 'file':
raise ValueError('startup_url with local file scheme is not supported')
def TransferToPageSet(self, another_page_set):
""" Transfer this page to another page set.
Args:
another_page_set: an instance of telemetry.page.PageSet to transfer this
page to.
Note:
This method removes this page instance from the pages list of its current
page_set, so one should be careful not to iterate through the list of
pages of a page_set and calling this method.
For example, the below loop is erroneous:
for p in page_set_A.pages:
p.TransferToPageSet(page_set_B)
"""
assert self._page_set
if another_page_set is self._page_set:
return
self._page_set.pages.remove(self)
self._page_set = another_page_set
self._page_set.AddPage(self)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
def CanRunOnBrowser(self, browser_info):
"""Override this to returns whether this page can be run on specific
browser.
Args:
browser_info: an instance of telemetry.core.browser_info.BrowserInfo
"""
assert browser_info
return True
def AsDict(self):
"""Converts a page object to a dict suitable for JSON output."""
d = {
'id': self._id,
'url': self._url,
}
if self._name:
d['name'] = self._name
return d
@property
def page_set(self):
return self._page_set
@property
def name(self):
return self._name
@property
def url(self):
return self._url
@property
def id(self):
return self._id
def GetSyntheticDelayCategories(self):
result = []
for delay, options in self.synthetic_delays.items():
options = '%f;%s' % (options.get('target_duration', 0),
options.get('mode', 'static'))
result.append('DELAY(%s;%s)' % (delay, options))
return result
def __lt__(self, other):
return self.url < other.url
def __cmp__(self, other):
x = cmp(self.name, other.name)
if x != 0:
return x
return cmp(self.url, other.url)
def __str__(self):
return self.url
def AddCustomizeBrowserOptions(self, options):
""" Inherit page overrides this to add customized browser options."""
pass
@property
def _scheme(self):
return urlparse.urlparse(self.url).scheme
@property
def is_file(self):
"""Returns True iff this URL points to a file."""
return self._scheme == 'file'
@property
def is_local(self):
"""Returns True iff this URL is local. This includes chrome:// URLs."""
return self._scheme in ['file', 'chrome', 'about']
@property
def file_path(self):
"""Returns the path of the file, stripping the scheme and query string."""
assert self.is_file
# Because ? is a valid character in a filename,
# we have to treat the url as a non-file by removing the scheme.
parsed_url = urlparse.urlparse(self.url[7:])
return os.path.normpath(os.path.join(
self._base_dir, parsed_url.netloc + parsed_url.path))
@property
def file_path_url(self):
"""Returns the file path, including the params, query, and fragment."""
assert self.is_file
file_path_url = os.path.normpath(os.path.join(self._base_dir, self.url[7:]))
# Preserve trailing slash or backslash.
# It doesn't matter in a file path, but it does matter in a URL.
if self.url.endswith('/'):
file_path_url += os.sep
return file_path_url
@property
def serving_dir(self):
file_path = os.path.realpath(self.file_path)
if os.path.isdir(file_path):
return file_path
else:
return os.path.dirname(file_path)
@property
def file_safe_name(self):
"""A version of display_name that's safe to use as a filename."""
# Just replace all special characters in the url with underscore.
return re.sub('[^a-zA-Z0-9]', '_', self.display_name)
@property
def display_name(self):
if self.name:
return self.name
if not self.is_file:
return self.url
all_urls = [p.url.rstrip('/') for p in self.page_set if p.is_file]
common_prefix = os.path.dirname(os.path.commonprefix(all_urls))
return self.url[len(common_prefix):].strip('/')
@property
def archive_path(self):
return self.page_set.WprFilePathForPage(self)
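# --- Illustrative usage (not part of Telemetry itself) ---
# A minimal sketch of a standalone Page; the URL and delay name are
# made up.
#
# page = Page('http://www.example.com/', name='example')
# page.synthetic_delays['cc.BeginMainFrame'] = {'target_duration': 0.012}
# page.GetSyntheticDelayCategories()
# # -> ['DELAY(cc.BeginMainFrame;0.012000;static)']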
| GladeRom/android_external_chromium_org | tools/telemetry/telemetry/page/__init__.py | Python | bsd-3-clause | 5,716 |
"""
Utility functions from 2to3, 3to2 and python-modernize (and some home-grown
ones).
Licences:
2to3: PSF License v2
3to2: Apache Software License (from 3to2/setup.py)
python-modernize licence: BSD (from python-modernize/LICENSE)
"""
from lib2to3.fixer_util import (FromImport, Newline, is_import,
find_root, does_tree_import, Comma)
from lib2to3.pytree import Leaf, Node
from lib2to3.pygram import python_symbols as syms, python_grammar
from lib2to3.pygram import token
from lib2to3.fixer_util import (Node, Call, Name, syms, Comma, Number)
import re
## These functions are from 3to2 by Joe Amenta:
def Star(prefix=None):
return Leaf(token.STAR, u'*', prefix=prefix)
def DoubleStar(prefix=None):
return Leaf(token.DOUBLESTAR, u'**', prefix=prefix)
def Minus(prefix=None):
return Leaf(token.MINUS, u'-', prefix=prefix)
def commatize(leafs):
u"""
Accepts/turns: (Name, Name, ..., Name, Name)
Returns/into: (Name, Comma, Name, Comma, ..., Name, Comma, Name)
"""
new_leafs = []
for leaf in leafs:
new_leafs.append(leaf)
new_leafs.append(Comma())
del new_leafs[-1]
return new_leafs
def indentation(node):
u"""
Returns the indentation for this node
Iff a node is in a suite, then it has indentation.
"""
while node.parent is not None and node.parent.type != syms.suite:
node = node.parent
if node.parent is None:
return u""
# The first three children of a suite are NEWLINE, INDENT, (some other node)
# INDENT.value contains the indentation for this suite
# anything after (some other node) has the indentation as its prefix.
if node.type == token.INDENT:
return node.value
elif node.prev_sibling is not None and node.prev_sibling.type == token.INDENT:
return node.prev_sibling.value
elif node.prev_sibling is None:
return u""
else:
return node.prefix
def indentation_step(node):
u"""
Dirty little trick to get the difference between each indentation level
Implemented by finding the shortest indentation string
(technically, the "least" of all of the indentation strings, but
tabs and spaces mixed won't get this far, so those are synonymous.)
"""
r = find_root(node)
# Collect all indentations into one set.
all_indents = set(i.value for i in r.pre_order() if i.type == token.INDENT)
if not all_indents:
# nothing is indented anywhere, so we get to pick what we want
return u" " # four spaces is a popular convention
else:
return min(all_indents)
def suitify(parent):
u"""
Turn the stuff after the first colon in parent's children
into a suite, if it wasn't already
"""
for node in parent.children:
if node.type == syms.suite:
# already in the preferred format, do nothing
return
# One-liners have no suite node, we have to fake one up
for i, node in enumerate(parent.children):
if node.type == token.COLON:
break
else:
raise ValueError(u"No class suite and no ':'!")
# Move everything into a suite node
suite = Node(syms.suite, [Newline(), Leaf(token.INDENT, indentation(node) + indentation_step(node))])
one_node = parent.children[i+1]
one_node.remove()
one_node.prefix = u''
suite.append_child(one_node)
parent.append_child(suite)
def NameImport(package, as_name=None, prefix=None):
u"""
Accepts a package (Name node), name to import it as (string), and
optional prefix and returns a node:
import <package> [as <as_name>]
"""
if prefix is None:
prefix = u""
children = [Name(u"import", prefix=prefix), package]
if as_name is not None:
children.extend([Name(u"as", prefix=u" "),
Name(as_name, prefix=u" ")])
return Node(syms.import_name, children)
_compound_stmts = (syms.if_stmt, syms.while_stmt, syms.for_stmt, syms.try_stmt, syms.with_stmt)
_import_stmts = (syms.import_name, syms.import_from)
def import_binding_scope(node):
u"""
Generator yields all nodes for which a node (an import_stmt) has scope
The purpose of this is for a call to _find() on each of them
"""
# import_name / import_from are small_stmts
assert node.type in _import_stmts
test = node.next_sibling
# A small_stmt can only be followed by a SEMI or a NEWLINE.
while test.type == token.SEMI:
nxt = test.next_sibling
# A SEMI can only be followed by a small_stmt or a NEWLINE
if nxt.type == token.NEWLINE:
break
else:
yield nxt
# A small_stmt can only be followed by either a SEMI or a NEWLINE
test = nxt.next_sibling
# Covered all subsequent small_stmts after the import_stmt
# Now to cover all subsequent stmts after the parent simple_stmt
parent = node.parent
assert parent.type == syms.simple_stmt
test = parent.next_sibling
while test is not None:
# Yes, this will yield NEWLINE and DEDENT. Deal with it.
yield test
test = test.next_sibling
context = parent.parent
# Recursively yield nodes following imports inside of a if/while/for/try/with statement
if context.type in _compound_stmts:
# import is in a one-liner
c = context
while c.next_sibling is not None:
yield c.next_sibling
c = c.next_sibling
context = context.parent
# Can't chain one-liners on one line, so that takes care of that.
p = context.parent
if p is None:
return
# in a multi-line suite
while p.type in _compound_stmts:
if context.type == syms.suite:
yield context
context = context.next_sibling
if context is None:
context = p.parent
p = context.parent
if p is None:
break
def ImportAsName(name, as_name, prefix=None):
new_name = Name(name)
new_as = Name(u"as", prefix=u" ")
new_as_name = Name(as_name, prefix=u" ")
new_node = Node(syms.import_as_name, [new_name, new_as, new_as_name])
if prefix is not None:
new_node.prefix = prefix
return new_node
def future_import(feature, node):
"""
Add a "from __future__ import <feature>" statement to the tree rooted
at node, below any shebang/encoding line and module docstring, unless
the feature is already imported.
"""
root = find_root(node)
if does_tree_import(u"__future__", feature, node):
return
# Look for a shebang or encoding line
shebang_encoding_idx = None
for idx, node in enumerate(root.children):
# Is it a shebang or encoding line?
if is_shebang_comment(node) or is_encoding_comment(node):
shebang_encoding_idx = idx
if node.type == syms.simple_stmt and \
len(node.children) > 0 and node.children[0].type == token.STRING:
# skip over docstring
continue
names = check_future_import(node)
if not names:
# not a future statement; need to insert before this
break
if feature in names:
# already imported
return
import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")])
if shebang_encoding_idx == 0 and idx == 0:
# If this __future__ import would go on the first line,
# detach the shebang / encoding prefix from the current first line.
# and attach it to our new __future__ import node.
import_.prefix = root.children[0].prefix
root.children[0].prefix = u''
# End the __future__ import line with a newline and add a blank line
# afterwards:
children = [import_ , Newline()]
root.insert_child(idx, Node(syms.simple_stmt, children))
def future_import2(feature, node):
"""
An alternative to future_import() which might not work ...
"""
root = find_root(node)
if does_tree_import(u"__future__", feature, node):
return
insert_pos = 0
for idx, node in enumerate(root.children):
if node.type == syms.simple_stmt and node.children and \
node.children[0].type == token.STRING:
insert_pos = idx + 1
break
for thing_after in root.children[insert_pos:]:
if thing_after.type == token.NEWLINE:
insert_pos += 1
continue
prefix = thing_after.prefix
thing_after.prefix = u""
break
else:
prefix = u""
import_ = FromImport(u"__future__", [Leaf(token.NAME, feature, prefix=u" ")])
children = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children, prefix=prefix))
def parse_args(arglist, scheme):
u"""
Parse a list of arguments into a dict
"""
arglist = [i for i in arglist if i.type != token.COMMA]
ret_mapping = dict([(k, None) for k in scheme])
for i, arg in enumerate(arglist):
if arg.type == syms.argument and arg.children[1].type == token.EQUAL:
# argument < NAME '=' any >
slot = arg.children[0].value
ret_mapping[slot] = arg.children[2]
else:
slot = scheme[i]
ret_mapping[slot] = arg
return ret_mapping
# def is_import_from(node):
# """Returns true if the node is a statement "from ... import ..."
# """
# return node.type == syms.import_from
def is_import_stmt(node):
return (node.type == syms.simple_stmt and node.children and
is_import(node.children[0]))
def touch_import_top(package, name_to_import, node):
"""Works like `does_tree_import` but adds an import statement at the
top if it was not imported (but below any __future__ imports).
Based on lib2to3.fixer_util.touch_import()
Calling this multiple times adds the imports in reverse order.
Also adds "standard_library.install_aliases()" after "from future import
standard_library". This should probably be factored into another function.
"""
root = find_root(node)
if does_tree_import(package, name_to_import, root):
return
# Ideally, we would look for whether futurize --all-imports has been run,
# as indicated by the presence of ``from builtins import (ascii, ...,
# zip)`` -- and, if it has, we wouldn't import the name again.
# Look for __future__ imports and insert below them
found = False
for name in ['absolute_import', 'division', 'print_function',
'unicode_literals']:
if does_tree_import('__future__', name, root):
found = True
break
if found:
# At least one __future__ import. We want to loop until we've seen them
# all.
start, end = None, None
for idx, node in enumerate(root.children):
if check_future_import(node):
start = idx
# Start looping
idx2 = start
while node:
node = node.next_sibling
idx2 += 1
if not check_future_import(node):
end = idx2
break
break
assert start is not None
assert end is not None
insert_pos = end
else:
# No __future__ imports.
# We look for a docstring and insert the new node below that. If no docstring
# exists, just insert the node at the top.
for idx, node in enumerate(root.children):
if node.type != syms.simple_stmt:
break
if not (node.children and node.children[0].type == token.STRING):
# This is the usual case.
break
insert_pos = idx
if package is None:
import_ = Node(syms.import_name, [
Leaf(token.NAME, u"import"),
Leaf(token.NAME, name_to_import, prefix=u" ")
])
else:
import_ = FromImport(package, [Leaf(token.NAME, name_to_import, prefix=u" ")])
if name_to_import == u'standard_library':
# Add:
# standard_library.install_aliases()
# after:
# from future import standard_library
install_hooks = Node(syms.simple_stmt,
[Node(syms.power,
[Leaf(token.NAME, u'standard_library'),
Node(syms.trailer, [Leaf(token.DOT, u'.'),
Leaf(token.NAME, u'install_aliases')]),
Node(syms.trailer, [Leaf(token.LPAR, u'('),
Leaf(token.RPAR, u')')])
])
]
)
children_hooks = [install_hooks, Newline()]
else:
children_hooks = []
children_import = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children_import))
if len(children_hooks) > 0:
root.insert_child(insert_pos + 1, Node(syms.simple_stmt, children_hooks))
## The following functions are from python-modernize by Armin Ronacher:
# (a little edited).
def check_future_import(node):
"""If this is a future import, return set of symbols that are imported,
else return None."""
# node should be the import statement here
savenode = node
if not (node.type == syms.simple_stmt and node.children):
return set()
node = node.children[0]
# now node is the import_from node
if not (node.type == syms.import_from and
# node.type == token.NAME and # seems to break it
hasattr(node.children[1], 'value') and
node.children[1].value == u'__future__'):
return set()
node = node.children[3]
# now node is the import_as_name[s]
# print(python_grammar.number2symbol[node.type]) # breaks sometimes
if node.type == syms.import_as_names:
result = set()
for n in node.children:
if n.type == token.NAME:
result.add(n.value)
elif n.type == syms.import_as_name:
n = n.children[0]
assert n.type == token.NAME
result.add(n.value)
return result
elif node.type == syms.import_as_name:
node = node.children[0]
assert node.type == token.NAME
return set([node.value])
elif node.type == token.NAME:
return set([node.value])
else:
# TODO: handle brackets like this:
# from __future__ import (absolute_import, division)
assert False, "strange import: %s" % savenode
SHEBANG_REGEX = r'^#!.*python'
ENCODING_REGEX = r"^#.*coding[:=]\s*([-\w.]+)"
def is_shebang_comment(node):
"""
Comments are prefixes for Leaf nodes. Returns whether the given node has a
prefix that looks like a shebang line or an encoding line:
#!/usr/bin/env python
#!/usr/bin/python3
"""
return bool(re.match(SHEBANG_REGEX, node.prefix))
def is_encoding_comment(node):
"""
Comments are prefixes for Leaf nodes. Returns whether the given node has a
prefix that looks like an encoding line:
# coding: utf-8
# encoding: utf-8
# -*- coding: <encoding name> -*-
# vim: set fileencoding=<encoding name> :
"""
return bool(re.match(ENCODING_REGEX, node.prefix))
def wrap_in_fn_call(fn_name, args, prefix=None):
"""
Example:
>>> wrap_in_fn_call("oldstr", (arg,))
oldstr(arg)
>>> wrap_in_fn_call("olddiv", (arg1, arg2))
olddiv(arg1, arg2)
"""
assert len(args) > 0
if len(args) == 1:
newargs = args
elif len(args) == 2:
expr1, expr2 = args
newargs = [expr1, Comma(), expr2]
else:
raise NotImplementedError('write me')
return Call(Name(fn_name), newargs, prefix=prefix)
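# --- Illustrative usage (not part of libfuturize itself) ---
# A minimal sketch of wrap_in_fn_call() with hand-built lib2to3 leaves;
# the fixer name 'old_div' is just an example.
#
# call = wrap_in_fn_call(u"old_div", [Name(u"a"), Name(u"b", prefix=u" ")])
# str(call) == u"old_div(a, b)" # the Comma() is inserted automatically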
| hughperkins/kgsgo-dataset-preprocessor | thirdparty/future/src/libfuturize/fixer_util.py | Python | mpl-2.0 | 16,007 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import email_split
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# welcome email sent to portal users
# (note that calling '_' has no effect except exporting those strings for translation)
WELCOME_EMAIL_SUBJECT = _("Your Odoo account at %(company)s")
WELCOME_EMAIL_BODY = _("""Dear %(name)s,
You have been given access to %(company)s's %(portal)s.
Your login account data is:
Username: %(login)s
Portal: %(portal_url)s
Database: %(db)s
You can set or change your password via the following url:
%(signup_url)s
%(welcome_message)s
--
Odoo - Open Source Business Applications
http://www.openerp.com
""")
def extract_email(email):
""" extract the email address from a user-friendly email address """
addresses = email_split(email)
return addresses[0] if addresses else ''
class wizard(osv.osv_memory):
"""
A wizard to manage the creation/removal of portal users.
"""
_name = 'portal.wizard'
_description = 'Portal Access Management'
_columns = {
'portal_id': fields.many2one('res.groups', domain=[('is_portal', '=', True)], required=True,
string='Portal', help="The portal that users can be added in or removed from."),
'user_ids': fields.one2many('portal.wizard.user', 'wizard_id', string='Users'),
'welcome_message': fields.text(string='Invitation Message',
help="This text is included in the email sent to new users of the portal."),
}
def _default_portal(self, cr, uid, context):
portal_ids = self.pool.get('res.groups').search(cr, uid, [('is_portal', '=', True)])
return portal_ids and portal_ids[0] or False
_defaults = {
'portal_id': _default_portal,
}
def onchange_portal_id(self, cr, uid, ids, portal_id, context=None):
# for each partner, determine corresponding portal.wizard.user records
res_partner = self.pool.get('res.partner')
partner_ids = context and context.get('active_ids') or []
contact_ids = set()
user_changes = []
for partner in res_partner.browse(cr, SUPERUSER_ID, partner_ids, context):
for contact in (partner.child_ids or [partner]):
# make sure that each contact appears at most once in the list
if contact.id not in contact_ids:
contact_ids.add(contact.id)
in_portal = False
if contact.user_ids:
in_portal = portal_id in [g.id for g in contact.user_ids[0].groups_id]
user_changes.append((0, 0, {
'partner_id': contact.id,
'email': contact.email,
'in_portal': in_portal,
}))
return {'value': {'user_ids': user_changes}}
def action_apply(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context)
portal_user_ids = [user.id for user in wizard.user_ids]
self.pool.get('portal.wizard.user').action_apply(cr, uid, portal_user_ids, context)
return {'type': 'ir.actions.act_window_close'}
class wizard_user(osv.osv_memory):
"""
A model to configure users in the portal wizard.
"""
_name = 'portal.wizard.user'
_description = 'Portal User Config'
_columns = {
'wizard_id': fields.many2one('portal.wizard', string='Wizard', required=True, ondelete='cascade'),
'partner_id': fields.many2one('res.partner', string='Contact', required=True, readonly=True),
'email': fields.char(string='Email', size=240),
'in_portal': fields.boolean('In Portal'),
}
def get_error_messages(self, cr, uid, ids, context=None):
res_users = self.pool.get('res.users')
emails = []
error_empty = []
error_emails = []
error_user = []
ctx = dict(context or {}, active_test=False)
for wizard_user in self.browse(cr, SUPERUSER_ID, ids, context):
if wizard_user.in_portal and not self._retrieve_user(cr, SUPERUSER_ID, wizard_user, context):
email = extract_email(wizard_user.email)
if not email:
error_empty.append(wizard_user.partner_id)
elif email in emails and email not in error_emails:
error_emails.append(wizard_user.partner_id)
user = res_users.search(cr, SUPERUSER_ID, [('login', '=', email)], context=ctx)
if user:
error_user.append(wizard_user.partner_id)
emails.append(email)
error_msg = []
if error_empty:
error_msg.append("%s\n- %s" % (_("Some contacts don't have a valid email: "),
'\n- '.join(['%s' % (p.display_name,) for p in error_empty])))
if error_emails:
error_msg.append("%s\n- %s" % (_("Several contacts have the same email: "),
'\n- '.join([p.email for p in error_emails])))
if error_user:
error_msg.append("%s\n- %s" % (_("Some contacts have the same email as an existing portal user:"),
'\n- '.join(['%s <%s>' % (p.display_name, p.email) for p in error_user])))
if error_msg:
error_msg.append(_("To resolve this error, you can: \n"
"- Correct the emails of the relevant contacts\n"
"- Grant access only to contacts with unique emails"))
return error_msg
def action_apply(self, cr, uid, ids, context=None):
error_msg = self.get_error_messages(cr, uid, ids, context=context)
if error_msg:
raise osv.except_osv(_('Contacts Error'), "\n\n".join(error_msg))
for wizard_user in self.browse(cr, SUPERUSER_ID, ids, context):
portal = wizard_user.wizard_id.portal_id
user = self._retrieve_user(cr, SUPERUSER_ID, wizard_user, context)
if wizard_user.partner_id.email != wizard_user.email:
wizard_user.partner_id.write({'email': wizard_user.email})
if wizard_user.in_portal:
# create a user if necessary, and make sure it is in the portal group
if not user:
user = self._create_user(cr, SUPERUSER_ID, wizard_user, context)
if (not user.active) or (portal not in user.groups_id):
user.write({'active': True, 'groups_id': [(4, portal.id)]})
# prepare for the signup process
user.partner_id.signup_prepare()
self._send_email(cr, uid, wizard_user, context)
wizard_user.refresh()
else:
# remove the user (if it exists) from the portal group
if user and (portal in user.groups_id):
# if user belongs to portal only, deactivate it
if len(user.groups_id) <= 1:
user.write({'groups_id': [(3, portal.id)], 'active': False})
else:
user.write({'groups_id': [(3, portal.id)]})
def _retrieve_user(self, cr, uid, wizard_user, context=None):
""" retrieve the (possibly inactive) user corresponding to wizard_user.partner_id
@param wizard_user: browse record of model portal.wizard.user
@return: browse record of model res.users
"""
context = dict(context or {}, active_test=False)
res_users = self.pool.get('res.users')
domain = [('partner_id', '=', wizard_user.partner_id.id)]
user_ids = res_users.search(cr, uid, domain, context=context)
return user_ids and res_users.browse(cr, uid, user_ids[0], context=context) or False
def _create_user(self, cr, uid, wizard_user, context=None):
""" create a new user for wizard_user.partner_id
@param wizard_user: browse record of model portal.wizard.user
@return: browse record of model res.users
"""
res_users = self.pool.get('res.users')
create_context = dict(context or {}, noshortcut=True, no_reset_password=True) # to prevent shortcut creation
values = {
'email': extract_email(wizard_user.email),
'login': extract_email(wizard_user.email),
'partner_id': wizard_user.partner_id.id,
'groups_id': [(6, 0, [])],
}
user_id = res_users.create(cr, uid, values, context=create_context)
return res_users.browse(cr, uid, user_id, context)
def _send_email(self, cr, uid, wizard_user, context=None):
""" send notification email to a new portal user
@param wizard_user: browse record of model portal.wizard.user
@return: the id of the created mail.mail record
"""
res_partner = self.pool['res.partner']
this_context = context
this_user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context)
if not this_user.email:
raise osv.except_osv(_('Email Required'),
_('You must have an email address in your User Preferences to send emails.'))
# determine subject and body in the portal user's language
user = self._retrieve_user(cr, SUPERUSER_ID, wizard_user, context)
context = dict(this_context or {}, lang=user.lang)
ctx_portal_url = dict(context, signup_force_type_in_url='')
portal_url = res_partner._get_signup_url_for_action(cr, uid,
[user.partner_id.id],
context=ctx_portal_url)[user.partner_id.id]
res_partner.signup_prepare(cr, uid, [user.partner_id.id], context=context)
data = {
'company': this_user.company_id.name,
'portal': wizard_user.wizard_id.portal_id.name,
'welcome_message': wizard_user.wizard_id.welcome_message or "",
'db': cr.dbname,
'name': user.name,
'login': user.login,
'signup_url': user.signup_url,
'portal_url': portal_url,
}
mail_mail = self.pool.get('mail.mail')
mail_values = {
'email_from': this_user.email,
'email_to': user.email,
'subject': _(WELCOME_EMAIL_SUBJECT) % data,
'body_html': '<pre>%s</pre>' % (_(WELCOME_EMAIL_BODY) % data),
'state': 'outgoing',
'type': 'email',
}
mail_id = mail_mail.create(cr, uid, mail_values, context=this_context)
return mail_mail.send(cr, uid, [mail_id], context=this_context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| osbzr/gooderp_org | portal/wizard/portal_wizard.py | Python | agpl-3.0 | 11,851 |
# -*- coding: utf-8 -*-
API_VERSION = 'v1'
DOMAIN = {'contacts': {}}
| Jumpscale/web | pythonlib/eve/tests/test_version.py | Python | apache-2.0 | 70 |
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-Metal DB testcase for BareMetalNode
"""
from nova import exception
from nova.tests.virt.baremetal.db import base
from nova.tests.virt.baremetal.db import utils
from nova.virt.baremetal import db
class BareMetalNodesTestCase(base.BMDBTestCase):
def _create_nodes(self):
nodes = [
utils.new_bm_node(pm_address='0', service_host="host1",
memory_mb=100000, cpus=100, local_gb=10000),
utils.new_bm_node(pm_address='1', service_host="host2",
instance_uuid='A',
memory_mb=100000, cpus=100, local_gb=10000),
utils.new_bm_node(pm_address='2', service_host="host2",
memory_mb=1000, cpus=1, local_gb=1000),
utils.new_bm_node(pm_address='3', service_host="host2",
memory_mb=1000, cpus=2, local_gb=1000),
utils.new_bm_node(pm_address='4', service_host="host2",
memory_mb=2000, cpus=1, local_gb=1000),
utils.new_bm_node(pm_address='5', service_host="host2",
memory_mb=2000, cpus=2, local_gb=1000),
]
self.ids = []
for n in nodes:
ref = db.bm_node_create(self.context, n)
self.ids.append(ref['id'])
def test_get_all(self):
r = db.bm_node_get_all(self.context)
self.assertEqual(r, [])
self._create_nodes()
r = db.bm_node_get_all(self.context)
self.assertEqual(len(r), 6)
def test_get(self):
self._create_nodes()
r = db.bm_node_get(self.context, self.ids[0])
self.assertEqual(r['pm_address'], '0')
r = db.bm_node_get(self.context, self.ids[1])
self.assertEqual(r['pm_address'], '1')
self.assertRaises(
exception.NodeNotFound,
db.bm_node_get,
self.context, -1)
def test_get_by_service_host(self):
self._create_nodes()
r = db.bm_node_get_all(self.context, service_host=None)
self.assertEqual(len(r), 6)
r = db.bm_node_get_all(self.context, service_host="host1")
self.assertEqual(len(r), 1)
self.assertEqual(r[0]['pm_address'], '0')
r = db.bm_node_get_all(self.context, service_host="host2")
self.assertEqual(len(r), 5)
pmaddrs = [x['pm_address'] for x in r]
self.assertIn('1', pmaddrs)
self.assertIn('2', pmaddrs)
self.assertIn('3', pmaddrs)
self.assertIn('4', pmaddrs)
self.assertIn('5', pmaddrs)
r = db.bm_node_get_all(self.context, service_host="host3")
self.assertEqual(r, [])
def test_get_associated(self):
self._create_nodes()
r = db.bm_node_get_associated(self.context, service_host=None)
self.assertEqual(len(r), 1)
self.assertEqual(r[0]['pm_address'], '1')
r = db.bm_node_get_unassociated(self.context, service_host=None)
self.assertEqual(len(r), 5)
pmaddrs = [x['pm_address'] for x in r]
self.assertIn('0', pmaddrs)
self.assertIn('2', pmaddrs)
self.assertIn('3', pmaddrs)
self.assertIn('4', pmaddrs)
self.assertIn('5', pmaddrs)
def test_destroy(self):
self._create_nodes()
db.bm_node_destroy(self.context, self.ids[0])
self.assertRaises(
exception.NodeNotFound,
db.bm_node_get,
self.context, self.ids[0])
r = db.bm_node_get_all(self.context)
self.assertEqual(len(r), 5)
def test_destroy_with_interfaces(self):
self._create_nodes()
if_a_id = db.bm_interface_create(self.context, self.ids[0],
'aa:aa:aa:aa:aa:aa', None, None)
if_b_id = db.bm_interface_create(self.context, self.ids[0],
'bb:bb:bb:bb:bb:bb', None, None)
if_x_id = db.bm_interface_create(self.context, self.ids[1],
'11:22:33:44:55:66', None, None)
db.bm_node_destroy(self.context, self.ids[0])
self.assertRaises(
exception.NovaException,
db.bm_interface_get,
self.context, if_a_id)
self.assertRaises(
exception.NovaException,
db.bm_interface_get,
self.context, if_b_id)
# Another node's interface is not affected
if_x = db.bm_interface_get(self.context, if_x_id)
self.assertEqual(self.ids[1], if_x['bm_node_id'])
self.assertRaises(
exception.NodeNotFound,
db.bm_node_get,
self.context, self.ids[0])
r = db.bm_node_get_all(self.context)
self.assertEqual(len(r), 5)
def test_find_free(self):
self._create_nodes()
fn = db.bm_node_find_free(self.context, 'host2')
self.assertEqual(fn['pm_address'], '2')
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=500, cpus=2, local_gb=100)
self.assertEqual(fn['pm_address'], '3')
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=1001, cpus=1, local_gb=1000)
self.assertEqual(fn['pm_address'], '4')
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2000, cpus=1, local_gb=1000)
self.assertEqual(fn['pm_address'], '4')
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2000, cpus=2, local_gb=1000)
self.assertEqual(fn['pm_address'], '5')
# check memory_mb
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2001, cpus=2, local_gb=1000)
self.assertIsNone(fn)
# check cpus
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2000, cpus=3, local_gb=1000)
self.assertIsNone(fn)
# check local_gb
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2000, cpus=2, local_gb=1001)
self.assertIsNone(fn)
| berrange/nova | nova/tests/virt/baremetal/db/test_bm_node.py | Python | apache-2.0 | 6,886 |
# -*- coding: utf-8 -*-
#
# pysaml2 documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 24 08:13:41 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysaml2'
copyright = u'2010-2011, Roland Hedberg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2.0beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysaml2doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pysaml2.tex', u'pysaml2 Documentation',
u'Roland Hedberg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/pysaml2-2.4.0/doc/conf.py | Python | gpl-2.0 | 6,371 |
"""passlib tests"""
| charukiewicz/beer-manager | venv/lib/python3.4/site-packages/passlib/tests/__init__.py | Python | mit | 20 |
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.catalogue.backends.generic import CatalogueBackend \
as GenericCatalogueBackend
class CatalogueBackend(GenericCatalogueBackend):
"""GeoNetwork CSW Backend"""
def __init__(self, *args, **kwargs):
super(CatalogueBackend, self).__init__(*args, **kwargs)
self.catalogue.formats = ['Dublin Core', 'ISO']
| Phil-LiDAR2-Geonode/pl2-geonode | geonode/catalogue/backends/geonetwork.py | Python | gpl-3.0 | 1,161 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
      # at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { u'A': { 'fieldname': u'daynight',
'n': 300,
'name': u'daynight',
'type': 'SDRCategoryEncoder',
'w': 21},
u'B': { 'fieldname': u'daynight',
'n': 300,
'name': u'daynight',
'type': 'SDRCategoryEncoder',
'w': 21},
u'C': { 'fieldname': u'precip',
'n': 300,
'name': u'precip',
'type': 'SDRCategoryEncoder',
'w': 21},
u'D': { 'clipInput': True,
'fieldname': u'visitor_winloss',
'maxval': 0.78600000000000003,
'minval': 0.0,
'n': 150,
'name': u'visitor_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 21},
u'E': { 'clipInput': True,
'fieldname': u'home_winloss',
'maxval': 0.69999999999999996,
'minval': 0.0,
'n': 150,
'name': u'home_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 21},
u'F': { 'dayOfWeek': (7, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
u'G': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 1),
'type': 'DateEncoder'},
u'pred': { 'clipInput': True,
'fieldname': u'attendance',
'maxval': 36067,
'minval': 0,
'n': 150,
'name': u'attendance',
'type': 'AdaptiveScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
        # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 1.0,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
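# For example (illustrative values; assumes aggregationDivide returns the
# ratio of the two periods): with predictAheadTime = {'hours': 1} and an
# aggregation period of {'minutes': 15}, aggregationDivide returns 4.0 and
# the classifier above ends up configured with steps = '4'.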
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'baseball benchmark test',
u'streams': [ { u'columns': [ u'daynight',
u'precip',
u'home_winloss',
u'visitor_winloss',
u'attendance',
u'timestamp'],
u'info': u'OAK01.csv',
u'source': u'file://extra/baseball_stadium/OAK01reformatted.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='aae', params={'window': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='trivial_aae', params={'window': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='nupicScore_scalar', params={'frequencyWindow': 1000, 'movingAverageWindow': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='nupicScore_scalar', params={'frequencyWindow': 1000})
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| sambitgaan/nupic | tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/description.py | Python | agpl-3.0 | 16,827 |
#!/usr/bin/env python
# coding: utf-8
import unittest
import sys
import os
PROJECT_PATH = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
ROOT_PATH = os.path.dirname(__file__)
if __name__ == '__main__':
if 'GAE_SDK' in os.environ:
SDK_PATH = os.environ['GAE_SDK']
sys.path.insert(0, SDK_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
sys.path.append(os.path.join(PROJECT_PATH, 'src'))
tests = unittest.TestLoader().discover(ROOT_PATH, "*tests.py")
result = unittest.TextTestRunner().run(tests)
if not result.wasSuccessful():
sys.exit(1)
| renzon/gae-continuous-delivery | test/testloader.py | Python | mit | 635 |
# (c) 2017, Brian Coca
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
DOCUMENTATION:
cache: yaml
    short_description: File backed, YAML formatted.
description:
        - File backed cache that uses YAML as a format; the files are per host.
version_added: "2.3"
author: Brian Coca (@bcoca)
'''
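# Illustrative ansible.cfg snippet enabling this plugin for fact caching
# (the cache directory path below is an assumption):
#
#   [defaults]
#   fact_caching = yaml
#   fact_caching_connection = /tmp/ansible_fact_cache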
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import yaml
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by yaml files.
"""
def _load(self, filepath):
with codecs.open(filepath, 'r', encoding='utf-8') as f:
return AnsibleLoader(f).get_single_data()
def _dump(self, value, filepath):
with codecs.open(filepath, 'w', encoding='utf-8') as f:
yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
| RackSec/ansible | lib/ansible/plugins/cache/yaml.py | Python | gpl-3.0 | 1,666 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .saferscanner import SaferScanner
class LexingError(Exception):
@classmethod
def from_text(cls, rulestr, unmatched, msg='Lexing error'):
bad_char = len(rulestr) - len(unmatched)
linenum = rulestr[:bad_char].count('\n') + 1
charnum = len(rulestr[:bad_char].rsplit('\n', 1)[-1]) + 1
snippet_start = max(0, min(len(rulestr), bad_char - 10))
snippet_end = max(0, min(len(rulestr), bad_char + 10))
msg += " (Error at: '...%s...')" % (rulestr[snippet_start:snippet_end],)
raise cls(linenum, charnum, msg)
def __init__(self, linenum, charnum, msg='Lexing error'):
self.linenum = linenum
self.charnum = charnum
self.msg = msg
self.args = (linenum, charnum, msg)
def __str__(self):
return '%s at line %d, char %d' % (self.msg, self.linenum, self.charnum)
class Hint:
def __init__(self, text):
self.text = text
def __hash__(self):
return hash((id(self.__class__), self.text))
def __eq__(self, other):
return isinstance(other, self.__class__) and other.text == self.text
def __repr__(self):
return '%s(%r)' % (self.__class__, self.text)
def is_hint(x):
return isinstance(x, Hint)
class ParseContext:
"""
These are meant to be immutable, although it would be something of a
pain to enforce that in python.
"""
def __init__(self, ruleset, bindings, matched, remainder, productionname):
self.ruleset = ruleset
self.bindings = bindings
self.matched = matched
self.remainder = remainder
self.productionname = productionname
def get_production_by_name(self, name):
return self.ruleset[name]
def get_completer(self, symname):
return self.ruleset[(self.productionname, symname)]
def get_binding(self, name, default=None):
return self.bindings.get(name, default)
def with_binding(self, name, val):
newbinds = self.bindings.copy()
newbinds[name] = val
return self.__class__(self.ruleset, newbinds, self.matched,
self.remainder, self.productionname)
def with_match(self, num):
return self.__class__(self.ruleset, self.bindings,
self.matched + self.remainder[:num],
self.remainder[num:], self.productionname)
def with_production_named(self, newname):
return self.__class__(self.ruleset, self.bindings, self.matched,
self.remainder, newname)
def extract_orig(self, tokens=None):
if tokens is None:
tokens = self.matched
if not tokens:
return ''
orig = self.bindings.get('*SRC*', None)
if orig is None:
# pretty much just guess
return ' '.join([t[1] for t in tokens])
# low end of span for first token, to high end of span for last token
orig_text = orig[tokens[0][2][0]:tokens[-1][2][1]]
# Convert all unicode tokens to ascii, where possible. This
# helps avoid problems with performing unicode-incompatible
# operations on tokens (like .lower()). See CASSANDRA-9083
# for one example of this.
try:
orig_text = orig_text.encode('ascii')
except UnicodeEncodeError:
pass
return orig_text
def __repr__(self):
return '<%s matched=%r remainder=%r prodname=%r bindings=%r>' \
% (self.__class__.__name__, self.matched, self.remainder, self.productionname, self.bindings)
class matcher:
def __init__(self, arg):
self.arg = arg
def match(self, ctxt, completions):
raise NotImplementedError
def match_with_results(self, ctxt, completions):
matched_before = len(ctxt.matched)
newctxts = self.match(ctxt, completions)
return [(newctxt, newctxt.matched[matched_before:]) for newctxt in newctxts]
@staticmethod
def try_registered_completion(ctxt, symname, completions):
debugging = ctxt.get_binding('*DEBUG*', False)
if ctxt.remainder or completions is None:
return False
try:
completer = ctxt.get_completer(symname)
except KeyError:
return False
if debugging:
print "Trying completer %r with %r" % (completer, ctxt)
try:
new_compls = completer(ctxt)
except Exception:
if debugging:
import traceback
traceback.print_exc()
return False
if debugging:
print "got %r" % (new_compls,)
completions.update(new_compls)
return True
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.arg)
class choice(matcher):
def match(self, ctxt, completions):
foundctxts = []
for a in self.arg:
subctxts = a.match(ctxt, completions)
foundctxts.extend(subctxts)
return foundctxts
class one_or_none(matcher):
def match(self, ctxt, completions):
return [ctxt] + list(self.arg.match(ctxt, completions))
class repeat(matcher):
def match(self, ctxt, completions):
found = [ctxt]
ctxts = [ctxt]
while True:
new_ctxts = []
for c in ctxts:
new_ctxts.extend(self.arg.match(c, completions))
if not new_ctxts:
return found
found.extend(new_ctxts)
ctxts = new_ctxts
class rule_reference(matcher):
def match(self, ctxt, completions):
prevname = ctxt.productionname
try:
rule = ctxt.get_production_by_name(self.arg)
except KeyError:
raise ValueError("Can't look up production rule named %r" % (self.arg,))
output = rule.match(ctxt.with_production_named(self.arg), completions)
return [c.with_production_named(prevname) for c in output]
class rule_series(matcher):
def match(self, ctxt, completions):
ctxts = [ctxt]
for patpiece in self.arg:
new_ctxts = []
for c in ctxts:
new_ctxts.extend(patpiece.match(c, completions))
if not new_ctxts:
return ()
ctxts = new_ctxts
return ctxts
class named_symbol(matcher):
def __init__(self, name, arg):
matcher.__init__(self, arg)
self.name = name
def match(self, ctxt, completions):
pass_in_compls = completions
if self.try_registered_completion(ctxt, self.name, completions):
# don't collect other completions under this; use a dummy
pass_in_compls = set()
results = self.arg.match_with_results(ctxt, pass_in_compls)
return [c.with_binding(self.name, ctxt.extract_orig(matchtoks)) for (c, matchtoks) in results]
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.arg)
class named_collector(named_symbol):
def match(self, ctxt, completions):
pass_in_compls = completions
if self.try_registered_completion(ctxt, self.name, completions):
# don't collect other completions under this; use a dummy
pass_in_compls = set()
output = []
for ctxt, matchtoks in self.arg.match_with_results(ctxt, pass_in_compls):
oldval = ctxt.get_binding(self.name, ())
output.append(ctxt.with_binding(self.name, oldval + (ctxt.extract_orig(matchtoks),)))
return output
class terminal_matcher(matcher):
def pattern(self):
raise NotImplementedError
class regex_rule(terminal_matcher):
def __init__(self, pat):
terminal_matcher.__init__(self, pat)
self.regex = pat
self.re = re.compile(pat + '$', re.I | re.S)
def match(self, ctxt, completions):
if ctxt.remainder:
if self.re.match(ctxt.remainder[0][1]):
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(Hint('<%s>' % ctxt.productionname))
return []
def pattern(self):
return self.regex
class text_match(terminal_matcher):
alpha_re = re.compile(r'[a-zA-Z]')
def __init__(self, text):
try:
terminal_matcher.__init__(self, eval(text))
except SyntaxError:
print "bad syntax %r" % (text,)
def match(self, ctxt, completions):
if ctxt.remainder:
if self.arg.lower() == ctxt.remainder[0][1].lower():
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(self.arg)
return []
def pattern(self):
# can't use (?i) here- Scanner component regex flags won't be applied
def ignorecaseify(matchobj):
c = matchobj.group(0)
return '[%s%s]' % (c.upper(), c.lower())
return self.alpha_re.sub(ignorecaseify, re.escape(self.arg))
class case_match(text_match):
def match(self, ctxt, completions):
if ctxt.remainder:
if self.arg == ctxt.remainder[0][1]:
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(self.arg)
return []
def pattern(self):
return re.escape(self.arg)
class word_match(text_match):
def pattern(self):
return r'\b' + text_match.pattern(self) + r'\b'
class case_word_match(case_match):
def pattern(self):
return r'\b' + case_match.pattern(self) + r'\b'
class terminal_type_matcher(matcher):
def __init__(self, tokentype, submatcher):
matcher.__init__(self, tokentype)
self.tokentype = tokentype
self.submatcher = submatcher
def match(self, ctxt, completions):
if ctxt.remainder:
if ctxt.remainder[0][0] == self.tokentype:
return [ctxt.with_match(1)]
elif completions is not None:
self.submatcher.match(ctxt, completions)
return []
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.tokentype, self.submatcher)
class ParsingRuleSet:
RuleSpecScanner = SaferScanner([
(r'::=', lambda s,t: t),
(r'\[[a-z0-9_]+\]=', lambda s,t: ('named_collector', t[1:-2])),
(r'[a-z0-9_]+=', lambda s,t: ('named_symbol', t[:-1])),
(r'/(\[\^?.[^]]*\]|[^/]|\\.)*/', lambda s,t: ('regex', t[1:-1].replace(r'\/', '/'))),
(r'"([^"]|\\.)*"', lambda s,t: ('litstring', t)),
(r'<[^>]*>', lambda s,t: ('reference', t[1:-1])),
(r'\bJUNK\b', lambda s,t: ('junk', t)),
(r'[@()|?*;]', lambda s,t: t),
(r'\s+', None),
(r'#[^\n]*', None),
], re.I | re.S)
def __init__(self):
self.ruleset = {}
self.scanner = None
self.terminals = []
@classmethod
def from_rule_defs(cls, rule_defs):
prs = cls()
prs.ruleset, prs.terminals = cls.parse_rules(rule_defs)
return prs
@classmethod
def parse_rules(cls, rulestr):
tokens, unmatched = cls.RuleSpecScanner.scan(rulestr)
if unmatched:
raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules unparseable")
rules = {}
terminals = []
tokeniter = iter(tokens)
for t in tokeniter:
if isinstance(t, tuple) and t[0] in ('reference', 'junk'):
assign = tokeniter.next()
if assign != '::=':
raise ValueError('Unexpected token %r; expected "::="' % (assign,))
name = t[1]
production = cls.read_rule_tokens_until(';', tokeniter)
if isinstance(production, terminal_matcher):
terminals.append((name, production))
production = terminal_type_matcher(name, production)
rules[name] = production
else:
raise ValueError('Unexpected token %r; expected name' % (t,))
return rules, terminals
@staticmethod
def mkrule(pieces):
if isinstance(pieces, (tuple, list)):
if len(pieces) == 1:
return pieces[0]
return rule_series(pieces)
return pieces
@classmethod
def read_rule_tokens_until(cls, endtoks, tokeniter):
if isinstance(endtoks, basestring):
endtoks = (endtoks,)
counttarget = None
if isinstance(endtoks, int):
counttarget = endtoks
endtoks = ()
countsofar = 0
myrules = []
mybranches = [myrules]
for t in tokeniter:
countsofar += 1
if t in endtoks:
if len(mybranches) == 1:
return cls.mkrule(mybranches[0])
return choice(map(cls.mkrule, mybranches))
if isinstance(t, tuple):
if t[0] == 'reference':
t = rule_reference(t[1])
elif t[0] == 'litstring':
if t[1][1].isalnum() or t[1][1] == '_':
t = word_match(t[1])
else:
t = text_match(t[1])
elif t[0] == 'regex':
t = regex_rule(t[1])
elif t[0] == 'named_collector':
t = named_collector(t[1], cls.read_rule_tokens_until(1, tokeniter))
elif t[0] == 'named_symbol':
t = named_symbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
elif t == '(':
t = cls.read_rule_tokens_until(')', tokeniter)
elif t == '?':
t = one_or_none(myrules.pop(-1))
elif t == '*':
t = repeat(myrules.pop(-1))
elif t == '@':
x = tokeniter.next()
if not isinstance(x, tuple) or x[0] != 'litstring':
raise ValueError("Unexpected token %r following '@'" % (x,))
t = case_match(x[1])
elif t == '|':
myrules = []
mybranches.append(myrules)
continue
else:
raise ValueError('Unparseable rule token %r after %r' % (t, myrules[-1]))
myrules.append(t)
if countsofar == counttarget:
if len(mybranches) == 1:
return cls.mkrule(mybranches[0])
return choice(map(cls.mkrule, mybranches))
raise ValueError('Unexpected end of rule tokens')
def append_rules(self, rulestr):
rules, terminals = self.parse_rules(rulestr)
self.ruleset.update(rules)
self.terminals.extend(terminals)
if terminals:
self.scanner = None # recreate it if/when necessary
def register_completer(self, func, rulename, symname):
self.ruleset[(rulename, symname)] = func
def make_lexer(self):
def make_handler(name):
if name == 'JUNK':
return None
return lambda s, t: (name, t, s.match.span())
regexes = [(p.pattern(), make_handler(name)) for (name, p) in self.terminals]
return SaferScanner(regexes, re.I | re.S).scan
def lex(self, text):
if self.scanner is None:
self.scanner = self.make_lexer()
tokens, unmatched = self.scanner(text)
if unmatched:
raise LexingError.from_text(text, unmatched, 'text could not be lexed')
return tokens
def parse(self, startsymbol, tokens, init_bindings=None):
if init_bindings is None:
init_bindings = {}
ctxt = ParseContext(self.ruleset, init_bindings, (), tuple(tokens), startsymbol)
pattern = self.ruleset[startsymbol]
return pattern.match(ctxt, None)
def whole_match(self, startsymbol, tokens, srcstr=None):
bindings = {}
if srcstr is not None:
bindings['*SRC*'] = srcstr
for c in self.parse(startsymbol, tokens, init_bindings=bindings):
if not c.remainder:
return c
def lex_and_parse(self, text, startsymbol='Start'):
return self.parse(startsymbol, self.lex(text), init_bindings={'*SRC*': text})
def lex_and_whole_match(self, text, startsymbol='Start'):
tokens = self.lex(text)
return self.whole_match(startsymbol, tokens, srcstr=text)
def complete(self, startsymbol, tokens, init_bindings=None):
if init_bindings is None:
init_bindings = {}
ctxt = ParseContext(self.ruleset, init_bindings, (), tuple(tokens), startsymbol)
pattern = self.ruleset[startsymbol]
if init_bindings.get('*DEBUG*', False):
completions = Debugotron(stream=sys.stderr)
else:
completions = set()
pattern.match(ctxt, completions)
return completions
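# Illustrative usage sketch (the rule text and completer below are
# assumptions; cqlsh layers its real CQL grammar on this module in the
# same way):
#
#   prs = ParsingRuleSet.from_rule_defs(r'''
#       JUNK ::= /[ \t\r\n]+/ ;
#       <identifier> ::= /[a-z][a-z0-9_]*/ ;
#       <Start> ::= "select" col=<identifier> ;
#   ''')
#   prs.register_completer(lambda ctxt: ['name', 'age'], 'Start', 'col')
#   prs.complete('Start', prs.lex('select '))   # -> set(['name', 'age'])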
import sys, traceback
class Debugotron(set):
depth = 10
def __init__(self, initializer=(), stream=sys.stdout):
set.__init__(self, initializer)
self.stream = stream
def add(self, item):
self._note_addition(item)
set.add(self, item)
def _note_addition(self, foo):
self.stream.write("\nitem %r added by:\n" % (foo,))
frame = sys._getframe().f_back.f_back
for i in range(self.depth):
name = frame.f_code.co_name
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if 'self' in frame.f_locals:
clsobj = frame.f_locals['self']
line = '%s.%s() (%s:%d)' % (clsobj, name, filename, lineno)
else:
line = '%s (%s:%d)' % (name, filename, lineno)
self.stream.write(' - %s\n' % (line,))
if i == 0 and 'ctxt' in frame.f_locals:
self.stream.write(' - %s\n' % (frame.f_locals['ctxt'],))
frame = frame.f_back
def update(self, items):
if items:
self._note_addition(items)
set.update(self, items)
| mitch-kyle/message-board | support/apache-cassandra-2.2.1/pylib/cqlshlib/pylexotron.py | Python | apache-2.0 | 18,820 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestSchoolHouse(unittest.TestCase):
pass
| manqala/erpnext | erpnext/schools/doctype/school_house/test_school_house.py | Python | gpl-3.0 | 231 |
import os
filename = os.path.basename(__file__)
def main(request, response):
if request.method == 'POST':
return 302, [('Location', './%s?redirect' % filename)], ''
return [('Content-Type', 'text/plain')], request.request_path
| UK992/servo | tests/wpt/web-platform-tests/service-workers/service-worker/resources/navigation-redirect-body.py | Python | mpl-2.0 | 246 |
from warnings import warn
from beaker.crypto.pbkdf2 import PBKDF2, strxor
from beaker.crypto.util import hmac, sha1, hmac_sha1, md5
from beaker import util
keyLength = None
if util.jython:
try:
from beaker.crypto.jcecrypto import getKeyLength, aesEncrypt
keyLength = getKeyLength()
except ImportError:
pass
else:
try:
from beaker.crypto.pycrypto import getKeyLength, aesEncrypt, aesDecrypt
keyLength = getKeyLength()
except ImportError:
pass
if not keyLength:
has_aes = False
else:
has_aes = True
if has_aes and keyLength < 32:
warn('Crypto implementation only supports key lengths up to %d bits. '
'Generated session cookies may be incompatible with other '
'environments' % (keyLength * 8))
def generateCryptoKeys(master_key, salt, iterations):
    # NB: the cipher key is derived deterministically from the master key and
    # salt via PBKDF2. No os.urandom() output is mixed in here; the key is
    # simply the first keyLength bytes of the PBKDF2 keystream.
keystream = PBKDF2(master_key, salt, iterations=iterations)
cipher_key = keystream.read(keyLength)
return cipher_key
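# Illustrative usage (the key material, salt, and iteration count below are
# assumptions; beaker's session layer derives per-session keys similarly):
#
#   if has_aes:
#       key = generateCryptoKeys('master-secret', 'session-id' + 'nonce', 1000)
#       ciphertext = aesEncrypt('some plaintext', key)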
| Arno-Nymous/pyload | module/lib/beaker/crypto/__init__.py | Python | gpl-3.0 | 1,233 |
#!/usr/bin/env python3
import os
import sys
import xml.dom
from xml.dom import minidom
# STRINGTABLE DIAG TOOL
# Author: KoffeinFlummi
# ---------------------
# Counts duplicates stringtable entries
def check_module(projectpath, module):
""" Checks the given module for all the different languages. """
localized = []
stringtablepath = os.path.join(projectpath, module, "stringtable.xml")
try:
xmldoc = minidom.parse(stringtablepath)
except IOError:
return 0
keys = xmldoc.getElementsByTagName("Key")
duplicates = 0
for key in keys:
children = key.childNodes
entries = []
for c in range(children.length):
entries.append(children.item(c))
entries = list(filter(lambda x: x.nodeType == x.ELEMENT_NODE, entries))
entries = list(map(lambda x: str(x.nodeName).lower(), entries))
diff = len(entries) - len(list(set(entries)))
duplicates += diff
if diff > 0:
print(key.getAttribute("ID"))
return duplicates
def main():
scriptpath = os.path.realpath(__file__)
projectpath = os.path.dirname(os.path.dirname(scriptpath))
projectpath = os.path.join(projectpath, "addons")
print("###############################")
print("# Stringtable Duplicates Tool #")
print("###############################\n")
duplicates = 0
for module in os.listdir(projectpath):
d = check_module(projectpath, module)
print("# {} {}".format(module.ljust(20), d))
duplicates += d
print("\nTotal number of duplicates: {}".format(duplicates))
if __name__ == "__main__":
main()
| MikeMatrix/ACE3 | tools/stringtableduplicates.py | Python | gpl-2.0 | 1,652 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
re.compile(br']]>]]>[\r\n]?')
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Failed to commit", re.I)
]
def on_open_shell(self):
try:
for cmd in (b'terminal length 0', b'terminal width 512', b'terminal exec prompt no-timestamp'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/terminal/iosxr.py | Python | bsd-3-clause | 1,889 |
#import sys, traceback
import mal_readline
import mal_types as types
from mal_types import (MalSym, MalInt, MalStr,
_symbol, _keywordu,
MalList, _list, MalVector, MalHashMap, MalFunc)
import reader, printer
from env import Env
# read
def READ(str):
return reader.read_str(str)
# eval
def eval_ast(ast, env):
if types._symbol_Q(ast):
assert isinstance(ast, MalSym)
return env.get(ast)
elif types._list_Q(ast):
res = []
for a in ast.values:
res.append(EVAL(a, env))
return MalList(res)
elif types._vector_Q(ast):
res = []
for a in ast.values:
res.append(EVAL(a, env))
return MalVector(res)
elif types._hash_map_Q(ast):
new_dct = {}
for k in ast.dct.keys():
new_dct[k] = EVAL(ast.dct[k], env)
return MalHashMap(new_dct)
else:
return ast # primitive value, return unchanged
def EVAL(ast, env):
#print("EVAL %s" % printer._pr_str(ast))
if not types._list_Q(ast):
return eval_ast(ast, env)
# apply list
if len(ast) == 0: return ast
a0 = ast[0]
if not isinstance(a0, MalSym):
raise Exception("attempt to apply on non-symbol")
if u"def!" == a0.value:
a1, a2 = ast[1], ast[2]
res = EVAL(a2, env)
return env.set(a1, res)
elif u"let*" == a0.value:
a1, a2 = ast[1], ast[2]
let_env = Env(env)
for i in range(0, len(a1), 2):
let_env.set(a1[i], EVAL(a1[i+1], let_env))
return EVAL(a2, let_env)
else:
el = eval_ast(ast, env)
f = el.values[0]
if isinstance(f, MalFunc):
return f.apply(el.values[1:])
else:
raise Exception("%s is not callable" % f)
# print
def PRINT(exp):
return printer._pr_str(exp)
# repl
repl_env = Env()
def REP(str, env):
return PRINT(EVAL(READ(str), env))
def plus(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value+b.value)
def minus(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value-b.value)
def multiply(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value*b.value)
def divide(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(int(a.value/b.value))
repl_env.set(_symbol(u'+'), MalFunc(plus))
repl_env.set(_symbol(u'-'), MalFunc(minus))
repl_env.set(_symbol(u'*'), MalFunc(multiply))
repl_env.set(_symbol(u'/'), MalFunc(divide))
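# Illustrative REPL session (assuming the reader/printer modules behave as in
# the reference implementation):
#
#   user> (def! a 6)
#   6
#   user> (let* (b 7) (* a b))
#   42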
def entry_point(argv):
while True:
try:
line = mal_readline.readline("user> ")
if line == "": continue
print(REP(line, repl_env))
except EOFError as e:
break
except reader.Blank:
continue
except types.MalException as e:
print(u"Error: %s" % printer._pr_str(e.object, False))
except Exception as e:
print("Error: %s" % e)
#print("".join(traceback.format_exception(*sys.exc_info())))
return 0
# _____ Define and setup target ___
def target(*args):
return entry_point
# Just run entry_point if not RPython compilation
import sys
if not sys.argv[0].endswith('rpython'):
entry_point(sys.argv)
| alphaKAI/mal | rpython/step3_env.py | Python | mpl-2.0 | 3,570 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Winkler <mail () winkler-alexander.de>
# based on svr4pkg by
# Boyd Adamson <boyd () boydadamson.com> (2012)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkgutil
short_description: Manage CSW-Packages on Solaris
description:
- Manages CSW packages (SVR4 format) on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available
as a legacy feature in Solaris 11.
    - Pkgutil is an advanced packaging system, which resolves dependencies on installation.
It is designed for CSW packages.
version_added: "1.3"
author: "Alexander Winkler (@dermute)"
options:
name:
description:
- Package name, e.g. (C(CSWnrpe))
required: true
site:
description:
- Specifies the repository path to install the package from.
      - The global default is configured in C(/etc/opt/csw/pkgutil.conf).
required: false
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
- The upgrade (C(latest)) operation will update/install the package to the latest version available.
- "Note: The module has a limitation that (C(latest)) only works for one package, not lists of them."
required: true
choices: ["present", "absent", "latest"]
update_catalog:
description:
- If you want to refresh your catalog from the mirror, set this to (C(yes)).
required: false
default: False
version_added: "2.1"
'''
EXAMPLES = '''
# Install a package
- pkgutil:
name: CSWcommon
state: present
# Install a package from a specific repository
- pkgutil:
name: CSWnrpe
site: 'ftp://myinternal.repo/opencsw/kiel'
state: latest
'''
from ansible.module_utils.basic import AnsibleModule
def package_installed(module, name):
cmd = ['pkginfo']
cmd.append('-q')
cmd.append(name)
rc, out, err = run_command(module, cmd)
if rc == 0:
return True
else:
return False
def package_latest(module, name, site):
# Only supports one package
cmd = ['pkgutil', '-U', '--single', '-c']
if site is not None:
cmd += ['-t', site]
cmd.append(name)
rc, out, err = run_command(module, cmd)
    # Replaces: | tail -1 | grep -v SAME
    # Use index -2 because splitting on \n creates an empty line
    # at the end of the list.
return 'SAME' in out.split('\n')[-2]
def run_command(module, cmd, **kwargs):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
return module.run_command(cmd, **kwargs)
def package_install(module, state, name, site, update_catalog):
cmd = ['pkgutil', '-iy']
if update_catalog:
cmd += ['-U']
if site is not None:
cmd += ['-t', site]
if state == 'latest':
cmd += ['-f']
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
return (rc, out, err)
def package_upgrade(module, name, site, update_catalog):
cmd = ['pkgutil', '-ufy']
if update_catalog:
cmd += ['-U']
if site is not None:
cmd += ['-t', site]
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
return (rc, out, err)
def package_uninstall(module, name):
cmd = ['pkgutil', '-ry', name]
(rc, out, err) = run_command(module, cmd)
return (rc, out, err)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(required=True, choices=['present', 'absent', 'latest']),
site=dict(default=None),
update_catalog=dict(required=False, default=False, type='bool'),
),
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
site = module.params['site']
update_catalog = module.params['update_catalog']
rc = None
out = ''
err = ''
result = {}
result['name'] = name
result['state'] = state
if state == 'present':
if not package_installed(module, name):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_install(module, state, name, site, update_catalog)
# Stdout is normally empty but for some packages can be
# very long and is not often useful
if len(out) > 75:
out = out[:75] + '...'
if rc != 0:
if err:
msg = err
else:
msg = out
module.fail_json(msg=msg)
elif state == 'latest':
if not package_installed(module, name):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_install(module, state, name, site, update_catalog)
if len(out) > 75:
out = out[:75] + '...'
if rc != 0:
if err:
msg = err
else:
msg = out
module.fail_json(msg=msg)
else:
if not package_latest(module, name, site):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_upgrade(module, name, site, update_catalog)
if len(out) > 75:
out = out[:75] + '...'
if rc != 0:
if err:
msg = err
else:
msg = out
module.fail_json(msg=msg)
elif state == 'absent':
if package_installed(module, name):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_uninstall(module, name)
if len(out) > 75:
out = out[:75] + '...'
if rc != 0:
if err:
msg = err
else:
msg = out
module.fail_json(msg=msg)
if rc is None:
# pkgutil was not executed because the package was already present/absent
result['changed'] = False
elif rc == 0:
result['changed'] = True
else:
result['changed'] = False
result['failed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/packaging/os/pkgutil.py | Python | gpl-3.0 | 6,757 |
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
class Command(BaseCommand):
help = "Shows all available migrations for the current project"
def add_arguments(self, parser):
parser.add_argument(
'app_label', nargs='*',
help='App labels of applications to limit the output to.',
)
parser.add_argument(
'--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
formats = parser.add_mutually_exclusive_group()
formats.add_argument(
'--list', '-l', action='store_const', dest='format', const='list',
help='Shows a list of all migrations and which are applied.',
)
formats.add_argument(
'--plan', '-p', action='store_const', dest='format', const='plan',
help=(
'Shows all migrations in the order they will be applied. '
'With a verbosity level of 2 or above all direct migration dependencies '
'and reverse dependencies (run_before) will be included.'
)
)
parser.set_defaults(format='list')
def handle(self, *args, **options):
self.verbosity = options['verbosity']
# Get the database we're operating from
db = options['database']
connection = connections[db]
if options['format'] == "plan":
return self.show_plan(connection, options['app_label'])
else:
return self.show_list(connection, options['app_label'])
def _validate_app_names(self, loader, app_names):
invalid_apps = []
for app_name in app_names:
if app_name not in loader.migrated_apps:
invalid_apps.append(app_name)
if invalid_apps:
raise CommandError('No migrations present for: %s' % (', '.join(sorted(invalid_apps))))
def show_list(self, connection, app_names=None):
"""
Show a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection, ignore_no_migrations=True)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
self._validate_app_names(loader, app_names)
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
self.stdout.write(app_name, self.style.MIGRATE_LABEL)
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
# Mark it as applied/unapplied
if plan_node in loader.applied_migrations:
self.stdout.write(" [X] %s" % title)
else:
self.stdout.write(" [ ] %s" % title)
shown.add(plan_node)
# If we didn't print anything, then a small message
if not shown:
self.stdout.write(" (no migrations)", self.style.ERROR)
def show_plan(self, connection, app_names=None):
"""
Show all known migrations (or only those of the specified app_names)
in the order they will be applied.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection)
graph = loader.graph
if app_names:
self._validate_app_names(loader, app_names)
targets = [key for key in graph.leaf_nodes() if key[0] in app_names]
else:
targets = graph.leaf_nodes()
plan = []
seen = set()
# Generate the plan
for target in targets:
for migration in graph.forwards_plan(target):
if migration not in seen:
node = graph.node_map[migration]
plan.append(node)
seen.add(migration)
# Output
def print_deps(node):
out = []
for parent in sorted(node.parents):
out.append("%s.%s" % parent.key)
if out:
return " ... (%s)" % ", ".join(out)
return ""
for node in plan:
deps = ""
if self.verbosity >= 2:
deps = print_deps(node)
if node.key in loader.applied_migrations:
self.stdout.write("[X] %s.%s%s" % (node.key[0], node.key[1], deps))
else:
self.stdout.write("[ ] %s.%s%s" % (node.key[0], node.key[1], deps))
| ifduyue/django | django/core/management/commands/showmigrations.py | Python | bsd-3-clause | 5,333 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper optimizer for checking and dropping stale gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_util
class DropStaleGradientOptimizer(optimizer.Optimizer):
"""Wrapper optimizer that checks and drops stale gradient.
This optimizer records the global step for each worker before computing
gradients and compares it with the global step at the time of applying the
gradients. If the difference is larger than a threshold, it will drop all
the computed gradients.
"""
def __init__(self,
opt,
staleness,
use_locking=False,
name="DropStaleGradient"):
"""Constructs a new DropStaleGradientOptimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
staleness: The maximum staleness allowed for the optimizer.
use_locking: If `True` use locks for clip update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "DropStaleGradient".
"""
super(DropStaleGradientOptimizer, self).__init__(use_locking, name)
self._opt = opt
self._staleness = staleness
def compute_gradients(self, loss, *args, **kwargs):
# Record current global step for worker.
with ops.colocate_with(loss):
self._local_step = training_util.get_global_step() + 0
with ops.control_dependencies([self._local_step]):
loss = gen_array_ops.identity(loss)
return self._opt.compute_gradients(loss, *args, **kwargs)
def get_slot(self, *args, **kwargs):
return self._opt.get_slot(*args, **kwargs)
def get_slot_names(self, *args, **kwargs):
return self._opt.get_slot_names(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
gradients = []
# Number of stale gradients.
stale_counter = variable_scope.get_variable(
"stale_counter", [],
initializer=init_ops.zeros_initializer(),
trainable=False)
def _AcceptGradientOp():
with ops.control_dependencies(
[self._opt.apply_gradients(
grads_and_vars, global_step=global_step, name=name)]):
return gen_array_ops.identity(0.0)
def _DropGradientOp():
return gen_array_ops.identity(1.0)
for grad_and_var in grads_and_vars:
grad = grad_and_var[0]
if isinstance(grad, ops.Tensor):
gradients.append(grad)
elif grad is not None:
gradients.append(grad.op)
with ops.control_dependencies(gradients), ops.colocate_with(global_step):
staleness = gen_array_ops.reshape(
global_step - self._local_step, shape=())
conditional_update = stale_counter.assign_add(control_flow_ops.cond(
gen_math_ops.less_equal(staleness, self._staleness),
_AcceptGradientOp, _DropGradientOp))
summary.scalar(
"Gradient staleness percentage",
stale_counter / (math_ops.cast(global_step + 1, dtypes.float32)))
return conditional_update
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer.py | Python | bsd-2-clause | 4,333 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in tensor_array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops
# TODO(b/31222613): These ops may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable("TensorArray")
ops.NotDifferentiable("TensorArrayGrad")
ops.NotDifferentiable("TensorArraySize")
ops.NotDifferentiable("TensorArrayClose")
ops.NotDifferentiable("TensorArrayV2")
ops.NotDifferentiable("TensorArrayGradV2")
ops.NotDifferentiable("TensorArraySizeV2")
ops.NotDifferentiable("TensorArrayCloseV2")
ops.NotDifferentiable("TensorArrayV3")
ops.NotDifferentiable("TensorArrayGradV3")
ops.NotDifferentiable("TensorArraySizeV3")
ops.NotDifferentiable("TensorArrayCloseV3")
def _GetGradSource(op_or_tensor):
"""Identify which call to tf.gradients created this gradient op or tensor.
TensorArray gradient calls use an accumulator TensorArray object. If
multiple gradients are calculated and run in the same session, the multiple
  gradient nodes may accidentally flow through the same accumulator TensorArray.
This double counting breaks the TensorArray gradient flow.
The solution is to identify which gradient call this particular
TensorArray*Grad is being called in, by looking at the input gradient
tensor's name, and create or lookup an accumulator gradient TensorArray
associated with this specific call. This solves any confusion and ensures
different gradients from the same forward graph get their own accumulators.
This function creates the unique label associated with the tf.gradients call
that is used to create the gradient TensorArray.
Args:
op_or_tensor: `Tensor` or `Operation` which is an input to a
TensorArray*Grad call.
Returns:
A python string, the unique label associated with this particular
gradients calculation.
Raises:
ValueError: If not called within a gradients calculation.
"""
name_tokens = op_or_tensor.name.split("/")
grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith("gradients")]
if not grad_pos:
raise ValueError(
"Expected op/tensor name to start with gradients (excluding scope)"
", got: %s" % op_or_tensor.name)
return "/".join(name_tokens[:grad_pos[-1] + 1])
@ops.RegisterGradient("TensorArrayRead")
@ops.RegisterGradient("TensorArrayReadV2")
@ops.RegisterGradient("TensorArrayReadV3")
def _TensorArrayReadGrad(op, grad):
"""Gradient for TensorArrayRead.
Args:
op: Forward TensorArrayRead op.
grad: Gradient `Tensor` to TensorArrayRead.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
index = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
w_g = g.write(index, grad)
return [None, None, w_g.flow]
@ops.RegisterGradient("TensorArrayWrite")
@ops.RegisterGradient("TensorArrayWriteV2")
@ops.RegisterGradient("TensorArrayWriteV3")
def _TensorArrayWriteGrad(op, flow):
"""Gradient for TensorArrayWrite.
Args:
op: Forward TensorArrayWrite op.
flow: Gradient `Tensor` flow to TensorArrayWrite.
Returns:
A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
"""
# handle is the output store_handle of TensorArrayReadGrad or
  # the handle output of TensorArrayWriteGrad. We must use this one.
handle = op.inputs[0]
index = op.inputs[1]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.read(index)
return [None, None, grad, flow]
@ops.RegisterGradient("TensorArrayGather")
@ops.RegisterGradient("TensorArrayGatherV2")
@ops.RegisterGradient("TensorArrayGatherV3")
def _TensorArrayGatherGrad(op, grad):
"""Gradient for TensorArrayGather.
Args:
op: Forward TensorArrayGather op.
grad: Gradient `Tensor` to TensorArrayGather.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
indices = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
u_g = g.scatter(indices, grad)
return [None, None, u_g.flow]
@ops.RegisterGradient("TensorArrayScatter")
@ops.RegisterGradient("TensorArrayScatterV2")
@ops.RegisterGradient("TensorArrayScatterV3")
def _TensorArrayScatterGrad(op, flow):
"""Gradient for TensorArrayScatter.
Args:
op: Forward TensorArrayScatter op.
flow: Gradient `Tensor` flow to TensorArrayScatter.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
indices = op.inputs[1]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.gather(indices)
return [None, None, grad, flow]
@ops.RegisterGradient("TensorArrayConcat")
@ops.RegisterGradient("TensorArrayConcatV2")
@ops.RegisterGradient("TensorArrayConcatV3")
def _TensorArrayConcatGrad(op, grad, unused_lengths_grad):
"""Gradient for TensorArrayConcat.
Args:
op: Forward TensorArrayConcat op.
    grad: Gradient `Tensor` to TensorArrayConcat.
    unused_lengths_grad: Gradient `Tensor` for the lengths output (unused).
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
flow = op.inputs[1]
lengths = op.outputs[1]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
u_g = g.split(grad, lengths=lengths)
# handle, flow_in
return [None, u_g.flow]
@ops.RegisterGradient("TensorArraySplit")
@ops.RegisterGradient("TensorArraySplitV2")
@ops.RegisterGradient("TensorArraySplitV3")
def _TensorArraySplitGrad(op, flow):
"""Gradient for TensorArraySplit.
Args:
op: Forward TensorArraySplit op.
flow: Gradient `Tensor` flow to TensorArraySplit.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.concat()
# handle, value, lengths, flow_in
return [None, grad, None, flow]
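# Hedged summary (added): the registrations above pair each forward op with its
# dual -- read <-> write and gather <-> scatter, with concat backpropagating
# through split and vice versa -- routing all gradients through a
# per-tf.gradients-call accumulator TensorArray keyed by _GetGradSource().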
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/python/ops/tensor_array_grad.py | Python | bsd-2-clause | 9,083 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
__author__ = 'petar@google.com (Petar Petrov)'
import copy_reg
import operator
from google.protobuf.internal import _net_proto2___python
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import message
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
def GetDescriptorPool():
"""Creates a new DescriptorPool C++ object."""
return _net_proto2___python.NewCDescriptorPool()
_pool = GetDescriptorPool()
def GetFieldDescriptor(full_field_name):
"""Searches for a field descriptor given a full field name."""
return _pool.FindFieldByName(full_field_name)
def BuildFile(content):
"""Registers a new proto file in the underlying C++ descriptor pool."""
_net_proto2___python.BuildFile(content)
def GetExtensionDescriptor(full_extension_name):
"""Searches for extension descriptor given a full field name."""
return _pool.FindExtensionByName(full_extension_name)
def NewCMessage(full_message_name):
"""Creates a new C++ protocol message by its name."""
return _net_proto2___python.NewCMessage(full_message_name)
def ScalarProperty(cdescriptor):
"""Returns a scalar property for the given descriptor."""
def Getter(self):
return self._cmsg.GetScalar(cdescriptor)
def Setter(self, value):
self._cmsg.SetScalar(cdescriptor, value)
return property(Getter, Setter)
def CompositeProperty(cdescriptor, message_type):
"""Returns a Python property the given composite field."""
def Getter(self):
sub_message = self._composite_fields.get(cdescriptor.name, None)
if sub_message is None:
cmessage = self._cmsg.NewSubMessage(cdescriptor)
sub_message = message_type._concrete_class(__cmessage=cmessage)
self._composite_fields[cdescriptor.name] = sub_message
return sub_message
return property(Getter)
class RepeatedScalarContainer(object):
"""Container for repeated scalar fields."""
__slots__ = ['_message', '_cfield_descriptor', '_cmsg']
def __init__(self, msg, cfield_descriptor):
self._message = msg
self._cmsg = msg._cmsg
self._cfield_descriptor = cfield_descriptor
def append(self, value):
self._cmsg.AddRepeatedScalar(
self._cfield_descriptor, value)
def extend(self, sequence):
for element in sequence:
self.append(element)
def insert(self, key, value):
values = self[slice(None, None, None)]
values.insert(key, value)
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def remove(self, value):
values = self[slice(None, None, None)]
values.remove(value)
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def __setitem__(self, key, value):
values = self[slice(None, None, None)]
values[key] = value
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def __getitem__(self, key):
return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)
def __delitem__(self, key):
self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)
def __len__(self):
return len(self[slice(None, None, None)])
def __eq__(self, other):
if self is other:
return True
if not operator.isSequenceType(other):
raise TypeError(
'Can only compare repeated scalar fields against sequences.')
# We are presumably comparing against some other sequence type.
return other == self[slice(None, None, None)]
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def sort(self, *args, **kwargs):
# Maintain compatibility with the previous interface.
if 'sort_function' in kwargs:
kwargs['cmp'] = kwargs.pop('sort_function')
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor,
sorted(self, *args, **kwargs))
def RepeatedScalarProperty(cdescriptor):
"""Returns a Python property the given repeated scalar field."""
def Getter(self):
container = self._composite_fields.get(cdescriptor.name, None)
if container is None:
container = RepeatedScalarContainer(self, cdescriptor)
self._composite_fields[cdescriptor.name] = container
return container
def Setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % cdescriptor.name)
doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
return property(Getter, Setter, doc=doc)
class RepeatedCompositeContainer(object):
"""Container for repeated composite fields."""
__slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']
def __init__(self, msg, cfield_descriptor, subclass):
self._message = msg
self._cmsg = msg._cmsg
self._subclass = subclass
self._cfield_descriptor = cfield_descriptor
def add(self, **kwargs):
cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)
def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
"""
    for msg in elem_seq:
      self.add().MergeFrom(msg)
def remove(self, value):
# TODO(protocol-devel): This is inefficient as it needs to generate a
# message pointer for each message only to do index(). Move this to a C++
# extension function.
self.__delitem__(self[slice(None, None, None)].index(value))
def MergeFrom(self, other):
    for msg in other[:]:
      self.add().MergeFrom(msg)
def __getitem__(self, key):
cmessages = self._cmsg.GetRepeatedMessage(
self._cfield_descriptor, key)
subclass = self._subclass
if not isinstance(cmessages, list):
return subclass(__cmessage=cmessages, __owner=self._message)
return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]
def __delitem__(self, key):
self._cmsg.DeleteRepeatedField(
self._cfield_descriptor, key)
def __len__(self):
return self._cmsg.FieldLength(self._cfield_descriptor)
def __eq__(self, other):
"""Compares the current instance with another one."""
if self is other:
return True
if not isinstance(other, self.__class__):
raise TypeError('Can only compare repeated composite fields against '
'other repeated composite fields.')
messages = self[slice(None, None, None)]
other_messages = other[slice(None, None, None)]
return messages == other_messages
def __hash__(self):
raise TypeError('unhashable object')
def sort(self, cmp=None, key=None, reverse=False, **kwargs):
# Maintain compatibility with the old interface.
if cmp is None and 'sort_function' in kwargs:
cmp = kwargs.pop('sort_function')
# The cmp function, if provided, is passed the results of the key function,
# so we only need to wrap one of them.
if key is None:
index_key = self.__getitem__
else:
index_key = lambda i: key(self[i])
# Sort the list of current indexes by the underlying object.
indexes = range(len(self))
indexes.sort(cmp=cmp, key=index_key, reverse=reverse)
# Apply the transposition.
for dest, src in enumerate(indexes):
if dest == src:
continue
self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src)
# Don't swap the same value twice.
indexes[src] = src
def RepeatedCompositeProperty(cdescriptor, message_type):
"""Returns a Python property for the given repeated composite field."""
def Getter(self):
container = self._composite_fields.get(cdescriptor.name, None)
if container is None:
container = RepeatedCompositeContainer(
self, cdescriptor, message_type._concrete_class)
self._composite_fields[cdescriptor.name] = container
return container
def Setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % cdescriptor.name)
doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
return property(Getter, Setter, doc=doc)
class ExtensionDict(object):
"""Extension dictionary added to each protocol message."""
def __init__(self, msg):
self._message = msg
self._cmsg = msg._cmsg
self._values = {}
def __setitem__(self, extension, value):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_OPTIONAL or
cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
raise TypeError('Extension %r is repeated and/or a composite type.' % (
extension.full_name,))
self._cmsg.SetScalar(cdescriptor, value)
self._values[extension] = value
def __getitem__(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_REPEATED and
cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
return self._cmsg.GetScalar(cdescriptor)
ext = self._values.get(extension, None)
if ext is not None:
return ext
ext = self._CreateNewHandle(extension)
self._values[extension] = ext
return ext
def ClearExtension(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
if extension in self._values:
del self._values[extension]
def HasExtension(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._message._extensions_by_name.get(name, None)
def _CreateNewHandle(self, extension):
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_REPEATED and
cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
cmessage = self._cmsg.NewSubMessage(cdescriptor)
return extension.message_type._concrete_class(__cmessage=cmessage)
if cdescriptor.label == _LABEL_REPEATED:
if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
return RepeatedCompositeContainer(
self._message, cdescriptor, extension.message_type._concrete_class)
else:
return RepeatedScalarContainer(self._message, cdescriptor)
# This shouldn't happen!
assert False
return None
def NewMessage(bases, message_descriptor, dictionary):
"""Creates a new protocol message *class*."""
_AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
_AddEnumValues(message_descriptor, dictionary)
_AddDescriptors(message_descriptor, dictionary)
return bases
def InitMessage(message_descriptor, cls):
"""Constructs a new message instance (called before instance's __init__)."""
cls._extensions_by_name = {}
_AddInitMethod(message_descriptor, cls)
_AddMessageMethods(message_descriptor, cls)
_AddPropertiesForExtensions(message_descriptor, cls)
copy_reg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
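  # Hedged note (added): together with the __getstate__/__setstate__ pair
  # defined on the Message base class, the copy_reg registration above makes
  # instances picklable via their serialized form, so, illustratively,
  # pickle.loads(pickle.dumps(msg)) should compare equal to msg.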
def _AddDescriptors(message_descriptor, dictionary):
"""Sets up a new protocol message class dictionary.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__descriptors'] = {}
for field in message_descriptor.fields:
dictionary['__descriptors'][field.name] = GetFieldDescriptor(
field.full_name)
dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [
'_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS']
def _AddEnumValues(message_descriptor, dictionary):
"""Sets class-level attributes for all enum fields defined in this message.
Args:
message_descriptor: Descriptor object for this message type.
dictionary: Class dictionary that should be populated.
"""
for enum_type in message_descriptor.enum_types:
dictionary[enum_type.name] = enum_type_wrapper.EnumTypeWrapper(enum_type)
for enum_value in enum_type.values:
dictionary[enum_value.name] = enum_value.number
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
"""Adds class attributes for the nested extensions."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
# Create and attach message field properties to the message class.
# This can be done just once per message class, since property setters and
# getters are passed the message instance.
# This makes message instantiation extremely fast, and at the same time it
# doesn't require the creation of property objects for each message instance,
# which saves a lot of memory.
for field in message_descriptor.fields:
field_cdescriptor = cls.__descriptors[field.name]
if field.label == _LABEL_REPEATED:
if field.cpp_type == _CPPTYPE_MESSAGE:
value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
else:
value = RepeatedScalarProperty(field_cdescriptor)
elif field.cpp_type == _CPPTYPE_MESSAGE:
value = CompositeProperty(field_cdescriptor, field.message_type)
else:
value = ScalarProperty(field_cdescriptor)
setattr(cls, field.name, value)
# Attach a constant with the field number.
constant_name = field.name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, field.number)
def Init(self, **kwargs):
"""Message constructor."""
cmessage = kwargs.pop('__cmessage', None)
if cmessage:
self._cmsg = cmessage
else:
self._cmsg = NewCMessage(message_descriptor.full_name)
# Keep a reference to the owner, as the owner keeps a reference to the
# underlying protocol buffer message.
owner = kwargs.pop('__owner', None)
if owner:
self._owner = owner
if message_descriptor.is_extendable:
self.Extensions = ExtensionDict(self)
else:
# Reference counting in the C++ code is broken and depends on
# the Extensions reference to keep this object alive during unit
# tests (see b/4856052). Remove this once b/4945904 is fixed.
self._HACK_REFCOUNTS = self
self._composite_fields = {}
for field_name, field_value in kwargs.iteritems():
field_cdescriptor = self.__descriptors.get(field_name, None)
if not field_cdescriptor:
raise ValueError('Protocol message has no "%s" field.' % field_name)
if field_cdescriptor.label == _LABEL_REPEATED:
if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
field_name = getattr(self, field_name)
for val in field_value:
field_name.add().MergeFrom(val)
else:
getattr(self, field_name).extend(field_value)
elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
getattr(self, field_name).MergeFrom(field_value)
else:
setattr(self, field_name, field_value)
Init.__module__ = None
Init.__doc__ = None
cls.__init__ = Init
def _IsMessageSetExtension(field):
"""Checks if a field is a message set extension."""
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == _LABEL_OPTIONAL)
def _AddMessageMethods(message_descriptor, cls):
"""Adds the methods to a protocol message class."""
if message_descriptor.is_extendable:
def ClearExtension(self, extension):
self.Extensions.ClearExtension(extension)
def HasExtension(self, extension):
return self.Extensions.HasExtension(extension)
def HasField(self, field_name):
return self._cmsg.HasField(field_name)
def ClearField(self, field_name):
child_cmessage = None
if field_name in self._composite_fields:
child_field = self._composite_fields[field_name]
del self._composite_fields[field_name]
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
child_field._owner = None
child_cmessage = child_field._cmsg
if child_cmessage is not None:
self._cmsg.ClearField(field_name, child_cmessage)
else:
self._cmsg.ClearField(field_name)
def Clear(self):
cmessages_to_release = []
for field_name, child_field in self._composite_fields.iteritems():
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
child_field._owner = None
cmessages_to_release.append((child_cdescriptor, child_field._cmsg))
self._composite_fields.clear()
self._cmsg.Clear(cmessages_to_release)
def IsInitialized(self, errors=None):
if self._cmsg.IsInitialized():
return True
if errors is not None:
      errors.extend(self.FindInitializationErrors())
return False
def SerializeToString(self):
if not self.IsInitialized():
raise message.EncodeError(
'Message %s is missing required fields: %s' % (
self._cmsg.full_name, ','.join(self.FindInitializationErrors())))
return self._cmsg.SerializeToString()
def SerializePartialToString(self):
return self._cmsg.SerializePartialToString()
def ParseFromString(self, serialized):
self.Clear()
self.MergeFromString(serialized)
def MergeFromString(self, serialized):
byte_size = self._cmsg.MergeFromString(serialized)
if byte_size < 0:
raise message.DecodeError('Unable to merge from string.')
return byte_size
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class: "
"expected %s got %s." % (cls.__name__, type(msg).__name__))
self._cmsg.MergeFrom(msg._cmsg)
def CopyFrom(self, msg):
self._cmsg.CopyFrom(msg._cmsg)
def ByteSize(self):
return self._cmsg.ByteSize()
def SetInParent(self):
return self._cmsg.SetInParent()
def ListFields(self):
all_fields = []
field_list = self._cmsg.ListFields()
fields_by_name = cls.DESCRIPTOR.fields_by_name
for is_extension, field_name in field_list:
if is_extension:
extension = cls._extensions_by_name[field_name]
all_fields.append((extension, self.Extensions[extension]))
else:
field_descriptor = fields_by_name[field_name]
all_fields.append(
(field_descriptor, getattr(self, field_name)))
all_fields.sort(key=lambda item: item[0].number)
return all_fields
def FindInitializationErrors(self):
return self._cmsg.FindInitializationErrors()
def __str__(self):
return str(self._cmsg)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, self.__class__):
return False
return self.ListFields() == other.ListFields()
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def __unicode__(self):
# Lazy import to prevent circular import when text_format imports this file.
from google.protobuf import text_format
return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
# Attach the local methods to the message class.
for key, value in locals().copy().iteritems():
if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
setattr(cls, key, value)
# Static methods:
def RegisterExtension(extension_handle):
extension_handle.containing_type = cls.DESCRIPTOR
cls._extensions_by_name[extension_handle.full_name] = extension_handle
if _IsMessageSetExtension(extension_handle):
# MessageSet extension. Also register under type name.
cls._extensions_by_name[
extension_handle.message_type.full_name] = extension_handle
cls.RegisterExtension = staticmethod(RegisterExtension)
def FromString(string):
msg = cls()
msg.MergeFromString(string)
return msg
cls.FromString = staticmethod(FromString)
def _AddPropertiesForExtensions(message_descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
constant_name = extension_name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, extension_field.number)
| cherrishes/weilai | xingxing/protobuf/python/lib/Python3.4/google/protobuf/internal/cpp_message.py | Python | apache-2.0 | 23,543 |
#!/usr/bin/env python
r"""A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> compact = simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
>>> # Can't assume dict ordering
>>> compact in ('[1,2,3,{"4":5,"6":7}]', '[1,2,3,{"6":7,"4":5}]')
True
Pretty printing (using repr() because of extraneous whitespace in the output)::
>>> import simplejson
>>> print repr(simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
'{\n "4": 5, \n "6": 7\n}'
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == ["foo", {"bar":["baz", None, 1.0, 2]}]
True
>>> simplejson.loads('"\\"foo\\bar"') == '"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io) == ["streaming API"]
True
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> simplejson.loads('1.1', parse_float=Decimal) == Decimal("1.1")
True
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(ComplexEncoder().iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.5'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
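# Hedged usage note (added, not in the original module): because of the cached
# fast paths above, plain calls such as loads('{"a": 1}') and dumps({'a': 1})
# reuse the module-level _default_decoder/_default_encoder, while passing any
# keyword argument (e.g. parse_float=decimal.Decimal) constructs a fresh
# decoder or encoder for that single call.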
| fernandalavalle/mlab-ns | server/mapreduce/lib/simplejson/__init__.py | Python | apache-2.0 | 12,383 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
float_or_none,
ISO639Utils,
)
class AdobeTVIE(InfoExtractor):
_VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
_TEST = {
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
'info_dict': {
'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
'ext': 'mp4',
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
'thumbnail': 're:https?://.*\.jpg$',
'upload_date': '20110914',
'duration': 60,
'view_count': int,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player = self._parse_json(
self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
video_id)
title = player.get('title') or self._search_regex(
r'data-title="([^"]+)"', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(
self._html_search_meta('datepublished', webpage, 'upload date'))
duration = parse_duration(
self._html_search_meta('duration', webpage, 'duration') or
self._search_regex(
r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
webpage, 'duration', fatal=False))
view_count = str_to_int(self._search_regex(
r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
webpage, 'view count'))
formats = [{
'url': source['src'],
'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
'tbr': source.get('bitrate'),
} for source in player['sources']]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
class AdobeTVVideoIE(InfoExtractor):
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
_TEST = {
# From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
'url': 'https://video.tv.adobe.com/v/2456/',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player_params = self._parse_json(self._search_regex(
r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
video_id)
formats = [{
'url': source['src'],
'width': source.get('width'),
'height': source.get('height'),
'tbr': source.get('bitrate'),
} for source in player_params['sources']]
# For both metadata and downloaded files the duration varies among
# formats. I just pick the max one
duration = max(filter(None, [
float_or_none(source.get('duration'), scale=1000)
for source in player_params['sources']]))
subtitles = {}
for translation in player_params.get('translations', []):
lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
if lang_id not in subtitles:
subtitles[lang_id] = []
subtitles[lang_id].append({
'url': translation['vttPath'],
'ext': 'vtt',
})
return {
'id': video_id,
'formats': formats,
'title': player_params['title'],
'description': self._og_search_description(webpage),
'duration': duration,
'subtitles': subtitles,
}
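# Hedged usage sketch (added): extractors like the two above are normally
# driven through YoutubeDL rather than instantiated directly. The URL is taken
# from the _TEST block above; download=False probes metadata only:
#
#   import youtube_dl
#   with youtube_dl.YoutubeDL() as ydl:
#       info = ydl.extract_info('https://video.tv.adobe.com/v/2456/',
#                               download=False)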
| apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/adobetv.py | Python | unlicense | 4,630 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import copy
import logging
import re
import shlex
import sys
import time
import os
from webkitpy.common.system import path
from webkitpy.common.system.profiler import ProfilerFactory
_log = logging.getLogger(__name__)
class DriverInput(object):
def __init__(self, test_name, timeout, image_hash, should_run_pixel_test, args=None):
self.test_name = test_name
self.timeout = timeout # in ms
self.image_hash = image_hash
self.should_run_pixel_test = should_run_pixel_test
self.args = args or []
class DriverOutput(object):
"""Groups information about a output from driver for easy passing
and post-processing of data."""
strip_patterns = []
strip_patterns.append((re.compile('at \(-?[0-9]+,-?[0-9]+\) *'), ''))
strip_patterns.append((re.compile('size -?[0-9]+x-?[0-9]+ *'), ''))
strip_patterns.append((re.compile('text run width -?[0-9]+: '), ''))
strip_patterns.append((re.compile('text run width -?[0-9]+ [a-zA-Z ]+: '), ''))
strip_patterns.append((re.compile('RenderButton {BUTTON} .*'), 'RenderButton {BUTTON}'))
strip_patterns.append((re.compile('RenderImage {INPUT} .*'), 'RenderImage {INPUT}'))
strip_patterns.append((re.compile('RenderBlock {INPUT} .*'), 'RenderBlock {INPUT}'))
strip_patterns.append((re.compile('RenderTextControl {INPUT} .*'), 'RenderTextControl {INPUT}'))
strip_patterns.append((re.compile('\([0-9]+px'), 'px'))
strip_patterns.append((re.compile(' *" *\n +" *'), ' '))
strip_patterns.append((re.compile('" +$'), '"'))
strip_patterns.append((re.compile('- '), '-'))
strip_patterns.append((re.compile('\n( *)"\s+'), '\n\g<1>"'))
strip_patterns.append((re.compile('\s+"\n'), '"\n'))
strip_patterns.append((re.compile('scrollWidth [0-9]+'), 'scrollWidth'))
strip_patterns.append((re.compile('scrollHeight [0-9]+'), 'scrollHeight'))
strip_patterns.append((re.compile('scrollX [0-9]+'), 'scrollX'))
strip_patterns.append((re.compile('scrollY [0-9]+'), 'scrollY'))
strip_patterns.append((re.compile('scrolled to [0-9]+,[0-9]+'), 'scrolled'))
def __init__(self, text, image, image_hash, audio, crash=False,
test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??',
crashed_pid=None, crash_log=None, pid=None):
# FIXME: Args could be renamed to better clarify what they do.
self.text = text
self.image = image # May be empty-string if the test crashes.
self.image_hash = image_hash
self.image_diff = None # image_diff gets filled in after construction.
self.audio = audio # Binary format is port-dependent.
self.crash = crash
self.crashed_process_name = crashed_process_name
self.crashed_pid = crashed_pid
self.crash_log = crash_log
self.test_time = test_time
self.measurements = measurements
self.timeout = timeout
self.error = error # stderr output
self.pid = pid
def has_stderr(self):
return bool(self.error)
def strip_metrics(self):
if not self.text:
return
for pattern in self.strip_patterns:
self.text = re.sub(pattern[0], pattern[1], self.text)
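    # Illustrative effect (added; the render-tree line is hypothetical):
    # strip_metrics() rewrites 'RenderText {#text} at (10,20) size 50x16' to
    # 'RenderText {#text} ', removing layout metrics so text outputs can be
    # compared across platforms.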
class Driver(object):
"""object for running test(s) using DumpRenderTree/WebKitTestRunner."""
def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
"""Initialize a Driver to subsequently run tests.
Typically this routine will spawn DumpRenderTree in a config
ready for subsequent input.
port - reference back to the port object.
worker_number - identifier for a particular worker/driver instance
"""
self._port = port
self._worker_number = worker_number
self._no_timeout = no_timeout
self._driver_tempdir = None
# WebKitTestRunner can report back subprocess crashes by printing
# "#CRASHED - PROCESSNAME". Since those can happen at any time
# and ServerProcess won't be aware of them (since the actual tool
# didn't crash, just a subprocess) we record the crashed subprocess name here.
self._crashed_process_name = None
self._crashed_pid = None
# WebKitTestRunner can report back subprocesses that became unresponsive
# This could mean they crashed.
self._subprocess_was_unresponsive = False
# stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated
# stderr output, as well as if we've seen #EOF on this driver instance.
# FIXME: We should probably remove _read_first_block and _read_optional_image_block and
# instead scope these locally in run_test.
self.error_from_test = str()
self.err_seen_eof = False
self._server_process = None
self._measurements = {}
if self._port.get_option("profile"):
profiler_name = self._port.get_option("profiler")
self._profiler = ProfilerFactory.create_profiler(self._port.host,
self._port._path_to_driver(), self._port.results_directory(), profiler_name)
else:
self._profiler = None
def __del__(self):
self.stop()
def run_test(self, driver_input, stop_when_done):
"""Run a single test and return the results.
Note that it is okay if a test times out or crashes and leaves
the driver in an indeterminate state. The upper layers of the program
are responsible for cleaning up and ensuring things are okay.
Returns a DriverOutput object.
"""
start_time = time.time()
self.start(driver_input.should_run_pixel_test, driver_input.args)
test_begin_time = time.time()
self.error_from_test = str()
self.err_seen_eof = False
command = self._command_from_driver_input(driver_input)
deadline = test_begin_time + int(driver_input.timeout) / 1000.0
self._server_process.write(command)
text, audio = self._read_first_block(deadline) # First block is either text or audio
image, actual_image_hash = self._read_optional_image_block(deadline) # The second (optional) block is image data.
crashed = self.has_crashed()
timed_out = self._server_process.timed_out
pid = self._server_process.pid()
if stop_when_done or crashed or timed_out:
# We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
# In the timeout case, we kill the hung process as well.
out, err = self._server_process.stop(self._port.driver_stop_timeout() if stop_when_done else 0.0)
if out:
text += out
if err:
self.error_from_test += err
self._server_process = None
crash_log = None
if crashed:
self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
# If we don't find a crash log use a placeholder error message instead.
if not crash_log:
pid_str = str(self._crashed_pid) if self._crashed_pid else "unknown pid"
crash_log = 'No crash log found for %s:%s.\n' % (self._crashed_process_name, pid_str)
# If we were unresponsive append a message informing there may not have been a crash.
if self._subprocess_was_unresponsive:
crash_log += 'Process failed to become responsive before timing out.\n'
# Print stdout and stderr to the placeholder crash log; we want as much context as possible.
if self.error_from_test:
crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % (text, self.error_from_test)
return DriverOutput(text, image, actual_image_hash, audio,
crash=crashed, test_time=time.time() - test_begin_time, measurements=self._measurements,
timeout=timed_out, error=self.error_from_test,
crashed_process_name=self._crashed_process_name,
crashed_pid=self._crashed_pid, crash_log=crash_log, pid=pid)
def _get_crash_log(self, stdout, stderr, newer_than):
return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than)
# FIXME: Seems this could just be inlined into callers.
@classmethod
def _command_wrapper(cls, wrapper_option):
# Hook for injecting valgrind or other runtime instrumentation,
# used by e.g. tools/valgrind/valgrind_tests.py.
return shlex.split(wrapper_option) if wrapper_option else []
HTTP_DIR = "http/tests/"
HTTP_LOCAL_DIR = "http/tests/local/"
def is_http_test(self, test_name):
return test_name.startswith(self.HTTP_DIR) and not test_name.startswith(self.HTTP_LOCAL_DIR)
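    # Illustrative examples (added): "http/tests/xmlhttprequest/foo.html" is an
    # HTTP test, while "http/tests/local/foo.html" (HTTP_LOCAL_DIR) and
    # "fast/html/keygen.html" are not.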
def test_to_uri(self, test_name):
"""Convert a test name to a URI."""
if not self.is_http_test(test_name):
return path.abspath_to_uri(self._port.host.platform, self._port.abspath_for_test(test_name))
relative_path = test_name[len(self.HTTP_DIR):]
# TODO(dpranke): remove the SSL reference?
if relative_path.startswith("ssl/"):
return "https://127.0.0.1:8443/" + relative_path
return "http://127.0.0.1:8000/" + relative_path
def uri_to_test(self, uri):
"""Return the base layout test name for a given URI.
This returns the test name for a given URI, e.g., if you passed in
"file:///src/LayoutTests/fast/html/keygen.html" it would return
"fast/html/keygen.html".
"""
if uri.startswith("file:///"):
prefix = path.abspath_to_uri(self._port.host.platform, self._port.layout_tests_dir())
if not prefix.endswith('/'):
prefix += '/'
return uri[len(prefix):]
if uri.startswith("http://"):
return uri.replace('http://127.0.0.1:8000/', self.HTTP_DIR)
if uri.startswith("https://"):
return uri.replace('https://127.0.0.1:8443/', self.HTTP_DIR)
raise NotImplementedError('unknown url type: %s' % uri)
def has_crashed(self):
if self._server_process is None:
return False
if self._crashed_process_name:
return True
if self._server_process.has_crashed():
self._crashed_process_name = self._server_process.name()
self._crashed_pid = self._server_process.pid()
return True
return False
def start(self, pixel_tests, per_test_args):
# FIXME: Callers shouldn't normally call this, since this routine
# may not be specifying the correct combination of pixel test and
# per_test args.
#
# The only reason we have this routine at all is so the perftestrunner
# can pause before running a test; it might be better to push that
# into run_test() directly.
if not self._server_process:
self._start(pixel_tests, per_test_args)
self._run_post_start_tasks()
def _setup_environ_for_driver(self, environment):
environment['DYLD_LIBRARY_PATH'] = self._port._build_path()
environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path()
# FIXME: We're assuming that WebKitTestRunner checks this DumpRenderTree-named environment variable.
# FIXME: Commented out for now to avoid tests breaking. Re-enable after
# we cut over to NRWT
#environment['DUMPRENDERTREE_TEMP'] = str(self._port._driver_tempdir_for_environment())
environment['DUMPRENDERTREE_TEMP'] = str(self._driver_tempdir)
environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
if 'WEBKIT_OUTPUTDIR' in os.environ:
environment['WEBKIT_OUTPUTDIR'] = os.environ['WEBKIT_OUTPUTDIR']
if self._profiler:
environment = self._profiler.adjusted_environment(environment)
return environment
def _start(self, pixel_tests, per_test_args):
self.stop()
self._driver_tempdir = self._port._driver_tempdir()
server_name = self._port.driver_name()
environment = self._port.setup_environ_for_server(server_name)
environment = self._setup_environ_for_driver(environment)
self._crashed_process_name = None
self._crashed_pid = None
self._server_process = self._port._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
self._server_process.start()
def _run_post_start_tasks(self):
# Remote drivers may override this to delay post-start tasks until the server has ack'd.
if self._profiler:
self._profiler.attach_to_pid(self._pid_on_target())
def _pid_on_target(self):
# Remote drivers will override this method to return the pid on the device.
return self._server_process.pid()
def stop(self):
if self._server_process:
self._server_process.stop(self._port.driver_stop_timeout())
self._server_process = None
if self._profiler:
self._profiler.profile_after_exit()
if self._driver_tempdir:
self._port._filesystem.rmtree(str(self._driver_tempdir))
self._driver_tempdir = None
def cmd_line(self, pixel_tests, per_test_args):
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd.append(self._port._path_to_driver())
if self._port.get_option('gc_between_tests'):
cmd.append('--gc-between-tests')
if self._port.get_option('complex_text'):
cmd.append('--complex-text')
if self._port.get_option('threaded'):
cmd.append('--threaded')
if self._no_timeout:
cmd.append('--no-timeout')
# FIXME: We need to pass --timeout=SECONDS to WebKitTestRunner for WebKit2.
cmd.extend(self._port.get_option('additional_drt_flag', []))
cmd.extend(self._port.additional_drt_flag())
cmd.extend(per_test_args)
cmd.append('-')
return cmd
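    # Illustrative result (added; the driver path is hypothetical): with
    # --no-timeout set, no wrapper, and no per-test args, cmd_line() would
    # return ['/build/Release/DumpRenderTree', '--no-timeout', '-'].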
def _check_for_driver_crash(self, error_line):
if error_line == "#CRASHED\n":
# This is used on Windows to report that the process has crashed
# See http://trac.webkit.org/changeset/65537.
self._crashed_process_name = self._server_process.name()
self._crashed_pid = self._server_process.pid()
elif (error_line.startswith("#CRASHED - ")
or error_line.startswith("#PROCESS UNRESPONSIVE - ")):
# WebKitTestRunner uses this to report that the WebProcess subprocess crashed.
            match = re.match(r'#(?:CRASHED|PROCESS UNRESPONSIVE) - (\S+)', error_line)
            self._crashed_process_name = match.group(1) if match else 'WebProcess'
            match = re.search(r'pid (\d+)', error_line)
pid = int(match.group(1)) if match else None
self._crashed_pid = pid
# FIXME: delete this after we're sure this code is working :)
_log.debug('%s crash, pid = %s, error_line = %s' % (self._crashed_process_name, str(pid), error_line))
if error_line.startswith("#PROCESS UNRESPONSIVE - "):
self._subprocess_was_unresponsive = True
self._port.sample_process(self._crashed_process_name, self._crashed_pid)
# We want to show this since it's not a regular crash and probably we don't have a crash log.
self.error_from_test += error_line
return True
return self.has_crashed()
def _command_from_driver_input(self, driver_input):
# FIXME: performance tests pass in full URLs instead of test names.
        if driver_input.test_name.startswith('http://') or driver_input.test_name.startswith('https://') or driver_input.test_name == 'about:blank':
command = driver_input.test_name
elif self.is_http_test(driver_input.test_name):
command = self.test_to_uri(driver_input.test_name)
else:
command = self._port.abspath_for_test(driver_input.test_name)
if sys.platform == 'cygwin':
command = path.cygpath(command)
assert not driver_input.image_hash or driver_input.should_run_pixel_test
# ' is the separator between arguments.
if self._port.supports_per_test_timeout():
command += "'--timeout'%s" % driver_input.timeout
if driver_input.should_run_pixel_test:
command += "'--pixel-test"
if driver_input.image_hash:
command += "'" + driver_input.image_hash
return command + "\n"
def _read_first_block(self, deadline):
# returns (text_content, audio_content)
block = self._read_block(deadline)
if block.malloc:
self._measurements['Malloc'] = float(block.malloc)
if block.js_heap:
self._measurements['JSHeap'] = float(block.js_heap)
if block.content_type == 'audio/wav':
return (None, block.decoded_content)
return (block.decoded_content, None)
def _read_optional_image_block(self, deadline):
# returns (image, actual_image_hash)
block = self._read_block(deadline, wait_for_stderr_eof=True)
if block.content and block.content_type == 'image/png':
return (block.decoded_content, block.content_hash)
return (None, block.content_hash)
def _read_header(self, block, line, header_text, header_attr, header_filter=None):
if line.startswith(header_text) and getattr(block, header_attr) is None:
value = line.split()[1]
if header_filter:
value = header_filter(value)
setattr(block, header_attr, value)
return True
return False
def _process_stdout_line(self, block, line):
if (self._read_header(block, line, 'Content-Type: ', 'content_type')
or self._read_header(block, line, 'Content-Transfer-Encoding: ', 'encoding')
or self._read_header(block, line, 'Content-Length: ', '_content_length', int)
or self._read_header(block, line, 'ActualHash: ', 'content_hash')
or self._read_header(block, line, 'DumpMalloc: ', 'malloc')
or self._read_header(block, line, 'DumpJSHeap: ', 'js_heap')):
return
# Note, we're not reading ExpectedHash: here, but we could.
# If the line wasn't a header, we just append it to the content.
block.content += line
def _strip_eof(self, line):
if line and line.endswith("#EOF\n"):
return line[:-5], True
return line, False
def _read_block(self, deadline, wait_for_stderr_eof=False):
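        # Reads one output block from the driver: interleave stdout/stderr
        # reads until "#EOF" is seen (on both streams when wait_for_stderr_eof
        # is set), collecting headers and content into a ContentBlock.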
block = ContentBlock()
out_seen_eof = False
while not self.has_crashed():
if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
break
if self.err_seen_eof:
out_line = self._server_process.read_stdout_line(deadline)
err_line = None
elif out_seen_eof:
out_line = None
err_line = self._server_process.read_stderr_line(deadline)
else:
out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(deadline)
if self._server_process.timed_out or self.has_crashed():
break
if out_line:
assert not out_seen_eof
out_line, out_seen_eof = self._strip_eof(out_line)
if err_line:
assert not self.err_seen_eof
err_line, self.err_seen_eof = self._strip_eof(err_line)
if out_line:
if out_line[-1] != "\n":
_log.error("Last character read from DRT stdout line was not a newline! This indicates either a NRWT or DRT bug.")
content_length_before_header_check = block._content_length
self._process_stdout_line(block, out_line)
# FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header.
# Don't wait until we're done with headers, just read the binary blob right now.
if content_length_before_header_check != block._content_length:
block.content = self._server_process.read_stdout(deadline, block._content_length)
if err_line:
if self._check_for_driver_crash(err_line):
break
self.error_from_test += err_line
block.decode_content()
return block
class ContentBlock(object):
def __init__(self):
self.content_type = None
self.encoding = None
self.content_hash = None
self._content_length = None
# Content is treated as binary data even though the text output is usually UTF-8.
self.content = str() # FIXME: Should be bytearray() once we require Python 2.6.
self.decoded_content = None
self.malloc = None
self.js_heap = None
def decode_content(self):
if self.encoding == 'base64' and self.content is not None:
self.decoded_content = base64.b64decode(self.content)
else:
self.decoded_content = self.content
class DriverProxy(object):
"""A wrapper for managing two Driver instances, one with pixel tests and
one without. This allows us to handle plain text tests and ref tests with a
single driver."""
def __init__(self, port, worker_number, driver_instance_constructor, pixel_tests, no_timeout):
self._port = port
self._worker_number = worker_number
self._driver_instance_constructor = driver_instance_constructor
self._no_timeout = no_timeout
# FIXME: We shouldn't need to create a driver until we actually run a test.
self._driver = self._make_driver(pixel_tests)
self._driver_cmd_line = None
def _make_driver(self, pixel_tests):
return self._driver_instance_constructor(self._port, self._worker_number, pixel_tests, self._no_timeout)
# FIXME: this should be a @classmethod (or implemented on Port instead).
def is_http_test(self, test_name):
return self._driver.is_http_test(test_name)
# FIXME: this should be a @classmethod (or implemented on Port instead).
def test_to_uri(self, test_name):
return self._driver.test_to_uri(test_name)
# FIXME: this should be a @classmethod (or implemented on Port instead).
def uri_to_test(self, uri):
return self._driver.uri_to_test(uri)
def run_test(self, driver_input, stop_when_done):
base = self._port.lookup_virtual_test_base(driver_input.test_name)
if base:
virtual_driver_input = copy.copy(driver_input)
virtual_driver_input.test_name = base
virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
return self.run_test(virtual_driver_input, stop_when_done)
pixel_tests_needed = driver_input.should_run_pixel_test
cmd_line_key = self._cmd_line_as_key(pixel_tests_needed, driver_input.args)
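        # A change in the required command line (e.g. toggling pixel tests or
        # different per-test args) means the current driver must be restarted.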
if cmd_line_key != self._driver_cmd_line:
self._driver.stop()
self._driver = self._make_driver(pixel_tests_needed)
self._driver_cmd_line = cmd_line_key
return self._driver.run_test(driver_input, stop_when_done)
def has_crashed(self):
return self._driver.has_crashed()
def stop(self):
self._driver.stop()
# FIXME: this should be a @classmethod (or implemented on Port instead).
def cmd_line(self, pixel_tests=None, per_test_args=None):
return self._driver.cmd_line(pixel_tests, per_test_args or [])
def _cmd_line_as_key(self, pixel_tests, per_test_args):
return ' '.join(self.cmd_line(pixel_tests, per_test_args))
| klickagent/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/driver.py | Python | bsd-3-clause | 25,616 |
"""
Constants used across the ORM in general.
"""
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
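# e.g. (illustrative) "author__name__icontains".split(LOOKUP_SEP)
# -> ['author', 'name', 'icontains']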
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/db/models/constants.py | Python | bsd-3-clause | 118 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for EncodeBase64 and DecodeBase64."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class Base64OpsTest(test_util.TensorFlowTestCase):
def setUp(self):
self._msg = array_ops.placeholder(dtype=dtypes.string)
self._encoded_f = string_ops.encode_base64(self._msg, pad=False)
self._decoded_f = string_ops.decode_base64(self._encoded_f)
self._encoded_t = string_ops.encode_base64(self._msg, pad=True)
self._decoded_t = string_ops.decode_base64(self._encoded_t)
def _RemovePad(self, msg, base64_msg):
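    # base64 maps 3 input bytes to 4 output chars and pads the final group
    # with "="; e.g. (illustrative) len(msg) % 3 == 1 yields "==", so the
    # unpadded encoding drops the last two characters.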
if len(msg) % 3 == 1:
return base64_msg[:-2]
if len(msg) % 3 == 2:
return base64_msg[:-1]
return base64_msg
def _RunTest(self, msg, pad):
with self.test_session() as sess:
if pad:
encoded, decoded = sess.run([self._encoded_t, self._decoded_t],
feed_dict={self._msg: msg})
else:
encoded, decoded = sess.run([self._encoded_f, self._decoded_f],
feed_dict={self._msg: msg})
if not isinstance(msg, (list, tuple)):
msg = [msg]
encoded = [encoded]
decoded = [decoded]
base64_msg = [base64.urlsafe_b64encode(m) for m in msg]
if not pad:
base64_msg = [self._RemovePad(m, b) for m, b in zip(msg, base64_msg)]
for i in range(len(msg)):
self.assertEqual(base64_msg[i], encoded[i])
self.assertEqual(msg[i], decoded[i])
def testWithPythonBase64(self):
for pad in (False, True):
self._RunTest(b"", pad=pad)
for _ in range(100):
length = np.random.randint(1024 * 1024)
msg = np.random.bytes(length)
self._RunTest(msg, pad=pad)
def testShape(self):
for pad in (False, True):
for _ in range(10):
msg = [np.random.bytes(np.random.randint(20))
for _ in range(np.random.randint(10))]
self._RunTest(msg, pad=pad)
# Zero-element, non-trivial shapes.
for _ in range(10):
k = np.random.randint(10)
msg = np.empty((0, k), dtype=bytes)
encoded = string_ops.encode_base64(msg, pad=pad)
decoded = string_ops.decode_base64(encoded)
with self.test_session() as sess:
encoded_value, decoded_value = sess.run([encoded, decoded])
self.assertEqual(encoded_value.shape, msg.shape)
self.assertEqual(decoded_value.shape, msg.shape)
def testInvalidInput(self):
def try_decode(enc):
self._decoded_f.eval(feed_dict={self._encoded_f: enc})
with self.test_session():
# Invalid length.
msg = np.random.bytes(99)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaisesRegexp(errors.InvalidArgumentError, "1 modulo 4"):
try_decode(enc + b"a")
# Invalid char used in encoding.
msg = np.random.bytes(34)
enc = base64.urlsafe_b64encode(msg)
for i in range(len(msg)):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"?" + enc[(i + 1):])
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"\x80" + enc[(i + 1):]) # outside ascii range.
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"+" + enc[(i + 1):]) # not url-safe.
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"/" + enc[(i + 1):]) # not url-safe.
# Partial padding.
msg = np.random.bytes(34)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaises(errors.InvalidArgumentError):
# enc contains == at the end. Partial padding is not allowed.
try_decode(enc[:-1])
# Unnecessary padding.
msg = np.random.bytes(33)
enc = base64.urlsafe_b64encode(msg)
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"==")
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"===")
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc + b"====")
# Padding in the middle. (Previous implementation was ok with this as long
# as padding char location was 2 or 3 (mod 4).
msg = np.random.bytes(33)
enc = base64.urlsafe_b64encode(msg)
for i in range(len(msg) - 1):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"=" + enc[(i + 1):])
for i in range(len(msg) - 2):
with self.assertRaises(errors.InvalidArgumentError):
try_decode(enc[:i] + b"==" + enc[(i + 2):])
if __name__ == "__main__":
test.main()
| npuichigo/ttsflow | third_party/tensorflow/tensorflow/python/kernel_tests/base64_ops_test.py | Python | apache-2.0 | 5,658 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class DecisionsToDataTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
# pylint: disable=W0612
self.input_data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
def testInferenceConstruction(self):
with variable_scope.variable_scope(
"DecisionsToDataTest_testInferenceContruction"):
graph_builder = decisions_to_data.DecisionsToDataLayer(self.params, 0,
None)
unused_graph = graph_builder.inference_graph(self.input_data)
if __name__ == "__main__":
googletest.main()
| npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/tensor_forest/hybrid/python/layers/decisions_to_data_test.py | Python | apache-2.0 | 2,506 |
#!/usr/bin/env python
# encoding: utf-8
# from __future__ import unicode_literals
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
def readme():
"Falls back to just file().read() on any error, because the conversion to rst is only really relevant when uploading the package to pypi"
from subprocess import CalledProcessError
try:
from subprocess import check_output
return check_output(['pandoc', '--from', 'markdown', '--to', 'rst', 'README.md']).decode('utf-8')
except (ImportError, OSError, CalledProcessError) as error:
        print('pandoc is required to get the description as rst (as required to get nice rendering in pypi) - using the original markdown instead. '
              'See http://johnmacfarlane.net/pandoc/')
return open(path.join(here, 'README.md')).read().decode('utf-8')
setup(
name='simplesuper',
description='Simpler way to call super methods without all the repetition',
long_description=readme(),
version='1.0.9',
classifiers=[
"Programming Language :: Python :: 2",
"Topic :: Software Development",
"Topic :: Utilities",
"Intended Audience :: Developers",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: ISC License (ISCL)",
],
author='Martin Häcker, Robert Buchholz, Felix Schwarz',
author_email='mhaecker@mac.com, rbu@rbu.sh, felix@schwarz-online.org',
license="ISC",
url='https://github.com/dwt/simplesuper',
keywords='python 2, super, convenience, api',
py_modules=['simplesuper'],
test_suite = "simplesuper",
)
| dwt/simplesuper | setup.py | Python | isc | 1,671 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" This file uses the fpga_interchange to create a very simple FPGA design.
This design is target the 7-series FPGA line, and the physical netlist is
suitable for a Artix 50T class fabric.
To test this flow:
- Invoke this script to output the logical netlist, physical netlist, and a
small XDC file to set the IOSTANDARD's on the ports.
- Use RapidWright's interchange branch to create a DCP using the entry point
com.xilinx.rapidwright.interchange.PhysicalNetlistToDcp
Example:
export RAPIDWRIGHT_PATH=~/RapidWright
$RAPIDWRIGHT_PATH/scripts/invoke_rapidwright.sh \
com.xilinx.rapidwright.interchange.PhysicalNetlistToDcp \
test.netlist test.phys test.xdc test.dcp
"""
import argparse
from fpga_interchange.interchange_capnp import Interchange, write_capnp_file
from fpga_interchange.logical_netlist import Library, Cell, Direction, CellInstance, LogicalNetlist
from fpga_interchange.physical_netlist import PhysicalNetlist, PhysicalBelPin, \
Placement, PhysicalPip, PhysicalSitePin, PhysicalSitePip, \
chain_branches, chain_pips, PhysicalNetType, PhysicalCellType
def example_logical_netlist():
hdi_primitives = Library('hdi_primitives')
cell = Cell('FDRE')
cell.add_port('D', Direction.Input)
cell.add_port('C', Direction.Input)
cell.add_port('CE', Direction.Input)
cell.add_port('R', Direction.Input)
cell.add_port('Q', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('IBUF')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('OBUF')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('BUFG')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('VCC')
cell.add_port('P', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('GND')
cell.add_port('G', Direction.Output)
hdi_primitives.add_cell(cell)
top = Cell('top')
top.add_port('i', Direction.Input)
top.add_port('clk', Direction.Input)
top.add_port('o', Direction.Output)
top.add_cell_instance('ibuf', 'IBUF')
top.add_cell_instance('obuf', 'OBUF')
top.add_cell_instance('clk_ibuf', 'IBUF')
top.add_cell_instance('clk_buf', 'BUFG')
top.add_cell_instance('ff', 'FDRE')
top.add_cell_instance('VCC', 'VCC')
top.add_cell_instance('GND', 'GND')
top.add_net('i')
top.connect_net_to_cell_port('i', 'i')
top.connect_net_to_instance('i', 'ibuf', 'I')
top.add_net('i_buf')
top.connect_net_to_instance('i_buf', 'ibuf', 'O')
top.connect_net_to_instance('i_buf', 'ff', 'D')
top.add_net('o_buf')
top.connect_net_to_instance('o_buf', 'ff', 'Q')
top.connect_net_to_instance('o_buf', 'obuf', 'I')
top.add_net('o')
top.connect_net_to_instance('o', 'obuf', 'O')
top.connect_net_to_cell_port('o', 'o')
top.add_net('clk')
top.connect_net_to_cell_port('clk', 'clk')
top.connect_net_to_instance('clk', 'clk_ibuf', 'I')
top.add_net('clk_ibuf')
top.connect_net_to_instance('clk_ibuf', 'clk_ibuf', 'O')
top.connect_net_to_instance('clk_ibuf', 'clk_buf', 'I')
top.add_net('clk_buf')
top.connect_net_to_instance('clk_buf', 'clk_buf', 'O')
top.connect_net_to_instance('clk_buf', 'ff', 'C')
top.add_net('GLOBAL_LOGIC1')
top.connect_net_to_instance('GLOBAL_LOGIC1', 'VCC', 'P')
top.connect_net_to_instance('GLOBAL_LOGIC1', 'ff', 'CE')
top.add_net('GLOBAL_LOGIC0')
top.connect_net_to_instance('GLOBAL_LOGIC0', 'GND', 'G')
top.connect_net_to_instance('GLOBAL_LOGIC0', 'ff', 'R')
work = Library('work')
work.add_cell(top)
logical_netlist = LogicalNetlist(
name='top',
top_instance_name='top',
top_instance=CellInstance(
cell_name='top',
view='netlist',
property_map={},
),
property_map={},
libraries={
'work': work,
'hdi_primitives': hdi_primitives,
})
return logical_netlist
def example_physical_netlist():
phys_netlist = PhysicalNetlist(part='xc7a50tfgg484-1')
ibuf_placement = Placement(
cell_type='IBUF', cell_name='ibuf', site='IOB_X0Y12', bel='INBUF_EN')
ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='PAD', cell_pin='I')
ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(ibuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y12', site_type='IOB33')
obuf_placement = Placement(
cell_type='OBUF', cell_name='obuf', site='IOB_X0Y11', bel='OUTBUF')
obuf_placement.add_bel_pin_to_cell_pin(bel_pin='IN', cell_pin='I')
obuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(obuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y11', site_type='IOB33')
clk_ibuf_placement = Placement(
cell_type='IBUF',
cell_name='clk_ibuf',
site='IOB_X0Y24',
bel='INBUF_EN')
clk_ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='PAD', cell_pin='I')
clk_ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(clk_ibuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y24', site_type='IOB33')
clk_buf_placement = Placement(
cell_type='BUFG',
cell_name='clk_buf',
site='BUFGCTRL_X0Y0',
bel='BUFG')
clk_buf_placement.add_bel_pin_to_cell_pin(bel_pin='I0', cell_pin='I')
clk_buf_placement.add_bel_pin_to_cell_pin(bel_pin='O', cell_pin='O')
phys_netlist.add_placement(clk_buf_placement)
phys_netlist.add_site_instance(site_name='BUFGCTRL_X0Y0', site_type='BUFG')
ff_placement = Placement(
cell_type='FDRE', cell_name='ff', site='SLICE_X1Y12', bel='AFF')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='SR', cell_pin='R')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='D', cell_pin='D')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='Q', cell_pin='Q')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='CE', cell_pin='CE')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='CK', cell_pin='C')
phys_netlist.add_placement(ff_placement)
phys_netlist.add_site_instance(site_name='SLICE_X1Y12', site_type='SLICEL')
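    # Routing below: each physical net is a chain of branches from a source
    # BEL pin through site pips and interconnect pips to its sink BEL pins.
    # Tile and wire names are specific to the xc7a50t fabric targeted here.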
i_root = chain_branches((PhysicalBelPin('IOB_X0Y12', 'PAD', 'PAD'),
PhysicalBelPin('IOB_X0Y12', 'INBUF_EN', 'PAD')))
phys_netlist.add_physical_net(net_name='i', sources=[i_root], stubs=[])
i_buf_root = chain_branches(
(PhysicalBelPin('IOB_X0Y12', 'INBUF_EN', 'OUT'),
PhysicalSitePip('IOB_X0Y12', 'IUSED', '0'),
PhysicalBelPin('IOB_X0Y12', 'I', 'I'),
PhysicalSitePin('IOB_X0Y12', 'I')) +
chain_pips('LIOI3_X0Y11', ('LIOI_IBUF0', 'LIOI_I0', 'LIOI_ILOGIC0_D',
'IOI_ILOGIC0_O', 'IOI_LOGIC_OUTS18_1')) +
(PhysicalPip('IO_INT_INTERFACE_L_X0Y12',
'INT_INTERFACE_LOGIC_OUTS_L_B18',
'INT_INTERFACE_LOGIC_OUTS_L18'),
PhysicalPip('INT_L_X0Y12', 'LOGIC_OUTS_L18', 'EE2BEG0'),
PhysicalPip('INT_L_X2Y12', 'EE2END0', 'BYP_ALT0'),
PhysicalPip('INT_L_X2Y12', 'BYP_ALT0', 'BYP_L0'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_BYP0', 'CLBLL_L_AX'),
PhysicalSitePin('SLICE_X1Y12', 'AX'),
PhysicalBelPin('SLICE_X1Y12', 'AX', 'AX'),
PhysicalSitePip('SLICE_X1Y12', 'AFFMUX', 'AX'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'D')))
phys_netlist.add_physical_net(
net_name='i_buf', sources=[i_buf_root], stubs=[])
o_buf_root = chain_branches(
(PhysicalBelPin('SLICE_X1Y12', 'AFF', 'Q'),
PhysicalBelPin('SLICE_X1Y12', 'AQ', 'AQ'),
PhysicalSitePin('SLICE_X1Y12', 'AQ'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_L_AQ', 'CLBLL_LOGIC_OUTS0'),
PhysicalPip('INT_L_X2Y12', 'LOGIC_OUTS_L0', 'SL1BEG0'),
PhysicalPip('INT_L_X2Y11', 'SL1END0', 'WW2BEG0'),
PhysicalPip('INT_L_X0Y11', 'WW2END0', 'IMUX_L34')) +
chain_pips('LIOI3_X0Y11', ('IOI_IMUX34_0', 'IOI_OLOGIC1_D1',
'LIOI_OLOGIC1_OQ', 'LIOI_O1')) +
(
PhysicalSitePin('IOB_X0Y11', 'O'),
PhysicalBelPin('IOB_X0Y11', 'O', 'O'),
PhysicalSitePip('IOB_X0Y11', 'OUSED', '0'),
PhysicalBelPin('IOB_X0Y11', 'OUTBUF', 'IN'),
))
phys_netlist.add_physical_net(
net_name='o_buf', sources=[o_buf_root], stubs=[])
o_root = chain_branches((PhysicalBelPin('IOB_X0Y11', 'OUTBUF', 'OUT'),
PhysicalBelPin('IOB_X0Y11', 'PAD', 'PAD')))
phys_netlist.add_physical_net(net_name='o', sources=[o_root], stubs=[])
clk_root = chain_branches((PhysicalBelPin('IOB_X0Y24', 'PAD', 'PAD'),
PhysicalBelPin('IOB_X0Y24', 'INBUF_EN', 'PAD')))
phys_netlist.add_physical_net(net_name='clk', sources=[clk_root], stubs=[])
clk_ibuf_root = chain_branches(
(PhysicalBelPin('IOB_X0Y24', 'INBUF_EN', 'OUT'),
PhysicalSitePip('IOB_X0Y24', 'IUSED', '0'),
PhysicalBelPin('IOB_X0Y24', 'I', 'I'),
PhysicalSitePin('IOB_X0Y24', 'I')) +
chain_pips('LIOI3_X0Y23', ('LIOI_IBUF0', 'LIOI_I0', 'LIOI_ILOGIC0_D',
'IOI_ILOGIC0_O', 'LIOI_I2GCLK_TOP0')) +
(PhysicalPip('HCLK_CMT_X8Y26', 'HCLK_CMT_CCIO3',
'HCLK_CMT_MUX_CLK_13'),
PhysicalPip('CLK_HROW_BOT_R_X60Y26', 'CLK_HROW_CK_IN_L13',
'CLK_HROW_BOT_R_CK_BUFG_CASCO0'),
PhysicalPip('CLK_BUFG_BOT_R_X60Y48', 'CLK_BUFG_BOT_R_CK_MUXED0',
'CLK_BUFG_BUFGCTRL0_I0'),
PhysicalSitePin('BUFGCTRL_X0Y0', 'I0'),
PhysicalBelPin('BUFGCTRL_X0Y0', 'I0', 'I0'),
PhysicalBelPin('BUFGCTRL_X0Y0', 'BUFG', 'I0')))
phys_netlist.add_physical_net(
net_name='clk_ibuf', sources=[clk_ibuf_root], stubs=[])
clk_buf_root = chain_branches(
(PhysicalBelPin('BUFGCTRL_X0Y0', 'BUFG', 'O'),
PhysicalBelPin('BUFGCTRL_X0Y0', 'O', 'O'),
PhysicalSitePin('BUFGCTRL_X0Y0', 'O'),
PhysicalPip('CLK_BUFG_BOT_R_X60Y48', 'CLK_BUFG_BUFGCTRL0_O',
'CLK_BUFG_CK_GCLK0'),
PhysicalPip(
'CLK_BUFG_REBUF_X60Y38',
'CLK_BUFG_REBUF_R_CK_GCLK0_TOP',
'CLK_BUFG_REBUF_R_CK_GCLK0_BOT',
forward=False)) + chain_pips('CLK_HROW_BOT_R_X60Y26', (
'CLK_HROW_R_CK_GCLK0', 'CLK_HROW_CK_MUX_OUT_L2',
'CLK_HROW_CK_HCLK_OUT_L2', 'CLK_HROW_CK_BUFHCLK_L2')) + (
PhysicalPip('HCLK_R_X12Y26', 'HCLK_CK_BUFHCLK2',
'HCLK_LEAF_CLK_B_BOT4'),
PhysicalPip('INT_R_X3Y12', 'GCLK_B4', 'GCLK_B4_WEST'),
PhysicalPip('INT_L_X2Y12', 'GCLK_L_B4', 'CLK_L0'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_CLK0', 'CLBLL_L_CLK'),
PhysicalSitePin('SLICE_X1Y12', 'CLK'),
PhysicalBelPin('SLICE_X1Y12', 'CLK', 'CLK'),
PhysicalSitePip('SLICE_X1Y12', 'CLKINV', 'CLK'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'CK'),
))
phys_netlist.add_physical_net(
net_name='clk_buf', sources=[clk_buf_root], stubs=[])
const0 = chain_branches((
PhysicalBelPin('SLICE_X1Y12', 'SRUSEDGND', '0'),
PhysicalSitePip('SLICE_X1Y12', 'SRUSEDMUX', '0'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'SR'),
))
phys_netlist.add_physical_net(
net_name='GLOBAL_LOGIC0',
sources=[
const0,
],
stubs=[],
net_type=PhysicalNetType.Gnd)
const1 = chain_branches((
PhysicalBelPin('SLICE_X1Y12', 'CEUSEDVCC', '1'),
PhysicalSitePip('SLICE_X1Y12', 'CEUSEDMUX', '1'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'CE'),
))
phys_netlist.add_physical_net(
net_name='GLOBAL_LOGIC1',
sources=[const1],
stubs=[],
net_type=PhysicalNetType.Vcc)
phys_netlist.add_physical_cell(
cell_name='ibuf', cell_type=PhysicalCellType.Port)
phys_netlist.add_physical_cell(
cell_name='obuf', cell_type=PhysicalCellType.Port)
return phys_netlist
def example_xdc():
return """\
set_property IOSTANDARD LVCMOS33 [get_ports]
"""
def main():
parser = argparse.ArgumentParser(
description=
"Create an example netlist, suitable for use with Vivado 2019.2")
parser.add_argument('--schema_dir', required=True)
parser.add_argument('--logical_netlist', required=True)
parser.add_argument('--physical_netlist', required=True)
parser.add_argument('--xdc', required=True)
args = parser.parse_args()
interchange = Interchange(args.schema_dir)
logical_netlist = example_logical_netlist()
logical_netlist_capnp = logical_netlist.convert_to_capnp(interchange)
phys_netlist = example_physical_netlist()
phys_netlist_capnp = phys_netlist.convert_to_capnp(interchange)
with open(args.logical_netlist, 'wb') as f:
write_capnp_file(logical_netlist_capnp, f)
with open(args.physical_netlist, 'wb') as f:
write_capnp_file(phys_netlist_capnp, f)
with open(args.xdc, 'w') as f:
f.write(example_xdc())
if __name__ == "__main__":
main()
| SymbiFlow/python-fpga-interchange | tests/example_netlist.py | Python | isc | 13,854 |
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_secretary.settings')
app = Celery('test_secretary')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
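# Tasks are then picked up from each installed app's tasks module, e.g.
# (illustrative) myapp/tasks.py defining functions decorated with @app.task.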
| rs-dev/Test-Secretary | test_secretary/celery_setup.py | Python | isc | 446 |
# -*- coding: utf-8 -*-
#
# tree.py
#
# (c) D.C.-G. 2014
#
# Tree widget for albow
#
from albow.widget import Widget
from albow.menu import Menu
from albow.fields import IntField, FloatField, TextFieldWrapped
from albow.controls import CheckBox, AttrRef, Label, Button
from albow.dialogs import ask, alert, input_text_buttons
from albow.translate import _
from extended_widgets import ChoiceButton
from theme import ThemeProperty
from layout import Column, Row
from dialogs import Dialog
from palette_view import PaletteView
from scrollpanel import ScrollRow
from utils import blit_in_rect
from pygame import image, Surface, Rect, SRCALPHA, draw, event
import copy
#-----------------------------------------------------------------------------
item_types_map = {dict: ("Compound", None, {}),
int: ("Integer", IntField, 0),
float: ("Floating point", FloatField, 0.0),
unicode: ("Text", TextFieldWrapped, ""),
bool: ("Boolean", CheckBox, True),
}
def setup_map_types_item(mp=None):
if not mp:
mp = item_types_map
map_types_item = {}
for k, v in mp.items():
if v[0] in map_types_item.keys():
_v = map_types_item.pop(v[0])
map_types_item[u"%s (%s)"%(_(v[0]), _v[0].__name__)] = _v
map_types_item[u"%s (%s)"%(_(v[0]), k.__name__)] = (k, v[1], v[2])
else:
map_types_item[v[0]] = (k, v[1], v[2])
return map_types_item
map_types_item = setup_map_types_item()
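# Illustrative shape of the mapping (hypothetical): {"Integer": (int,
# IntField, 0), ...}; clashing display names become "Name (typename)".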
#-----------------------------------------------------------------------------
# Tree item builder methods
def create_base_item(self, i_type, i_name, i_value):
return i_name, type(i_type)(i_value)
create_dict = create_int = create_float = create_unicode = create_bool = create_base_item
#-----------------------------------------------------------------------------
class SetupNewItemPanel(Dialog):
def __init__(self, type_string, types=map_types_item, ok_action=None):
self.type_string = type_string
self.ok_action = ok_action
title = Label("Choose default data")
self.t, widget, self.v = types[type_string]
self.n = u""
w_name = TextFieldWrapped(ref=AttrRef(self, 'n'))
self.w_value = self.get_widget(widget)
col = Column([Column([title,]), Label(_("Item Type: %s")%type_string, doNotTranslate=True), Row([Label("Name"), w_name], margin=0), Row([Label("Value"), self.w_value], margin=0), Row([Button("OK", action=ok_action or self.dismiss_ok), Button("Cancel", action=self.dismiss)], margin=0)], margin=0, spacing=2)
Dialog.__init__(self, client=col)
def dismiss_ok(self):
self.dismiss((self.t, self.n, getattr(self.w_value, 'value', map_types_item.get(self.type_string, [None,] * 3)[2])))
def get_widget(self, widget):
if hasattr(widget, 'value'):
value = widget(value=self.v)
elif hasattr(widget, 'text'):
value = widget(text=self.v)
elif widget is None:
value = Label("This item type is a container. Add chlidren later.")
else:
msg = "*** Error in SelectItemTypePanel.__init__():\n Widget <%s> has no 'text' or 'value' member."%widget
print msg
value = Label(msg)
return value
#-----------------------------------------------------------------------------
class SelectItemTypePanel(Dialog):
def __init__(self, title, responses, default=None, ok_action=None):
self.response = responses[0]
self.ok_action = ok_action
title = Label(title)
self.w_type = ChoiceButton(responses)
col = Column([title, self.w_type, Row([Button("OK", action=ok_action or self.dismiss_ok), Button("Cancel", action=ok_action or self.dismiss)], margin=0)], margin=0, spacing=2)
Dialog.__init__(self, client=col)
def dismiss_ok(self):
self.dismiss(self.w_type.selectedChoice)
#-----------------------------------------------------------------------------
def select_item_type(ok_action, types=map_types_item):
if len(types) > 1:
choices = types.keys()
choices.sort()
result = SelectItemTypePanel("Choose item type", responses=choices, default=None).present()
else:
result = types.keys()[0]
if type(result) in (str, unicode):
return SetupNewItemPanel(result, types, ok_action).present()
return None
#-----------------------------------------------------------------------------
class TreeRow(ScrollRow):
def click_item(self, n, e):
self.parent.click_item(n, e.local)
def mouse_down(self, e):
if e.button == 3:
_e = event.Event(e.type, {'alt': e.alt, 'meta': e.meta, 'ctrl': e.ctrl,
'shift': e.shift, 'button': 1, 'cmd': e.cmd,
'local': e.local, 'pos': e.pos,
'num_clicks': e.num_clicks})
ScrollRow.mouse_down(self, _e)
self.parent.show_menu(e.local)
else:
ScrollRow.mouse_down(self, e)
#-----------------------------------------------------------------------------
class Tree(Column):
"""..."""
rows = []
row_margin = 2
column_margin = 2
bullet_size = ThemeProperty('bullet_size')
bullet_color_active = ThemeProperty('bullet_color_active')
bullet_color_inactive = ThemeProperty('bullet_color_inactive')
def __init__(self, *args, **kwargs):
self.menu = [("Add", "add_item"),
("Delete", "delete_item"),
("New child", "add_child"),
("Rename", "rename_item"),
("", ""),
("Cut", "cut_item"),
("Copy", "copy_item"),
("Paste", "paste_item"),
("Paste as child", "paste_child"),
]
if not hasattr(self, 'map_types_item'):
global map_types_item
self.map_types_item = setup_map_types_item()
self.selected_item_index = None
self.selected_item = None
self.clicked_item = None
self.copyBuffer = kwargs.pop('copyBuffer', None)
self._parent = kwargs.pop('_parent', None)
self.styles = kwargs.pop('styles', {})
self.compound_types = [dict,] + kwargs.pop('compound_types', [])
self.item_types = self.compound_types + kwargs.pop('item_types', [a[0] for a in self.map_types_item.values()] or [int, float, unicode, bool])
for t in self.item_types:
if 'create_%s'%t.__name__ in globals().keys():
setattr(self, 'create_%s'%t.__name__, globals()['create_%s'%t.__name__])
self.show_fields = kwargs.pop('show_fields', False)
self.deployed = []
self.data = data = kwargs.pop("data", {})
self.draw_zebra = draw_zebra = kwargs.pop('draw_zebra', True)
# self.inner_width = kwargs.pop('inner_width', 'auto')
self.inner_width = kwargs.pop('inner_width', 500)
self.__num_rows = len(data.keys())
self.build_layout()
# row_height = self.font.size(' ')[1]
row_height = self.font.get_linesize()
self.treeRow = treeRow = TreeRow((self.inner_width, row_height), 10, draw_zebra=draw_zebra)
Column.__init__(self, [treeRow,], **kwargs)
def dispatch_key(self, name, evt):
if not hasattr(evt, 'key'):
return
if name == "key_down":
keyname = self.root.getKey(evt)
if keyname == "Up" and self.selected_item_index > 0:
if self.selected_item_index == None:
self.selected_item_index = -1
self.selected_item_index = max(self.selected_item_index - 1, 0)
elif keyname == "Down" and self.selected_item_index < len(self.rows) - 1:
if self.selected_item_index == None:
self.selected_item_index = -1
self.selected_item_index += 1
elif keyname == 'Page down':
if self.selected_item_index == None:
self.selected_item_index = -1
self.selected_item_index = min(len(self.rows) - 1, self.selected_item_index + self.treeRow.num_rows())
elif keyname == 'Page up':
if self.selected_item_index == None:
self.selected_item_index = -1
self.selected_item_index = max(0, self.selected_item_index - self.treeRow.num_rows())
if self.treeRow.cell_to_item_no(0, 0) != None and (self.treeRow.cell_to_item_no(0, 0) + self.treeRow.num_rows() -1 > self.selected_item_index or self.treeRow.cell_to_item_no(0, 0) + self.treeRow.num_rows() -1 < self.selected_item_index):
self.treeRow.scroll_to_item(self.selected_item_index)
if keyname == 'Return' and self.selected_item_index != None:
self.select_item(self.selected_item_index)
if self.selected_item[7] in self.compound_types:
self.deploy(self.selected_item[6])
def cut_item(self):
self.copyBuffer = ([] + self.selected_item, 1)
self.delete_item()
def copy_item(self):
self.copyBuffer = ([] + self.selected_item, 0)
def paste_item(self):
parent = self.get_item_parent(self.selected_item)
name = self.copyBuffer[0][3]
old_name = u"%s"%self.copyBuffer[0][3]
if self.copyBuffer[1] == 0:
name = input_text_buttons("Choose a name", 300, self.copyBuffer[0][3])
else:
old_name = ""
if name and type(name) in (str, unicode) and name != old_name:
new_item = copy.deepcopy(self.copyBuffer[0][9])
if hasattr(new_item, 'name'):
new_item.name = name
self.add_item_to(parent, (name, new_item))
def paste_child(self):
name = self.copyBuffer[0][3]
old_name = u"%s"%self.copyBuffer[0][3]
names = []
children = self.get_item_children(self.selected_item)
if children:
names = [a[3] for a in children]
if name in names:
name = input_text_buttons("Choose a name", 300, self.copyBuffer[0][3])
else:
old_name = ""
if name and type(name) in (str, unicode) and name != old_name:
new_item = copy.deepcopy(self.copyBuffer[0][9])
if hasattr(new_item, 'name'):
new_item.name = name
self.add_item_to(self.selected_item, (name, new_item))
@staticmethod
def add_item_to_dict(parent, name, item):
parent[name] = item
def add_item_to(self, parent, (name, item)):
if parent is None:
tp = 'dict'
parent = self.data
else:
tp = parent[7].__name__
parent = parent[9]
if not name:
i = 0
name = 'Item %03d'%i
while name in self.data.keys():
i += 1
name = 'Item %03d'%i
meth = getattr(self, 'add_item_to_%s'%tp, None)
if meth:
meth(parent, name, item)
self.build_layout()
else:
alert(_("No function implemented to add items to %s type.")%type(parent).__name__, doNotTranslate=True)
def add_item(self, types_item=None):
r = select_item_type(None, types_item or self.map_types_item)
if type(r) in (list, tuple):
t, n, v = r
meth = getattr(self, 'create_%s'%t.__name__, None)
if meth:
new_item = meth(self, t, n, v)
self.add_item_to(self.get_item_parent(self.selected_item), new_item)
def add_child(self, types_item=None):
r = select_item_type(None, types_item or self.map_types_item)
if type(r) in (list, tuple):
t, n, v = r
meth = getattr(self, 'create_%s'%t.__name__, None)
if meth:
new_item = meth(self, t, n, v)
self.add_item_to(self.selected_item, new_item)
def delete_item(self):
parent = self.get_item_parent(self.selected_item) or self.data
del parent[self.selected_item]
self.selected_item_index = None
self.selected_item = None
self.build_layout()
def rename_item(self):
result = input_text_buttons("Choose a name", 300, self.selected_item[3])
if type(result) in (str, unicode):
self.selected_item[3] = result
self.build_layout()
def get_item_parent(self, item):
if item:
pid = item[4]
for itm in self.rows:
if pid == itm[6]:
return itm
def get_item_children(self, item):
children = []
if item:
if item[6] in self.deployed:
cIds = item[5]
idx = self.rows.index(item)
for child in self.rows[idx:]:
if child[8] == item[8] + 1 and child[4] == item[6]:
children.append(child)
else:
k = item[3]
v = item[9]
lvl = item[8]
id = item[6]
aId = len(self.rows) + 1
meth = getattr(self, 'parse_%s'%v.__class__.__name__, None)
if meth is not None:
_v = meth(k, v)
else:
_v = v
ks = _v.keys()
ks.sort()
ks.reverse()
for a in ks:
b = _v[a]
                    itm = [None, None, None, a, id, [], aId, type(b), lvl + 1, b]
children.insert(0, itm)
aId += 1
return children
def show_menu(self, pos):
if self.menu:
m = Menu("Menu", self.menu, handler=self)
i = m.present(self, pos)
if i > -1:
meth = getattr(self, self.menu[i][1], None)
if meth:
meth()
def cut_item_enabled(self):
return self.selected_item is not None
def copy_item_enabled(self):
return self.cut_item_enabled()
def paste_item_enabled(self):
return self.copyBuffer is not None
def paste_child_enabled(self):
if not self.selected_item:
return False
return self.paste_item_enabled() and self.selected_item[7] in self.compound_types
def add_item_enabled(self):
return True
def add_child_enabled(self):
if not self.selected_item:
return False
return self.selected_item[7] in self.compound_types
def delete_item_enabled(self):
return self.selected_item is not None
def rename_item_enabled(self):
return self.selected_item is not None
def build_layout(self):
data = self.data
parent = 0
children = []
keys = data.keys()
keys.sort()
items = [[0, a, data[a], parent, children, keys.index(a) + 1] for a in keys]
rows = []
w = 50
aId = len(items) + 1
while items:
lvl, k, v, p, c, id = items.pop(0)
_c = False
fields = []
c = [] + c
if type(v) in self.compound_types:
meth = getattr(self, 'parse_%s'%v.__class__.__name__, None)
if meth is not None:
_v = meth(k, v)
else:
_v = v
ks = _v.keys()
ks.sort()
ks.reverse()
for a in ks:
b = _v[a]
if id in self.deployed:
itm = [lvl + 1, a, b, id, [], aId]
items.insert(0, itm)
c.append(aId)
_c = True
aId += 1
else:
if type(v) in (list, tuple):
fields = v
elif type(v) not in self.compound_types or hasattr(self._parent, 'build_%s'%k.lower()):
fields = [v,]
head = Surface((self.bullet_size * (lvl + 1) + self.font.size(k)[0], self.bullet_size), SRCALPHA)
if _c:
meth = getattr(self, 'draw_%s_bullet'%{False: 'closed', True: 'opened'}[id in self.deployed])
else:
meth = getattr(self, 'draw_%s_bullet'%v.__class__.__name__, None)
if not meth:
meth = self.draw_deadend_bullet
bg, fg, shape, text = self.styles.get(type(v),
({True: self.bullet_color_active, False: self.bullet_color_inactive}[_c],
self.fg_color, 'square', ''),
)
            try:
                meth(head, bg, fg, shape, text, k, lvl)
            except Exception:
                # Drawing the bullet is best-effort; a missing style or a
                # draw error should not abort layout construction.
                pass
rows.append([head, fields, [w] * len(fields), k, p, c, id, type(v), lvl, v])
self.rows = rows
return rows
def deploy(self, id):
p = None
if self.selected_item:
s_idx = 0 + self.selected_item_index
num_rows = len(self.rows)
if id in self.deployed:
self.deployed.remove(id)
if self.selected_item:
if self.selected_item[4] == id:
p = self.get_item_parent(self.selected_item)
p_idx = self.rows.index(p)
else:
self.deployed.append(id)
self.build_layout()
if p:
self.select_item(p_idx)
elif self.selected_item and s_idx > id:
self.select_item(s_idx + (len(self.rows) - num_rows))
def click_item(self, n, pos):
"""..."""
self.clicked_item = row = self.rows[n]
r = self.get_bullet_rect(row[0], row[8])
x = pos[0]
if self.margin + r.left - self.treeRow.hscroll <= x <= self.margin + self.treeRow.margin + r.right - self.treeRow.hscroll:
id = row[6]
self.deploy(id)
else:
self.select_item(n)
def select_item(self, n):
self.selected_item_index = n
self.selected_item = self.rows[n]
def get_bullet_rect(self, surf, lvl):
r = Rect(0, 0, self.bullet_size, self.bullet_size)
r.left = self.bullet_size * lvl
r.inflate_ip(-4, -4)
return r
def draw_item_text(self, surf, r, text):
buf = self.font.render(unicode(text), True, self.fg_color)
blit_in_rect(surf, buf, Rect(r.right, r.top, surf.get_width() - r.right, r.height), 'c')
def draw_deadend_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
r = self.get_bullet_rect(surf, lvl)
draw.polygon(surf, bg, [r.midtop, r.midright, r.midbottom, r.midleft])
self.draw_item_text(surf, r, item_text)
def draw_closed_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
r = self.get_bullet_rect(surf, lvl)
draw.polygon(surf, bg, [r.topleft, r.midright, r.bottomleft])
self.draw_item_text(surf, r, item_text)
def draw_opened_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
r = self.get_bullet_rect(surf, lvl)
draw.polygon(surf, bg, [r.topleft, r.midbottom, r.topright])
self.draw_item_text(surf, r, item_text)
def draw_tree_cell(self, surf, i, data, cell_rect, column):
"""..."""
if type(data) in (str, unicode):
self.draw_text_cell(surf, i, data, cell_rect, 'l', self.font)
else:
self.draw_image_cell(surf, i, data, cell_rect, column)
@staticmethod
def draw_image_cell(surf, i, data, cell_rect, column):
"""..."""
blit_in_rect(surf, data, cell_rect, 'l')
def draw_text_cell(self, surf, i, data, cell_rect, align, font):
buf = font.render(unicode(data), True, self.fg_color)
blit_in_rect(surf, buf, cell_rect, align)
def num_rows(self):
return len(self.rows)
def row_data(self, row):
return self.rows[row]
def column_info(self, row_data):
m = self.column_margin
d = 2 * m
x = 0
for i in range(0,2):
if i < 1:
width = self.width
data = row_data[i]
yield i, x + m, width - d, None, data
x += width
if self.show_fields:
for i in range(len(row_data[2])):
width = 50 * (i + 1)
data = row_data[2][i]
if type(data) != (str, unicode):
data = repr(data)
yield i, x + m, width - d, None, data
x += width
| OniOniOn-/MCEdit-Unified | albow/tree.py | Python | isc | 20,845 |
import PandaProd.Producer.opts
PandaProd.Producer.opts.options.config = 'Autumn18'
from PandaProd.Producer.prod import process
| cpausmit/Kraken | pandaf/014/mc.py | Python | mit | 129 |
# -*- coding: utf-8 -*-
"""
UserBot module
Copyright 2015, Ismael R. Lugo G.
"""
import translate
reload(translate)
from sysb import commands
from translate import lang
from translate import _
commands.addHandler('translate', '(tr|translate)2 (?P<in>[^ ]+) (?P<out>[^ ]+) '
'(?P<text>.*)', {'sintax': 'tr2 <input> <output> <text>',
'example': 'tr2 en es Hello!',
'alias': ('traslate2',),
'desc': _('Traduce un texto de un idioma a otro', lang)},
anyuser=True)(translate.translate2_1)
commands.addHandler('translate', '(tr|translate) (?P<in>[^ ]+) (?P<out>[^ ]+) '
                    '(?P<text>.*)', {'sintax': 'tr <input> <output> <text>',
'example': 'tr en es Hello!',
'alias': ('traslate',),
'desc': _('Traduce un texto de un idioma a otro', lang)},
anyuser=True)(translate.translate2_2)
| IsmaelRLG/UserBot | mods/translate/__init__.py | Python | mit | 817 |
from models import Post, PostForm, Department
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, render, redirect
from django.template.loader import render_to_string
import os
from django.core.mail import send_mail
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import urllib
from timboektu.books.config import NOTIFY_THRESHOLD, DELETE_THRESHOLD
from django.db.models import Count
import sys
# TODO combine with index, optional department id
def department(request, department_id):
department = get_object_or_404(Department, pk=department_id)
return index(request, department)
def index(request, department = None):
query = request.POST.get('query')
if not query:
query = request.GET.get('query')
order_by = request.GET.get('order_by')
if not order_by:
order_by = '-crdate'
page = request.GET.get('page')
# Get posts for query
#TODO extend .order_by for case insensitivity: .extra(select={'title': 'lower(title)'})
posts = []
if query:
posts = Post.objects.query_filter(query).order_by(order_by)
else:
posts = Post.objects.all().order_by(order_by)
# Filter for department
if department:
posts = posts.filter(departments__id=department.id)
# Paging
num_per_page = 15 if query or department else 5
paginator = Paginator(posts, num_per_page)
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
posts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
posts = paginator.page(paginator.num_pages)
return render(request, 'index.html', {
'posts': posts,
'departments': Department.objects.annotate(my_count=Count('post')),
'current_department': department,
'query' : query,
'title_order_by' : '-title' if order_by == 'title' else 'title',
'title_order_class' : 'dec' if order_by == 'title' else 'asc' if order_by == '-title' else '',
'price_order_by' : '-price' if order_by == 'price' else 'price',
'price_order_class' : 'dec' if order_by == 'price' else 'asc' if order_by == '-price' else '',
})
def detail(request, post_id):
p = get_object_or_404(Post, pk=post_id)
email = urllib.quote(render_to_string('emails/purchase.html', {'post': p}))
subject = urllib.quote("Interest in your advertisement on TimBoekTU")
mailto = p.email + '?subject=' + subject + '&body=' + email
return render(request, 'detail.html', {'post': p, 'mailto': mailto })
def edit(request, post_hash):
p = get_object_or_404(Post, hash=post_hash)
# Update
if request.method == 'POST':
form = PostForm(request.POST, instance=p)
if form.is_valid():
p.set_isbn_int()
p.save()
return HttpResponseRedirect(reverse('timboektu.books.views.detail', kwargs={'post_id': p.id}))
# Edit
else:
form = PostForm(instance=p)
return render(request, 'edit.html', {
'form' : form,
'post' : p,
'delete' : DELETE_THRESHOLD
})
def new(request):
# Create
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
p = form.save()
p.hash = (os.urandom(16)).encode('hex')
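            # 16 random bytes, hex-encoded: a 32-character edit token, e.g.
            # (illustrative) "9f86d081884c7d65...", mailed to the poster.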
p.set_isbn_int()
# Send edit link to user
send_mail(
'TimBoekTU edit link for ' + p.title,
render_to_string('emails/edit.html', {'post' : p}),
'services@timboektu.com',
[p.email],
fail_silently=True)
p.save()
return HttpResponseRedirect(reverse('timboektu.books.views.confirm', kwargs={'post_hash': p.hash}))
# New
else:
form = PostForm()
return render(request, 'edit.html', {
'form' : form,
'delete' : DELETE_THRESHOLD
})
def confirm(request, post_hash):
p = get_object_or_404(Post, hash=post_hash)
return render(request, 'confirm.html', {
'post' : p,
})
def renew(request, post_hash):
p = get_object_or_404(Post, hash=post_hash)
p.save() # Updates mdate, notified
return render(request, 'renew.html', {
'post' : p,
})
def delete(request):
post_hash = request.GET.get('hash')
p = get_object_or_404(Post, hash=post_hash)
p.delete()
return render(request, 'delete.html')
def about(request):
return render(request, 'about.html')
def contribute(request):
return render(request, 'contribute.html')
def locations(request):
return render(request, 'locations.html')
| phoxicle/timboektu | timboektu/books/views.py | Python | mit | 4,914 |
import os
import unicodedata
from keras.applications import InceptionV3, VGG16, MobileNet
image_type = ['.jpg']
def check_img(file_name):
"""To check image
Args:
file_name : file name
Returns:
if this file is image
"""
file_name, file_ext = os.path.splitext(file_name)
if file_ext in image_type:
return True
return False
def extract_type(file_name, delimiter):
"""To extract type from file name
Args:
file_name : face img name
delimiter : delimiter
Returns:
type: type of face
"""
file_name, ext = os.path.splitext(file_name)
type = file_name
if delimiter in type:
type = type.split(delimiter)[0]
type = unicodedata.normalize('NFC', type)
return type
def make_model(model, image_size):
    """Build a headless (no top classifier) backbone for the given model name."""
    if model == "inceptionv3":
        base_model = InceptionV3(include_top=False, input_shape=image_size + (3,))
    elif model == "vgg16" or model is None:
        base_model = VGG16(include_top=False, input_shape=image_size + (3,))
    elif model == "mobilenet":
        base_model = MobileNet(include_top=False, input_shape=image_size + (3,))
    else:
        raise ValueError("unknown model: %s" % model)
    return base_model
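# Example usage (hypothetical values):
#   base_model = make_model("vgg16", (224, 224))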
| seongahjo/Mosaicer | mosaicer/file_util.py | Python | mit | 1,185 |
from PyQt5.QtCore import *
from PyDesignData.PyDesignObject import *
from PyDesignModel.PyDesignCalcSheetsItem import PyDesignCalcSheetsItem
from PyDesignModel.PyDesignIcons import *
from PyDesignModel.PyDesignMaterialsItem import PyDesignMaterialsItem
from PyDesignModel.PyDesignModelItem import PyDesignModelItem
from PyDesignModel.PyDesignParametersItem import *
from PyDesignModel.PyDesignGeometriesItem import *
from PyDesignModel.PyDesignMeshesItem import *
from PyDesignModel.PyDesignSolversItem import PyDesignSolversItem
__author__ = 'magnus'
class PyDesignAnalysisItem(PyDesignModelItem):
def __init__(self, parent, py_design_analysis):
"""
:type py_design_analysis: PyDesignAnalysis
:param parent:
:param py_design_analysis:
:return:
"""
PyDesignModelItem.__init__(self, parent, parent.model)
self._data_object = py_design_analysis
self._data_dict[PyDesignNamedObject.NAME] = self.data_name
self._data_dict[PyDesignCommon.VALUE] = self.data_value
'''self._data_dict[PDP.size_temp] = self.data_size_temp
self._data_dict[PDP.medium_type] = self.data_medium_type
self._data_dict[PDP.size_pres] = self.data_size_pres'''
self._set_data_dict[PyDesignNamedObject.NAME] = self.set_data_name
self._icon = get_icon("analysis")
py_design_analysis.add_listener(self)
self._children.append(PyDesignParametersItem(py_design_analysis.properties, self))
self._children.append(PyDesignCalcSheetsItem(py_design_analysis, self))
self._children.append(PyDesignGeometriesItem(py_design_analysis, self))
self._children.append(PyDesignMeshesItem(py_design_analysis, self))
self._children.append(PyDesignMaterialsItem(py_design_analysis, self))
self._children.append(PyDesignSolversItem(py_design_analysis, self))
self._type = "PyDesignAnalysisModelItem"
self._context_menu = QMenu()
add_prop_menu = self._context_menu.addAction("Add property")
add_prop_menu.triggered.connect(self.on_add_property)
add_prop_menu = self._context_menu.addAction("Add calculation sheet")
add_prop_menu.triggered.connect(self.on_add_sheet)
add_prop_menu = self._context_menu.addAction("Add geometry")
add_prop_menu.triggered.connect(self.on_add_geometry)
add_prop_menu = self._context_menu.addAction("Add mesh")
add_prop_menu.triggered.connect(self.on_add_mesh)
add_prop_menu = self._context_menu.addAction("Add material")
add_prop_menu.triggered.connect(self.on_add_material)
add_prop_menu = self._context_menu.addAction("Delete analysis")
add_prop_menu.triggered.connect(self.on_delete)
def on_add_property(self):
PyDesignEventHandlers.on_add_parameter(self._data_object.properties)
def on_add_sheet(self):
PyDesignEventHandlers.on_add_sheet(self._data_object)
def on_add_geometry(self):
PyDesignEventHandlers.on_add_geometry(self._data_object, None)
def on_add_mesh(self):
pass
def on_add_material(self):
pass
def on_delete(self):
pass
def data_name(self, int_role):
if int_role == Qt.DisplayRole or int_role == Qt.EditRole:
return self._data_object.name
elif int_role == Qt.DecorationRole:
return self._icon
else:
return None
def set_data_name(self, int_role, data):
if int_role == Qt.EditRole:
self._data_object.name = data
return True
def data_value(self, int_role):
if int_role == Qt.DisplayRole:
type_name = "Unknown analysis"
type_name = "3D analysis" if self._data_object.analysis_type == 0 else type_name
type_name = "2D analysis" if self._data_object.analysis_type == 1 else type_name
type_name = "2D analysis axis symmetric" if self._data_object.analysis_type == 2 else type_name
return type_name
else:
return None
def data_size_pres(self, int_role):
if int_role == Qt.DisplayRole:
return self._data_object.size_pres
else:
return None
def data_medium_type(self, int_role):
if int_role == Qt.DisplayRole:
return self._data_object.medium_type
else:
return None
@staticmethod
def item_flags(int_pdp):
default_flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled
if int_pdp == PyDesignNamedObject.NAME:
return default_flags | Qt.ItemIsEditable
else:
return default_flags
def on_context_menu(self, point):
self._context_menu.exec_(point)
def on_event(self, event):
"""
:type event: PyDesignEvent
:param event:
:return:
"""
self._model.on_item_changed(self)
if event.type == PyDesignEvent.EndItemAddedEvent:
pass
#new_item = PyDesignParameterItem(self, event.value)
#self.add_child(new_item)
return | pracedru/pyDesign | PyDesignModel/PyDesignAnalysisItem.py | Python | mit | 5,100 |
import arcpy, os, json, csv
from portal import additem, shareItem, generateToken, getUserContent, updateItem, getGroupID, deleteItem, getGroupContent
from metadata import metadata
from ESRImapservice import ESRImapservice
class csvportal(object):
def __init__(self, user, password, portal, worksspace, groups=[]):
"""Connect to portal with username and pasword, also set the local workspace"""
self.user = user
self.password = password
self.portal = portal
self.groups = groups
self.token = generateToken(self.user, self.password, self.portal)
self.groupIDs = [getGroupID(g, self.token, self.portal) for g in self.groups]
if len(self.groupIDs) == 0:
self.userContent = getUserContent(user, '', self.token, self.portal )
else:
self.userContent = getGroupContent(self.groups[0], self.token, self.portal)
self.existingIDs = { n['title'] : n['id'] for n in self.userContent["items"]}
self.LayersFoundinMXD = []
self.ws = worksspace
if worksspace: arcpy.env.workspace = worksspace
def updateToken(self):
"""refresh the token, might be necessary if becomes invalid"""
self.token = generateToken(self.user, self.password, self.portal)
return self.token
def uploadCsv(self, csvpath, sep=";", headerlines=1, nameCol=0, pathCol=1, urlCol=2):
"""upload every row in a csv"""
with open( csvpath , 'rb') as csvfile:
nr = 0
csv_reader = csv.reader(csvfile, dialect=csv.excel, delimiter=sep)
for n in range(headerlines): csv_reader.next()
for row in csv_reader:
line = [unicode(cell, 'latin-1') for cell in row]
name, ds, url = (line[nameCol], line[pathCol], line[urlCol])
if self.ws and os.path.dirname(ds).endswith('.sde'):
ds = os.path.join(self.ws , os.path.basename(ds) )
self.addLyr(ds, name, url, self.groupIDs)
#generate new token every 50 uses
if not nr%50 : self.token = generateToken(self.user, self.password, self.portal)
nr += 1
##TODO: DELETE layers in group and not in csv
def addLyr(self, dataSource, name, serviceUrl, groupIDs=[]):
"""Add *dataSource* to *portal* for *user* , as a item with *name*
representing a layer in *service* """
meta = metadata.metadataFromArcgis( dataSource )
author = meta.credits if len( meta.credits ) else "Stad Antwerpen"
descrip = ( "<strong>"+ meta.title +"</strong> <div><em>"+
meta.orgname + "</em></div> " + meta.description +
"\n<br/> Creatiedatum: " + meta.createDate +
"\n<br/> Publicatiedatum: " + meta.pubDate +
"\n<br/> Revisiedatum: " + meta.reviseDate +
"\n<br/> Beheer: " + meta.contacts +
"\n<br/> Contact: " + meta.eMails )
if name in self.existingIDs.keys():
self.LayersFoundinMXD.append(name)
arcpy.AddMessage( "updating " + name )
item = updateItem(self.user, self.token, self.portal, self.existingIDs[name], serviceUrl,
title=name, summary=meta.purpose, description=descrip, author=author, tags=",".join(meta.tags))
else:
arcpy.AddMessage( "adding " + name )
item = additem(self.user, self.token, self.portal, serviceUrl,
title=name, summary=meta.purpose, description=descrip, author=author, tags=",".join(meta.tags) )
if "success" in item.keys() and item["success"]:
id = item["id"]
arcpy.AddMessage( shareItem(id, self.token, self.portal, True, True, groupIDs) )
elif "success" in item.keys() and not item["success"]:
raise Exception( "Error uploading "+ name +" "+ json.dumps(result))
else:
arcpy.AddMessage("unsure of success for layer "+ name +" "+ json.dumps(result))
def delLyr(self, name):
if name in self.existingIDs.keys():
result = deleteItem(self.existingIDs[name] , self.token, self.portal, self.user)
if "success" in result.keys() and result["success"]:
arcpy.AddMessage("Deleted layer: " + name )
elif "success" in result.keys() and not result["success"]:
raise Exception( "Error deleting "+ name +" "+ json.dumps(result))
else:
arcpy.AddMessage("unsure of success for layer "+ name +" "+ json.dumps(result)) | warrieka/portal4argis_tools | portal/csvportal.py | Python | mit | 4,761 |
"""
This file holds the default values for the various programs.
"""
import sys
__all__ = ['defaults']
defaults = {
# filenames
'cache_file' : 'bin/cache.bin',
'log_file' : sys.stderr,
# values
'num_threads' : 16,
# flags
'debug' : False,
'https' : False,
'offline' : False,
'quiet' : False
}
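# A minimal override sketch (how callers consume this dict is an assumption):
#
#   options = dict(defaults)
#   options['num_threads'] = 4
#   options['debug'] = True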
| crypt3lx2k/Imageboard-Web-Interface | defaults.py | Python | mit | 363 |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the DBCore database abstraction.
"""
import os
import shutil
import sqlite3
import unittest
from test import _common
from beets import dbcore
from tempfile import mkstemp
# Fixture: concrete database and model classes. For migration tests, we
# have multiple models with different numbers of fields.
class SortFixture(dbcore.query.FieldSort):
pass
class QueryFixture(dbcore.query.Query):
def __init__(self, pattern):
self.pattern = pattern
def clause(self):
return None, ()
def match(self):
return True
class ModelFixture1(dbcore.Model):
_table = 'test'
_flex_table = 'testflex'
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.STRING,
}
_types = {
'some_float_field': dbcore.types.FLOAT,
}
_sorts = {
'some_sort': SortFixture,
}
_queries = {
'some_query': QueryFixture,
}
@classmethod
def _getters(cls):
return {}
def _template_funcs(self):
return {}
class DatabaseFixture1(dbcore.Database):
_models = (ModelFixture1,)
pass
class ModelFixture2(ModelFixture1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
}
class DatabaseFixture2(dbcore.Database):
_models = (ModelFixture2,)
pass
class ModelFixture3(ModelFixture1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
'field_three': dbcore.types.INTEGER,
}
class DatabaseFixture3(dbcore.Database):
_models = (ModelFixture3,)
pass
class ModelFixture4(ModelFixture1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
'field_three': dbcore.types.INTEGER,
'field_four': dbcore.types.INTEGER,
}
class DatabaseFixture4(dbcore.Database):
_models = (ModelFixture4,)
pass
class AnotherModelFixture(ModelFixture1):
_table = 'another'
_flex_table = 'anotherflex'
_fields = {
'id': dbcore.types.PRIMARY_ID,
'foo': dbcore.types.INTEGER,
}
class ModelFixture5(ModelFixture1):
_fields = {
'some_string_field': dbcore.types.STRING,
'some_float_field': dbcore.types.FLOAT,
'some_boolean_field': dbcore.types.BOOLEAN,
}
class DatabaseFixture5(dbcore.Database):
_models = (ModelFixture5,)
pass
class DatabaseFixtureTwoModels(dbcore.Database):
_models = (ModelFixture2, AnotherModelFixture)
pass
class ModelFixtureWithGetters(dbcore.Model):
@classmethod
def _getters(cls):
return {'aComputedField': (lambda s: 'thing')}
def _template_funcs(self):
return {}
@_common.slow_test()
class MigrationTest(unittest.TestCase):
"""Tests the ability to change the database schema between
versions.
"""
@classmethod
def setUpClass(cls):
handle, cls.orig_libfile = mkstemp('orig_db')
os.close(handle)
# Set up a database with the two-field schema.
old_lib = DatabaseFixture2(cls.orig_libfile)
# Add an item to the old library.
old_lib._connection().execute(
'insert into test (field_one, field_two) values (4, 2)'
)
old_lib._connection().commit()
del old_lib
@classmethod
def tearDownClass(cls):
os.remove(cls.orig_libfile)
def setUp(self):
handle, self.libfile = mkstemp('db')
os.close(handle)
shutil.copyfile(self.orig_libfile, self.libfile)
def tearDown(self):
os.remove(self.libfile)
def test_open_with_same_fields_leaves_untouched(self):
new_lib = DatabaseFixture2(self.libfile)
c = new_lib._connection().cursor()
c.execute("select * from test")
row = c.fetchone()
self.assertEqual(len(row.keys()), len(ModelFixture2._fields))
def test_open_with_new_field_adds_column(self):
new_lib = DatabaseFixture3(self.libfile)
c = new_lib._connection().cursor()
c.execute("select * from test")
row = c.fetchone()
self.assertEqual(len(row.keys()), len(ModelFixture3._fields))
def test_open_with_fewer_fields_leaves_untouched(self):
new_lib = DatabaseFixture1(self.libfile)
c = new_lib._connection().cursor()
c.execute("select * from test")
row = c.fetchone()
self.assertEqual(len(row.keys()), len(ModelFixture2._fields))
def test_open_with_multiple_new_fields(self):
new_lib = DatabaseFixture4(self.libfile)
c = new_lib._connection().cursor()
c.execute("select * from test")
row = c.fetchone()
self.assertEqual(len(row.keys()), len(ModelFixture4._fields))
def test_extra_model_adds_table(self):
new_lib = DatabaseFixtureTwoModels(self.libfile)
try:
new_lib._connection().execute("select * from another")
except sqlite3.OperationalError:
self.fail("select failed")
class TransactionTest(unittest.TestCase):
def setUp(self):
self.db = DatabaseFixture1(':memory:')
def tearDown(self):
self.db._connection().close()
def test_mutate_increase_revision(self):
old_rev = self.db.revision
with self.db.transaction() as tx:
tx.mutate(
'INSERT INTO {} '
'(field_one) '
'VALUES (?);'.format(ModelFixture1._table),
(111,),
)
self.assertGreater(self.db.revision, old_rev)
def test_query_no_increase_revision(self):
old_rev = self.db.revision
with self.db.transaction() as tx:
tx.query('PRAGMA table_info(%s)' % ModelFixture1._table)
self.assertEqual(self.db.revision, old_rev)
class ModelTest(unittest.TestCase):
def setUp(self):
self.db = DatabaseFixture1(':memory:')
def tearDown(self):
self.db._connection().close()
def test_add_model(self):
model = ModelFixture1()
model.add(self.db)
rows = self.db._connection().execute('select * from test').fetchall()
self.assertEqual(len(rows), 1)
def test_store_fixed_field(self):
model = ModelFixture1()
model.add(self.db)
model.field_one = 123
model.store()
row = self.db._connection().execute('select * from test').fetchone()
self.assertEqual(row['field_one'], 123)
def test_revision(self):
old_rev = self.db.revision
model = ModelFixture1()
model.add(self.db)
model.store()
self.assertEqual(model._revision, self.db.revision)
self.assertGreater(self.db.revision, old_rev)
mid_rev = self.db.revision
model2 = ModelFixture1()
model2.add(self.db)
model2.store()
self.assertGreater(model2._revision, mid_rev)
self.assertGreater(self.db.revision, model._revision)
# revision changed, so the model should be re-loaded
model.load()
self.assertEqual(model._revision, self.db.revision)
# revision did not change, so no reload
mod2_old_rev = model2._revision
model2.load()
self.assertEqual(model2._revision, mod2_old_rev)
def test_retrieve_by_id(self):
model = ModelFixture1()
model.add(self.db)
other_model = self.db._get(ModelFixture1, model.id)
self.assertEqual(model.id, other_model.id)
def test_store_and_retrieve_flexattr(self):
model = ModelFixture1()
model.add(self.db)
model.foo = 'bar'
model.store()
other_model = self.db._get(ModelFixture1, model.id)
self.assertEqual(other_model.foo, 'bar')
def test_delete_flexattr(self):
model = ModelFixture1()
model['foo'] = 'bar'
self.assertTrue('foo' in model)
del model['foo']
self.assertFalse('foo' in model)
def test_delete_flexattr_via_dot(self):
model = ModelFixture1()
model['foo'] = 'bar'
self.assertTrue('foo' in model)
del model.foo
self.assertFalse('foo' in model)
def test_delete_flexattr_persists(self):
model = ModelFixture1()
model.add(self.db)
model.foo = 'bar'
model.store()
model = self.db._get(ModelFixture1, model.id)
del model['foo']
model.store()
model = self.db._get(ModelFixture1, model.id)
self.assertFalse('foo' in model)
def test_delete_non_existent_attribute(self):
model = ModelFixture1()
with self.assertRaises(KeyError):
del model['foo']
def test_delete_fixed_attribute(self):
model = ModelFixture5()
model.some_string_field = 'foo'
model.some_float_field = 1.23
model.some_boolean_field = True
for field, type_ in model._fields.items():
self.assertNotEqual(model[field], type_.null)
for field, type_ in model._fields.items():
del model[field]
self.assertEqual(model[field], type_.null)
def test_null_value_normalization_by_type(self):
model = ModelFixture1()
model.field_one = None
self.assertEqual(model.field_one, 0)
def test_null_value_stays_none_for_untyped_field(self):
model = ModelFixture1()
model.foo = None
self.assertEqual(model.foo, None)
def test_normalization_for_typed_flex_fields(self):
model = ModelFixture1()
model.some_float_field = None
self.assertEqual(model.some_float_field, 0.0)
def test_load_deleted_flex_field(self):
model1 = ModelFixture1()
model1['flex_field'] = True
model1.add(self.db)
model2 = self.db._get(ModelFixture1, model1.id)
self.assertIn('flex_field', model2)
del model1['flex_field']
model1.store()
model2.load()
self.assertNotIn('flex_field', model2)
def test_check_db_fails(self):
with self.assertRaisesRegex(ValueError, 'no database'):
dbcore.Model()._check_db()
with self.assertRaisesRegex(ValueError, 'no id'):
ModelFixture1(self.db)._check_db()
dbcore.Model(self.db)._check_db(need_id=False)
def test_missing_field(self):
with self.assertRaises(AttributeError):
ModelFixture1(self.db).nonExistingKey
def test_computed_field(self):
model = ModelFixtureWithGetters()
self.assertEqual(model.aComputedField, 'thing')
with self.assertRaisesRegex(KeyError, 'computed field .+ deleted'):
del model.aComputedField
def test_items(self):
model = ModelFixture1(self.db)
model.id = 5
self.assertEqual({('id', 5), ('field_one', 0), ('field_two', '')},
set(model.items()))
def test_delete_internal_field(self):
model = dbcore.Model()
del model._db
with self.assertRaises(AttributeError):
model._db
def test_parse_nonstring(self):
with self.assertRaisesRegex(TypeError, "must be a string"):
dbcore.Model._parse(None, 42)
class FormatTest(unittest.TestCase):
def test_format_fixed_field_integer(self):
model = ModelFixture1()
model.field_one = 155
value = model.formatted().get('field_one')
self.assertEqual(value, '155')
def test_format_fixed_field_integer_normalized(self):
"""The normalize method of the Integer class rounds floats
"""
model = ModelFixture1()
model.field_one = 142.432
value = model.formatted().get('field_one')
self.assertEqual(value, '142')
model.field_one = 142.863
value = model.formatted().get('field_one')
self.assertEqual(value, '143')
def test_format_fixed_field_string(self):
model = ModelFixture1()
model.field_two = 'caf\xe9'
value = model.formatted().get('field_two')
self.assertEqual(value, 'caf\xe9')
def test_format_flex_field(self):
model = ModelFixture1()
model.other_field = 'caf\xe9'
value = model.formatted().get('other_field')
self.assertEqual(value, 'caf\xe9')
def test_format_flex_field_bytes(self):
model = ModelFixture1()
model.other_field = 'caf\xe9'.encode()
value = model.formatted().get('other_field')
self.assertTrue(isinstance(value, str))
self.assertEqual(value, 'caf\xe9')
def test_format_unset_field(self):
model = ModelFixture1()
value = model.formatted().get('other_field')
self.assertEqual(value, '')
def test_format_typed_flex_field(self):
model = ModelFixture1()
model.some_float_field = 3.14159265358979
value = model.formatted().get('some_float_field')
self.assertEqual(value, '3.1')
class FormattedMappingTest(unittest.TestCase):
def test_keys_equal_model_keys(self):
model = ModelFixture1()
formatted = model.formatted()
self.assertEqual(set(model.keys(True)), set(formatted.keys()))
def test_get_unset_field(self):
model = ModelFixture1()
formatted = model.formatted()
with self.assertRaises(KeyError):
formatted['other_field']
def test_get_method_with_default(self):
model = ModelFixture1()
formatted = model.formatted()
self.assertEqual(formatted.get('other_field'), '')
def test_get_method_with_specified_default(self):
model = ModelFixture1()
formatted = model.formatted()
self.assertEqual(formatted.get('other_field', 'default'), 'default')
class ParseTest(unittest.TestCase):
def test_parse_fixed_field(self):
value = ModelFixture1._parse('field_one', '2')
self.assertIsInstance(value, int)
self.assertEqual(value, 2)
def test_parse_flex_field(self):
value = ModelFixture1._parse('some_float_field', '2')
self.assertIsInstance(value, float)
self.assertEqual(value, 2.0)
def test_parse_untyped_field(self):
value = ModelFixture1._parse('field_nine', '2')
self.assertEqual(value, '2')
class QueryParseTest(unittest.TestCase):
def pqp(self, part):
return dbcore.queryparse.parse_query_part(
part,
{'year': dbcore.query.NumericQuery},
{':': dbcore.query.RegexpQuery},
)[:-1] # remove the negate flag
def test_one_basic_term(self):
q = 'test'
r = (None, 'test', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
def test_one_keyed_term(self):
q = 'test:val'
r = ('test', 'val', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
def test_colon_at_end(self):
q = 'test:'
r = ('test', '', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
def test_one_basic_regexp(self):
q = r':regexp'
r = (None, 'regexp', dbcore.query.RegexpQuery)
self.assertEqual(self.pqp(q), r)
def test_keyed_regexp(self):
q = r'test::regexp'
r = ('test', 'regexp', dbcore.query.RegexpQuery)
self.assertEqual(self.pqp(q), r)
def test_escaped_colon(self):
q = r'test\:val'
r = (None, 'test:val', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
def test_escaped_colon_in_regexp(self):
q = r':test\:regexp'
r = (None, 'test:regexp', dbcore.query.RegexpQuery)
self.assertEqual(self.pqp(q), r)
def test_single_year(self):
q = 'year:1999'
r = ('year', '1999', dbcore.query.NumericQuery)
self.assertEqual(self.pqp(q), r)
def test_multiple_years(self):
q = 'year:1999..2010'
r = ('year', '1999..2010', dbcore.query.NumericQuery)
self.assertEqual(self.pqp(q), r)
def test_empty_query_part(self):
q = ''
r = (None, '', dbcore.query.SubstringQuery)
self.assertEqual(self.pqp(q), r)
class QueryFromStringsTest(unittest.TestCase):
def qfs(self, strings):
return dbcore.queryparse.query_from_strings(
dbcore.query.AndQuery,
ModelFixture1,
{':': dbcore.query.RegexpQuery},
strings,
)
def test_zero_parts(self):
q = self.qfs([])
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertEqual(len(q.subqueries), 1)
self.assertIsInstance(q.subqueries[0], dbcore.query.TrueQuery)
def test_two_parts(self):
q = self.qfs(['foo', 'bar:baz'])
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertEqual(len(q.subqueries), 2)
self.assertIsInstance(q.subqueries[0], dbcore.query.AnyFieldQuery)
self.assertIsInstance(q.subqueries[1], dbcore.query.SubstringQuery)
def test_parse_fixed_type_query(self):
q = self.qfs(['field_one:2..3'])
self.assertIsInstance(q.subqueries[0], dbcore.query.NumericQuery)
def test_parse_flex_type_query(self):
q = self.qfs(['some_float_field:2..3'])
self.assertIsInstance(q.subqueries[0], dbcore.query.NumericQuery)
def test_empty_query_part(self):
q = self.qfs([''])
self.assertIsInstance(q.subqueries[0], dbcore.query.TrueQuery)
def test_parse_named_query(self):
q = self.qfs(['some_query:foo'])
self.assertIsInstance(q.subqueries[0], QueryFixture)
class SortFromStringsTest(unittest.TestCase):
def sfs(self, strings):
return dbcore.queryparse.sort_from_strings(
ModelFixture1,
strings,
)
def test_zero_parts(self):
s = self.sfs([])
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(s, dbcore.query.NullSort())
def test_one_parts(self):
s = self.sfs(['field+'])
self.assertIsInstance(s, dbcore.query.Sort)
def test_two_parts(self):
s = self.sfs(['field+', 'another_field-'])
self.assertIsInstance(s, dbcore.query.MultipleSort)
self.assertEqual(len(s.sorts), 2)
def test_fixed_field_sort(self):
s = self.sfs(['field_one+'])
self.assertIsInstance(s, dbcore.query.FixedFieldSort)
self.assertEqual(s, dbcore.query.FixedFieldSort('field_one'))
def test_flex_field_sort(self):
s = self.sfs(['flex_field+'])
self.assertIsInstance(s, dbcore.query.SlowFieldSort)
self.assertEqual(s, dbcore.query.SlowFieldSort('flex_field'))
def test_special_sort(self):
s = self.sfs(['some_sort+'])
self.assertIsInstance(s, SortFixture)
class ParseSortedQueryTest(unittest.TestCase):
def psq(self, parts):
return dbcore.parse_sorted_query(
ModelFixture1,
parts.split(),
)
def test_and_query(self):
q, s = self.psq('foo bar')
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 2)
def test_or_query(self):
q, s = self.psq('foo , bar')
self.assertIsInstance(q, dbcore.query.OrQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 2)
def test_no_space_before_comma_or_query(self):
q, s = self.psq('foo, bar')
self.assertIsInstance(q, dbcore.query.OrQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 2)
def test_no_spaces_or_query(self):
q, s = self.psq('foo,bar')
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 1)
def test_trailing_comma_or_query(self):
q, s = self.psq('foo , bar ,')
self.assertIsInstance(q, dbcore.query.OrQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 3)
def test_leading_comma_or_query(self):
q, s = self.psq(', foo , bar')
self.assertIsInstance(q, dbcore.query.OrQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 3)
def test_only_direction(self):
q, s = self.psq('-')
self.assertIsInstance(q, dbcore.query.AndQuery)
self.assertIsInstance(s, dbcore.query.NullSort)
self.assertEqual(len(q.subqueries), 1)
class ResultsIteratorTest(unittest.TestCase):
def setUp(self):
self.db = DatabaseFixture1(':memory:')
model = ModelFixture1()
model['foo'] = 'baz'
model.add(self.db)
model = ModelFixture1()
model['foo'] = 'bar'
model.add(self.db)
def tearDown(self):
self.db._connection().close()
def test_iterate_once(self):
objs = self.db._fetch(ModelFixture1)
self.assertEqual(len(list(objs)), 2)
def test_iterate_twice(self):
objs = self.db._fetch(ModelFixture1)
list(objs)
self.assertEqual(len(list(objs)), 2)
def test_concurrent_iterators(self):
results = self.db._fetch(ModelFixture1)
it1 = iter(results)
it2 = iter(results)
next(it1)
list(it2)
self.assertEqual(len(list(it1)), 1)
def test_slow_query(self):
q = dbcore.query.SubstringQuery('foo', 'ba', False)
objs = self.db._fetch(ModelFixture1, q)
self.assertEqual(len(list(objs)), 2)
def test_slow_query_negative(self):
q = dbcore.query.SubstringQuery('foo', 'qux', False)
objs = self.db._fetch(ModelFixture1, q)
self.assertEqual(len(list(objs)), 0)
def test_iterate_slow_sort(self):
s = dbcore.query.SlowFieldSort('foo')
res = self.db._fetch(ModelFixture1, sort=s)
objs = list(res)
self.assertEqual(objs[0].foo, 'bar')
self.assertEqual(objs[1].foo, 'baz')
def test_unsorted_subscript(self):
objs = self.db._fetch(ModelFixture1)
self.assertEqual(objs[0].foo, 'baz')
self.assertEqual(objs[1].foo, 'bar')
def test_slow_sort_subscript(self):
s = dbcore.query.SlowFieldSort('foo')
objs = self.db._fetch(ModelFixture1, sort=s)
self.assertEqual(objs[0].foo, 'bar')
self.assertEqual(objs[1].foo, 'baz')
def test_length(self):
objs = self.db._fetch(ModelFixture1)
self.assertEqual(len(objs), 2)
def test_out_of_range(self):
objs = self.db._fetch(ModelFixture1)
with self.assertRaises(IndexError):
objs[100]
def test_no_results(self):
self.assertIsNone(self.db._fetch(
ModelFixture1, dbcore.query.FalseQuery()).get())
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| beetbox/beets | test/test_dbcore.py | Python | mit | 23,723 |
import os
DEFAULT_CHARACTER_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'characters')
CHARACTER_PATH = os.environ.get('HR_CHARACTER_PATH', DEFAULT_CHARACTER_PATH)
RESET_SESSION_BY_HELLO = False
SESSION_REMOVE_TIMEOUT = 3600 # Timeout seconds for a session to be removed
CHATBOT_LOG_DIR = os.environ.get('CHATBOT_LOG_DIR') or os.path.expanduser('~/.hr/chatbot')
SERVER_LOG_DIR = os.environ.get('SERVER_LOG_DIR') or os.path.expanduser('~/.hr/log/chatbot')
HISTORY_DIR = os.path.join(CHATBOT_LOG_DIR, 'history')
TEST_HISTORY_DIR = os.path.join(CHATBOT_LOG_DIR, 'test/history')
CS_HOST = os.environ.get('CS_HOST') or 'localhost'
CS_PORT = os.environ.get('CS_PORT') or '1024'
CS_BOT = os.environ.get('CS_BOT') or 'rose'
HR_CHATBOT_AUTHKEY = os.environ.get('HR_CHATBOT_AUTHKEY', 'AAAAB3NzaC')
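# Most of the values above are read from environment variables at import
# time; each os.environ.get call falls back to the default shown when the
# variable is unset.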
config = {}
config['DEFAULT_CHARACTER_PATH'] = DEFAULT_CHARACTER_PATH
config['CHARACTER_PATH'] = CHARACTER_PATH
config['RESET_SESSION_BY_HELLO'] = RESET_SESSION_BY_HELLO
config['SESSION_REMOVE_TIMEOUT'] = SESSION_REMOVE_TIMEOUT
config['CHATBOT_LOG_DIR'] = CHATBOT_LOG_DIR
config['SERVER_LOG_DIR'] = SERVER_LOG_DIR
config['HISTORY_DIR'] = HISTORY_DIR
config['CS_HOST'] = CS_HOST
config['CS_PORT'] = CS_PORT
config['CS_BOT'] = CS_BOT
config['HR_CHATBOT_AUTHKEY'] = HR_CHATBOT_AUTHKEY
| hansonrobotics/chatbot | src/chatbot/server/config.py | Python | mit | 1,300 |
# -*- coding: utf-8 -*-
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('dpf.sitetheme')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
| a25kk/dpf | src/dpf.sitetheme/dpf/sitetheme/__init__.py | Python | mit | 217 |
# -*- coding: utf-8 -*-
import json
from enum import IntEnum
import requests
from wechatpy.client.api.base import BaseWeChatAPI
class FileType(IntEnum):
JSON = 1
CSV = 2
class ConflictMode(IntEnum):
INSERT = 1
UPSERT = 2
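# Illustrative usage sketch (assumes the WeChatCloud API is exposed on a
# client instance as `client.cloud`, and that the env ID, collection and
# import file below exist):
#
#   from wechatpy import WeChatClient
#   client = WeChatClient('appid', 'secret')
#   job_id = client.cloud.db_import(
#       'my-env-id', 'users', 'users.json',
#       FileType.JSON, ConflictMode.UPSERT,
#   )
#   print(client.cloud.db_query_migrate_info('my-env-id', job_id))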
class WeChatCloud(BaseWeChatAPI):
API_BASE_URL = 'https://api.weixin.qq.com/'
def invoke_cloud_function(self, env, name, data=None):
"""
触发云函数
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/functions/invokeCloudFunction.html
:param env: 云开发环境 ID
:param name: 云函数名称
:param data: 云函数的传入参数,具体结构由开发者定义
"""
return self._post(
'tcb/invokecloudfunction',
params={
'env': env,
'name': name,
},
data=data,
result_processor=lambda x: json.loads(x['resp_data'])
)
def db_import(self, env, collection, file_path, file_type, conflict_mode, stop_on_error=True):
"""
数据库导入
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseMigrateImport.html
:param env: 云开发环境 ID
:param collection: 导入 collection 名称
:param file_path: 导入文件路径(导入文件需先上传到同环境的存储中,可使用开发者工具或 HTTP API的上传文件 API上传)
:param file_type: 导入文件类型,文件格式参考数据库导入指引中的文件格式部分,值为数字,1 为 JSON,2 为 CSV
:param stop_on_error: 是否在遇到错误时停止导入,默认为 True
:param conflict_mode: 冲突处理模式, 值为数字,1 为 INSERT,2 为 UPSERT
:return: 导入任务 ID,可使用数据库迁移进度查询 API 查询导入进度及结果
"""
return self._post(
'tcb/databasemigrateimport',
data={
'env': env,
'collection_name': collection,
'file_path': file_path,
'file_type': file_type,
'stop_on_error': stop_on_error,
'conflict_mode': conflict_mode,
},
result_processor=lambda x: x['job_id']
)
def db_export(self, env, file_path, file_type, query):
"""
数据库导出
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseMigrateExport.html
:param env: 云开发环境 ID
:param file_path: 导出文件路径(导入文件需先上传到同环境的存储中,可使用开发者工具或 HTTP API的上传文件 API上传)
:param file_type: 导出文件类型,文件格式参考数据库导入指引中的文件格式部分,值为数字,1 为 JSON,2 为 CSV
:param query: 导出条件
:return: 导出任务 ID,可使用数据库迁移进度查询 API 查询导出结果,获取文件下载链接
"""
return self._post(
'tcb/databasemigrateexport',
data={
'env': env,
'file_path': file_path,
'file_type': file_type,
'query': query,
},
result_processor=lambda x: x['job_id']
)
def db_query_migrate_info(self, env, job_id):
"""
数据库迁移状态查询
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseMigrateQueryInfo.html
:param env: 云开发环境 ID
:param job_id: 任务 ID
"""
return self._post(
'tcb/databasemigratequeryinfo',
data={
'env': env,
'job_id': job_id,
}
)
def db_update_index(self, env, collection, create_indexes=None, drop_indexes=None):
"""
变更数据库索引
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/updateIndex.html
:param env: 云开发环境 ID
:param collection: 导出 collection 名称
"""
assert create_indexes or drop_indexes
return self._post(
'tcb/updateindex',
data={
'env': env,
'collection_name': collection,
'create_indexes': create_indexes or [],
                'drop_indexes': drop_indexes or [],
}
)
def db_add_collection(self, env, collection):
"""
新增集合
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseCollectionAdd.html
:param env: 云开发环境 ID
:param collection: 集合名称
"""
return self._post(
'tcb/databasecollectionadd',
data={
'env': env,
'collection_name': collection,
}
)
def db_delete_collection(self, env, collection):
"""
删除集合
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseCollectionDelete.html
:param env: 云开发环境 ID
:param collection: 集合名称
"""
return self._post(
'tcb/databasecollectiondelete',
data={
'env': env,
'collection_name': collection,
}
)
def db_get_collection(self, env, offset=0, limit=10):
"""
获取特定云环境下集合信息
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseCollectionGet.html
:param env: 云开发环境 ID
:param offset: 偏移量,默认为 0
:param limit: 获取数量限制, 默认为 10
"""
return self._post(
'tcb/databasecollectionget',
data={
'env': env,
'offset': offset,
'limit': limit,
}
)
def db_add(self, env, query):
"""
数据库插入记录
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseAdd.html
:param env: 云开发环境 ID
:param query: 数据库操作语句
:return: 返回插入成功的数据集合主键 _id 列表
"""
return self._post(
'tcb/databaseadd',
data={
'env': env,
'query': query,
},
result_processor=lambda x: x['id_list']
)
def db_delete(self, env, query):
"""
数据库删除记录
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseDelete.html
:param env: 云开发环境 ID
:param query: 数据库操作语句
:return: 返回删除记录数量
"""
return self._post(
'tcb/databasedelete',
data={
'env': env,
'query': query,
},
result_processor=lambda x: x['deleted']
)
def db_update(self, env, query):
"""
数据更新记录
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseUpdate.html
:param env: 云开发环境 ID
:param query: 数据库操作语句
:return: 返回的 JSON 数据包
"""
return self._post(
'tcb/databaseupdate',
data={
'env': env,
'query': query,
}
)
def db_query(self, env, query):
"""
数据库查询记录
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseQuery.html
:param env: 云开发环境 ID
:param query: 数据库操作语句
:return: 返回的 JSON 数据包
"""
return self._post(
'tcb/databasequery',
data={
'env': env,
'query': query,
}
)
def db_aggregate(self, env, query):
"""
数据库聚合
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseAggregate.html
:param env: 云开发环境 ID
:param query: 数据库操作语句
:return: 返回记录列表
"""
return self._post(
'tcb/databaseaggregate',
data={
'env': env,
'query': query,
},
result_processor=lambda x: x['data']
)
def db_count(self, env, query):
"""
统计集合记录数或统计查询语句对应的结果记录数
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/database/databaseCount.html
:param env: 云开发环境 ID
:param query: 数据库操作语句
:return: 返回记录数量
"""
return self._post(
'tcb/databasecount',
data={
'env': env,
'query': query,
},
result_processor=lambda x: x['count']
)
def upload_file(self, env, path):
"""
获取文件上传链接
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/storage/uploadFile.html
:param env: 云开发环境 ID
"""
with open(path, 'rb') as f:
res = self._post(
'tcb/uploadfile',
data={
'env': env,
'path': path,
}
)
signature = res['authorization']
token = res['token']
cos_file_id = res['cos_file_id']
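            # Second step: POST the file itself to the COS endpoint returned
            # above, passing the upload credentials along as form fields.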
upload_res = requests.post(
res['url'],
files={
'file': f,
'key': path,
'Signature': signature,
'x-cos-security-token': token,
'x-cos-meta-fileid': cos_file_id,
},
)
upload_res.raise_for_status()
return upload_res
def download_files(self, env, file_list):
"""
获取文件下载链接
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/storage/batchDownloadFile.html
:param env: 云开发环境 ID
:param file_list: 文件列表
:return: 返回文件列表
"""
return self._post(
'tcb/batchdownloadfile',
data={
'env': env,
'file_list': file_list,
},
result_processor=lambda x: x['file_list']
)
def delete_files(self, env, fileid_list):
"""
删除文件
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/storage/batchDeleteFile.html
:param env: 云开发环境 ID
:param fileid_list: 文件 ID 列表
:return: 被删除的文件列表
"""
return self._post(
'tcb/batchdeletefile',
data={
'env': env,
'fileid_list': fileid_list,
},
result_processor=lambda x: x['delete_list']
)
def get_qcloud_token(self, lifespan=7200):
"""
获取腾讯云 API 调用凭证
详情请参考
https://developers.weixin.qq.com/miniprogram/dev/wxcloud/reference-http-api/utils/getQcloudToken.html
"""
return self._post(
'tcb/getqcloudtoken',
data={'lifespan': lifespan}
)
| messense/wechatpy | wechatpy/client/api/cloud.py | Python | mit | 12,240 |
# Copyright (c) 2013 eGauge Systems LLC
# 4730 Walnut St, Suite 110
# Boulder, CO 80301
# voice: 720-545-9767
# email: davidm@egauge.net
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import sys, urllib2
from lxml import etree
class error (Exception):
pass
class PushData:
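    # Register type table: maps the type code carried in the XML
    # <cname t="..."> attribute to a human-readable description, a
    # [rate-unit, integrated-unit] pair, and the scale factor applied to
    # raw register values.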
_tdesc = {
"P": {
"doc" : "Power",
"units" : ["W", "Ws"],
"scale" : 1,
},
"S": {
"doc" : "Apparent power",
"units" : ["VA", "VAs"],
"scale" : 1,
},
"V": {
"doc" : "Voltage",
"units" : ["V", "Vs"],
"scale" : 1e-3,
},
"I": {
"doc" : "Current",
"units" : ["A", "As"],
"scale" : 1e-3,
},
"F": {
"doc" : "Frequency",
"units" : ["Hz", "Hzs"],
"scale" : 1e-3,
},
"THD": {
"doc" : "Total Harmonic Distortion",
"units" : ["%", "%s"],
"scale" : 1e-3,
},
"T": {
"doc" : "Temperature",
"units" : ["C", "Cs"],
"scale" : 1e-3,
},
"Q": {
"doc" : "Mass flow-rate",
"units" : ["g/s", "g"],
"scale" : 1e-3,
},
"v": {
"doc" : "Speed",
"units" : ["m/s", "m"],
"scale" : 1e-3,
},
"R": {
"doc" : "Resistance",
"units" : ["Ohm", "Ohm*s"],
"scale" : 1,
},
"Ee": {
"doc" : "Irradiance",
"units" : ["W/m^2", "W/m^2*s"],
"scale" : 1,
},
"PQ": {
"doc" : "Reactive power",
"units" : ["VAr", "VArh"],
"scale" : 1,
},
"$": {
"doc" : "Money",
"units" : ["$", "$s"],
"scale" : 1,
},
"a": {
"doc" : "Angle",
"units" : ["DEG", "DEGs"],
"scale" : 1,
},
"h": {
"doc" : "Humidity",
"units" : ["%", "%s"],
"scale" : 1e-1,
},
"Qv": {
"doc" : "Volumetric flow-rate",
"units" : ["m^3/s", "m^3"],
"scale" : 1e-9,
},
"Pa": {
"doc" : "Pressure",
"units" : ["Pa", "Pa*s"],
"scale" : 1,
}
}
def __init__ (self, xml_string):
self.config_serial_number = None
self.num_registers = 0
self.regname = []
self.regtype = []
self.ts = []
self.row = []
xml = etree.fromstring (xml_string)
if xml.tag != 'group':
raise error, ('Expected <group> as the top element')
self.config_serial_number = int (xml.attrib['serial'], 0)
for data in xml:
ts = None
delta = None
if data.tag != 'data':
raise error, ('Expected <data> elements within <group>')
if 'columns' in data.attrib:
self.num_registers = int (data.attrib['columns'])
if 'time_stamp' in data.attrib:
ts = long (data.attrib['time_stamp'], 0)
if 'time_delta' in data.attrib:
delta = long (data.attrib['time_delta'], 0)
if 'epoch' in data.attrib:
self.epoch = int (data.attrib['epoch'], 0)
if ts == None:
raise error, ('<data> element is missing time_stamp attribute')
if delta == None:
raise error, ('<data> element is missing time_delta attribute')
for el in data:
if el.tag == 'r':
row = []
for c in el:
row.append (long (c.text))
self.ts.append (ts)
self.row.append (row)
ts -= delta
elif el.tag == 'cname':
t = "P"
if 't' in el.attrib:
t = el.attrib['t']
self.regname.append (el.text)
self.regtype.append (t)
return
def __str__ (self):
ret = ""
ret += "serial # = %d, " % self.config_serial_number
ret += "names = %s, " % self.regname
ret += "types = %s, rows=[" % self.regtype
for i in range (len (self.ts)):
if i > 0:
ret += ", "
ret += "0x%08x, " % self.ts[i]
ret += "%s" % self.row[i]
ret += "]"
return ret
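# Illustrative usage sketch (the XML payload normally arrives from an eGauge
# device's push service; the file name below is an assumption):
#
#   xml_string = open ('push.xml').read ()
#   pd = PushData (xml_string)
#   print pd # serial number, register names/types and decoded rows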
| thecardcheat/egauge-api-examples | python/eGauge.py | Python | mit | 5,735 |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Searches for albums in the MusicBrainz database.
"""
from __future__ import division, absolute_import, print_function
import musicbrainzngs
import re
import traceback
from six.moves.urllib.parse import urljoin
from beets import logging
import beets.autotag.hooks
import beets
from beets import util
from beets import config
import six
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
if util.SNI_SUPPORTED:
BASE_URL = 'https://musicbrainz.org/'
else:
BASE_URL = 'http://musicbrainz.org/'
SKIPPED_TRACKS = ['[data track]']
musicbrainzngs.set_useragent('beets', beets.__version__,
'https://beets.io/')
class MusicBrainzAPIError(util.HumanReadableException):
"""An error while talking to MusicBrainz. The `query` field is the
parameter to the action and may have any type.
"""
def __init__(self, reason, verb, query, tb=None):
self.query = query
if isinstance(reason, musicbrainzngs.WebServiceError):
reason = u'MusicBrainz not reachable'
super(MusicBrainzAPIError, self).__init__(reason, verb, tb)
def get_message(self):
return u'{0} in {1} with query {2}'.format(
self._reasonstr(), self.verb, repr(self.query)
)
log = logging.getLogger('beets')
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases',
'recording-level-rels', 'work-rels',
'work-level-rels', 'artist-rels']
TRACK_INCLUDES = ['artists', 'aliases']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
def track_url(trackid):
return urljoin(BASE_URL, 'recording/' + trackid)
def album_url(albumid):
return urljoin(BASE_URL, 'release/' + albumid)
def configure():
"""Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup.
"""
hostname = config['musicbrainz']['host'].as_str()
musicbrainzngs.set_hostname(hostname)
musicbrainzngs.set_rate_limit(
config['musicbrainz']['ratelimit_interval'].as_number(),
config['musicbrainz']['ratelimit'].get(int),
)
def _preferred_alias(aliases):
"""Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching
alias is found.
"""
if not aliases:
return
# Only consider aliases that have locales set.
aliases = [a for a in aliases if 'locale' in a]
# Search configured locales in order.
for locale in config['import']['languages'].as_str_seq():
# Find matching primary aliases for this locale.
matches = [a for a in aliases
if a['locale'] == locale and 'primary' in a]
# Skip to the next locale if we have no matches
if not matches:
continue
return matches[0]
def _preferred_release_event(release):
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
"""
countries = config['match']['preferred']['countries'].as_str_seq()
for country in countries:
for event in release.get('release-event-list', {}):
try:
if country in event['area']['iso-3166-1-code-list']:
return country, event['date']
except KeyError:
pass
return release.get('country'), release.get('date')
def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
"""
artist_parts = []
artist_sort_parts = []
artist_credit_parts = []
for el in credit:
if isinstance(el, six.string_types):
# Join phrase.
artist_parts.append(el)
artist_credit_parts.append(el)
artist_sort_parts.append(el)
else:
alias = _preferred_alias(el['artist'].get('alias-list', ()))
# An artist.
if alias:
cur_artist_name = alias['alias']
else:
cur_artist_name = el['artist']['name']
artist_parts.append(cur_artist_name)
# Artist sort name.
if alias:
artist_sort_parts.append(alias['sort-name'])
elif 'sort-name' in el['artist']:
artist_sort_parts.append(el['artist']['sort-name'])
else:
artist_sort_parts.append(cur_artist_name)
# Artist credit.
if 'name' in el:
artist_credit_parts.append(el['name'])
else:
artist_credit_parts.append(cur_artist_name)
return (
''.join(artist_parts),
''.join(artist_sort_parts),
''.join(artist_credit_parts),
)
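# Illustrative input/output for _flatten_artist_credit above (the shape
# mirrors python-musicbrainzngs results; this particular credit is an
# assumption):
#
#   credit = [{'artist': {'name': u'Foo', 'sort-name': u'Foo'}},
#             u' & ',
#             {'artist': {'name': u'Bar', 'sort-name': u'Bar'}}]
#   _flatten_artist_credit(credit)
#   # -> (u'Foo & Bar', u'Foo & Bar', u'Foo & Bar')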
def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None):
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
only for tracks that appear on releases (non-singletons): ``index``,
the overall track number; ``medium``, the disc number;
``medium_index``, the track's index on its medium; ``medium_total``,
the number of tracks on the medium. Each number is a 1-based index.
"""
info = beets.autotag.hooks.TrackInfo(
recording['title'],
recording['id'],
index=index,
medium=medium,
medium_index=medium_index,
medium_total=medium_total,
data_source=u'MusicBrainz',
data_url=track_url(recording['id']),
)
if recording.get('artist-credit'):
# Get the artist names.
info.artist, info.artist_sort, info.artist_credit = \
_flatten_artist_credit(recording['artist-credit'])
# Get the ID and sort name of the first artist.
artist = recording['artist-credit'][0]['artist']
info.artist_id = artist['id']
if recording.get('length'):
info.length = int(recording['length']) / (1000.0)
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance':
continue
info.work = work_relation['work']['title']
info.mb_workid = work_relation['work']['id']
if 'disambiguation' in work_relation['work']:
info.work_disambig = work_relation['work']['disambiguation']
for artist_relation in work_relation['work'].get(
'artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'lyricist':
lyricist.append(artist_relation['artist']['name'])
elif type == 'composer':
composer.append(artist_relation['artist']['name'])
composer_sort.append(
artist_relation['artist']['sort-name'])
if lyricist:
info.lyricist = u', '.join(lyricist)
if composer:
info.composer = u', '.join(composer)
info.composer_sort = u', '.join(composer_sort)
arranger = []
for artist_relation in recording.get('artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'arranger':
arranger.append(artist_relation['artist']['name'])
if arranger:
info.arranger = u', '.join(arranger)
info.decode()
return info
def _set_date_str(info, date_str, original=False):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If
`original`, then set the original_year, etc., fields.
"""
if date_str:
date_parts = date_str.split('-')
for key in ('year', 'month', 'day'):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
if original:
key = 'original_' + key
setattr(info, key, date_num)
def album_info(release):
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
# Get artist name using join phrases.
artist_name, artist_sort_name, artist_credit_name = \
_flatten_artist_credit(release['artist-credit'])
# Basic info.
track_infos = []
index = 0
for medium in release['medium-list']:
disctitle = medium.get('title')
format = medium.get('format')
if format in config['match']['ignored_media'].as_str_seq():
continue
all_tracks = medium['track-list']
if ('data-track-list' in medium
and not config['match']['ignore_data_tracks']):
all_tracks += medium['data-track-list']
track_count = len(all_tracks)
if 'pregap' in medium:
all_tracks.insert(0, medium['pregap'])
for track in all_tracks:
if ('title' in track['recording'] and
track['recording']['title'] in SKIPPED_TRACKS):
continue
if ('video' in track['recording'] and
track['recording']['video'] == 'true' and
config['match']['ignore_video_tracks']):
continue
# Basic information from the recording.
index += 1
ti = track_info(
track['recording'],
index,
int(medium['position']),
int(track['position']),
track_count,
)
ti.release_track_id = track['id']
ti.disctitle = disctitle
ti.media = format
ti.track_alt = track['number']
# Prefer track data, where present, over recording data.
if track.get('title'):
ti.title = track['title']
if track.get('artist-credit'):
# Get the artist names.
ti.artist, ti.artist_sort, ti.artist_credit = \
_flatten_artist_credit(track['artist-credit'])
ti.artist_id = track['artist-credit'][0]['artist']['id']
if track.get('length'):
ti.length = int(track['length']) / (1000.0)
track_infos.append(ti)
info = beets.autotag.hooks.AlbumInfo(
release['title'],
release['id'],
artist_name,
release['artist-credit'][0]['artist']['id'],
track_infos,
mediums=len(release['medium-list']),
artist_sort=artist_sort_name,
artist_credit=artist_credit_name,
data_source=u'MusicBrainz',
data_url=album_url(release['id']),
)
info.va = info.artist_id == VARIOUS_ARTISTS_ID
if info.va:
info.artist = config['va_name'].as_str()
info.asin = release.get('asin')
info.releasegroup_id = release['release-group']['id']
info.albumstatus = release.get('status')
# Get the disambiguation strings at the release and release group level.
if release['release-group'].get('disambiguation'):
info.releasegroupdisambig = \
release['release-group'].get('disambiguation')
if release.get('disambiguation'):
info.albumdisambig = release.get('disambiguation')
# Get the "classic" Release type. This data comes from a legacy API
# feature before MusicBrainz supported multiple release types.
if 'type' in release['release-group']:
reltype = release['release-group']['type']
if reltype:
info.albumtype = reltype.lower()
# Log the new-style "primary" and "secondary" release types.
# Eventually, we'd like to actually store this data, but we just log
# it for now to help understand the differences.
if 'primary-type' in release['release-group']:
rel_primarytype = release['release-group']['primary-type']
if rel_primarytype:
log.debug('primary MB release type: ' + rel_primarytype.lower())
if 'secondary-type-list' in release['release-group']:
if release['release-group']['secondary-type-list']:
log.debug('secondary MB release type(s): ' + ', '.join(
[secondarytype.lower() for secondarytype in
release['release-group']['secondary-type-list']]))
# Release events.
info.country, release_date = _preferred_release_event(release)
release_group_date = release['release-group'].get('first-release-date')
if not release_date:
# Fall back if release-specific date is not available.
release_date = release_group_date
_set_date_str(info, release_date, False)
_set_date_str(info, release_group_date, True)
# Label name.
if release.get('label-info-list'):
label_info = release['label-info-list'][0]
if label_info.get('label'):
label = label_info['label']['name']
if label != '[no label]':
info.label = label
info.catalognum = label_info.get('catalog-number')
# Text representation data.
if release.get('text-representation'):
rep = release['text-representation']
info.script = rep.get('script')
info.language = rep.get('language')
# Media (format).
if release['medium-list']:
first_medium = release['medium-list'][0]
info.media = first_medium.get('format')
info.decode()
return info
def match_album(artist, album, tracks=None):
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a
MusicBrainzAPIError.
The query consists of an artist name, an album name, and,
optionally, a number of tracks on the album.
"""
# Build search criteria.
criteria = {'release': album.lower().strip()}
if artist is not None:
criteria['artist'] = artist.lower().strip()
else:
# Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None:
criteria['tracks'] = six.text_type(tracks)
# Abort if we have no search terms.
if not any(criteria.values()):
return
try:
log.debug(u'Searching for MusicBrainz releases with: {!r}', criteria)
res = musicbrainzngs.search_releases(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'release search', criteria,
traceback.format_exc())
for release in res['release-list']:
# The search result is missing some data (namely, the tracks),
# so we just use the ID and fetch the rest of the information.
albuminfo = album_for_id(release['id'])
if albuminfo is not None:
yield albuminfo
def match_track(artist, title):
"""Searches for a single track and returns an iterable of TrackInfo
objects. May raise a MusicBrainzAPIError.
"""
criteria = {
'artist': artist.lower().strip(),
'recording': title.lower().strip(),
}
if not any(criteria.values()):
return
try:
res = musicbrainzngs.search_recordings(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'recording search', criteria,
traceback.format_exc())
for recording in res['recording-list']:
yield track_info(recording)
def _parse_id(s):
"""Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None.
"""
# Find the first thing that looks like a UUID/MBID.
match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match:
return match.group()
def album_for_id(releaseid):
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a
MusicBrainzAPIError.
"""
log.debug(u'Requesting MusicBrainz release {}', releaseid)
albumid = _parse_id(releaseid)
if not albumid:
log.debug(u'Invalid MBID ({0}).', releaseid)
return
try:
res = musicbrainzngs.get_release_by_id(albumid,
RELEASE_INCLUDES)
except musicbrainzngs.ResponseError:
log.debug(u'Album ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, u'get release by ID', albumid,
traceback.format_exc())
return album_info(res['release'])
def track_for_id(releaseid):
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError.
"""
trackid = _parse_id(releaseid)
if not trackid:
log.debug(u'Invalid MBID ({0}).', releaseid)
return
try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError:
log.debug(u'Track ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, u'get recording by ID', trackid,
traceback.format_exc())
return track_info(res['recording'])
| SusannaMaria/beets | beets/autotag/mb.py | Python | mit | 18,620 |
import sys
def get_civ_leader(civ_leader_file):
"""Reads in a file with civs mapped to leaders and add it to a dict.
"""
return {leader.strip('\n'): country
for line in civ_leader_file
for (country, leader) in [line.split('\t')]}
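# The TSV is expected to map one civilization to one leader per line,
# tab-separated (this example row is an assumption; the real data ships as
# civBR_civ_leader.tsv):
#
#   Mongolia<TAB>Genghis Khan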
def get_all_names(civ_leader):
"""Reads in all leader names in the leader_civ dict to filter narrator text.
"""
return[k for keys in civ_leader for k in keys.split()]
def find_best_leader_match(input_lines):
"""Return best leader according to input.
Finds the best matched leader name for inputted list of words (containing
at least one leadername) (useful when narrator use shortned leader names
and more leaders share some of their name (Khan as an example)."""
best_match = 0
matched_key = None
for leader in civ_leader.keys():
matches = 0
for split_name in leader.split():
for split_input in input_lines:
if(split_input == split_name):
matches+=1
if(matches>best_match):
matched_key = leader
best_match = matches
elif(matches==best_match and matches!=0):
matched_key = None
if(matched_key is not None):
return civ_leader[matched_key]
def insert_civ_names(input_lines, all_names):
"""Inserts civ names in parenthesis.
Reads in a text file from narrators and searches for leader names and adds
civ in brackets"""
out = []
for line in input_lines:
new_line = []
split_line = line.split(' ')
start_word_num = 0
word_num = 0
while word_num < len(split_line):
word=split_line[word_num]
if(word[-1] in '.,:;?!+-='):
punct = word[-1]
word = word[:-1]
else:
punct = ''
w = 0
leader = []
if(word in all_names and word != 'I'):
while(word in all_names):
leader.append(word)
w += 1
word = split_line[word_num + w]
civ = find_best_leader_match(leader)
                    if civ is not None:
new_line.extend(
(' '.join(split_line[start_word_num:word_num]),
' {} ({}){} '.format(' '.join(leader), civ, punct)))
start_word_num = word_num + len(leader)
word_num = word_num + len(leader)
else:
word_num += 1
else:
word_num += 1
new_line.append(' '.join(split_line[start_word_num:]))
out.append(''.join(new_line))
return(''.join(out))
def print_help():
print (
'For Civilization Battle Royal Mk.II community at '
'reddit/r/civbattleroyale - Flair up!\n'
'This python script takes in a plain text file as the only argument.\n'
'It adds civilization names in brackets to leader names (from the '
'civBR_civ_leader.tsv).\n'
'Outputs a new text-file with a suffix: "_with_civs".\n'
'Made by vincentrose88')
if __name__ == '__main__':
if len(sys.argv) == 1:
print_help()
exit(0)
elif(sys.argv[1] in ('-h', '--help', None)):
print_help()
exit(0)
elif(sys.argv[1]=='-t' or sys.argv[1]=='--test'):
input_file = 'data/test_data.txt'
else:
input_file = str(sys.argv[1])
input_lines = open(input_file,'r').readlines()
civ_leader_file = open('civBR_civ_leader.tsv','r')
civ_leader = get_civ_leader(civ_leader_file)
all_names = get_all_names(civ_leader)
output = insert_civ_names(input_lines, all_names)
updated_file = open(input_file + '_with_civs', 'w')
updated_file.write(output)
| vincentrose88/civAdder | civ_battleroyal_leader_civ_adder.py | Python | mit | 3,793 |
class UserInfoModel(object):
PartenaireID = 0
Mail = ""
CodeUtilisateur = ""
TypeAbonnement = ""
DateExpiration = ""
DateSouscription = ""
AccountExist = False
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def create_dummy_model(self):
self.Mail = "dummy@gmail.com"
self.CodeUtilisateur = "dummy1234"
self.AccountExist = True
self.PartenaireID = 0
| NextINpact/LaPresseLibreSDK | python_django/sdk_lpl/models/UserInfosModel.py | Python | mit | 439 |
# Copyright 2016 Amazon Web Services, Inc. or its affiliates. All Rights Reserved.
# This file is licensed to you under the AWS Customer Agreement (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at http://aws.amazon.com/agreement/ .
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from botocore.vendored import requests
import json
SUCCESS = "SUCCESS"
FAILED = "FAILED"
def send(event, context, responseStatus, responseData, physicalResourceId=None):
responseUrl = event['ResponseURL']
print responseUrl
responseBody = {}
responseBody['Status'] = responseStatus
responseBody['Reason'] = 'See the details in CloudWatch Log Stream: ' + context.log_stream_name
responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name
responseBody['StackId'] = event['StackId']
responseBody['RequestId'] = event['RequestId']
responseBody['LogicalResourceId'] = event['LogicalResourceId']
responseBody['Data'] = responseData
json_responseBody = json.dumps(responseBody)
print "Response body:\n" + json_responseBody
headers = {
'content-type' : '',
'content-length' : str(len(json_responseBody))
}
try:
response = requests.put(responseUrl,
data=json_responseBody,
headers=headers)
print "Status code: " + response.reason
except Exception as e:
print "send(..) failed executing requests.put(..): " + str(e) | moduspwnens/boa-chat | boa-nimbus/lambda-pip-modules/cfnresponse/cfnresponse/__init__.py | Python | mit | 1,738 |
from django.db import models
from emoticonvis.apps.base import models as base_models
class Dataset(models.Model):
"""A top-level dataset object containing messages."""
name = models.CharField(max_length=150)
"""The name of the dataset"""
description = models.TextField()
"""A description of the dataset."""
created_at = models.DateTimeField(auto_now_add=True)
"""The :py:class:`datetime.datetime` when the dataset was created."""
start_time = models.DateTimeField(null=True, default=None, blank=True)
"""The time of the first real message in the dataset"""
end_time = models.DateTimeField(null=True, default=None, blank=True)
"""The time of the last real message in the dataset"""
@property
def message_count(self):
return self.message_set.count()
def __unicode__(self):
return self.name
    def get_messages_from_selected_participants(self):
        # Message.dataset defines no related_name, so the reverse accessor is
        # message_set (as already used in message_count above).
        return self.message_set.filter(participant__is_selected=True).distinct()
def get_emoticons_from_selected_participants(self):
return Emoticon.objects.filter(messages__participant__is_selected=True).distinct()
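# A minimal usage sketch (assumed): restrict an analysis to selected
# participants via the helpers above, e.g.
#   dataset = Dataset.objects.get(name='my-dataset')
#   messages = dataset.get_messages_from_selected_participants()
#   emoticons = dataset.get_emoticons_from_selected_participants()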
class Emoticon(models.Model):
"""A code of a message"""
text = base_models.Utf8CharField(max_length=200)
"""The text of the emoticon"""
VALENCE_CHOICES = (
('P', 'Positive'),
('N', 'Negative'),
('O', 'Neutral'),
('U', 'Unknown'),
)
valence = models.CharField(max_length=1, choices=VALENCE_CHOICES, default='U')
def __repr__(self):
return self.text
def __unicode__(self):
return self.__repr__()
class Participant(models.Model):
"""A code of a message"""
dataset = models.ForeignKey(Dataset, default=1)
"""Which :class:`Dataset` the message belongs to"""
name = models.CharField(max_length=100, blank=True)
"""The name of the participant"""
LANG_CHOICES = (
('No', 'Not specified'),
('En', 'English'),
('Fr', 'French'),
)
language = models.CharField(max_length=2, choices=LANG_CHOICES, default='No')
STATUS_CHOICES = (
('No', 'Not specified'),
('Jr', 'Junior'),
('Sr', 'Senior'),
)
status = models.CharField(max_length=2, choices=STATUS_CHOICES, default='No')
position = models.CharField(max_length=32, default=None, null=True)
is_selected = models.BooleanField(default=True)
def __repr__(self):
        return self.name
def __unicode__(self):
return self.__repr__()
class LanguageSession(models.Model):
"""
A language session is a continuous time period when participants in the session stay the same
"""
dataset = models.ForeignKey(Dataset)
"""Which :class:`Dataset` the message belongs to"""
start_time = models.DateTimeField(null=True, blank=True, default=None)
"""The :py:class:`datetime.datetime` (in UTC) when the language session starts"""
end_time = models.DateTimeField(null=True, blank=True, default=None)
"""The :py:class:`datetime.datetime` (in UTC) when the language session ends"""
participants = models.ManyToManyField(Participant, related_name="lang_sessions")
num_en = models.IntegerField(default=0)
num_fr = models.IntegerField(default=0)
en_proportion = models.FloatField(default=0)
TYPE_CHOICES = (
('E only', 'E only'),
('major E', 'major E'),
('major F', 'major F'),
('F only', 'F only'),
('Empty', 'Empty')
)
type = models.CharField(max_length=8, choices=TYPE_CHOICES, default=None, null=True)
class Message(models.Model):
"""
The Message is the central data entity for the dataset.
"""
dataset = models.ForeignKey(Dataset)
"""Which :class:`Dataset` the message belongs to"""
idx = models.IntegerField(null=True, blank=True, default=None)
"""The index of the message"""
time = models.DateTimeField(null=True, blank=True, default=None)
"""The :py:class:`datetime.datetime` (in UTC) when the message was sent"""
session_id = models.IntegerField(null=True, blank=True, default=None)
"""The session of the message"""
TYPE_CHOICES = (
(0, 'Normal message'),
(1, 'Someone joined'),
(2, 'Someone left'),
(3, 'Bert message'),
(4, 'Starting log'),
)
    type = models.IntegerField(choices=TYPE_CHOICES, default=0)
participant = models.ForeignKey(Participant, related_name="messages", default=None, null=True)
text = base_models.Utf8TextField(null=True, blank=True, default="")
"""The actual text of the message."""
emoticons = models.ManyToManyField(Emoticon, related_name="messages")
lang_session = models.ForeignKey(LanguageSession, related_name="messages", default=None, null=True)
LANG_CHOICES = (
('No', 'Not specified'),
('En', 'English'),
('Fr', 'French'),
)
detected_language = models.CharField(max_length=2, choices=LANG_CHOICES, default='No')
def __repr__(self):
return self.text
def __unicode__(self):
return self.__repr__()
| nanchenchen/emoticon-analysis | emoticonvis/apps/corpus/models.py | Python | mit | 5,332 |
from nider.core import Font
from nider.core import Outline
from nider.models import Header
from nider.models import Paragraph
from nider.models import Linkback
from nider.models import Content
from nider.models import TwitterPost
# TODO: change this fontpath to the fontpath on your machine
roboto_font_folder = '/home/ovd/.local/share/fonts/Roboto/'
outline = Outline(2, '#121212')
header = Header(text='Your super interesting title!',
font=Font(roboto_font_folder + 'Roboto-Bold.ttf', 30),
text_width=40,
align='left',
color='#ededed'
)
para = Paragraph(text='Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.',
font=Font(roboto_font_folder + 'Roboto-Medium.ttf', 29),
text_width=65,
align='left',
color='#ededed'
)
linkback = Linkback(text='foo.com | @username',
font=Font(roboto_font_folder + 'Roboto-Bold.ttf', 24),
color='#ededed'
)
content = Content(para, header, linkback)
img = TwitterPost(content,
fullpath='result.png'
)
# TODO: change this texture path to the texture path on your machine
img.draw_on_texture('texture.png')
| pythad/nider | examples/draw_on_texture_example/script.py | Python | mit | 1,593 |
# -*- coding: utf-8 -*-
"""
MicroSAN Network Module
"""
import socket
IPADDR_BROADCAST = '<broadcast>'
IPADDR_ANY = ''
SOCK_BUF_SIZE = 8192
SOCK_TIMEOUT = 1
MICROSAN_PORT = 20001
APP_PORT = 51000
class UDPSocket(object):
def __init__(self, addr = None, reuse_addr = False):
"""
Create socket for use
"""
self.buf = []
if addr is None:
addr = (IPADDR_ANY, APP_PORT)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if reuse_addr:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if addr[0] == IPADDR_ANY:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sock.settimeout(SOCK_TIMEOUT)
self.sock.bind(addr)
def send_data(self, data, addr):
"""
Sends data
"""
self.sock.sendto(data, addr)
def recv_data(self):
"""
Wait for data
"""
        try:
            message, address = self.sock.recvfrom(SOCK_BUF_SIZE)
            self.buf.append((message, address))
        except (KeyboardInterrupt, SystemExit):
            raise
        except socket.timeout:
            # No datagram arrived within SOCK_TIMEOUT; leave the buffer as-is.
            pass
def get_next_buf(self):
"""
Return data from buffer
"""
return self.buf.pop(0)
def close(self):
"""
Closes socket
"""
self.sock.close()
def clear_buf(self):
"""
Clears buffer
"""
self.buf = []
class BroadcastSocket(UDPSocket):
def __init__(self, addr = None, reuse_addr = True):
"""
Create socket for use
"""
super(BroadcastSocket, self).__init__(addr = addr, reuse_addr = reuse_addr)
def send_data(self, data, addr = None):
"""
Sends broadcast
"""
if addr is None:
addr = (IPADDR_BROADCAST, MICROSAN_PORT)
super(BroadcastSocket, self).send_data(data, addr)
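# A minimal usage sketch (assumed): broadcast a discovery probe and collect
# whatever MicroSAN devices answer within the socket timeout.
def example_discover(probe):
    sock = BroadcastSocket()
    sock.send_data(probe)   # defaults to (IPADDR_BROADCAST, MICROSAN_PORT)
    sock.recv_data()        # waits up to SOCK_TIMEOUT seconds for one reply
    replies = list(sock.buf)
    sock.close()
    return replies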
| ktomala/microsan | src/network.py | Python | mit | 1,992 |
import plugins, datetime, time, os
class BatchVersionFilter:
def __init__(self, batchSession):
self.batchSession = batchSession
def verifyVersions(self, app):
badVersion = self.findUnacceptableVersion(app)
if badVersion is not None:
raise plugins.TextTestError, "unregistered version '" + badVersion + "' for " + self.batchSession + " session."
def findUnacceptableVersion(self, app):
if app.getCompositeConfigValue("batch_use_version_filtering", self.batchSession) != "true":
return
allowedVersions = app.getCompositeConfigValue("batch_version", self.batchSession)
for version in app.versions:
if len(version) > 0 and version not in allowedVersions and not version.startswith("copy_"):
return version
def calculateBatchDate():
    # Batch mode uses a standardised date that gives a consistent answer for night-jobs.
# Hence midnight is a bad cutover point. The day therefore starts and ends at 8am :)
timeToUse = plugins.globalStartTime - datetime.timedelta(hours=8)
return timeToUse.strftime("%d%b%Y")
def parseFileName(fileName, diag):
versionStr = fileName[5:-5]
components = versionStr.split("_")
diag.info("Parsing file with components " + repr(components))
for index, component in enumerate(components[1:]):
try:
diag.info("Trying to parse " + component + " as date")
date = time.strptime(component, "%d%b%Y")
version = "_".join(components[:index + 1])
tag = "_".join(components[index + 2:]) or component
return version, date, tag
except ValueError:
pass
return None, None, None
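# A rough illustration (naming assumed): the slice above strips a 5-character
# prefix and suffix (e.g. "batch" and ".html"), so a file named
# "batch1.13_03Mar2015_nightjob.html" would yield
# ("1.13", <struct_time for 03Mar2015>, "nightjob").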
def convertToUrl(path, fileMapping):
for filePath, httpPath in fileMapping.items():
if path.startswith(filePath):
return path.replace(filePath, httpPath)
return "file://" + os.path.abspath(path)
| emilybache/texttest-runner | src/main/python/lib/default/batch/batchutils.py | Python | mit | 1,956 |
import sublime
import sublime_plugin
try:
from .sbtsettings import SBTSettings
from .util import maybe, OnePerWindow
except ValueError:
from sbtsettings import SBTSettings
from util import maybe, OnePerWindow
import re
class SbtView(OnePerWindow):
settings = {
"line_numbers": False,
"gutter": False,
"rulers": [],
"word_wrap": False,
"draw_centered": False,
"highlight_line": False
}
@classmethod
def is_sbt_view(cls, view):
if view is not None:
for window in maybe(view.window()):
sbt_view = cls(window)
return sbt_view.panel.id() == view.id()
def __init__(self, window):
self.window = window
self.settings = SBTSettings(window)
self.panel = self.window.get_output_panel('sbt')
self.panel.set_syntax_file("Packages/SublimeSBT/SBTOutput.hidden-tmLanguage")
for name, setting in SbtView.settings.items():
self.panel.settings().set(name, setting)
self._update_panel_colors()
self.settings.add_on_change(self._update_panel_colors)
self._output_size = 0
self._set_running(False)
def start(self):
self.clear_output()
self.show()
self._set_running(True)
def finish(self):
self.show_output('\n -- Finished --\n')
self._set_running(False)
def show(self):
self._update_panel_colors()
self.window.run_command('show_panel', {'panel': 'output.sbt'})
sublime.set_timeout(self._show_selection, 0)
def hide(self):
self.window.run_command('hide_panel', {'panel': 'output.sbt'})
def focus(self):
self.window.focus_view(self.panel)
self.panel.show(self.panel.size())
def show_output(self, output):
output = self._clean_output(output)
self.show()
self._append_output(output)
self._output_size = self.panel.size()
self.panel.show(self._output_size)
self.panel.sel().clear()
self.panel.sel().add(sublime.Region(self._output_size, self._output_size))
def clear_output(self):
self._erase_output(sublime.Region(0, self.panel.size()))
def take_input(self):
input_region = sublime.Region(self._output_size, self.panel.size())
input = self.panel.substr(input_region)
if sublime.platform() == 'windows':
self._append_output('\n')
else:
self._erase_output(input_region)
return input
def delete_left(self):
if self.panel.sel()[0].begin() > self._output_size:
self.panel.run_command('left_delete')
def delete_bol(self):
if self.panel.sel()[0].begin() >= self._output_size:
p = self.panel.sel()[-1].end()
self._erase_output(sublime.Region(self._output_size, p))
def delete_word_left(self):
if self.panel.sel()[0].begin() > self._output_size:
for r in self.panel.sel():
p = max(self.panel.word(r).begin(), self._output_size)
self.panel.sel().add(sublime.Region(p, r.end()))
self._erase_output(*self.panel.sel())
def delete_word_right(self):
if self.panel.sel()[0].begin() >= self._output_size:
for r in self.panel.sel():
p = self.panel.word(r).end()
self.panel.sel().add(sublime.Region(r.begin(), p))
self._erase_output(*self.panel.sel())
def update_writability(self):
self.panel.set_read_only(not self._running or
self.panel.sel()[0].begin() < self._output_size)
def _set_running(self, running):
self._running = running
self.update_writability()
def _append_output(self, output):
self._run_command('sbt_append_output', output=output)
def _erase_output(self, *regions):
self._run_command('sbt_erase_output',
regions=[[r.begin(), r.end()] for r in regions])
def _run_command(self, name, **kwargs):
self.panel.set_read_only(False)
self.panel.run_command(name, kwargs)
self.update_writability()
def _clean_output(self, output):
return self._strip_codes(self._normalize_lines(output))
def _normalize_lines(self, output):
return output.replace('\r\n', '\n').replace('\033M', '\r')
def _show_selection(self):
self.panel.show(self.panel.sel()[0].begin(), True)
def _strip_codes(self, output):
return re.sub(r'\033\[[0-9;]+[mK]', '', output)
def _update_panel_colors(self):
self.panel.settings().set('color_scheme', self.settings.get('color_scheme'))
class SbtAppendOutputCommand(sublime_plugin.TextCommand):
def run(self, edit, output):
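        # Split on carriage returns and emulate a terminal: text after a '\r'
        # replaces the current last line instead of being appended after it.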
for i, s in enumerate(output.split('\r')):
if i > 0:
self.view.replace(edit, self.view.line(self.view.size()), s)
else:
self.view.insert(edit, self.view.size(), s)
class SbtEraseOutputCommand(sublime_plugin.TextCommand):
def run(self, edit, regions):
for a, b in reversed(regions):
self.view.erase(edit, sublime.Region(int(a), int(b)))
| jarhart/SublimeSBT | sbtview.py | Python | mit | 5,229 |
from interval import interval, inf, imath, fpu
from complexinterval import ComplexInterval, _one, _zero
from complexpolynomial import ComplexPolynomial
class Newton:
def __init__(self, start, poly):
self.start = start
self.poly = poly
self.iterates = 0
self.deriv = poly.derive()
self.step = start
def iterate(self):
"""
Performs one Newton iteration, returns change between values.
"""
self.iterates += 1
x = self.step.midpoint()
fx = self.poly(x)
## iterate on derivative
## self.deriv = self.deriv.derive()
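        # Newton update: x_{n+1} = x_n - f(x_n) / f'(x_n)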
self.step = x - (fx / self.deriv(x))
## return the change
diff = x - self.step
return diff
def iterate_until(self, res = 10**-6, max_iterates = 20):
"""
Iterates until at resolution or until maximum number
of iterations has been reached. Returns True if convergence
achieved, returns False otherwise.
"""
res_box = ComplexInterval(interval([res, -res]), interval([res, -res]))
while (self.iterates < max_iterates - 1):
if self.iterate() in res_box:
return True
if self.iterate() in res_box:
return True
return False
def __str__(self):
"""
Returns string representation
"""
return "Newton's Iterator\n" + "Start: " + str(self.start) + "\nFunction: " + str(self.poly)
def main():
print("Testing Newton")
print("Testing Complex Polynomials")
print("----------------------------")
xa = interval([1, 2])
xb = interval([5, 6])
x = ComplexInterval(xa, xb)
ya = interval([4, 7])
yb = interval([2, 3])
y = ComplexInterval(ya, yb)
wa = interval([2, 2])
wb = interval([3, 3])
w = ComplexInterval(wa, wb)
za = interval([4, 4])
zb = interval([5, 5])
z = ComplexInterval(za, zb)
a_0_a = interval([1, 1])
a_0_b = interval([5, 5])
a_0 = ComplexInterval(a_0_a, a_0_b)
a_1_a = interval([1, 1])
a_1_b = interval([5, 5])
a_1 = ComplexInterval(a_1_a, a_1_b)
a_2_a = interval([3, 3])
a_2_b = interval([2, 2])
a_2 = ComplexInterval(a_2_a, a_2_b)
a_3_a = interval([7, 7])
a_3_b = interval([-4, -4])
a_3 = ComplexInterval(a_3_a, a_3_b)
a_4_a = interval([-6, -6])
a_4_b = interval([1, 1])
a_4 = ComplexInterval(a_4_a, a_4_b)
a_5 = ComplexInterval(interval([2]), interval([0]))
a_6 = ComplexInterval(interval([2]), interval([0]))
coeffs = [a_0, a_1, a_2, a_3, a_4, a_5, a_6]
print("Testing Complex Constructor")
print("----------------------------")
poly_1 = ComplexPolynomial(coeffs)
print(poly_1)
poly_2 = ComplexPolynomial([_zero(), a_4])
print(poly_2)
poly_3 = ComplexPolynomial([a_5, a_6, a_3, a_1, a_0])
print(poly_3)
print("============================")
print("Testing Evaluation")
print("----------------------------")
print(poly_1(w))
print(poly_1(_one()))
print(poly_1(_zero()))
print("")
print(poly_2(w))
print(poly_2(_one()))
print(poly_2(_zero()))
print("")
print(poly_3(w))
print(poly_3(_one()))
print(poly_3(_zero()))
print("============================")
print("Derivation")
print("----------------------------")
print(poly_1.derive())
print(poly_1.derive().derive())
print(poly_1.derive().derive().derive())
print("")
print(poly_2.derive())
print(poly_2.derive().derive())
print("")
print(poly_3.derive())
print(poly_3.derive().derive())
print("============================")
print("Newton's Method Constructor")
print("----------------------------")
start1 = ComplexInterval(interval([0]), interval([0]))
start2 = ComplexInterval(interval([1]), interval([1]))
start3 = ComplexInterval(interval([0]), interval([0]))
n_1 = Newton(start1, poly_1)
n_2 = Newton(start2, poly_2)
n_3 = Newton(start3, poly_3)
print(n_1)
print("")
print(n_2)
print("")
print(n_3)
print("")
print("============================")
print("Testing Iteration")
print("----------------------------")
for i in range(10):
print(n_1.iterate())
print("----------------------------")
for i in range(10):
print(n_2.iterate())
print("----------------------------")
for i in range(10):
print(n_3.iterate())
# print(fpu.isnan(n_3.iterate().a))
print("============================")
print("Testing convergence")
print("----------------------------")
print(n_1.iterate_until())
print("----------------------------")
print(n_2.iterate_until())
print("----------------------------")
print(n_3.iterate_until())
# print(fpu.isnan(n_3.iterate().a))
print("============================")
if __name__=="__main__":
main() | yuanagain/seniorthesis | src/intervals/newton.py | Python | mit | 4,396 |
"""Debouncing provides functions for delaying or skipping a queue `push`
subject to a specified debouncing constraint. When used with idempotent
operations, this provides a safe and consistent method of throttling
queue pushes within Deferrable itself.
The debouncing constraint is defined as follows:
If `debounce_always_delay` is `False`, items should be made available for
execution as quickly as possible subject to the constraint that the same
item be made available at most once per `debounce_seconds` seconds.
If 'debounce_always_delay` is `True`, the item will be always either be
skipped (debounced) or delayed by the full `debounce_seconds` amount. The
constraint that the item is processed at most once per `debounce_seconds` seconds
still holds."""
import math
import time
class DebounceStrategy(object):
PUSH_NOW = 1
PUSH_DELAYED = 2
SKIP = 3
def _debounce_key(item):
return u"debounce.{}.{}.{}".format(item['method'], item['args'], item['kwargs'])
def _last_push_key(item):
return u"last_push.{}.{}.{}".format(item['method'], item['args'], item['kwargs'])
def set_debounce_keys_for_push_now(redis_client, item, debounce_seconds):
"""Set a key in Redis indicating the last time this item was potentially
available inside a non-delay queue. Expires after 2*delay period to
keep Redis clean. The 2* ensures that the key would have been stale at
the period it is reaped."""
redis_client.set(_last_push_key(item), time.time(), px=int(2*debounce_seconds*1000))
def set_debounce_keys_for_push_delayed(redis_client, item, seconds_to_delay, debounce_seconds):
redis_client.scripts.set_debounce_keys(keys=[_last_push_key(item),
_debounce_key(item)],
args=[time.time(), seconds_to_delay, debounce_seconds])
def get_debounce_strategy(redis_client, item, debounce_seconds, debounce_always_delay):
last_push_time, debounce_value = redis_client.scripts.get_debounce_keys(keys=[_last_push_key(item),
_debounce_key(item)])
if debounce_value:
return DebounceStrategy.SKIP, 0
if debounce_always_delay:
return DebounceStrategy.PUSH_DELAYED, debounce_seconds
if not last_push_time:
return DebounceStrategy.PUSH_NOW, 0
seconds_since_last_push = time.time() - float(last_push_time)
if seconds_since_last_push > debounce_seconds:
return DebounceStrategy.PUSH_NOW, 0
return DebounceStrategy.PUSH_DELAYED, math.ceil(debounce_seconds - seconds_since_last_push)
| gamechanger/deferrable | deferrable/debounce.py | Python | mit | 2,641 |
#!/usr/bin/env python
from TweetGrabber import * | weilneb/twitter-utils | twutils/__init__.py | Python | mit | 48 |
from __future__ import print_function
from mnist import evaluate
import sys
model = evaluate.init()
prediction1, confidence1 = evaluate.from_local_image(sys.argv[1], model)
print("prediction: {}; confidence: {:.2f}".format(prediction1, confidence1))
prediction2, confidence2 = evaluate.from_local_image(sys.argv[1], model)
print("prediction: {}; confidence: {:.2f}".format(prediction2, confidence2))
| mnannt/mnist_experiments | mnist_evaluate.py | Python | mit | 402 |
import directory
import scanner
import mapper
import board
import os
class Klopfer(object):
def __init__(self, import_dir, export_dir):
self.import_dir = import_dir
self.export_dir = export_dir
print "Klopfer class"
def run(self):
# open dir and get oldest file with the given extension
        input_dir = directory.Directory(os, self.import_dir, ['jpg', 'jpeg'])
        self.imagefile = input_dir.get_oldest_file()
# open image
scan = scanner.Scanner(self.imagefile.name)
self.remove_image()
informations = scan.scan()
# load board_id and cards
mapping = mapper.Mapper(informations)
board_id = mapping.board_id
cards = mapping.get_cards()
# create board
current_board = board.Board(board_id, cards)
# write board to json
current_board.export_json(self.export_dir)
# remove old image
def remove_image(self):
# Uncomment in production version when multiple input files are present
# os.remove(self.imagefile.name)
pass
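# A minimal usage sketch (paths assumed): process the oldest scanned image in
# an import folder and write the resulting board JSON to an export folder.
def example_run():
    klopfer = Klopfer('import/', 'export/')
    klopfer.run()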
| slx-dev/digital-kanban | src/klopfer.py | Python | mit | 1,083 |
import svgutils.transform as sg
from common import load_svg, label_plot
fig = sg.SVGFigure("4.1in", "1.8in")
a = load_svg(snakemake.input[1])
b = load_svg(snakemake.input[0])
b.moveto(190, 0)
la = label_plot(5, 10, "a")
lb = label_plot(185, 10, "b")
fig.append([a, b, la, lb])
fig.save(snakemake.output[0])
| merfishtools/merfishtools-evaluation | scripts/fig-dataset-correlation.py | Python | mit | 311 |
#!/usr/bin/env python3
html_colors = {
"aliceblue": "f0f8ff",
"antiquewhite": "faebd7",
"aqua": "00ffff",
"aquamarine": "7fffd4",
"azure": "f0ffff",
"beige": "f5f5dc",
"bisque": "ffe4c4",
"black": "000000",
"blanchedalmond": "ffebcd",
"blue": "0000ff",
"blueviolet": "8a2be2",
"brown": "a52a2a",
"burlywood": "deb887",
"cadetblue": "5f9ea0",
"chartreuse": "7fff00",
"chocolate": "d2691e",
"coral": "ff7f50",
"cornflowerblue": "6495ed",
"cornsilk": "fff8dc",
"crimson": "dc143c",
"cyan": "00ffff",
"darkblue": "00008b",
"darkcyan": "008b8b",
"darkgoldenrod": "b8860b",
"darkgray": "a9a9a9",
"darkgreen": "006400",
"darkkhaki": "bdb76b",
"darkmagenta": "8b008b",
"darkolivegreen": "556b2f",
"darkorange": "ff8c00",
"darkorchid": "9932cc",
"darkred": "8b0000",
"darksalmon": "e9967a",
"darkseagreen": "8fbc8b",
"darkslateblue": "483d8b",
"darkslategray": "2f4f4f",
"darkturquoise": "00ced1",
"darkviolet": "9400d3",
"deeppink": "ff1493",
"deepskyblue": "00bfff",
"dimgray": "696969",
"dodgerblue": "1e90ff",
"firebrick": "b22222",
"floralwhite": "fffaf0",
"forestgreen": "228b22",
"fuchsia": "ff00ff",
"gainsboro": "dcdcdc",
"ghostwhite": "f8f8ff",
"gold": "ffd700",
"goldenrod": "daa520",
"gray": "808080",
"green": "008000",
"greenyellow": "adff2f",
"honeydew": "f0fff0",
"hotpink": "ff69b4",
"indianred": "cd5c5c",
"indigo": "4b0082",
"ivory": "fffff0",
"khaki": "f0e68c",
"lavender": "e6e6fa",
"lavenderblush": "fff0f5",
"lawngreen": "7cfc00",
"lemonchiffon": "fffacd",
"lightblue": "add8e6",
"lightcoral": "f08080",
"lightcyan": "e0ffff",
"lightgoldenrodyellow": "fafad2",
"lightgray": "d3d3d3",
"lightgreen": "90ee90",
"lightpink": "ffb6c1",
"lightsalmon": "ffa07a",
"lightsalmon": "ffa07a",
"lightseagreen": "20b2aa",
"lightskyblue": "87cefa",
"lightslategray": "778899",
"lightsteelblue": "b0c4de",
"lightyellow": "ffffe0",
"lime": "00ff00",
"limegreen": "32cd32",
"linen": "faf0e6",
"magenta": "ff00ff",
"maroon": "800000",
"mediumaquamarine": "66cdaa",
"mediumblue": "0000cd",
"mediumorchid": "ba55d3",
"mediumpurple": "9370db",
"mediumseagreen": "3cb371",
"mediumslateblue": "7b68ee",
"mediumspringgreen": "00fa9a",
"mediumturquoise": "48d1cc",
"mediumvioletred": "c71585",
"midnightblue": "191970",
"mintcream": "f5fffa",
"mistyrose": "ffe4e1",
"moccasin": "ffe4b5",
"navajowhite": "ffdead",
"navy": "000080",
"oldlace": "fdf5e6",
"olive": "808000",
"olivedrab": "6b8e23",
"orange": "ffa500",
"orangered": "ff4500",
"orchid": "da70d6",
"palegoldenrod": "eee8aa",
"palegreen": "98fb98",
"paleturquoise": "afeeee",
"palevioletred": "db7093",
"papayawhip": "ffefd5",
"peachpuff": "ffdab9",
"peru": "cd853f",
"pink": "ffc0cb",
"plum": "dda0dd",
"powderblue": "b0e0e6",
"purple": "800080",
"rebeccapurple": "663399",
"red": "ff0000",
"rosybrown": "bc8f8f",
"royalblue": "4169e1",
"saddlebrown": "8b4513",
"salmon": "fa8072",
"sandybrown": "f4a460",
"seagreen": "2e8b57",
"seashell": "fff5ee",
"sienna": "a0522d",
"silver": "c0c0c0",
"skyblue": "87ceeb",
"slateblue": "6a5acd",
"slategray": "708090",
"snow": "fffafa",
"springgreen": "00ff7f",
"steelblue": "4682b4",
"tan": "d2b48c",
"teal": "008080",
"thistle": "d8bfd8",
"tomato": "ff6347",
"turquoise": "40e0d0",
"violet": "ee82ee",
"wheat": "f5deb3",
"white": "ffffff",
"whitesmoke": "f5f5f5",
"yellow": "ffff00",
"yellowgreen": "9acd32",
}
def get_html_color(color):
return html_colors.get(color, None)
# Input: a hex string like this: "abcdef"
def get_gray_value(hex_str):
# https://stackoverflow.com/a/17619494
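    # Weighted sum of the gamma-encoded R, G, B channels using Rec. 709 luma
    # coefficients (0.2126, 0.7152, 0.0722), scaled into [0, 1].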
gray_value = (
int(hex_str[0:2], 16) / 255.0 * 0.2126
+ int(hex_str[2:4], 16) / 255.0 * 0.7152
+ int(hex_str[4:], 16) / 255.0 * 0.0722
)
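    # Apply the sRGB transfer curve: linear below 0.0031308, power 1/2.4 above.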
if gray_value <= 0.0031308:
gray_value *= 12.92
else:
gray_value = 1.055 * gray_value ** (1 / 2.4) - 0.055
return gray_value
if __name__ == "__main__":
import os
color_hex_str = get_html_color(os.environ.get("COLOR", "white")) or "ffffff"
print("Using hex value: ", color_hex_str)
gray_value = get_gray_value(color_hex_str)
print("Grayscale value: ", gray_value)
| flypenguin/docker-loadummy | gray_conversion.py | Python | mit | 4,637 |