repo_name | ref | path | copies | content
|---|---|---|---|---|
thnee/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/iosxr/argspec/l3_interfaces/__init__.py
|
12133432
| |
luhn/tern
|
refs/heads/master
|
tern/adapters/adapterbase.py
|
1
|
from abc import ABCMeta, abstractmethod
class AdapterBase(object):
__metaclass__ = ABCMeta
# A list of attributes to be defined in config.
# (var name, display name, default)
config = [
('host', 'Host', ''),
('port', 'Port', ''),
('dbname', 'Database name', ''),
('username', 'Username', ''),
('password', 'Password', ''),
]
@abstractmethod
def __init__(
self, host, dbname, username, password, port=None, tern_table='tern'
):
"""
Initialize the Adapter; do not yet connect to database.
"""
pass
def __enter__(self):
    self.open()
    return self
def __exit__(self, exc_type, exc_value, traceback):
    self.close()
@abstractmethod
def open(self):
"""
Do whatever needs to be done to open a connection.
"""
pass
@abstractmethod
def close(self):
"""
Do whatever needs to be done to close the connection.
"""
pass
@abstractmethod
def initialize_tern(self):
"""
Create a table, named ``self.tern_table`` with the following
attributes:
* hash (text, primary key)
* created_at (64-bit integer, not null)
* setup (text, not null)
* teardown (text, not null)
* order (int, not null)
"""
pass
@abstractmethod
def verify_tern(self):
"""
Verify that the setup function has been executed successfully, throws
a tern.exceptions.NotInitialized if not.
"""
pass
@abstractmethod
def apply(self, changeset):
"""
Apply the given changeset. If the order is ``None``, an ordering
should be assigned. (max(order) + 1)
:param changeset: The changeset to apply.
:type changeset: tern.Changeset
"""
pass
@abstractmethod
def revert(self, changeset):
"""
Revert the given changeset.
:param changeset: The changeset to revert.
:type changeset: tern.Changeset
"""
pass
@abstractmethod
def test(self, changeset):
"""
Apply and revert the given changeset, and then rollback the
transaction. Throws any SQL errors that might occur.
:param changeset: The changeset to test.
:type changeset: tern.Changeset
"""
pass
@abstractmethod
def get_applied(self):
"""
Return a list of Changeset objects which have been applied to the
database.
"""
pass
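# --- Illustrative sketch (not part of tern) --------------------------------
# A minimal concrete adapter showing how the abstract contract above is
# satisfied. ``InMemoryAdapter`` and its dict-backed "connection" are
# assumptions made for this example only; a real adapter would talk to a
# database and raise tern.exceptions.NotInitialized from verify_tern().
class InMemoryAdapter(AdapterBase):
    def __init__(self, host, dbname, username, password, port=None,
                 tern_table='tern'):
        self.tern_table = tern_table
        self.connection = None
    def open(self):
        self.connection = {}  # stand-in for a real database handle
    def close(self):
        self.connection = None
    def initialize_tern(self):
        self.connection[self.tern_table] = []
    def verify_tern(self):
        pass  # a real adapter raises tern.exceptions.NotInitialized here
    def apply(self, changeset):
        self.connection[self.tern_table].append(changeset)
    def revert(self, changeset):
        self.connection[self.tern_table].remove(changeset)
    def test(self, changeset):
        self.apply(changeset)
        self.revert(changeset)
    def get_applied(self):
        return list(self.connection.get(self.tern_table, []))
# Usage such as ``with InMemoryAdapter('', '', '', '') as adapter: ...``
# relies on the __enter__/__exit__ methods defined on AdapterBase.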
|
350dotorg/Django
|
refs/heads/master
|
django/utils/daemonize.py
|
452
|
import os
import sys
if os.name == 'posix':
def become_daemon(our_home_dir='.', out_log='/dev/null',
err_log='/dev/null', umask=022):
"Robustly turn into a UNIX daemon, running in our_home_dir."
# First fork
try:
if os.fork() > 0:
sys.exit(0) # kill off parent
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid()
os.chdir(our_home_dir)
os.umask(umask)
# Second fork
try:
if os.fork() > 0:
os._exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
os._exit(1)
si = open('/dev/null', 'r')
so = open(out_log, 'a+', 0)
se = open(err_log, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Set custom file descriptors so that they get proper buffering.
sys.stdout, sys.stderr = so, se
else:
def become_daemon(our_home_dir='.', out_log=None, err_log=None, umask=022):
"""
If we're not running under a POSIX system, just simulate the daemon
mode by doing redirections and directory changing.
"""
os.chdir(our_home_dir)
os.umask(umask)
sys.stdin.close()
sys.stdout.close()
sys.stderr.close()
if err_log:
sys.stderr = open(err_log, 'a', 0)
else:
sys.stderr = NullDevice()
if out_log:
sys.stdout = open(out_log, 'a', 0)
else:
sys.stdout = NullDevice()
class NullDevice:
"A writeable object that writes to nowhere -- like /dev/null."
def write(self, s):
pass
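# --- Usage sketch (illustrative; not part of Django) ------------------------
# become_daemon() is called once at process start-up, before entering the
# main loop; the log paths and entry point below are assumptions made for
# the example.
#
#     from django.utils.daemonize import become_daemon
#     become_daemon(our_home_dir='/', out_log='/var/log/app.out',
#                   err_log='/var/log/app.err')
#     run_main_loop()  # hypothetical entry point of the daemonized program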
|
myang321/django
|
refs/heads/master
|
tests/migrations/test_migrations_backwards_deps_1/0001_initial.py
|
416
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
operations = []
|
sloth4413/crate
|
refs/heads/master
|
docs/bootstrap.py
|
95
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
options, args = parser.parse_args()
######################################################################
# load/install setuptools
try:
if options.allow_site_packages:
import setuptools
import pkg_resources
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
sys.path[:] = [x for x in sys.path if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
|
hurricup/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/repo.py
|
88
|
# repo.py - repository base classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import error
class repository(object):
def capable(self, name):
'''tell whether repo supports named capability.
return False if not supported.
if boolean capability, return True.
if string capability, return string.'''
if name in self.capabilities:
return True
name_eq = name + '='
for cap in self.capabilities:
if cap.startswith(name_eq):
return cap[len(name_eq):]
return False
def requirecap(self, name, purpose):
'''raise an exception if the given capability is not present'''
if not self.capable(name):
raise error.CapabilityError(
_('cannot %s; remote repository does not '
'support the %r capability') % (purpose, name))
def local(self):
return False
def cancopy(self):
return self.local()
def rjoin(self, path):
url = self.url()
if url.endswith('/'):
return url + path
else:
return url + '/' + path
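# --- Example of the capability protocol above (illustrative) ----------------
# A repository subclass advertising ``set(['lookup', 'unbundle=HG10GZ,HG10BZ'])``
# as its ``capabilities`` answers:
#
#     repo.capable('lookup')    # True  (boolean capability)
#     repo.capable('unbundle')  # 'HG10GZ,HG10BZ' (string capability)
#     repo.capable('pushkey')   # False (not supported)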
|
siosio/intellij-community
|
refs/heads/master
|
python/testData/formatter/continuationIndentForCollectionsAndComprehensions_after.py
|
15
|
l = [
1,
2,
3
]
lc = [
x
for x
in range(42)
if x
]
s = {
1,
2,
3
}
sc = {
x
for x
in range(42)
if x
}
t = (
1,
2,
3
)
g = (
x
for x
in range(42)
if x
)
d = {
1: True,
2: False,
3: None
}
dc = {
x: None
for x
in range(42)
if x
}
|
mitya57/django
|
refs/heads/master
|
tests/model_forms/test_uuid.py
|
90
|
from django import forms
from django.core.exceptions import ValidationError
from django.test import TestCase
from .models import UUIDPK
class UUIDPKForm(forms.ModelForm):
class Meta:
model = UUIDPK
fields = '__all__'
class ModelFormBaseTest(TestCase):
def test_create_save_error(self):
form = UUIDPKForm({})
self.assertFalse(form.is_valid())
msg = "The UUIDPK could not be created because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
form.save()
def test_update_save_error(self):
obj = UUIDPK.objects.create(name='foo')
form = UUIDPKForm({}, instance=obj)
self.assertFalse(form.is_valid())
msg = "The UUIDPK could not be changed because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
form.save()
def test_model_multiple_choice_field_uuid_pk(self):
f = forms.ModelMultipleChoiceField(UUIDPK.objects.all())
with self.assertRaisesMessage(ValidationError, "'invalid_uuid' is not a valid UUID."):
f.clean(['invalid_uuid'])
|
okolisny/integration_tests
|
refs/heads/master
|
cfme/utils/blockers.py
|
1
|
# -*- coding: utf-8 -*-
import re
import six
import xmlrpclib
from github import Github
from urlparse import urlparse
from fixtures.pytest_store import store
from cfme.utils import classproperty, conf, version
from cfme.utils.bz import Bugzilla
from cfme.utils.log import logger
class Blocker(object):
"""Base class for all blockers
Any subclass constructor must accept **kwargs and, after popping the values
required for the blocker's operation, call ``super`` with the remaining
``**kwargs``. Failing to do so will leave some functionality disabled.
"""
blocks = False
kwargs = {}
def __init__(self, **kwargs):
self.forced_streams = kwargs.pop("forced_streams", [])
self.__dict__["kwargs"] = kwargs
@property
def url(self):
raise NotImplementedError('You need to implement .url')
@classmethod
def all_blocker_engines(cls):
"""Return mapping of name:class of all the blocker engines in this module.
Having this as a separate function will later enable to scatter the engines across modules
in case of extraction into a separate library.
"""
return {
'GH': GH,
'BZ': BZ,
}
@classmethod
def parse(cls, blocker, **kwargs):
"""Create a blocker object from some representation"""
if isinstance(blocker, cls):
return blocker
elif isinstance(blocker, six.string_types):
if "#" in blocker:
# Generic blocker
engine, spec = blocker.split("#", 1)
try:
engine_class = cls.all_blocker_engines()[engine]
except KeyError:
raise ValueError(
    "{} is not a valid engine specification for a blocker! "
    "(available engines: {})".format(
        engine, ", ".join(cls.all_blocker_engines().keys())))
return engine_class(spec, **kwargs)
# EXTEND: If someone has other ideas, put them here
raise ValueError("Could not parse blocker {}".format(blocker))
else:
raise ValueError("Wrong specification of the blockers!")
class GH(Blocker):
DEFAULT_REPOSITORY = conf.env.get("github", {}).get("default_repo")
_issue_cache = {}
@classproperty
def github(cls):
if not hasattr(cls, "_github"):
token = conf.env.get("github", {}).get("token")
if token is not None:
cls._github = Github(token)
else:
cls._github = Github() # Without auth max 60 req/hr
return cls._github
def __init__(self, description, **kwargs):
super(GH, self).__init__(**kwargs)
self._repo = None
self.issue = None
self.upstream_only = kwargs.get('upstream_only', True)
self.since = kwargs.get('since')
self.until = kwargs.get('until')
if isinstance(description, (list, tuple)):
try:
self.repo, self.issue = description
self.issue = int(self.issue)
except ValueError:
raise ValueError(
"The GH issue specification must have 2 items and issue must be number")
elif isinstance(description, int):
if self.DEFAULT_REPOSITORY is None:
raise ValueError("You must specify github/default_repo in env.yaml!")
self.issue = description
elif isinstance(description, basestring):
try:
owner, repo, issue_num = re.match(r"^([^/]+)/([^/:]+):([0-9]+)$",
str(description).strip()).groups()
except AttributeError:
raise ValueError(
"Could not parse '{}' as a proper GH issue anchor!".format(str(description)))
else:
self._repo = "{}/{}".format(owner, repo)
self.issue = int(issue_num)
else:
raise ValueError("GH issue specified wrong")
@property
def data(self):
identifier = "{}:{}".format(self.repo, self.issue)
if identifier not in self._issue_cache:
self._issue_cache[identifier] = self.github.get_repo(self.repo).get_issue(self.issue)
return self._issue_cache[identifier]
@property
def blocks(self):
if self.upstream_only and version.appliance_is_downstream():
return False
if self.data.state == "closed":
return False
# Now let's check versions
if self.since is None and self.until is None:
# No version specifics
return True
elif self.since is not None and self.until is not None:
# since inclusive, until exclusive
return self.since <= version.current_version() < self.until
elif self.since is not None:
# Only since
return version.current_version() >= self.since
elif self.until is not None:
# Only until
return version.current_version() < self.until
# All branches covered
@property
def repo(self):
return self._repo or self.DEFAULT_REPOSITORY
def __str__(self):
return "GitHub Issue https://github.com/{}/issues/{}".format(self.repo, self.issue)
@property
def url(self):
return "https://github.com/{}/issues/{}".format(self.repo, self.issue)
class BZ(Blocker):
@classproperty
def bugzilla(cls):
if not hasattr(cls, "_bugzilla"):
cls._bugzilla = Bugzilla.from_config()
return cls._bugzilla
def __init__(self, bug_id, **kwargs):
self.ignore_bugs = kwargs.pop("ignore_bugs", [])
super(BZ, self).__init__(**kwargs)
self.bug_id = int(bug_id)
@property
def data(self):
return self.bugzilla.resolve_blocker(
self.bug_id, ignore_bugs=self.ignore_bugs, force_block_streams=self.forced_streams)
@property
def bugzilla_bug(self):
if self.data is None:
return None
return self.data
@property
def blocks(self):
try:
bug = self.data
if bug is None:
return False
result = False
if bug.is_opened:
result = True
if bug.upstream_bug:
if not version.appliance_is_downstream() and bug.can_test_on_upstream:
result = False
if result is False and version.appliance_is_downstream():
if bug.fixed_in is not None:
return version.current_version() < bug.fixed_in
return result
except xmlrpclib.Fault as e:
code = e.faultCode
s = e.faultString.strip().split("\n")[0]
logger.error("Bugzilla thrown a fault: %s/%s", code, s)
logger.warning("Ignoring and taking the bug as non-blocking")
store.terminalreporter.write(
"Bugzila made a booboo: {}/{}\n".format(code, s), bold=True)
return False
def get_bug_url(self):
bz_url = urlparse(self.bugzilla.bugzilla.url)
return "{}://{}/show_bug.cgi?id={}".format(bz_url.scheme, bz_url.netloc, self.bug_id)
@property
def url(self):
return self.get_bug_url()
def __str__(self):
return "Bugzilla bug {} (or one of its copies)".format(self.get_bug_url())
|
yongshengwang/hue
|
refs/heads/master
|
build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/core/management/validation.py
|
107
|
import collections
import sys
from django.conf import settings
from django.core.management.color import color_style
from django.utils.encoding import force_str
from django.utils.itercompat import is_iterable
from django.utils import six
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR(force_str("%s: %s\n" % (context, error))))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app, include_swapped=True):
opts = cls._meta
# Check swappable attribute.
if opts.swapped:
try:
app_label, model_name = opts.swapped.split('.')
except ValueError:
e.add(opts, "%s is not of the form 'app_label.app_name'." % opts.swappable)
continue
if not models.get_model(app_label, model_name):
e.add(opts, "Model has been swapped out for '%s' which has not been installed or is abstract." % opts.swapped)
# No need to perform any other validation checks on a swapped model.
continue
# If this is the current User model, check known validation problems with User models
if settings.AUTH_USER_MODEL == '%s.%s' % (opts.app_label, opts.object_name):
# Check that REQUIRED_FIELDS is a list
if not isinstance(cls.REQUIRED_FIELDS, (list, tuple)):
e.add(opts, 'The REQUIRED_FIELDS must be a list or tuple.')
# Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
e.add(opts, 'The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.')
# Check that the username field is unique
if not opts.get_field(cls.USERNAME_FIELD).unique:
e.add(opts, 'The USERNAME_FIELD must be unique. Add unique=True to the field parameters.')
# Model isn't swapped; do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
try:
from django.utils.image import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install Pillow. Get it at https://pypi.python.org/pypi/Pillow.' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if isinstance(f, models.FilePathField) and not (f.allow_files or f.allow_folders):
e.add(opts, '"%s": FilePathFields must have either allow_files or allow_folders set to True.' % f.name)
if isinstance(f, models.GenericIPAddressField) and not getattr(f, 'null', False) and getattr(f, 'blank', False):
e.add(opts, '"%s": GenericIPAddressField can not accept blank values if null values are not allowed, as blank values are stored as null.' % f.name)
if f.choices:
if isinstance(f.choices, six.string_types) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if isinstance(c, six.string_types) or not is_iterable(c) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-item iterables (e.g. list of 2 item tuples).' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Make sure the related field specified by a ForeignKey is unique
if f.requires_unique_target:
if len(f.foreign_related_fields) > 1:
has_unique_field = False
for rel_field in f.foreign_related_fields:
has_unique_field = has_unique_field or rel_field.unique
if not has_unique_field:
e.add(opts, "Field combination '%s' under model '%s' must have a unique=True constraint" % (','.join([rel_field.name for rel_field in f.foreign_related_fields]), f.rel.to.__name__))
else:
if not f.foreign_related_fields[0].unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.foreign_related_fields[0].name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, six.string_types):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, six.string_types):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?':
continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
validate_local_fields(e, opts, "unique_together", ut)
if not isinstance(opts.index_together, collections.Sequence):
e.add(opts, '"index_together" must a sequence')
else:
for it in opts.index_together:
validate_local_fields(e, opts, "index_together", it)
return len(e.errors)
def validate_local_fields(e, opts, field_name, fields):
from django.db import models
if not isinstance(fields, collections.Sequence):
e.add(opts, 'all %s elements must be sequences' % field_name)
else:
for field in fields:
try:
f = opts.get_field(field, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"%s" refers to %s, a field that doesn\'t exist.' % (field_name, field))
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"%s" refers to %s. ManyToManyFields are not supported in %s.' % (field_name, f.name, field_name))
if f not in opts.local_fields:
e.add(opts, '"%s" refers to %s. This is not in the same model as the %s statement.' % (field_name, f.name, field_name))
|
milchakov/omim
|
refs/heads/master
|
tools/python/transit/transit_color_palette.py
|
10
|
import math
def to_rgb(color_str):
if len(color_str) != 6:
return (0, 0, 0)
r = int(color_str[0:2], 16)
g = int(color_str[2:4], 16)
b = int(color_str[4:], 16)
return (r, g, b)
def blend_colors(rgb_array1, rgb_array2, k):
return (rgb_array1[0] * (1.0 - k) + rgb_array2[0] * k,
rgb_array1[1] * (1.0 - k) + rgb_array2[1] * k,
rgb_array1[2] * (1.0 - k) + rgb_array2[2] * k)
def rgb_pivot(n):
result = n / 12.92
if n > 0.04045:
result = ((n + 0.055) / 1.055) ** 2.4
return result * 100.0
def to_xyz(rgb_array):
r = rgb_pivot(rgb_array[0] / 255.0)
g = rgb_pivot(rgb_array[1] / 255.0)
b = rgb_pivot(rgb_array[2] / 255.0)
return (r * 0.4124 + g * 0.3576 + b * 0.1805,
r * 0.2126 + g * 0.7152 + b * 0.0722,
r * 0.0193 + g * 0.1192 + b * 0.9505)
#https://en.wikipedia.org/wiki/Lab_color_space#CIELAB
def lab_pivot(n):
if n > 0.008856:
return n ** (1.0/3.0)
return (903.3 * n + 16.0) / 116.0
def to_lab(rgb_array):
xyz = to_xyz(rgb_array)
x = lab_pivot(xyz[0] / 95.047)
y = lab_pivot(xyz[1] / 100.0)
z = lab_pivot(xyz[2] / 108.883)
l = 116.0 * y - 16.0
if l < 0.0:
l = 0.0
a = 500.0 * (x - y)
b = 200.0 * (y - z)
return (l, a, b)
def lum_distance(ref_color, src_color):
return 30 * (ref_color[0] - src_color[0]) ** 2 +\
59 * (ref_color[1] - src_color[1]) ** 2 +\
11 * (ref_color[2] - src_color[2]) ** 2
def is_bluish(rgb_array):
d1 = lum_distance((255, 0, 0), rgb_array)
d2 = lum_distance((0, 0, 255), rgb_array)
return d2 < d1
#http://en.wikipedia.org/wiki/Color_difference#CIE94
def cie94(ref_color, src_color):
lab_ref = to_lab(ref_color)
lab_src = to_lab(src_color)
deltaL = lab_ref[0] - lab_src[0]
deltaA = lab_ref[1] - lab_src[1]
deltaB = lab_ref[2] - lab_src[2]
# chroma is computed from the a/b components (indices 1 and 2), not from L
c1 = math.sqrt(lab_ref[1] * lab_ref[1] + lab_ref[2] * lab_ref[2])
c2 = math.sqrt(lab_src[1] * lab_src[1] + lab_src[2] * lab_src[2])
deltaC = c1 - c2
deltaH = deltaA * deltaA + deltaB * deltaB - deltaC * deltaC
if deltaH < 0.0:
deltaH = 0.0
else:
deltaH = math.sqrt(deltaH)
# cold tones if a color is more bluish.
Kl = 1.0
K1 = 0.045
K2 = 0.015
sc = 1.0 + K1 * c1
sh = 1.0 + K2 * c1
deltaLKlsl = deltaL / Kl
deltaCkcsc = deltaC / sc
deltaHkhsh = deltaH / sh
i = deltaLKlsl * deltaLKlsl + deltaCkcsc * deltaCkcsc + deltaHkhsh * deltaHkhsh
if i < 0:
return 0.0
return math.sqrt(i)
class Palette:
def __init__(self, colors):
self.colors = {}
for name, color_info in colors['colors'].items():
self.colors[name] = to_rgb(color_info['clear'])
def get_default_color(self):
return 'default'
def get_nearest_color(self, color_str, casing_color_str, excluded_names):
"""Returns the nearest color from the palette."""
nearest_color_name = self.get_default_color()
color = to_rgb(color_str)
if (casing_color_str is not None and len(casing_color_str) != 0):
color = blend_colors(color, to_rgb(casing_color_str), 0.5)
min_diff = None
bluish = is_bluish(color)
for name, palette_color in self.colors.items():
# Uncomment if you want to exclude duplicates.
#if name in excluded_names:
# continue
if bluish:
diff = lum_distance(palette_color, color)
else:
diff = cie94(palette_color, color)
if min_diff is None or diff < min_diff:
min_diff = diff
nearest_color_name = name
# Left here for debug purposes.
#print("Result: " + color_str + "," + str(casing_color_str) +
# " - " + nearest_color_name + ": bluish = " + str(bluish))
return nearest_color_name
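# --- Worked example (illustrative values) ------------------------------------
if __name__ == '__main__':
    red = to_rgb('ff0000')       # (255, 0, 0)
    dark_red = to_rgb('990000')  # (153, 0, 0)
    print(is_bluish(red))                # False, so cie94() would be chosen
    print(lum_distance(red, dark_red))   # 30 * (255 - 153) ** 2 = 312120
    print(round(cie94(red, dark_red), 2))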
|
coberger/DIRAC
|
refs/heads/integration
|
ResourceStatusSystem/Policy/JobRunningMatchedRatioPolicy.py
|
10
|
# $HeadURL: $
""" JobRunningMatchedRatioPolicy
Policy that calculates the efficiency following the formula:
( running ) / ( running + matched + received + checking )
if the denominator is smaller than 10, it does not take any decision.
"""
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
__RCSID__ = '$Id: JobRunningMatchedRatioPolicy.py 60769 2013-01-18 11:50:36Z ubeda $'
class JobRunningMatchedRatioPolicy( PolicyBase ):
"""
The JobRunningMatchedRatioPolicy class is a policy that checks the efficiency of the
jobs according to what is on JobDB.
Evaluates the JobRunningMatchedRatioPolicy results given by the JobCommand.JobCommand
"""
@staticmethod
def _evaluate( commandResult ):
""" _evaluate
efficiency < 0.5 :: Banned
efficiency < 0.9 :: Degraded
"""
result = {
'Status' : None,
'Reason' : None
}
if not commandResult[ 'OK' ]:
result[ 'Status' ] = 'Error'
result[ 'Reason' ] = commandResult[ 'Message' ]
return S_OK( result )
commandResult = commandResult[ 'Value' ]
if not commandResult:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'No values to take a decision'
return S_OK( result )
commandResult = commandResult[ 0 ]
if not commandResult:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'No values to take a decision'
return S_OK( result )
running = float( commandResult[ 'Running' ] )
matched = float( commandResult[ 'Matched' ] )
received = float( commandResult[ 'Received' ] )
checking = float( commandResult[ 'Checking' ] )
total = running + matched + received + checking
# we want a minimum amount of jobs to take a decision (at least 10 jobs)
if total < 10:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'Not enough jobs to take a decision'
return S_OK( result )
efficiency = running / total
if efficiency < 0.5:
result[ 'Status' ] = 'Banned'
elif efficiency < 0.9:
result[ 'Status' ] = 'Degraded'
else:
result[ 'Status' ] = 'Active'
result[ 'Reason' ] = 'Job Running / Matched ratio of %.2f' % efficiency
return S_OK( result )
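# --- Worked example (illustrative numbers) ------------------------------------
# Running=40, Matched=5, Received=3, Checking=2 gives total = 50 >= 10 and
# efficiency = 40 / 50 = 0.8, which falls in [0.5, 0.9) -> 'Degraded':
#
#     res = JobRunningMatchedRatioPolicy._evaluate(S_OK([{
#         'Running': 40, 'Matched': 5, 'Received': 3, 'Checking': 2}]))
#     res['Value']  # {'Status': 'Degraded',
#                   #  'Reason': 'Job Running / Matched ratio of 0.80'}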
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
sudovijay/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/xminus.py
|
118
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_chr,
compat_ord,
)
from ..utils import (
int_or_none,
parse_filesize,
)
class XMinusIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html',
'md5': '401a15f2d2dcf6d592cb95528d72a2a8',
'info_dict': {
'id': '4542',
'ext': 'mp3',
'title': 'Леонид Агутин-Песенка шофера',
'duration': 156,
'tbr': 320,
'filesize_approx': 5900000,
'view_count': int,
'description': 'md5:03238c5b663810bc79cf42ef3c03e371',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
artist = self._html_search_regex(
r'minus_track\.artist="(.+?)"', webpage, 'artist')
title = artist + '-' + self._html_search_regex(
r'minus_track\.title="(.+?)"', webpage, 'title')
duration = int_or_none(self._html_search_regex(
r'minus_track\.dur_sec=\'([0-9]*?)\'',
webpage, 'duration', fatal=False))
filesize_approx = parse_filesize(self._html_search_regex(
r'<div id="finfo"[^>]*>\s*↓\s*([0-9.]+\s*[a-zA-Z][bB])',
webpage, 'approximate filesize', fatal=False))
tbr = int_or_none(self._html_search_regex(
r'<div class="quality[^"]*"></div>\s*([0-9]+)\s*kbps',
webpage, 'bitrate', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'<div class="quality.*?► ([0-9]+)',
webpage, 'view count', fatal=False))
description = self._html_search_regex(
r'(?s)<div id="song_texts">(.*?)</div><br',
webpage, 'song lyrics', fatal=False)
if description:
description = re.sub(' *\r *', '\n', description)
enc_token = self._html_search_regex(
r'minus_track\.s?tkn="(.+?)"', webpage, 'enc_token')
token = ''.join(
c if pos == 3 else compat_chr(compat_ord(c) - 1)
for pos, c in enumerate(reversed(enc_token)))
video_url = 'http://x-minus.org/dwlf/%s/%s.mp3' % (video_id, token)
return {
'id': video_id,
'title': title,
'url': video_url,
'duration': duration,
'filesize_approx': filesize_approx,
'tbr': tbr,
'view_count': view_count,
'description': description,
}
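# --- Note on the token scheme above (made-up input) ---------------------------
# ``enc_token`` is reversed, and every character except the one at index 3 of
# the reversed string is shifted down by one code point:
#
#     enc = 'bcd4efg'  # hypothetical enc_token
#     ''.join(c if pos == 3 else chr(ord(c) - 1)
#             for pos, c in enumerate(reversed(enc)))
#     # reversed(enc) == 'gfe4dcb' -> result 'fed4cba'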
|
wilywampa/python-mode
|
refs/heads/develop
|
pymode/libs/pylama/config.py
|
1
|
"""Parse arguments from command line and configuration files."""
import fnmatch
import os
import sys
import re
import logging
from argparse import ArgumentParser
from . import __version__
from .libs.inirama import Namespace
from .lint.extensions import LINTERS
#: Default checkers
DEFAULT_LINTERS = 'pycodestyle', 'pyflakes', 'mccabe'
CURDIR = os.getcwd()
CONFIG_FILES = 'pylama.ini', 'setup.cfg', 'tox.ini', 'pytest.ini'
#: The skip pattern
SKIP_PATTERN = re.compile(r'# *noqa\b', re.I).search
# Parse modelines
MODELINE_RE = re.compile(r'^\s*#\s+(?:pylama:)\s*((?:[\w_]*=[^:\n\s]+:?)+)', re.I | re.M)
# Setup a logger
LOGGER = logging.getLogger('pylama')
LOGGER.propagate = False
STREAM = logging.StreamHandler(sys.stdout)
LOGGER.addHandler(STREAM)
class _Default(object):
def __init__(self, value=None):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<_Default [%s]>" % self.value
def split_csp_str(s):
""" Split comma separated string into unique values, keeping their order.
:returns: list of split values
"""
seen = set()
values = s if isinstance(s, (list, tuple)) else s.strip().split(',')
return [x for x in values if x and not (x in seen or seen.add(x))]
def parse_linters(linters):
""" Initialize choosen linters.
:returns: list of inited linters
"""
result = list()
for name in split_csp_str(linters):
linter = LINTERS.get(name)
if linter:
result.append((name, linter))
else:
logging.warn("Linter `%s` not found.", name)
return result
PARSER = ArgumentParser(description="Code audit tool for python.")
PARSER.add_argument(
"paths", nargs='*', default=_Default([CURDIR]),
help="Paths to files or directories for code check.")
PARSER.add_argument(
"--verbose", "-v", action='store_true', help="Verbose mode.")
PARSER.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
PARSER.add_argument(
"--format", "-f", default=_Default('pycodestyle'),
choices=['pep8', 'pycodestyle', 'pylint', 'parsable'],
help="Choose errors format (pycodestyle, pylint, parsable).")
PARSER.add_argument(
"--select", "-s", default=_Default(''), type=split_csp_str,
help="Select errors and warnings. (comma-separated list)")
PARSER.add_argument(
"--sort", default=_Default(''), type=split_csp_str,
help="Sort result by error types. Ex. E,W,D")
PARSER.add_argument(
"--linters", "-l", default=_Default(','.join(DEFAULT_LINTERS)),
type=parse_linters, help=(
"Select linters. (comma-separated). Choices are %s."
% ','.join(s for s in LINTERS.keys())
))
PARSER.add_argument(
"--ignore", "-i", default=_Default(''), type=split_csp_str,
help="Ignore errors and warnings. (comma-separated)")
PARSER.add_argument(
"--skip", default=_Default(''),
type=lambda s: [re.compile(fnmatch.translate(p)) for p in s.split(',') if p],
help="Skip files by masks (comma-separated, Ex. */messages.py)")
PARSER.add_argument("--report", "-r", help="Send report to file [REPORT]")
PARSER.add_argument(
"--hook", action="store_true", help="Install Git (Mercurial) hook.")
PARSER.add_argument(
"--async", action="store_true",
help="Enable async mode. Usefull for checking a lot of files. "
"Dont supported with pylint.")
PARSER.add_argument(
"--options", "-o", default="",
help="Select configuration file. By default is '<CURDIR>/pylama.ini'")
PARSER.add_argument(
"--force", "-F", action='store_true', default=_Default(False),
help="Force code checking (if linter doesnt allow)")
PARSER.add_argument(
"--abspath", "-a", action='store_true', default=_Default(False),
help="Use absolute paths in output.")
ACTIONS = dict((a.dest, a) for a in PARSER._actions)
def parse_options(args=None, config=True, rootdir=CURDIR, **overrides): # noqa
""" Parse options from command line and configuration files.
:return argparse.Namespace:
"""
if args is None:
args = []
# Parse args from command string
options = PARSER.parse_args(args)
options.file_params = dict()
options.linters_params = dict()
# Compile options from ini
if config:
cfg = get_config(str(options.options), rootdir=rootdir)
for opt, val in cfg.default.items():
LOGGER.info('Found option %s (%s)', opt, val)
passed_value = getattr(options, opt, _Default())
if isinstance(passed_value, _Default):
if opt == 'paths':
val = val.split()
setattr(options, opt, _Default(val))
# Parse file related options
for name, opts in cfg.sections.items():
if not name.startswith('pylama'):
continue
if name == cfg.default_section:
continue
name = name[7:]
if name in LINTERS:
options.linters_params[name] = dict(opts)
continue
mask = re.compile(fnmatch.translate(name))
options.file_params[mask] = dict(opts)
# Override options
for opt, val in overrides.items():
passed_value = getattr(options, opt, _Default())
if opt in ('ignore', 'select') and passed_value:
setattr(options, opt, process_value(opt, passed_value.value) + process_value(opt, val))
elif isinstance(passed_value, _Default):
setattr(options, opt, process_value(opt, val))
# Postprocess options
for name in options.__dict__:
value = getattr(options, name)
if isinstance(value, _Default):
setattr(options, name, process_value(name, value.value))
if options.async and 'pylint' in options.linters:
LOGGER.warn("Can't parse code asynchronously while pylint is enabled.")
options.async = False
return options
def process_value(name, value):
""" Compile option value. """
action = ACTIONS.get(name)
if not action:
return value
if callable(action.type):
return action.type(value)
if action.const:
return bool(int(value))
return value
def get_config(ini_path=None, rootdir=CURDIR):
""" Load configuration from INI.
:return Namespace:
"""
config = Namespace()
config.default_section = 'pylama'
if not ini_path:
for path in CONFIG_FILES:
path = os.path.join(rootdir, path)
if os.path.isfile(path) and os.access(path, os.R_OK):
config.read(path)
else:
config.read(ini_path)
return config
def setup_logger(options):
""" Setup logger with options. """
LOGGER.setLevel(logging.INFO if options.verbose else logging.WARN)
if options.report:
LOGGER.removeHandler(STREAM)
LOGGER.addHandler(logging.FileHandler(options.report, mode='w'))
LOGGER.info('Try to read configuration from: ' + options.options)
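# --- Usage sketch (illustrative flags) -----------------------------------------
#     options = parse_options(['--linters', 'pyflakes', '--verbose'])
#     setup_logger(options)
#     options.linters  # [('pyflakes', <linter instance>)]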
# pylama:ignore=W0212,D210,F0001
|
rcarrillocruz/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cloudengine/ce_vxlan_gateway.py
|
46
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = """
---
module: ce_vxlan_gateway
version_added: "2.4"
short_description: Manages gateway for the VXLAN network on HUAWEI CloudEngine devices.
description:
- Configuring Centralized All-Active Gateways or Distributed Gateway for
the VXLAN Network on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
notes:
- All-Active Gateways and Distributed Gateway for the VXLAN network cannot be configured at the same time.
options:
dfs_id:
description:
- Specifies the ID of a DFS group.
The value must be 1.
required: false
default: null
dfs_source_ip:
description:
- Specifies the IPv4 address bound to a DFS group.
The value is in dotted decimal notation.
required: false
default: null
dfs_source_vpn:
description:
- Specifies the name of a VPN instance bound to a DFS group.
The value is a string of 1 to 31 case-sensitive characters without spaces.
If the character string is quoted by double quotation marks, the character string can contain spaces.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
dfs_udp_port:
description:
- Specifies the UDP port number of the DFS group.
The value is an integer that ranges from 1025 to 65535.
required: false
default: null
dfs_all_active:
description:
- Creates all-active gateways.
required: false
choices: ['enable', 'disable']
default: null
dfs_peer_ip:
description:
- Configure the IP address of an all-active gateway peer.
The value is in dotted decimal notation.
required: false
default: null
dfs_peer_vpn:
description:
- Specifies the name of the VPN instance that is associated with all-active gateway peer.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vpn_instance:
description:
- Specifies the name of a VPN instance.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vpn_vni:
description:
- Specifies a VNI ID.
Binds a VXLAN network identifier (VNI) to a virtual private network (VPN) instance.
The value is an integer ranging from 1 to 16000000.
required: false
default: null
vbdif_name:
description:
- Full name of VBDIF interface, i.e. Vbdif100.
required: false
default: null
vbdif_bind_vpn:
description:
- Specifies the name of the VPN instance that is associated with the interface.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vbdif_mac:
description:
- Specifies a MAC address for a VBDIF interface.
The value is in the format of H-H-H. Each H is a 4-digit hexadecimal number, such as C(00e0) or C(fc01).
If an H contains less than four digits, 0s are added ahead. For example, C(e0) is equal to C(00e0).
A MAC address cannot be all 0s or 1s or a multicast MAC address.
required: false
default: null
arp_distribute_gateway:
description:
- Enable the distributed gateway function on VBDIF interface.
required: false
choices: ['enable','disable']
default: null
arp_direct_route:
description:
- Enable VLINK direct route on VBDIF interface.
required: false
choices: ['enable','disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan gateway module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring Centralized All-Active Gateways for the VXLAN Network
ce_vxlan_gateway:
dfs_id: 1
dfs_source_ip: 6.6.6.6
dfs_all_active: enable
dfs_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Bind the VPN instance to a Layer 3 gateway, enable distributed gateway, and configure host route advertisement.
ce_vxlan_gateway:
vbdif_name: Vbdif100
vbdif_bind_vpn: vpn1
arp_distribute_gateway: enable
arp_direct_route: enable
provider: "{{ cli }}"
- name: Assign a VNI to a VPN instance.
ce_vxlan_gateway:
vpn_instance: vpn1
vpn_vni: 100
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "dfs_source_ip": "6.6.6.6", "dfs_all_active":"enable", "dfs_peer_ip": "7.7.7.7"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "dfs_source_ip": null, "evn_peer_ip": [], "dfs_all_active": "disable"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "evn_source_ip": "6.6.6.6", "evn_source_vpn": null,
"evn_peers": [{"ip": "7.7.7.7", "vpn": ""}], "dfs_all_active": "enable"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["dfs-group 1",
"source ip 6.6.6.6",
"active-active-gateway",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config
from ansible.module_utils.ce import ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist?"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
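# Illustrative behavior (assumed examples, not part of the original module):
# is_config_exist("dfs-group 1\n source ip 6.6.6.6", "source ip 6.6.6.6") -> True
# is_config_exist("", "dfs-group 1") -> False (an empty config never matches)
# Note: this is a plain substring test, so "udp port 1025" would also match
# inside "udp port 10255"; callers rely on full command lines being distinct.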
def is_valid_v4addr(addr):
"""check is ipv4 addr"""
if not addr:
return False
if addr.count('.') == 3:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
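# Illustrative checks (assumed examples):
# is_valid_v4addr("6.6.6.6") -> True
# is_valid_v4addr("256.1.1.1") -> False (octet above 255)
# is_valid_v4addr("6.6.6") -> False (exactly four octets required)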
def mac_format(mac):
"""convert mac format to xxxx-xxxx-xxxx"""
if not mac:
return None
if mac.count("-") != 2:
return None
addrs = mac.split("-")
for i in range(3):
if not addrs[i] or not addrs[i].isalnum():
return None
if len(addrs[i]) < 1 or len(addrs[i]) > 4:
return None
try:
addrs[i] = int(addrs[i], 16)
except ValueError:
return None
try:
return "%04x-%04x-%04x" % (addrs[0], addrs[1], addrs[2])
except ValueError:
return None
except TypeError:
return None
def get_dfs_source_ip(config):
"""get dfs source ip address"""
get = re.findall(r"source ip ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_source_vpn(config):
"""get dfs source ip vpn instance name"""
get = re.findall(
r"source ip [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_udp_port(config):
"""get dfs udp port"""
get = re.findall(r"udp port (\d+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_peers(config):
"""get evn peer ip list"""
get = re.findall(
r"peer ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\s?(vpn-instance)?\s?(\S*)", config)
if not get:
return None
else:
peers = list()
for item in get:
peers.append(dict(ip=item[0], vpn=item[2]))
return peers
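# Illustrative parse (hypothetical config snippet):
# get_dfs_peers("peer 7.7.7.7 vpn-instance vpn1\npeer 8.8.8.8")
#   -> [{"ip": "7.7.7.7", "vpn": "vpn1"}, {"ip": "8.8.8.8", "vpn": ""}]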
def get_ip_vpn(config):
"""get ip vpn instance"""
get = re.findall(r"ip vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_ip_vpn_vni(config):
"""get ip vpn vxlan vni"""
get = re.findall(r"vxlan vni (\d+)", config)
if not get:
return None
else:
return get[0]
def get_vbdif_vpn(config):
"""get ip vpn name of interface vbdif"""
get = re.findall(r"ip binding vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_vbdif_mac(config):
"""get mac address of interface vbdif"""
get = re.findall(
r" mac-address ([0-9a-fA-F]{1,4}-[0-9a-fA-F]{1,4}-[0-9a-fA-F]{1,4})", config)
if not get:
return None
else:
return get[0]
class VxlanGateway(object):
"""
Manages Gateway for the VXLAN Network.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.dfs_id = self.module.params['dfs_id']
self.dfs_source_ip = self.module.params['dfs_source_ip']
self.dfs_source_vpn = self.module.params['dfs_source_vpn']
self.dfs_udp_port = self.module.params['dfs_udp_port']
self.dfs_all_active = self.module.params['dfs_all_active']
self.dfs_peer_ip = self.module.params['dfs_peer_ip']
self.dfs_peer_vpn = self.module.params['dfs_peer_vpn']
self.vpn_instance = self.module.params['vpn_instance']
self.vpn_vni = self.module.params['vpn_vni']
self.vbdif_name = self.module.params['vbdif_name']
self.vbdif_mac = self.module.params['vbdif_mac']
self.vbdif_bind_vpn = self.module.params['vbdif_bind_vpn']
self.arp_distribute_gateway = self.module.params['arp_distribute_gateway']
self.arp_direct_route = self.module.params['arp_direct_route']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.config = "" # current config
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = " | ignore-case section include dfs-group"
if self.vpn_instance:
exp += "|^ip vpn-instance %s$" % self.vpn_instance
if self.vbdif_name:
exp += "|^interface %s$" % self.vbdif_name
flags.append(exp)
return get_config(self.module, flags)
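# Illustrative filter string (assumed example): with vpn_instance "vpn1" and
# vbdif_name "Vbdif100", the flag passed to get_config becomes
# " | ignore-case section include dfs-group|^ip vpn-instance vpn1$|^interface Vbdif100$"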
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
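# Illustrative queueing (assumed examples):
# cli_add_command("dfs-group 1") queues "dfs-group 1" both for the device and
# for the reported updates; cli_add_command("udp port 1025", undo=True)
# queues "undo udp port 1025"; "quit"/"return" are sent but never reported.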
def config_dfs_group(self):
"""manage Dynamic Fabric Service (DFS) group configuration"""
if not self.dfs_id:
return
dfs_view = False
view_cmd = "dfs-group %s" % self.dfs_id
exist = is_config_exist(self.config, view_cmd)
if self.state == "present" and not exist:
self.cli_add_command(view_cmd)
dfs_view = True
# undo dfs-group dfs-group-id
if self.state == "absent" and exist:
if not self.dfs_source_ip and not self.dfs_udp_port and not self.dfs_all_active and not self.dfs_peer_ip:
self.cli_add_command(view_cmd, undo=True)
return
# [undo] source ip ip-address [ vpn-instance vpn-instance-name ]
if self.dfs_source_ip:
cmd = "source ip %s" % self.dfs_source_ip
if self.dfs_source_vpn:
cmd += " vpn-instance %s" % self.dfs_source_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd)
if self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd, undo=True)
# [undo] udp port port-number
if self.dfs_udp_port:
cmd = "udp port %s" % self.dfs_udp_port
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd, undo=True)
# [undo] active-active-gateway
# [undo] peer ip-address [ vpn-instance vpn-instance-name ]
aa_cmd = "active-active-gateway"
aa_exist = is_config_exist(self.config, aa_cmd)
aa_view = False
if self.dfs_all_active == "disable":
if aa_exist:
cmd = "peer %s" % self.dfs_peer_ip
if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd, undo=True)
elif self.dfs_all_active == "enable":
if not aa_exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
aa_view = True
if self.dfs_peer_ip:
cmd = "peer %s" % self.dfs_peer_ip
if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
if not aa_view:
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
if not aa_view:
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
else:  # dfs_all_active was not specified
if aa_exist and self.dfs_peer_ip:
cmd = "peer %s" % self.dfs_peer_ip
if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
else:
pass
elif not aa_exist and self.dfs_peer_ip and self.state == "present":
self.module.fail_json(
msg="Error: All-active gateway is not enabled.")
else:
pass
if dfs_view:
self.cli_add_command("quit")
def config_ip_vpn(self):
"""configure command at the ip vpn view"""
if not self.vpn_instance or not self.vpn_vni:
return
# ip vpn-instance vpn-instance-name
view_cmd = "ip vpn-instance %s" % self.vpn_instance
exist = is_config_exist(self.config, view_cmd)
if not exist:
self.module.fail_json(
msg="Error: ip vpn instance %s is not exist." % self.vpn_instance)
# [undo] vxlan vni vni-id
cmd = "vxlan vni %s" % self.vpn_vni
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
self.cli_add_command(view_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
self.cli_add_command(view_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_vbdif(self):
"""configure command at the VBDIF interface view"""
if not self.vbdif_name:
return
vbdif_cmd = "interface %s" % self.vbdif_name.lower().capitalize()
exist = is_config_exist(self.config, vbdif_cmd)
if not exist:
self.module.fail_json(
msg="Error: Interface %s is not exist." % self.vbdif_name)
# interface vbdif bd-id
# [undo] ip binding vpn-instance vpn-instance-name
vbdif_view = False
if self.vbdif_bind_vpn:
cmd = "ip binding vpn-instance %s" % self.vbdif_bind_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# [undo] arp distribute-gateway enable
if self.arp_distribute_gateway:
cmd = "arp distribute-gateway enable"
exist = is_config_exist(self.config, cmd)
if self.arp_distribute_gateway == "enable" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.arp_distribute_gateway == "disable" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# [undo] arp direct-route enable
if self.arp_direct_route:
cmd = "arp direct-route enable"
exist = is_config_exist(self.config, cmd)
if self.arp_direct_route == "enable" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.arp_direct_route == "disable" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# mac-address mac-address
# undo mac-address
if self.vbdif_mac:
cmd = "mac-address %s" % self.vbdif_mac
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command("undo mac-address")
# quit
if vbdif_view:
self.cli_add_command("quit")
def is_valid_vbdif(self, ifname):
"""check is interface vbdif"""
if not ifname.upper().startswith('VBDIF'):
return False
bdid = ifname.replace(" ", "").upper().replace("VBDIF", "")
if not bdid.isdigit():
return False
if int(bdid) < 1 or int(bdid) > 16777215:
return False
return True
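# Illustrative checks (assumed examples):
# is_valid_vbdif("Vbdif100") -> True
# is_valid_vbdif("Vbdif0") -> False (BD id must be in 1..16777215)
# is_valid_vbdif("Vlanif100") -> False (not a VBDIF interface)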
def is_valid_ip_vpn(self, vpname):
"""check ip vpn"""
if not vpname:
return False
if vpname == "_public_":
self.module.fail_json(
msg="Error: The value C(_public_) is reserved and cannot be used as the VPN instance name.")
if len(vpname) < 1 or len(vpname) > 31:
self.module.fail_json(
msg="Error: IP vpn name length is not in the range from 1 to 31.")
return True
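# Illustrative checks (assumed examples): "vpn1" -> True; the reserved name
# "_public_" or a name longer than 31 characters aborts the module via fail_json.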
def check_params(self):
"""Check all input params"""
# dfs id check
if self.dfs_id:
if not self.dfs_id.isdigit():
self.module.fail_json(msg="Error: DFS id is not digit.")
if int(self.dfs_id) != 1:
self.module.fail_json(msg="Error: DFS is not 1.")
# dfs_source_ip check
if self.dfs_source_ip:
if not is_valid_v4addr(self.dfs_source_ip):
self.module.fail_json(msg="Error: dfs_source_ip is invalid.")
# dfs_source_vpn check
if self.dfs_source_vpn and not self.is_valid_ip_vpn(self.dfs_source_vpn):
self.module.fail_json(msg="Error: dfs_source_vpn is invalid.")
# dfs_source_vpn and dfs_source_ip must set at the same time
if self.dfs_source_vpn and not self.dfs_source_ip:
self.module.fail_json(
msg="Error: dfs_source_vpn and dfs_source_ip must set at the same time.")
# dfs_udp_port check
if self.dfs_udp_port:
if not self.dfs_udp_port.isdigit():
self.module.fail_json(
msg="Error: dfs_udp_port id is not digit.")
if int(self.dfs_udp_port) < 1025 or int(self.dfs_udp_port) > 65535:
self.module.fail_json(
msg="dfs_udp_port is not ranges from 1025 to 65535.")
# dfs_peer_ip check
if self.dfs_peer_ip:
if not is_valid_v4addr(self.dfs_peer_ip):
self.module.fail_json(msg="Error: dfs_peer_ip is invalid.")
# dfs_peer_vpn check
if self.dfs_peer_vpn and not self.is_valid_ip_vpn(self.dfs_peer_vpn):
self.module.fail_json(msg="Error: dfs_peer_vpn is invalid.")
# dfs_peer_vpn and dfs_peer_ip must set at the same time
if self.dfs_peer_vpn and not self.dfs_peer_ip:
self.module.fail_json(
msg="Error: dfs_peer_vpn and dfs_peer_ip must set at the same time.")
# vpn_instance check
if self.vpn_instance and not self.is_valid_ip_vpn(self.vpn_instance):
self.module.fail_json(msg="Error: vpn_instance is invalid.")
# vpn_vni check
if self.vpn_vni:
if not self.vpn_vni.isdigit():
self.module.fail_json(msg="Error: vpn_vni id is not digit.")
if int(self.vpn_vni) < 1 or int(self.vpn_vni) > 16000000:
self.module.fail_json(
msg="vpn_vni is not ranges from 1 to 16000000.")
# vpn_instance and vpn_vni must set at the same time
if bool(self.vpn_instance) != bool(self.vpn_vni):
self.module.fail_json(
msg="Error: vpn_instance and vpn_vni must set at the same time.")
# vbdif_name check
if self.vbdif_name:
self.vbdif_name = self.vbdif_name.replace(" ", "").lower().capitalize()
if not self.is_valid_vbdif(self.vbdif_name):
self.module.fail_json(msg="Error: vbdif_name is invalid.")
# vbdif_mac check
if self.vbdif_mac:
mac = mac_format(self.vbdif_mac)
if not mac:
self.module.fail_json(msg="Error: vbdif_mac is invalid.")
self.vbdif_mac = mac
# vbdif_bind_vpn check
if self.vbdif_bind_vpn and not self.is_valid_ip_vpn(self.vbdif_bind_vpn):
self.module.fail_json(msg="Error: vbdif_bind_vpn is invalid.")
# All-active gateway and distributed gateway config cannot be set at
# the same time.
if self.dfs_id:
if self.vpn_vni or self.arp_distribute_gateway == "enable":
self.module.fail_json(msg="Error: All-active gateway and distributed gateway config "
"cannot be set at the same time.")
def get_proposed(self):
"""get proposed info"""
if self.dfs_id:
self.proposed["dfs_id"] = self.dfs_id
self.proposed["dfs_source_ip"] = self.dfs_source_ip
self.proposed["dfs_source_vpn"] = self.dfs_source_vpn
self.proposed["dfs_udp_port"] = self.dfs_udp_port
self.proposed["dfs_all_active"] = self.dfs_all_active
self.proposed["dfs_peer_ip"] = self.dfs_peer_ip
self.proposed["dfs_peer_vpn"] = self.dfs_peer_vpn
if self.vpn_instance:
self.proposed["vpn_instance"] = self.vpn_instance
self.proposed["vpn_vni"] = self.vpn_vni
if self.vbdif_name:
self.proposed["vbdif_name"] = self.vbdif_name
self.proposed["vbdif_mac"] = self.vbdif_mac
self.proposed["vbdif_bind_vpn"] = self.vbdif_bind_vpn
self.proposed[
"arp_distribute_gateway"] = self.arp_distribute_gateway
self.proposed["arp_direct_route"] = self.arp_direct_route
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.config:
return
if is_config_exist(self.config, "dfs-group 1"):
self.existing["dfs_id"] = "1"
self.existing["dfs_source_ip"] = get_dfs_source_ip(self.config)
self.existing["dfs_source_vpn"] = get_dfs_source_vpn(self.config)
self.existing["dfs_udp_port"] = get_dfs_udp_port(self.config)
if is_config_exist(self.config, "active-active-gateway"):
self.existing["dfs_all_active"] = "enable"
self.existing["dfs_peers"] = get_dfs_peers(self.config)
else:
self.existing["dfs_all_active"] = "disable"
if self.vpn_instance:
self.existing["vpn_instance"] = get_ip_vpn(self.config)
self.existing["vpn_vni"] = get_ip_vpn_vni(self.config)
if self.vbdif_name:
self.existing["vbdif_name"] = self.vbdif_name
self.existing["vbdif_mac"] = get_vbdif_mac(self.config)
self.existing["vbdif_bind_vpn"] = get_vbdif_vpn(self.config)
if is_config_exist(self.config, "arp distribute-gateway enable"):
self.existing["arp_distribute_gateway"] = "enable"
else:
self.existing["arp_distribute_gateway"] = "disable"
if is_config_exist(self.config, "arp direct-route enable"):
self.existing["arp_direct_route"] = "enable"
else:
self.existing["arp_direct_route"] = "disable"
def get_end_state(self):
"""get end state info"""
config = self.get_current_config()
if not config:
return
if is_config_exist(config, "dfs-group 1"):
self.end_state["dfs_id"] = "1"
self.end_state["dfs_source_ip"] = get_dfs_source_ip(config)
self.end_state["dfs_source_vpn"] = get_dfs_source_vpn(config)
self.end_state["dfs_udp_port"] = get_dfs_udp_port(config)
if is_config_exist(config, "active-active-gateway"):
self.end_state["dfs_all_active"] = "enable"
self.end_state["dfs_peers"] = get_dfs_peers(config)
else:
self.end_state["dfs_all_active"] = "disable"
if self.vpn_instance:
self.end_state["vpn_instance"] = get_ip_vpn(config)
self.end_state["vpn_vni"] = get_ip_vpn_vni(config)
if self.vbdif_name:
self.end_state["vbdif_name"] = self.vbdif_name
self.end_state["vbdif_mac"] = get_vbdif_mac(config)
self.end_state["vbdif_bind_vpn"] = get_vbdif_vpn(config)
if is_config_exist(config, "arp distribute-gateway enable"):
self.end_state["arp_distribute_gateway"] = "enable"
else:
self.end_state["arp_distribute_gateway"] = "disable"
if is_config_exist(config, "arp direct-route enable"):
self.end_state["arp_direct_route"] = "enable"
else:
self.end_state["arp_direct_route"] = "disable"
def work(self):
"""worker"""
self.check_params()
self.config = self.get_current_config()
self.get_existing()
self.get_proposed()
# deal present or absent
if self.dfs_id:
self.config_dfs_group()
if self.vpn_instance:
self.config_ip_vpn()
if self.vbdif_name:
self.config_vbdif()
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
dfs_id=dict(required=False, type='str'),
dfs_source_ip=dict(required=False, type='str'),
dfs_source_vpn=dict(required=False, type='str'),
dfs_udp_port=dict(required=False, type='str'),
dfs_all_active=dict(required=False, type='str',
choices=['enable', 'disable']),
dfs_peer_ip=dict(required=False, type='str'),
dfs_peer_vpn=dict(required=False, type='str'),
vpn_instance=dict(required=False, type='str'),
vpn_vni=dict(required=False, type='str'),
vbdif_name=dict(required=False, type='str'),
vbdif_mac=dict(required=False, type='str'),
vbdif_bind_vpn=dict(required=False, type='str'),
arp_distribute_gateway=dict(
required=False, type='str', choices=['enable', 'disable']),
arp_direct_route=dict(required=False, type='str',
choices=['enable', 'disable']),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = VxlanGateway(argument_spec)
module.work()
if __name__ == '__main__':
main()
|
darmaa/odoo
|
refs/heads/master
|
addons/gamification_sale_crm/__init__.py
|
396
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
emedinaa/contentbox
|
refs/heads/master
|
third_party/django/core/management/commands/check.py
|
119
|
from __future__ import unicode_literals
import warnings
from django.core.checks.compatibility.base import check_compatibility
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = "Checks your configuration's compatibility with this version " + \
"of Django."
def handle_noargs(self, **options):
for message in check_compatibility():
warnings.warn(message)
|
has2k1/numpy
|
refs/heads/master
|
numpy/lib/tests/test_format.py
|
78
|
from __future__ import division, absolute_import, print_function
r''' Test the .npy file format.
Set up:
>>> import sys
>>> from io import BytesIO
>>> from numpy.lib import format
>>>
>>> scalars = [
... np.uint8,
... np.int8,
... np.uint16,
... np.int16,
... np.uint32,
... np.int32,
... np.uint64,
... np.int64,
... np.float32,
... np.float64,
... np.complex64,
... np.complex128,
... object,
... ]
>>>
>>> basic_arrays = []
>>>
>>> for scalar in scalars:
... for endian in '<>':
... dtype = np.dtype(scalar).newbyteorder(endian)
... basic = np.arange(15).astype(dtype)
... basic_arrays.extend([
... np.array([], dtype=dtype),
... np.array(10, dtype=dtype),
... basic,
... basic.reshape((3,5)),
... basic.reshape((3,5)).T,
... basic.reshape((3,5))[::-1,::2],
... ])
...
>>>
>>> Pdescr = [
... ('x', 'i4', (2,)),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> PbufferT = [
... ([3,2], [[6.,4.],[6.,4.]], 8),
... ([4,3], [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> Ndescr = [
... ('x', 'i4', (2,)),
... ('Info', [
... ('value', 'c16'),
... ('y2', 'f8'),
... ('Info2', [
... ('name', 'S2'),
... ('value', 'c16', (2,)),
... ('y3', 'f8', (2,)),
... ('z3', 'u4', (2,))]),
... ('name', 'S2'),
... ('z2', 'b1')]),
... ('color', 'S2'),
... ('info', [
... ('Name', 'U8'),
... ('Value', 'c16')]),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> NbufferT = [
... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> record_arrays = [
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
... ]
Test the magic string writing.
>>> format.magic(1, 0)
'\x93NUMPY\x01\x00'
>>> format.magic(0, 0)
'\x93NUMPY\x00\x00'
>>> format.magic(255, 255)
'\x93NUMPY\xff\xff'
>>> format.magic(2, 5)
'\x93NUMPY\x02\x05'
Test the magic string reading.
>>> format.read_magic(BytesIO(format.magic(1, 0)))
(1, 0)
>>> format.read_magic(BytesIO(format.magic(0, 0)))
(0, 0)
>>> format.read_magic(BytesIO(format.magic(255, 255)))
(255, 255)
>>> format.read_magic(BytesIO(format.magic(2, 5)))
(2, 5)
Test the header writing.
>>> for arr in basic_arrays + record_arrays:
... f = BytesIO()
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
... print(repr(f.getvalue()))
...
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
import sys
import os
import shutil
import tempfile
import warnings
from io import BytesIO
import numpy as np
from numpy.compat import asbytes, asbytes_nested, sixu
from numpy.testing import (
run_module_suite, assert_, assert_array_equal, assert_raises, raises,
dec
)
from numpy.lib import format
tempdir = None
# Module-level setup.
def setup_module():
global tempdir
tempdir = tempfile.mkdtemp()
def teardown_module():
global tempdir
if tempdir is not None and os.path.isdir(tempdir):
shutil.rmtree(tempdir)
tempdir = None
# Generate some basic arrays to test with.
scalars = [
np.uint8,
np.int8,
np.uint16,
np.int16,
np.uint32,
np.int32,
np.uint64,
np.int64,
np.float32,
np.float64,
np.complex64,
np.complex128,
object,
]
basic_arrays = []
for scalar in scalars:
for endian in '<>':
dtype = np.dtype(scalar).newbyteorder(endian)
basic = np.arange(1500).astype(dtype)
basic_arrays.extend([
# Empty
np.array([], dtype=dtype),
# Rank-0
np.array(10, dtype=dtype),
# 1-D
basic,
# 2-D C-contiguous
basic.reshape((30, 50)),
# 2-D F-contiguous
basic.reshape((30, 50)).T,
# 2-D non-contiguous
basic.reshape((30, 50))[::-1, ::2],
])
# More complicated record arrays.
# This is the structure of the table used for plain objects:
#
# +-+-+-+
# |x|y|z|
# +-+-+-+
# Structure of a plain array description:
Pdescr = [
('x', 'i4', (2,)),
('y', 'f8', (2, 2)),
('z', 'u1')]
# A plain list of tuples with values for testing:
PbufferT = [
# x y z
([3, 2], [[6., 4.], [6., 4.]], 8),
([4, 3], [[7., 5.], [7., 5.]], 9),
]
# This is the structure of the table used for nested objects (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info |color|info |y|z|
# | +-----+--+----------------+----+--+ +----+-----+ | |
# | |value|y2|Info2 |name|z2| |Name|Value| | |
# | | | +----+-----+--+--+ | | | | | | |
# | | | |name|value|y3|z3| | | | | | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#
# The corresponding nested array description:
Ndescr = [
('x', 'i4', (2,)),
('Info', [
('value', 'c16'),
('y2', 'f8'),
('Info2', [
('name', 'S2'),
('value', 'c16', (2,)),
('y3', 'f8', (2,)),
('z3', 'u4', (2,))]),
('name', 'S2'),
('z2', 'b1')]),
('color', 'S2'),
('info', [
('Name', 'U8'),
('Value', 'c16')]),
('y', 'f8', (2, 2)),
('z', 'u1')]
NbufferT = [
# x Info color info y z
# value y2 Info2 name z2 Name Value
# name value y3 z3
([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
]
record_arrays = [
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
]
# BytesIO that reads a random number of bytes at a time
class BytesIOSRandomSize(BytesIO):
def read(self, size=None):
import random
size = random.randint(1, size)
return super(BytesIOSRandomSize, self).read(size)
def roundtrip(arr):
f = BytesIO()
format.write_array(f, arr)
f2 = BytesIO(f.getvalue())
arr2 = format.read_array(f2)
return arr2
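# Minimal usage sketch (assumed example): any serializable array should
# survive a write/read cycle unchanged, e.g.
# assert_array_equal(np.arange(10), roundtrip(np.arange(10)))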
def roundtrip_randsize(arr):
f = BytesIO()
format.write_array(f, arr)
f2 = BytesIOSRandomSize(f.getvalue())
arr2 = format.read_array(f2)
return arr2
def roundtrip_truncated(arr):
f = BytesIO()
format.write_array(f, arr)
# BytesIO is one byte short
f2 = BytesIO(f.getvalue()[0:-1])
arr2 = format.read_array(f2)
return arr2
def assert_equal_(o1, o2):
assert_(o1 == o2)
def test_roundtrip():
for arr in basic_arrays + record_arrays:
arr2 = roundtrip(arr)
yield assert_array_equal, arr, arr2
def test_roundtrip_randsize():
for arr in basic_arrays + record_arrays:
if arr.dtype != object:
arr2 = roundtrip_randsize(arr)
yield assert_array_equal, arr, arr2
def test_roundtrip_truncated():
for arr in basic_arrays:
if arr.dtype != object:
yield assert_raises, ValueError, roundtrip_truncated, arr
def test_long_str():
# check items larger than internal buffer size, gh-4027
long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1)))
long_str_arr2 = roundtrip(long_str_arr)
assert_array_equal(long_str_arr, long_str_arr2)
@dec.slow
def test_memmap_roundtrip():
# Fixme: test crashes nose on windows.
if not (sys.platform == 'win32' or sys.platform == 'cygwin'):
for arr in basic_arrays + record_arrays:
if arr.dtype.hasobject:
# Skip these since they can't be mmap'ed.
continue
# Write it out normally and through mmap.
nfn = os.path.join(tempdir, 'normal.npy')
mfn = os.path.join(tempdir, 'memmap.npy')
fp = open(nfn, 'wb')
try:
format.write_array(fp, arr)
finally:
fp.close()
fortran_order = (
arr.flags.f_contiguous and not arr.flags.c_contiguous)
ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
shape=arr.shape, fortran_order=fortran_order)
ma[...] = arr
del ma
# Check that both of these files' contents are the same.
fp = open(nfn, 'rb')
normal_bytes = fp.read()
fp.close()
fp = open(mfn, 'rb')
memmap_bytes = fp.read()
fp.close()
yield assert_equal_, normal_bytes, memmap_bytes
# Check that reading the file using memmap works.
ma = format.open_memmap(nfn, mode='r')
del ma
def test_compressed_roundtrip():
arr = np.random.rand(200, 200)
npz_file = os.path.join(tempdir, 'compressed.npz')
np.savez_compressed(npz_file, arr=arr)
arr1 = np.load(npz_file)['arr']
assert_array_equal(arr, arr1)
def test_python2_python3_interoperability():
if sys.version_info[0] >= 3:
fname = 'win64python2.npy'
else:
fname = 'python3.npy'
path = os.path.join(os.path.dirname(__file__), 'data', fname)
data = np.load(path)
assert_array_equal(data, np.ones(2))
def test_pickle_python2_python3():
# Test that loading object arrays saved on Python 2 works both on
# Python 2 and Python 3 and vice versa
data_dir = os.path.join(os.path.dirname(__file__), 'data')
if sys.version_info[0] >= 3:
xrange = range
else:
import __builtin__
xrange = __builtin__.xrange
expected = np.array([None, xrange, sixu('\u512a\u826f'),
asbytes('\xe4\xb8\x8d\xe8\x89\xaf')],
dtype=object)
for fname in ['py2-objarr.npy', 'py2-objarr.npz',
'py3-objarr.npy', 'py3-objarr.npz']:
path = os.path.join(data_dir, fname)
if (fname.endswith('.npz') and sys.version_info[0] == 2 and
sys.version_info[1] < 7):
# Reading object arrays directly from zipfile appears to fail
# on Py2.6, see cfae0143b4
continue
for encoding in ['bytes', 'latin1']:
if (sys.version_info[0] >= 3 and sys.version_info[1] < 4 and
encoding == 'bytes'):
# The bytes encoding is available starting from Python 3.4
continue
data_f = np.load(path, encoding=encoding)
if fname.endswith('.npz'):
data = data_f['x']
data_f.close()
else:
data = data_f
if sys.version_info[0] >= 3:
if encoding == 'latin1' and fname.startswith('py2'):
assert_(isinstance(data[3], str))
assert_array_equal(data[:-1], expected[:-1])
# mojibake occurs
assert_array_equal(data[-1].encode(encoding), expected[-1])
else:
assert_(isinstance(data[3], bytes))
assert_array_equal(data, expected)
else:
assert_array_equal(data, expected)
if sys.version_info[0] >= 3:
if fname.startswith('py2'):
if fname.endswith('.npz'):
data = np.load(path)
assert_raises(UnicodeError, data.__getitem__, 'x')
data.close()
data = np.load(path, fix_imports=False, encoding='latin1')
assert_raises(ImportError, data.__getitem__, 'x')
data.close()
else:
assert_raises(UnicodeError, np.load, path)
assert_raises(ImportError, np.load, path,
encoding='latin1', fix_imports=False)
def test_pickle_disallow():
data_dir = os.path.join(os.path.dirname(__file__), 'data')
path = os.path.join(data_dir, 'py2-objarr.npy')
assert_raises(ValueError, np.load, path,
allow_pickle=False, encoding='latin1')
path = os.path.join(data_dir, 'py2-objarr.npz')
f = np.load(path, allow_pickle=False, encoding='latin1')
assert_raises(ValueError, f.__getitem__, 'x')
path = os.path.join(tempdir, 'pickle-disabled.npy')
assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
allow_pickle=False)
def test_version_2_0():
f = BytesIO()
# requires more than 2 bytes for the header
dt = [(("%d" % i) * 100, float) for i in range(500)]
d = np.ones(1000, dtype=dt)
format.write_array(f, d, version=(2, 0))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', UserWarning)
format.write_array(f, d)
assert_(w[0].category is UserWarning)
f.seek(0)
n = format.read_array(f)
assert_array_equal(d, n)
# 1.0 requested but data cannot be saved this way
assert_raises(ValueError, format.write_array, f, d, (1, 0))
def test_version_2_0_memmap():
# requires more than 2 bytes for the header
dt = [(("%d" % i) * 100, float) for i in range(500)]
d = np.ones(1000, dtype=dt)
tf = tempfile.mktemp('', 'mmap', dir=tempdir)
# 1.0 requested but data cannot be saved this way
assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype,
shape=d.shape, version=(1, 0))
ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
shape=d.shape, version=(2, 0))
ma[...] = d
del ma
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', UserWarning)
ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
shape=d.shape, version=None)
assert_(w[0].category is UserWarning)
ma[...] = d
del ma
ma = format.open_memmap(tf, mode='r')
assert_array_equal(ma, d)
def test_write_version():
f = BytesIO()
arr = np.arange(1)
# These should pass.
format.write_array(f, arr, version=(1, 0))
format.write_array(f, arr)
format.write_array(f, arr, version=None)
format.write_array(f, arr)
format.write_array(f, arr, version=(2, 0))
format.write_array(f, arr)
# These should all fail.
bad_versions = [
(1, 1),
(0, 0),
(0, 1),
(2, 2),
(255, 255),
]
for version in bad_versions:
try:
format.write_array(f, arr, version=version)
except ValueError:
pass
else:
raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,))
bad_version_magic = asbytes_nested([
'\x93NUMPY\x01\x01',
'\x93NUMPY\x00\x00',
'\x93NUMPY\x00\x01',
'\x93NUMPY\x02\x00',
'\x93NUMPY\x02\x02',
'\x93NUMPY\xff\xff',
])
malformed_magic = asbytes_nested([
'\x92NUMPY\x01\x00',
'\x00NUMPY\x01\x00',
'\x93numpy\x01\x00',
'\x93MATLB\x01\x00',
'\x93NUMPY\x01',
'\x93NUMPY',
'',
])
def test_read_magic():
s1 = BytesIO()
s2 = BytesIO()
arr = np.ones((3, 6), dtype=float)
format.write_array(s1, arr, version=(1, 0))
format.write_array(s2, arr, version=(2, 0))
s1.seek(0)
s2.seek(0)
version1 = format.read_magic(s1)
version2 = format.read_magic(s2)
assert_(version1 == (1, 0))
assert_(version2 == (2, 0))
assert_(s1.tell() == format.MAGIC_LEN)
assert_(s2.tell() == format.MAGIC_LEN)
def test_read_magic_bad_magic():
for magic in malformed_magic:
f = BytesIO(magic)
yield raises(ValueError)(format.read_magic), f
def test_read_version_1_0_bad_magic():
for magic in bad_version_magic + malformed_magic:
f = BytesIO(magic)
yield raises(ValueError)(format.read_array), f
def test_bad_magic_args():
assert_raises(ValueError, format.magic, -1, 1)
assert_raises(ValueError, format.magic, 256, 1)
assert_raises(ValueError, format.magic, 1, -1)
assert_raises(ValueError, format.magic, 1, 256)
def test_large_header():
s = BytesIO()
d = {'a': 1, 'b': 2}
format.write_array_header_1_0(s, d)
s = BytesIO()
d = {'a': 1, 'b': 2, 'c': 'x'*256*256}
assert_raises(ValueError, format.write_array_header_1_0, s, d)
def test_read_array_header_1_0():
s = BytesIO()
arr = np.ones((3, 6), dtype=float)
format.write_array(s, arr, version=(1, 0))
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_1_0(s)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
def test_read_array_header_2_0():
s = BytesIO()
arr = np.ones((3, 6), dtype=float)
format.write_array(s, arr, version=(2, 0))
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_2_0(s)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
def test_bad_header():
# header of length less than 2 should fail
s = BytesIO()
assert_raises(ValueError, format.read_array_header_1_0, s)
s = BytesIO(asbytes('1'))
assert_raises(ValueError, format.read_array_header_1_0, s)
# header shorter than indicated size should fail
s = BytesIO(asbytes('\x01\x00'))
assert_raises(ValueError, format.read_array_header_1_0, s)
# headers without the exact keys required should fail
d = {"shape": (1, 2),
"descr": "x"}
s = BytesIO()
format.write_array_header_1_0(s, d)
assert_raises(ValueError, format.read_array_header_1_0, s)
d = {"shape": (1, 2),
"fortran_order": False,
"descr": "x",
"extrakey": -1}
s = BytesIO()
format.write_array_header_1_0(s, d)
assert_raises(ValueError, format.read_array_header_1_0, s)
def test_large_file_support():
from nose import SkipTest
if (sys.platform == 'win32' or sys.platform == 'cygwin'):
raise SkipTest("Unknown if Windows has sparse filesystems")
# try creating a large sparse file
tf_name = os.path.join(tempdir, 'sparse_file')
try:
# seek past end would work too, but linux truncate somewhat
# increases the chances that we have a sparse filesystem and can
# avoid actually writing 5GB
import subprocess as sp
sp.check_call(["truncate", "-s", "5368709120", tf_name])
except:
raise SkipTest("Could not create 5GB large file")
# write a small array to the end
with open(tf_name, "wb") as f:
f.seek(5368709120)
d = np.arange(5)
np.save(f, d)
# read it back
with open(tf_name, "rb") as f:
f.seek(5368709120)
r = np.load(f)
assert_array_equal(r, d)
if __name__ == "__main__":
run_module_suite()
|
mboehn/ffc
|
refs/heads/master
|
base/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
CityManager/start_flask
|
refs/heads/master
|
a01_quickstart/learn_flask_script.py
|
1
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
from flask import Flask
from flask_script import Manager, Command, Option
app = Flask(__name__)
# configure your app
manager = Manager(app)
class Hello(Command):  # add a command the default way, as a Command subclass
"""prints hello world"""
def run(self):
print("hello world")
manager.add_command('hello', Hello())
class Hello_OPT(Command):  # add a command and define options for it
def __init__(self, default_name='Xu Weiman'):
super().__init__()
self.default_name = default_name
def get_options(self):
return [
Option('-n', '--name', dest='name', default=self.default_name),
]
def run(self, name):
print("hello", name)
manager.add_command('hello_opt', Hello_OPT())
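# Illustrative invocations (assumed examples):
# python learn_flask_script.py hello_opt             # -> "hello Xu Weiman"
# python learn_flask_script.py hello_opt -n Alice    # -> "hello Alice"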
@manager.command
def hello_again(words):
"""Just say hello again"""
print("Hello again,", words)
# python learn_flask_script.py hello_again good_night
@manager.command
def say_word(words='Cool!'):
print('Hey, %s' % words)
# python learn_flask_script.py say_word good_night  # raises an exception
# python learn_flask_script.py say_word -w good_night  # or -w=good_night; -w is the first letter of the parameter name
# python learn_flask_script.py say_word --words good_night  # or --words=good_night
@manager.option('-n', '--name', dest='name', default='xuweiman', help='Your name')  # dest names the function argument the option binds to
# For multiple arguments, stack several manager.option decorators.
def say_name(name):
print("Hello", name)
# python learn_flask_script.py say_name xuweiman  # raises an exception
# python learn_flask_script.py say_name -n xuweiman  # or -n=xuweiman; --name xuweiman; --name=xuweiman
if __name__ == "__main__":
manager.run()
|
longmen21/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/profile_images/tests/test_views.py
|
14
|
"""
Test cases for the HTTP endpoints of the profile image api.
"""
from contextlib import closing
import datetime
from nose.plugins.attrib import attr
from pytz import UTC
import unittest
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse
import ddt
import mock
from mock import patch
from PIL import Image
from rest_framework.test import APITestCase, APIClient
from student.tests.factories import UserFactory
from student.tests.tests import UserSettingsEventTestMixin
from openedx.core.djangoapps.user_api.accounts.image_helpers import (
set_has_profile_image,
get_profile_image_names,
get_profile_image_storage,
)
from ..images import create_profile_images, ImageValidationError
from ..views import LOG_MESSAGE_CREATE, LOG_MESSAGE_DELETE
from .helpers import make_image_file
TEST_PASSWORD = "test"
TEST_UPLOAD_DT = datetime.datetime(2002, 1, 9, 15, 43, 01, tzinfo=UTC)
TEST_UPLOAD_DT2 = datetime.datetime(2003, 1, 9, 15, 43, 01, tzinfo=UTC)
class PatchedClient(APIClient):
"""
Patch DRF's APIClient to avoid a unicode error on file upload.
Famous last words: This is a *temporary* fix that we should be
able to remove once we upgrade Django past 1.4.
"""
def request(self, *args, **kwargs):
"""Construct an API request. """
# DRF's default test client implementation uses `six.text_type()`
# to convert the CONTENT_TYPE to `unicode`. In Django 1.4,
# this causes a `UnicodeDecodeError` when Django parses a multipart
# upload.
#
# This is the DRF code we're working around:
# https://github.com/tomchristie/django-rest-framework/blob/3.1.3/rest_framework/compat.py#L227
#
# ... and this is the Django code that raises the exception:
#
# https://github.com/django/django/blob/1.4.22/django/http/multipartparser.py#L435
#
# Django unhelpfully swallows the exception, so to the application code
# it appears as though the user didn't send any file data.
#
# This appears to be an issue only with requests constructed in the test
# suite, not with the upload code used in production.
#
if isinstance(kwargs.get("CONTENT_TYPE"), basestring):
kwargs["CONTENT_TYPE"] = str(kwargs["CONTENT_TYPE"])
return super(PatchedClient, self).request(*args, **kwargs)
class ProfileImageEndpointMixin(UserSettingsEventTestMixin):
"""
Base class / shared infrastructure for tests of profile_image "upload" and
"remove" endpoints.
"""
# subclasses should override this with the name of the view under test, as
# per the urls.py configuration.
_view_name = None
client_class = PatchedClient
def setUp(self):
super(ProfileImageEndpointMixin, self).setUp()
self.user = UserFactory.create(password=TEST_PASSWORD)
# Ensure that parental controls don't apply to this user
self.user.profile.year_of_birth = 1980
self.user.profile.save()
self.url = reverse(self._view_name, kwargs={'username': self.user.username})
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.storage = get_profile_image_storage()
self.table = 'auth_userprofile'
# this assertion is made here as a sanity check because all tests
# assume user.profile.has_profile_image is False by default
self.assertFalse(self.user.profile.has_profile_image)
# Reset the mock event tracker so that we're not considering the
# initial profile creation events.
self.reset_tracker()
def tearDown(self):
super(ProfileImageEndpointMixin, self).tearDown()
for name in get_profile_image_names(self.user.username).values():
self.storage.delete(name)
def check_images(self, exist=True):
"""
If exist is True, make sure the images physically exist in storage
with correct sizes and formats.
If exist is False, make sure none of the images exist.
"""
for size, name in get_profile_image_names(self.user.username).items():
if exist:
self.assertTrue(self.storage.exists(name))
with closing(Image.open(self.storage.path(name))) as img:
self.assertEqual(img.size, (size, size))
self.assertEqual(img.format, 'JPEG')
else:
self.assertFalse(self.storage.exists(name))
def check_response(self, response, expected_code, expected_developer_message=None, expected_user_message=None):
"""
Make sure the response has the expected code, and if that isn't 204,
optionally check the correctness of a developer-facing message.
"""
self.assertEqual(expected_code, response.status_code)
if expected_code == 204:
self.assertIsNone(response.data)
else:
if expected_developer_message is not None:
self.assertEqual(response.data.get('developer_message'), expected_developer_message)
if expected_user_message is not None:
self.assertEqual(response.data.get('user_message'), expected_user_message)
def check_has_profile_image(self, has_profile_image=True):
"""
Make sure the value of self.user.profile.has_profile_image is what we
expect.
"""
# it's necessary to reload this model from the database since save()
# would have been called on another instance.
profile = self.user.profile.__class__.objects.get(user=self.user)
self.assertEqual(profile.has_profile_image, has_profile_image)
def check_anonymous_request_rejected(self, method):
"""
Make sure that the specified method rejects access by unauthorized users.
"""
anonymous_client = APIClient()
request_method = getattr(anonymous_client, method)
response = request_method(self.url)
self.check_response(response, 401)
self.assert_no_events_were_emitted()
@attr(shard=2)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewGeneralTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the profile image endpoint
"""
_view_name = "accounts_profile_image_api"
def test_unsupported_methods(self, mock_log):
"""
Test that GET, PUT, and PATCH are not supported.
"""
self.assertEqual(405, self.client.get(self.url).status_code)
self.assertEqual(405, self.client.put(self.url).status_code)
self.assertEqual(405, self.client.patch(self.url).status_code)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
@attr(shard=2)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewPostTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the POST method of the profile_image api endpoint.
"""
_view_name = "accounts_profile_image_api"
# Use the patched version of the API client to workaround a unicode issue
# with DRF 3.1 and Django 1.4. Remove this after we upgrade Django past 1.4!
def check_upload_event_emitted(self, old=None, new=TEST_UPLOAD_DT):
"""
Make sure we emit a UserProfile event corresponding to the
profile_image_uploaded_at field changing.
"""
self.assert_user_setting_event_emitted(
setting='profile_image_uploaded_at', old=old, new=new
)
def test_anonymous_access(self, mock_log):
"""
Test that an anonymous client (not logged in) cannot call POST.
"""
self.check_anonymous_request_rejected('post')
self.assertFalse(mock_log.info.called)
@ddt.data('.jpg', '.jpeg', '.png', '.gif', '.GIF')
@patch(
'openedx.core.djangoapps.profile_images.views._make_upload_dt',
side_effect=[TEST_UPLOAD_DT, TEST_UPLOAD_DT2],
)
def test_upload_self(self, extension, _mock_make_upload_dt, mock_log):
"""
Test that an authenticated user can POST to their own upload endpoint.
"""
with make_image_file(extension=extension) as image_file:
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 204)
self.check_images()
self.check_has_profile_image()
mock_log.info.assert_called_once_with(
LOG_MESSAGE_CREATE,
{'image_names': get_profile_image_names(self.user.username).values(), 'user_id': self.user.id}
)
self.check_upload_event_emitted()
# Try another upload and make sure that a second event is emitted.
with make_image_file() as image_file:
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 204)
self.check_upload_event_emitted(old=TEST_UPLOAD_DT, new=TEST_UPLOAD_DT2)
@ddt.data(
('image/jpeg', '.jpg'),
('image/jpeg', '.jpeg'),
('image/pjpeg', '.jpg'),
('image/pjpeg', '.jpeg'),
('image/png', '.png'),
('image/gif', '.gif'),
('image/gif', '.GIF'),
)
@ddt.unpack
@patch('openedx.core.djangoapps.profile_images.views._make_upload_dt', return_value=TEST_UPLOAD_DT)
def test_upload_by_mimetype(self, content_type, extension, _mock_make_upload_dt, mock_log):
"""
Test that a user can upload raw content with the appropriate mimetype
"""
with make_image_file(extension=extension) as image_file:
data = image_file.read()
response = self.client.post(
self.url,
data,
content_type=content_type,
HTTP_CONTENT_DISPOSITION='attachment;filename=filename{}'.format(extension),
)
self.check_response(response, 204)
self.check_images()
self.check_has_profile_image()
mock_log.info.assert_called_once_with(
LOG_MESSAGE_CREATE,
{'image_names': get_profile_image_names(self.user.username).values(), 'user_id': self.user.id}
)
self.check_upload_event_emitted()
def test_upload_unsupported_mimetype(self, mock_log):
"""
Test that uploading an unsupported image as raw content fails with an
HTTP 415 Error.
"""
with make_image_file() as image_file:
data = image_file.read()
response = self.client.post(
self.url,
data,
content_type='image/tiff',
HTTP_CONTENT_DISPOSITION='attachment;filename=filename.tiff',
)
self.check_response(response, 415)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_other(self, mock_log):
"""
Test that an authenticated user cannot POST to another user's upload
endpoint.
"""
different_user = UserFactory.create(password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
different_client = APIClient()
different_client.login(username=different_user.username, password=TEST_PASSWORD)
with make_image_file() as image_file:
response = different_client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 404)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_staff(self, mock_log):
"""
Test that an authenticated staff user cannot POST to another user's upload
endpoint.
"""
staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=TEST_PASSWORD)
with make_image_file() as image_file:
response = staff_client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 403)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_missing_file(self, mock_log):
"""
Test that omitting the file entirely from the POST results in HTTP 400.
"""
response = self.client.post(self.url, {}, format='multipart')
self.check_response(
response, 400,
expected_developer_message=u"No file provided for profile image",
expected_user_message=u"No file provided for profile image",
)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_not_a_file(self, mock_log):
"""
Test that sending unexpected data that isn't a file results in HTTP
400.
"""
response = self.client.post(self.url, {'file': 'not a file'}, format='multipart')
self.check_response(
response, 400,
expected_developer_message=u"No file provided for profile image",
expected_user_message=u"No file provided for profile image",
)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_validation(self, mock_log):
"""
Test that when upload validation fails, the proper HTTP response and
messages are returned.
"""
with make_image_file() as image_file:
with mock.patch(
'openedx.core.djangoapps.profile_images.views.validate_uploaded_image',
side_effect=ImageValidationError(u"test error message")
):
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(
response, 400,
expected_developer_message=u"test error message",
expected_user_message=u"test error message",
)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
@patch('PIL.Image.open')
def test_upload_failure(self, image_open, mock_log):
"""
Test that when upload validation fails, the proper HTTP response and
messages are returned.
"""
image_open.side_effect = [Exception(u"whoops"), None]
with make_image_file() as image_file:
with self.assertRaises(Exception):
self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
@attr(shard=2)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewDeleteTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the DELETE method of the profile_image endpoint.
"""
_view_name = "accounts_profile_image_api"
def setUp(self):
super(ProfileImageViewDeleteTestCase, self).setUp()
with make_image_file() as image_file:
create_profile_images(image_file, get_profile_image_names(self.user.username))
self.check_images()
set_has_profile_image(self.user.username, True, TEST_UPLOAD_DT)
# Ignore previous event
self.reset_tracker()
def check_remove_event_emitted(self):
"""
Make sure we emit a UserProfile event corresponding to the
profile_image_uploaded_at field changing.
"""
self.assert_user_setting_event_emitted(
setting='profile_image_uploaded_at', old=TEST_UPLOAD_DT, new=None
)
def test_anonymous_access(self, mock_log):
"""
Test that an anonymous client (not logged in) cannot call DELETE.
"""
self.check_anonymous_request_rejected('delete')
self.assertFalse(mock_log.info.called)
def test_remove_self(self, mock_log):
"""
Test that an authenticated user can DELETE to remove their own profile
images.
"""
response = self.client.delete(self.url)
self.check_response(response, 204)
self.check_images(False)
self.check_has_profile_image(False)
mock_log.info.assert_called_once_with(
LOG_MESSAGE_DELETE,
{'image_names': get_profile_image_names(self.user.username).values(), 'user_id': self.user.id}
)
self.check_remove_event_emitted()
def test_remove_other(self, mock_log):
"""
Test that an authenticated user cannot DELETE to remove another user's
profile images.
"""
different_user = UserFactory.create(password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
different_client = APIClient()
different_client.login(username=different_user.username, password=TEST_PASSWORD)
response = different_client.delete(self.url)
self.check_response(response, 404)
self.check_images(True) # thumbnails should remain intact.
self.check_has_profile_image(True)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_remove_staff(self, mock_log):
"""
Test that an authenticated staff user can DELETE to remove another user's
profile images.
"""
staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=TEST_PASSWORD)
response = staff_client.delete(self.url)
self.check_response(response, 204)
self.check_images(False)
self.check_has_profile_image(False)
mock_log.info.assert_called_once_with(
LOG_MESSAGE_DELETE,
{'image_names': get_profile_image_names(self.user.username).values(), 'user_id': self.user.id}
)
self.check_remove_event_emitted()
@patch('student.models.UserProfile.save')
def test_remove_failure(self, user_profile_save, mock_log):
"""
Test that when remove validation fails, the proper HTTP response and
messages are returned.
"""
user_profile_save.side_effect = [Exception(u"whoops"), None]
with self.assertRaises(Exception):
self.client.delete(self.url)
self.check_images(True) # thumbnails should remain intact.
self.check_has_profile_image(True)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
class DeprecatedProfileImageTestMixin(ProfileImageEndpointMixin):
"""
Actual tests for DeprecatedProfileImage.*TestCase classes defined here.
Requires:
self._view_name
self._replacement_method
"""
def test_unsupported_methods(self, mock_log):
"""
Test that GET, PUT, PATCH, and DELETE are not supported.
"""
self.assertEqual(405, self.client.get(self.url).status_code)
self.assertEqual(405, self.client.put(self.url).status_code)
self.assertEqual(405, self.client.patch(self.url).status_code)
self.assertEqual(405, self.client.delete(self.url).status_code)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_post_calls_replacement_view_method(self, mock_log):
"""
Test that calls to this view pass through to the new view.
"""
with patch(self._replacement_method) as mock_method:
mock_method.return_value = HttpResponse()
self.client.post(self.url)
assert mock_method.called
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
@attr(shard=2)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class DeprecatedProfileImageUploadTestCase(DeprecatedProfileImageTestMixin, APITestCase):
"""
Tests for the deprecated profile_image upload endpoint.
Actual tests defined on DeprecatedProfileImageTestMixin
"""
_view_name = 'profile_image_upload'
_replacement_method = 'openedx.core.djangoapps.profile_images.views.ProfileImageView.post'
@attr(shard=2)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class DeprecatedProfileImageRemoveTestCase(DeprecatedProfileImageTestMixin, APITestCase):
"""
Tests for the deprecated profile_image remove endpoint.
Actual tests defined on DeprecatedProfileImageTestMixin
"""
_view_name = "profile_image_remove"
_replacement_method = 'openedx.core.djangoapps.profile_images.views.ProfileImageView.delete'
|
iDTLabssl/hr
|
refs/heads/8.0
|
__unported__/hr_schedule/__init__.py
|
28
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import hr_schedule
from . import wizard
|
twolfson/FindPlusPlus
|
refs/heads/master
|
DirectoryPanel.py
|
1
|
"""
Shamelessly copied/modified from SublimeQuickFileCreator
https://github.com/noklesta/SublimeQuickFileCreator
"""
import os
import re
import sublime
import sublime_plugin
SETTINGS_KEY = 'FindPlusPlus'
class DirectoryPanel(sublime_plugin.WindowCommand):
relative_paths = []
full_torelative_paths = {}
rel_path_start = 0
def complete(self):
# If there is a selected directory, call back with it
selected_dir = self.selected_dir
if selected_dir:
self.cb(selected_dir)
def open_panel(self, cb):
# Build out exclude pattern, paths, and save cb
self.construct_excluded_pattern()
self.build_relative_paths()
self.cb = cb
# If there is only one directory, return early with it
if len(self.relative_paths) == 1:
self.selected_dir = self.relative_paths[0]
self.selected_dir = self.full_torelative_paths[self.selected_dir]
self.complete()
# Otherwise, if there are multiple directories, open a panel to search on
elif len(self.relative_paths) > 1:
self.move_current_directory_to_top()
self.window.show_quick_panel(self.relative_paths, self.dir_selected)
# Otherwise, attempt to resolve the directory of the current file
else:
view = self.window.active_view()
self.selected_dir = os.path.dirname(view.file_name())
self.complete()
def construct_excluded_pattern(self):
patterns = [pat.replace('|', '\\') for pat in self.get_setting('excluded_dir_patterns')]
self.excluded = re.compile('|'.join(patterns))
def get_setting(self, key):
settings = None
view = self.window.active_view()
if view:
settings = self.window.active_view().settings()
if settings and settings.has(SETTINGS_KEY) and key in settings.get(SETTINGS_KEY):
# Get project-specific setting
results = settings.get(SETTINGS_KEY)[key]
else:
# Get user-specific or default setting
settings = sublime.load_settings('%s.sublime-settings' % SETTINGS_KEY)
results = settings.get(key)
return results
def build_relative_paths(self):
folders = self.window.folders()
self.relative_paths = []
self.full_torelative_paths = {}
for path in folders:
rootfolders = os.path.split(path)[-1]
self.rel_path_start = len(os.path.split(path)[0]) + 1
if not self.excluded.search(rootfolders):
self.full_torelative_paths[rootfolders] = path
self.relative_paths.append(rootfolders)
for base, dirs, files in os.walk(path):
for dir in dirs:
relative_path = os.path.join(base, dir)[self.rel_path_start:]
if not self.excluded.search(relative_path):
self.full_torelative_paths[relative_path] = os.path.join(base, dir)
self.relative_paths.append(relative_path)
def move_current_directory_to_top(self):
view = self.window.active_view()
if view and view.file_name():
cur_dir = os.path.dirname(view.file_name())[self.rel_path_start:]
if cur_dir in self.full_torelative_paths:
i = self.relative_paths.index(cur_dir)
self.relative_paths.insert(0, self.relative_paths.pop(i))
else:
self.relative_paths.insert(0, os.path.dirname(view.file_name()))
return
def dir_selected(self, selected_index):
if selected_index != -1:
self.selected_dir = self.relative_paths[selected_index]
self.selected_dir = self.full_torelative_paths[self.selected_dir]
self.complete()
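# A settings sketch (hypothetical values, not shipped defaults) clarifying how
# excluded_dir_patterns is consumed: construct_excluded_pattern() replaces '|'
# with '\\', so a pattern written as "|.git" in the settings file becomes the
# regex r"\.git" before compilation.
#
# Example FindPlusPlus.sublime-settings content:
#
#     {
#         "excluded_dir_patterns": ["|.git", "|.svn", "node_modules"]
#     }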
|
NeCTAR-RC/murano
|
refs/heads/master
|
murano/tests/unit/api/middleware/test_ssl.py
|
4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api.middleware import ssl
from murano.tests.unit import base
class SSLMiddlewareTest(base.MuranoTestCase):
def test_ssl_middleware_default_forwarded_proto(self):
middleware = ssl.SSLMiddleware(None)
request = webob.Request.blank('/environments',
headers={'X-Forwarded-Proto': 'https'})
middleware.process_request(request)
self.assertEqual('https',
request.environ['wsgi.url_scheme'])
def test_ssl_middleware_custom_forwarded_proto(self):
self.override_config('secure_proxy_ssl_header',
'X-My-Forwarded-Proto')
middleware = ssl.SSLMiddleware(None)
request = webob.Request.blank('/environments',
headers={
'X-My-Forwarded-Proto': 'https'})
middleware.process_request(request)
self.assertEqual('https',
request.environ['wsgi.url_scheme'])
def test_ssl_middleware_plain_request(self):
middleware = ssl.SSLMiddleware(None)
request = webob.Request.blank('/environments', headers={})
middleware.process_request(request)
self.assertEqual('http',
request.environ['wsgi.url_scheme'])
|
a-d-j-i/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_groups/protocolentities/notification_groups.py
|
32
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from yowsup.layers.protocol_notifications.protocolentities import NotificationProtocolEntity
class GroupsNotificationProtocolEntity(NotificationProtocolEntity):
'''
<notification notify="WhatsApp" id="{{id}}" t="1420402514" participant="{{participant_jid}}" from="{{group_jid}}" type="w:gp2">
</notification>
'''
def __init__(self, _id, _from, timestamp, notify, participant, offline):
super(GroupsNotificationProtocolEntity, self).__init__("w:gp2", _id, _from, timestamp, notify, offline)
self.setParticipant(participant)
self.setGroupId(_from)
def setParticipant(self, participant):
self._participant = participant
def getParticipant(self, full = True):
return self._participant if full else self._participant.split('@')[0]
def getGroupId(self):
return self._id
def setGroupId(self, groupId):
self._id = groupId
def __str__(self):
out = super(GroupsNotificationProtocolEntity, self).__str__()
out += "Participant: %s\n" % self.getParticipant()
return out
def toProtocolTreeNode(self):
node = super(GroupsNotificationProtocolEntity, self).toProtocolTreeNode()
node.setAttribute("participant", self.getParticipant())
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = super(GroupsNotificationProtocolEntity, GroupsNotificationProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = GroupsNotificationProtocolEntity
entity.setParticipant(node.getAttributeValue("participant"))
entity.setGroupId(node.getAttributeValue("from"))
return entity
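# A minimal construction sketch (hypothetical jids and timestamp; assumes the
# NotificationProtocolEntity base accepts the arguments forwarded above):
#
#     entity = GroupsNotificationProtocolEntity(
#         "notif-1", "123456789@g.us", "1420402514",
#         "WhatsApp", "555000111@s.whatsapp.net", None)
#     entity.getParticipant(full=False)  # -> "555000111"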
|
felixonmars/braintree_python
|
refs/heads/master
|
braintree/risk_data.py
|
5
|
from braintree.attribute_getter import AttributeGetter
class RiskData(AttributeGetter):
pass
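# A minimal usage sketch, assuming AttributeGetter's behavior of copying the
# keys of the given dict onto the instance as attributes (the gateway normally
# builds this object from a transaction response, not by hand):
if __name__ == "__main__":
    risk = RiskData({"id": "risk_1", "decision": "Approve"})
    print(risk.decision)  # -> Approve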
|
venkey-ariv/fullerite
|
refs/heads/master
|
src/diamond/collectors/amavis/amavis.py
|
20
|
# coding=utf-8
"""
Collector that reports amavis metrics as reported by amavisd-agent
#### Dependencies
* amavisd-agent must be present in PATH
"""
import os
import subprocess
import re
import diamond.collector
import diamond.convertor
class AmavisCollector(diamond.collector.Collector):
# Judging from the source of amavisd-agent, the three interesting value
# formats are these: ("x y/h", "xMB yMB/h", "x s y s/msg"), plus the
# special sysUpTime line, so, ugly as it is to hardcode them this way,
# these patterns should be right.
#
# The other option would be to directly read and decode amavis' berkeley
# db, and I don't even want to get there
matchers = [
re.compile(r'^\s*(?P<name>sysUpTime)\s+TimeTicks\s+(?P<time>\d+)\s+'
r'\([\w:\., ]+\)\s*$'),
re.compile(r'^\s*(?P<name>[\w]+)\s+(?P<time>[\d]+) s\s+'
r'(?P<frequency>[\d.]+) s/msg\s+\([\w]+\)\s*$'),
re.compile(r'^\s*(?P<name>[\w.-]+)\s+(?P<count>[\d]+)\s+'
r'(?P<frequency>[\d.]+)/h\s+(?P<percentage>[\d.]+) %'
r'\s\([\w]+\)\s*$'),
re.compile(r'^\s*(?P<name>[\w.-]+)\s+(?P<size>[\d]+)MB\s+'
r'(?P<frequency>[\d.]+)MB/h\s+(?P<percentage>[\d.]+) %'
r'\s\([\w]+\)\s*$'),
]
def get_default_config_help(self):
config_help = super(AmavisCollector, self).get_default_config_help()
config_help.update({
'amavisd_exe': 'The path to amavisd-agent',
'use_sudo': 'Call amavisd-agent using sudo',
'sudo_exe': 'The path to sudo',
'sudo_user': 'The user to use if using sudo',
})
return config_help
def get_default_config(self):
config = super(AmavisCollector, self).get_default_config()
config.update({
'path': 'amavis',
'amavisd_exe': '/usr/sbin/amavisd-agent',
'use_sudo': False,
'sudo_exe': '/usr/bin/sudo',
'sudo_user': 'amavis',
})
return config
def collect(self):
"""
Collect amavis stats as reported by amavisd-agent
"""
try:
if self.config['use_sudo']:
# Use -u instead of --user as the former is more portable. Not
# all versions of sudo support the long form --user.
cmdline = [
self.config['sudo_exe'], '-u', self.config['sudo_user'],
'--', self.config['amavisd_exe'], '-c', '1'
]
else:
cmdline = [self.config['amavisd_exe'], '-c', '1']
agent = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
agent_out = agent.communicate()[0]
lines = agent_out.strip().split(os.linesep)
for line in lines:
for rex in self.matchers:
res = rex.match(line)
if res:
groups = res.groupdict()
name = groups['name']
for metric, value in groups.items():
if metric == 'name':
continue
mtype = 'GAUGE'
if metric in ('count', 'time'):
mtype = 'COUNTER'
self.publish("{0}.{1}".format(name, metric),
value, metric_type=mtype)
except OSError as err:
self.log.error("Could not run %s: %s",
self.config['amavisd_exe'],
err)
return None
return True
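# A quick matcher sanity check (the sample lines below are hand-written in the
# formats described above, not captured amavisd-agent output; running this
# still needs the diamond package imported at the top of the module):
if __name__ == '__main__':
    samples = [
        'sysUpTime TimeTicks 1234567 (0:03:25.67)',
        'TimeElapsedTotal 123 s 0.456 s/msg (glob)',
        'ContentCleanMsgs 1234 56.7/h 78.9 % (clean)',
        'InSizeMB 512MB 21.3MB/h 64.0 % (in)',
    ]
    for line in samples:
        for rex in AmavisCollector.matchers:
            res = rex.match(line)
            if res:
                print(res.groupdict())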
|
TomasTomecek/trello-reporter
|
refs/heads/master
|
trello_reporter/harvesting/apps.py
|
2
|
from __future__ import unicode_literals
from django.apps import AppConfig
class HarvestingConfig(AppConfig):
name = 'harvesting'
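# Wiring sketch (hypothetical settings module): with name = 'harvesting' the
# package must be importable as `harvesting`, and the config would then be
# referenced from INSTALLED_APPS, e.g.
#
#     INSTALLED_APPS = [
#         ...,
#         'harvesting.apps.HarvestingConfig',
#     ]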
|
sergiocazzolato/snapd
|
refs/heads/master
|
tests/lib/snaps/store/test-snapd-autopilot-consumer/provider.py
|
8
|
#!/usr/bin/env python3
from gi.repository import GLib
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
class DBusProvider(dbus.service.Object):
def __init__(self):
bus = dbus.SessionBus()
bus_name = dbus.service.BusName(
"com.canonical.Autopilot.Introspection", bus=bus
)
dbus.service.Object.__init__(
self, bus_name, "/com/canonical/Autopilot/Introspection"
)
@dbus.service.method(
dbus_interface="com.canonical.Autopilot.Introspection", out_signature="s"
)
def GetVersion(self):
return "my-ap-version"
@dbus.service.method(
dbus_interface="com.canonical.Autopilot.Introspection", out_signature="s"
)
def GetState(self):
return "my-ap-state"
if __name__ == "__main__":
DBusProvider()
loop = GLib.MainLoop()
loop.run()
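# A minimal client-side sketch (hypothetical consumer; assumes the provider
# above is already running on the session bus):
#
#     import dbus
#     bus = dbus.SessionBus()
#     obj = bus.get_object(
#         "com.canonical.Autopilot.Introspection",
#         "/com/canonical/Autopilot/Introspection",
#     )
#     iface = dbus.Interface(obj, "com.canonical.Autopilot.Introspection")
#     print(iface.GetVersion())  # -> "my-ap-version"
#     print(iface.GetState())    # -> "my-ap-state"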
|
ZazieTheBeast/oscar
|
refs/heads/master
|
src/oscar/apps/basket/forms.py
|
6
|
from django import forms
from django.conf import settings
from django.db.models import Sum
from django.forms.models import BaseModelFormSet, modelformset_factory
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_model
from oscar.forms import widgets
Line = get_model('basket', 'line')
Basket = get_model('basket', 'basket')
Product = get_model('catalogue', 'product')
class BasketLineForm(forms.ModelForm):
save_for_later = forms.BooleanField(
initial=False, required=False, label=_('Save for Later'))
def __init__(self, strategy, *args, **kwargs):
super(BasketLineForm, self).__init__(*args, **kwargs)
self.instance.strategy = strategy
def clean_quantity(self):
qty = self.cleaned_data['quantity']
if qty > 0:
self.check_max_allowed_quantity(qty)
self.check_permission(qty)
return qty
def check_max_allowed_quantity(self, qty):
# Since `Basket.is_quantity_allowed` checks the quantity being added
# against the total number of products already in the basket, we send the
# difference between the current quantity and the updated one rather than
# the updated quantity itself. Thus, if the product is already in the
# basket, it isn't added a second time; only the number of items is
# updated. For example, if the line currently holds 2 items and the user
# submits 5, only the delta of 3 is checked.
qty_delta = qty - self.instance.quantity
is_allowed, reason = self.instance.basket.is_quantity_allowed(qty_delta)
if not is_allowed:
raise forms.ValidationError(reason)
def check_permission(self, qty):
policy = self.instance.purchase_info.availability
is_available, reason = policy.is_purchase_permitted(
quantity=qty)
if not is_available:
raise forms.ValidationError(reason)
class Meta:
model = Line
fields = ['quantity']
class BaseBasketLineFormSet(BaseModelFormSet):
def __init__(self, strategy, *args, **kwargs):
self.strategy = strategy
super(BaseBasketLineFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
return super(BaseBasketLineFormSet, self)._construct_form(
i, strategy=self.strategy, **kwargs)
def _should_delete_form(self, form):
"""
Quantity of zero is treated as if the user checked the DELETE checkbox,
which results in the basket line being deleted
"""
if super(BaseBasketLineFormSet, self)._should_delete_form(form):
return True
if self.can_delete and 'quantity' in form.cleaned_data:
return form.cleaned_data['quantity'] == 0
BasketLineFormSet = modelformset_factory(
Line, form=BasketLineForm, formset=BaseBasketLineFormSet, extra=0,
can_delete=True)
class SavedLineForm(forms.ModelForm):
move_to_basket = forms.BooleanField(initial=False, required=False,
label=_('Move to Basket'))
class Meta:
model = Line
fields = ('id', 'move_to_basket')
def __init__(self, strategy, basket, *args, **kwargs):
self.strategy = strategy
self.basket = basket
super(SavedLineForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(SavedLineForm, self).clean()
if not cleaned_data['move_to_basket']:
# skip further validation (see issue #666)
return cleaned_data
# Get total quantity of all lines with this product (there's normally
# only one but there can be more if you allow product options).
lines = self.basket.lines.filter(product=self.instance.product)
current_qty = lines.aggregate(Sum('quantity'))['quantity__sum'] or 0
desired_qty = current_qty + self.instance.quantity
result = self.strategy.fetch_for_product(self.instance.product)
is_available, reason = result.availability.is_purchase_permitted(
quantity=desired_qty)
if not is_available:
raise forms.ValidationError(reason)
return cleaned_data
class BaseSavedLineFormSet(BaseModelFormSet):
def __init__(self, strategy, basket, *args, **kwargs):
self.strategy = strategy
self.basket = basket
super(BaseSavedLineFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
return super(BaseSavedLineFormSet, self)._construct_form(
i, strategy=self.strategy, basket=self.basket, **kwargs)
SavedLineFormSet = modelformset_factory(Line, form=SavedLineForm,
formset=BaseSavedLineFormSet, extra=0,
can_delete=True)
class BasketVoucherForm(forms.Form):
code = forms.CharField(max_length=128, label=_('Code'))
def __init__(self, *args, **kwargs):
super(BasketVoucherForm, self).__init__(*args, **kwargs)
def clean_code(self):
return self.cleaned_data['code'].strip().upper()
class AddToBasketForm(forms.Form):
quantity = forms.IntegerField(initial=1, min_value=1, label=_('Quantity'))
def __init__(self, basket, product, *args, **kwargs):
# Note, the product passed in here isn't necessarily the product being
# added to the basket. For child products, it is the *parent* product
# that gets passed to the form. An optional child_id param is passed
# to indicate the ID of the child product being added to the basket.
self.basket = basket
self.parent_product = product
super(AddToBasketForm, self).__init__(*args, **kwargs)
# Dynamically build fields
if product.is_parent:
self._create_parent_product_fields(product)
self._create_product_fields(product)
# Dynamic form building methods
def _create_parent_product_fields(self, product):
"""
Adds the fields for a "group"-type product (e.g. a parent product with a
list of children).
Currently requires that a stock record exists for the children.
"""
choices = []
disabled_values = []
for child in product.children.all():
# Build a description of the child, including any pertinent
# attributes
attr_summary = child.attribute_summary
if attr_summary:
summary = attr_summary
else:
summary = child.get_title()
# Check if it is available to buy
info = self.basket.strategy.fetch_for_product(child)
if not info.availability.is_available_to_buy:
disabled_values.append(child.id)
choices.append((child.id, summary))
self.fields['child_id'] = forms.ChoiceField(
choices=tuple(choices), label=_("Variant"),
widget=widgets.AdvancedSelect(disabled_values=disabled_values))
def _create_product_fields(self, product):
"""
Add the product option fields.
"""
for option in product.options:
self._add_option_field(product, option)
def _add_option_field(self, product, option):
"""
Creates the appropriate form field for the product option.
This is designed to be overridden so that specific widgets can be used
for certain types of options.
"""
kwargs = {'required': option.is_required}
self.fields[option.code] = forms.CharField(**kwargs)
# Cleaning
def clean_child_id(self):
try:
child = self.parent_product.children.get(
id=self.cleaned_data['child_id'])
except Product.DoesNotExist:
raise forms.ValidationError(
_("Please select a valid product"))
# To avoid duplicate SQL queries, we cache a copy of the loaded child
# product as we're going to need it later.
self.child_product = child
return self.cleaned_data['child_id']
def clean_quantity(self):
# Check that the proposed new line quantity is sensible
qty = self.cleaned_data['quantity']
basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD
if basket_threshold:
total_basket_quantity = self.basket.num_items
max_allowed = basket_threshold - total_basket_quantity
if qty > max_allowed:
raise forms.ValidationError(
_("Due to technical limitations we are not able to ship"
" more than %(threshold)d items in one order. Your"
" basket currently has %(basket)d items.")
% {'threshold': basket_threshold,
'basket': total_basket_quantity})
return qty
@property
def product(self):
"""
The actual product being added to the basket
"""
# Note, the child product attribute is saved in the clean_child_id
# method
return getattr(self, 'child_product', self.parent_product)
def clean(self):
info = self.basket.strategy.fetch_for_product(self.product)
# Check currencies are sensible
if (self.basket.currency and
info.price.currency != self.basket.currency):
raise forms.ValidationError(
_("This product cannot be added to the basket as its currency "
"isn't the same as other products in your basket"))
# Check user has permission to add the desired quantity to their
# basket.
current_qty = self.basket.product_quantity(self.product)
desired_qty = current_qty + self.cleaned_data.get('quantity', 1)
is_permitted, reason = info.availability.is_purchase_permitted(
desired_qty)
if not is_permitted:
raise forms.ValidationError(reason)
return self.cleaned_data
# Helpers
def cleaned_options(self):
"""
Return submitted options in a clean format
"""
options = []
for option in self.parent_product.options:
if option.code in self.cleaned_data:
options.append({
'option': option,
'value': self.cleaned_data[option.code]})
return options
class SimpleAddToBasketForm(AddToBasketForm):
"""
Simplified version of the add to basket form where the quantity is
defaulted to 1 and rendered in a hidden widget
"""
quantity = forms.IntegerField(
initial=1, min_value=1, widget=forms.HiddenInput, label=_('Quantity'))
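# A minimal view-side sketch (hypothetical view; the basket, product and POST
# data come from Oscar's usual request machinery):
#
#     def add_to_basket(request, product):
#         form = AddToBasketForm(
#             basket=request.basket, product=product, data=request.POST)
#         if form.is_valid():
#             request.basket.add_product(
#                 form.product, form.cleaned_data['quantity'],
#                 form.cleaned_options())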
|
scorphus/django
|
refs/heads/master
|
tests/gis_tests/geoadmin/models.py
|
300
|
from django.contrib.gis.gdal import HAS_GDAL
from django.utils.encoding import python_2_unicode_compatible
from ..admin import admin
from ..models import models
@python_2_unicode_compatible
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
class Meta:
app_label = 'geoadmin'
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
site = admin.AdminSite(name='admin_gis')
if HAS_GDAL:
site.register(City, admin.OSMGeoAdmin)
|
zzicewind/nova
|
refs/heads/master
|
nova/tests/functional/v3/test_pci.py
|
3
|
# Copyright 2013 Intel.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
import testtools
from nova import db
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova.tests.functional.v3 import api_sample_base
from nova.tests.functional.v3 import test_servers
skip_msg = "Bug 1426241"
fake_db_dev_1 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': '0000:04:10.0',
'vendor_id': '8086',
'numa_node': 0,
'product_id': '1520',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'status': 'available',
'dev_id': 'pci_0000_04_10_0',
'label': 'label_8086_1520',
'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
'request_id': None,
'extra_info': '{"key1": "value1", "key2": "value2"}'
}
fake_db_dev_2 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': '0000:04:10.1',
'vendor_id': '8086',
'numa_node': 1,
'product_id': '1520',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'status': 'available',
'dev_id': 'pci_0000_04_10_1',
'label': 'label_8086_1520',
'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
'request_id': None,
'extra_info': '{"key3": "value3", "key4": "value4"}'
}
class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedHypervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extra_extensions_to_load = ['os-hypervisors']
extension_name = 'os-pci'
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
super(ExtendedHypervisorPciSampleJsonTest, self).setUp()
cpu_info = collections.OrderedDict([
('arch', 'x86_64'),
('model', 'Nehalem'),
('vendor', 'Intel'),
('features', ['pge', 'clflush']),
('topology', {
'cores': 1,
'threads': 1,
'sockets': 4,
}),
])
self.fake_compute_node = objects.ComputeNode(
cpu_info=jsonutils.dumps(cpu_info),
current_workload=0,
disk_available_least=0,
host_ip="1.1.1.1",
state="up",
status="enabled",
free_disk_gb=1028,
free_ram_mb=7680,
hypervisor_hostname="fake-mini",
hypervisor_type="fake",
hypervisor_version=1000,
id=1,
local_gb=1028,
local_gb_used=0,
memory_mb=8192,
memory_mb_used=512,
running_vms=0,
vcpus=1,
vcpus_used=0,
service_id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
pci_device_pools=pci_device_pool.from_pci_stats(
{"count": 5,
"vendor_id": "8086",
"product_id": "1520",
"keya": "valuea",
"key1": "value1",
"numa_node": 1}),)
self.fake_service = objects.Service(
id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
disabled=False,
disabled_reason=None)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNode.get_by_id")
def test_pci_show(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = self.fake_compute_node
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-show-resp',
subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNodeList.get_all")
def test_pci_detail(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = [self.fake_compute_node]
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/detail')
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-detail-resp',
subs, response, 200)
class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def _fake_pci_device_get_by_id(self, context, id):
return fake_db_dev_1
def _fake_pci_device_get_all_by_node(self, context, id):
return [fake_db_dev_1, fake_db_dev_2]
def test_pci_show(self):
self.stubs.Set(db, 'pci_device_get_by_id',
self._fake_pci_device_get_by_id)
response = self._do_get('os-pci/1')
subs = self._get_regexes()
self._verify_response('pci-show-resp', subs, response, 200)
def test_pci_index(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci')
subs = self._get_regexes()
self._verify_response('pci-index-resp', subs, response, 200)
def test_pci_detail(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci/detail')
subs = self._get_regexes()
self._verify_response('pci-detail-resp', subs, response, 200)
|
lukeiwanski/tensorflow-opencl
|
refs/heads/master
|
tensorflow/python/kernel_tests/dynamic_partition_op_test.py
|
97
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DynamicPartition op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DynamicPartitionTest(test.TestCase):
def testSimpleOneDimensional(self):
with self.test_session() as sess:
data = constant_op.constant([0, 13, 2, 39, 4, 17])
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertAllEqual([0, 13], partition_vals[0])
self.assertAllEqual([17], partition_vals[1])
self.assertAllEqual([2, 4], partition_vals[2])
self.assertAllEqual([39], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` vectors of unknown length.
self.assertEqual([None], partitions[0].get_shape().as_list())
self.assertEqual([None], partitions[1].get_shape().as_list())
self.assertEqual([None], partitions[2].get_shape().as_list())
self.assertEqual([None], partitions[3].get_shape().as_list())
def testSimpleTwoDimensional(self):
with self.test_session() as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14], [15, 16, 17]])
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
self.assertAllEqual([[15, 16, 17]], partition_vals[1])
self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
self.assertAllEqual([[9, 10, 11]], partition_vals[3])
# Matrix data input to DynamicPartition results in
# `num_partitions` matrices with an unknown number of rows, and 3 columns.
self.assertEqual([None, 3], partitions[0].get_shape().as_list())
self.assertEqual([None, 3], partitions[1].get_shape().as_list())
self.assertEqual([None, 3], partitions[2].get_shape().as_list())
self.assertEqual([None, 3], partitions[3].get_shape().as_list())
def testHigherRank(self):
np.random.seed(7)
with self.test_session() as sess:
for n in 2, 3:
for shape in (4,), (4, 5), (4, 5, 2):
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
for extra_shape in (), (6,), (6, 7):
data = np.random.randn(*(shape + extra_shape))
partitions_t = constant_op.constant(partitions, dtype=dtypes.int32)
data_t = constant_op.constant(data)
outputs = data_flow_ops.dynamic_partition(
data_t, partitions_t, num_partitions=n)
self.assertEqual(n, len(outputs))
outputs_val = sess.run(outputs)
for i, output in enumerate(outputs_val):
self.assertAllEqual(output, data[partitions == i])
# Test gradients
outputs_grad = [7 * output for output in outputs_val]
grads = gradients_impl.gradients(outputs, [data_t, partitions_t],
outputs_grad)
self.assertEqual(grads[1], None) # Partitions has no gradients
self.assertAllEqual(7 * data, sess.run(grads[0]))
def testErrorIndexOutOfRange(self):
with self.test_session() as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
indices = constant_op.constant([0, 2, 99, 2, 2])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
sess.run(partitions)
def testScalarIndexOutOfRange(self):
with self.test_session() as sess:
bad = 17
data = np.zeros(5)
partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
sess.run(partitions)
def testHigherRankIndexOutOfRange(self):
with self.test_session() as sess:
shape = (2, 3)
indices = array_ops.placeholder(shape=shape, dtype=np.int32)
data = np.zeros(shape + (5,))
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=7)
for i in xrange(2):
for j in xrange(3):
bad = np.zeros(shape, dtype=np.int32)
bad[i, j] = 17
with self.assertRaisesOpError(
r"partitions\[%d,%d\] = 17 is not in \[0, 7\)" % (i, j)):
sess.run(partitions, feed_dict={indices: bad})
def testErrorWrongDimsIndices(self):
data = constant_op.constant([[0], [1], [2]])
indices = constant_op.constant([[0], [0]])
with self.assertRaises(ValueError):
data_flow_ops.dynamic_partition(data, indices, num_partitions=4)
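# A pure-NumPy reference sketch for what dynamic_partition computes; it
# mirrors the `data[partitions == i]` check used in testHigherRank above.
def _dynamic_partition_reference(data, partitions, num_partitions):
    data = np.asarray(data)
    partitions = np.asarray(partitions)
    return [data[partitions == i] for i in range(num_partitions)]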
if __name__ == "__main__":
test.main()
|
haiiiiiyun/scrapy
|
refs/heads/master
|
scrapy/utils/multipart.py
|
211
|
"""
Transitional module for moving to the w3lib library.
For new code, always import from w3lib.form instead of this module
"""
from w3lib.form import *
|
chrismeyersfsu/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/package.py
|
47
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ansible, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: package
version_added: 2.0
author:
- Ansible Inc
maintainers:
- Ansible Core Team
short_description: Generic OS package manager
description:
- Installs, upgrades and removes packages using the underlying OS package manager.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0)."
- "Be aware that packages are not always named the same and this module will not 'translate' them per distro."
required: true
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: true
use:
description:
- The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
- You should only use this field if the automatic selection is not working for some reason.
required: false
default: auto
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
- This module actually calls the pertinent package modules for each system (apt, yum, etc).
'''
EXAMPLES = '''
- name: install the latest version of ntpdate
package:
name: ntpdate
state: latest
# This uses a variable as this changes per distribution.
- name: remove the apache package
package:
name: "{{ apache }}"
state: absent
'''
|
romain-li/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/management/__init__.py
|
12133432
| |
mlperf/training_results_v0.7
|
refs/heads/master
|
DellEMC/benchmarks/transformer/implementation/pytorch/fairseq/data/token_block_dataset.py
|
6
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import numpy as np
import torch
class TokenBlockDataset(torch.utils.data.Dataset):
"""Break a 1d tensor of tokens into blocks.
The blocks are fetched from the original tensor so no additional memory is allocated.
Args:
tokens: 1d tensor of tokens to break into blocks
sizes: sentence lengths (required for 'complete' and 'eos')
block_size: maximum block size (ignored in 'eos' break mode)
break_mode: Mode used for breaking tokens. Values can be one of:
- 'none': break tokens into equally sized blocks (up to block_size)
- 'complete': break tokens into blocks (up to block_size) such that
blocks contain complete sentences, although block_size may be
exceeded if some sentences exceed block_size
- 'eos': each block contains one sentence (block_size is ignored)
include_targets: return next tokens as targets
"""
def __init__(self, tokens, sizes, block_size, break_mode=None, include_targets=False):
super().__init__()
self.tokens = tokens
self.total_size = len(tokens)
self.include_targets = include_targets
self.slice_indices = []
if break_mode is None or break_mode == 'none':
length = math.ceil(len(tokens) / block_size)
def block_at(i):
start = i * block_size
end = min(start + block_size, len(tokens))
return (start, end)
self.slice_indices = [block_at(i) for i in range(length)]
elif break_mode == 'complete':
assert sizes is not None and sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
tok_idx = 0
sz_idx = 0
curr_size = 0
while sz_idx < len(sizes):
if curr_size + sizes[sz_idx] <= block_size or curr_size == 0:
curr_size += sizes[sz_idx]
sz_idx += 1
else:
self.slice_indices.append((tok_idx, tok_idx + curr_size))
tok_idx += curr_size
curr_size = 0
if curr_size > 0:
self.slice_indices.append((tok_idx, tok_idx + curr_size))
elif break_mode == 'eos':
assert sizes is not None and sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
curr = 0
for sz in sizes:
# skip sentences of length 1 (which would contain just the eos token)
if sz > 1:
self.slice_indices.append((curr, curr + sz))
curr += sz
else:
raise ValueError('Invalid break_mode: ' + break_mode)
self.sizes = np.array([e - s for s, e in self.slice_indices])
def __getitem__(self, index):
s, e = self.slice_indices[index]
item = torch.LongTensor(self.tokens[s:e])
if self.include_targets:
# target is the sentence; for the source, rotate the item one token to the left (so it starts with eos)
if s == 0:
source = np.concatenate([self.tokens[-1:], self.tokens[0:e - 1]])
else:
source = self.tokens[s - 1:e - 1]
return torch.LongTensor(source), item
return item
def __len__(self):
return len(self.slice_indices)
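# A small break-mode sketch (toy token stream, values worked out by hand from
# the 'none' branch above):
#
#     ds = TokenBlockDataset(tokens=list(range(10)), sizes=None,
#                            block_size=3, break_mode='none')
#     ds.slice_indices  # -> [(0, 3), (3, 6), (6, 9), (9, 10)]
#     ds[0]             # -> tensor([0, 1, 2])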
|
MounirMesselmeni/django
|
refs/heads/master
|
tests/middleware_exceptions/tests.py
|
119
|
import sys
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.core.signals import got_request_exception
from django.http import HttpResponse
from django.template import engines
from django.template.response import TemplateResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
class TestException(Exception):
pass
# A middleware base class that tracks which methods have been called
class TestMiddleware(object):
def __init__(self):
self.process_request_called = False
self.process_view_called = False
self.process_response_called = False
self.process_template_response_called = False
self.process_exception_called = False
def process_request(self, request):
self.process_request_called = True
def process_view(self, request, view_func, view_args, view_kwargs):
self.process_view_called = True
def process_template_response(self, request, response):
self.process_template_response_called = True
return response
def process_response(self, request, response):
self.process_response_called = True
return response
def process_exception(self, request, exception):
self.process_exception_called = True
# Middleware examples that do the right thing
class RequestMiddleware(TestMiddleware):
def process_request(self, request):
super(RequestMiddleware, self).process_request(request)
return HttpResponse('Request Middleware')
class ViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(ViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
return HttpResponse('View Middleware')
class ResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(ResponseMiddleware, self).process_response(request, response)
return HttpResponse('Response Middleware')
class TemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(TemplateResponseMiddleware, self).process_template_response(request, response)
template = engines['django'].from_string('Template Response Middleware')
return TemplateResponse(request, template)
class ExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(ExceptionMiddleware, self).process_exception(request, exception)
return HttpResponse('Exception Middleware')
# Sample middlewares that raise exceptions
class BadRequestMiddleware(TestMiddleware):
def process_request(self, request):
super(BadRequestMiddleware, self).process_request(request)
raise TestException('Test Request Exception')
class BadViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(BadViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
raise TestException('Test View Exception')
class BadTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(BadTemplateResponseMiddleware, self).process_template_response(request, response)
raise TestException('Test Template Response Exception')
class BadResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(BadResponseMiddleware, self).process_response(request, response)
raise TestException('Test Response Exception')
class BadExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(BadExceptionMiddleware, self).process_exception(request, exception)
raise TestException('Test Exception Exception')
# Sample middlewares that fail to return an HttpResponse
class NoTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(NoTemplateResponseMiddleware, self).process_template_response(request, response)
class NoResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(NoResponseMiddleware, self).process_response(request, response)
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class BaseMiddlewareExceptionTest(SimpleTestCase):
def setUp(self):
self.exceptions = []
got_request_exception.connect(self._on_request_exception)
self.client.handler.load_middleware()
def tearDown(self):
got_request_exception.disconnect(self._on_request_exception)
self.exceptions = []
def _on_request_exception(self, sender, request, **kwargs):
self.exceptions.append(sys.exc_info())
def _add_middleware(self, middleware):
self.client.handler._request_middleware.insert(0, middleware.process_request)
self.client.handler._view_middleware.insert(0, middleware.process_view)
self.client.handler._template_response_middleware.append(middleware.process_template_response)
self.client.handler._response_middleware.append(middleware.process_response)
self.client.handler._exception_middleware.append(middleware.process_exception)
def assert_exceptions_handled(self, url, errors, extra_error=None):
try:
self.client.get(url)
except TestException:
# The test client intentionally re-raises any exceptions raised
# during request handling. Hence the actual check that the exception
# was properly handled relies on the got_request_exception signal
# being sent.
pass
except Exception as e:
if type(extra_error) != type(e):
self.fail("Unexpected exception: %s" % e)
self.assertEqual(len(self.exceptions), len(errors))
for i, error in enumerate(errors):
exception, value, tb = self.exceptions[i]
self.assertEqual(value.args, (error, ))
def assert_middleware_usage(self, middleware, request, view, template_response, response, exception):
self.assertEqual(middleware.process_request_called, request)
self.assertEqual(middleware.process_view_called, view)
self.assertEqual(middleware.process_template_response_called, template_response)
self.assertEqual(middleware.process_response_called, response)
self.assertEqual(middleware.process_exception_called, exception)
class MiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_middleware(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, True, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_exception_middleware(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view'], Exception())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_template_response_error(self):
middleware = TestMiddleware()
self._add_middleware(middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response_error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(middleware, True, True, True, True, False)
@override_settings(
MIDDLEWARE_CLASSES=['middleware_exceptions.middleware.ProcessExceptionMiddleware'],
)
def test_exception_in_render_passed_to_process_exception(self):
        # Repopulate the middleware lists, since setUp() already populated
        # them before the MIDDLEWARE_CLASSES setting was overridden.
self.client.handler.load_middleware()
response = self.client.get('/middleware_exceptions/exception_in_render/')
self.assertEqual(response.content, b'Exception caught')
class BadMiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/template_response/',
['Test Template Response Exception']
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view', 'Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead.",
'Test Response Exception'
]
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_no_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = NoResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [
"NoResponseMiddleware.process_response didn't return an HttpResponse object. It returned None instead."
],
ValueError())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_no_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = NoTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/template_response/', [
"NoTemplateResponseMiddleware.process_template_response didn't "
"return an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
_missing = object()
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
@override_settings(ROOT_URLCONF=None)
def test_missing_root_urlconf(self):
# Removing ROOT_URLCONF is safe, as override_settings will restore
# the previously defined settings.
del settings.ROOT_URLCONF
self.assertRaises(AttributeError, self.client.get, "/middleware_exceptions/view/")
class MyMiddleware(object):
def __init__(self):
raise MiddlewareNotUsed
def process_request(self, request):
pass
class MyMiddlewareWithExceptionMessage(object):
def __init__(self):
raise MiddlewareNotUsed('spam eggs')
def process_request(self, request):
pass
@override_settings(
DEBUG=True,
ROOT_URLCONF='middleware_exceptions.urls',
)
class MiddlewareNotUsedTests(SimpleTestCase):
rf = RequestFactory()
def test_raise_exception(self):
request = self.rf.get('middleware_exceptions/view/')
with self.assertRaises(MiddlewareNotUsed):
MyMiddleware().process_request(request)
@override_settings(MIDDLEWARE_CLASSES=[
'middleware_exceptions.tests.MyMiddleware',
])
def test_log(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
)
@override_settings(MIDDLEWARE_CLASSES=[
'middleware_exceptions.tests.MyMiddlewareWithExceptionMessage',
])
def test_log_custom_message(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
)
@override_settings(DEBUG=False)
def test_do_not_log_when_debug_is_false(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 0)
|
zanderle/django
|
refs/heads/master
|
django/middleware/cache.py
|
372
|
"""
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects that a HEAD request is answered with exactly the
  same response headers as the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key, get_max_age, has_vary_header, learn_cache_key,
patch_response_headers,
)
class UpdateCacheMiddleware(object):
"""
Response-phase cache middleware that updates the cache if the response is
cacheable.
Must be used as part of the two-part update/fetch cache middleware.
UpdateCacheMiddleware must be the first piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the response phase.
"""
def __init__(self):
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def _should_update_cache(self, request, response):
return hasattr(request, '_cache_update_cache') and request._cache_update_cache
def process_response(self, request, response):
"""Sets the cache, if needed."""
if not self._should_update_cache(request, response):
# We don't need to update the cache, just return.
return response
if response.streaming or response.status_code != 200:
return response
# Don't cache responses that set a user-specific (and maybe security
# sensitive) cookie in response to a cookie-less request.
if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
return response
# Try to get the timeout from the "max-age" section of the "Cache-
# Control" header before reverting to using the default cache_timeout
# length.
timeout = get_max_age(response)
if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't bother caching.
return response
patch_response_headers(response, timeout)
if timeout:
cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(
lambda r: self.cache.set(cache_key, r, timeout)
)
else:
self.cache.set(cache_key, response, timeout)
return response
class FetchFromCacheMiddleware(object):
"""
Request-phase cache middleware that fetches a page from the cache.
Must be used as part of the two-part update/fetch cache middleware.
FetchFromCacheMiddleware must be the last piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the request phase.
"""
def __init__(self):
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def process_request(self, request):
"""
Checks whether the page is already cached and returns the cached
version if available.
"""
if request.method not in ('GET', 'HEAD'):
request._cache_update_cache = False
return None # Don't bother checking the cache.
        # Try to get the cached GET response.
cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = self.cache.get(cache_key)
        # If it wasn't found and this is a HEAD request, try the HEAD-specific key.
if response is None and request.method == 'HEAD':
cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
response = self.cache.get(cache_key)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
        # Cache hit; return the cached response.
request._cache_update_cache = False
return response
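# Editorial note (inferred from the two classes above, not part of the
# original module): the pair cooperates through request._cache_update_cache.
# FetchFromCacheMiddleware sets the flag during the request phase, and
# UpdateCacheMiddleware consults it during the response phase to decide
# whether the rendered page should be written to the cache.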
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
"""
Cache middleware that provides basic behavior for many simple sites.
Also used as the hook point for the cache decorator, which is generated
using the decorator-from-middleware utility.
"""
def __init__(self, cache_timeout=None, **kwargs):
        # Differentiate between "provided, but with the value None" and "not
        # provided at all". A kwarg passed explicitly as None falls back to
        # the built-in default, while a kwarg that is missing entirely falls
        # back to the middleware defaults from settings.
try:
key_prefix = kwargs['key_prefix']
if key_prefix is None:
key_prefix = ''
except KeyError:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.key_prefix = key_prefix
try:
cache_alias = kwargs['cache_alias']
if cache_alias is None:
cache_alias = DEFAULT_CACHE_ALIAS
except KeyError:
cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache_alias = cache_alias
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.cache_timeout = cache_timeout
self.cache = caches[self.cache_alias]
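# Editorial sketch (based on the docstring above, not part of this module):
# the cache decorator mentioned there is generated from CacheMiddleware via
# the decorator-from-middleware utility, roughly like this.
#
#   from django.utils.decorators import decorator_from_middleware_with_args
#   cache_page = decorator_from_middleware_with_args(CacheMiddleware)
#
#   @cache_page(60 * 15, key_prefix='site1')
#   def my_view(request):
#       ...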
|
jneves/django-storages
|
refs/heads/master
|
docs/conf.py
|
32
|
# -*- coding: utf-8 -*-
#
# django-storages documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 28 13:44:45 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import storages
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-storages'
copyright = u'2011-2013, David Larlet, et. al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = storages.__version__
# The full version, including alpha/beta/rc tags.
release = storages.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-storagesdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-storages.tex', u'django-storages Documentation',
u'David Larlet, et. al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-storages', u'django-storages Documentation',
[u'David Larlet, et. al.'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'django-storages'
epub_author = u'David Larlet, et. al.'
epub_publisher = u'David Larlet, et. al.'
epub_copyright = u'2011-2013, David Larlet, et. al.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
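# Editorial note (not part of the original configuration): with this conf.py,
# the HTML docs can typically be built from the docs/ directory with:
#   sphinx-build -b html . _build/html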
|
jordanemedlock/psychtruths
|
refs/heads/master
|
temboo/core/Library/RunKeeper/Nutrition/DeleteEntry.py
|
4
|
# -*- coding: utf-8 -*-
###############################################################################
#
# DeleteEntry
# Removes an individual nutrition entry from a user’s feed.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteEntry(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteEntry Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteEntry, self).__init__(temboo_session, '/Library/RunKeeper/Nutrition/DeleteEntry')
def new_input_set(self):
return DeleteEntryInputSet()
def _make_result_set(self, result, path):
return DeleteEntryResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteEntryChoreographyExecution(session, exec_id, path)
class DeleteEntryInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteEntry
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved after the final step in the OAuth process.)
"""
super(DeleteEntryInputSet, self)._set_input('AccessToken', value)
def set_EntryID(self, value):
"""
Set the value of the EntryID input for this Choreo. ((required, string) This can be the individual id of the nutrition entry, or you can pass the full uri for the entry as returned from the RetrieveEntries Choreo (i.e. /nutrition/-12985593-1350864000000).)
"""
super(DeleteEntryInputSet, self)._set_input('EntryID', value)
class DeleteEntryResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteEntry Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
    def getJSONFromString(self, json_string):
        return json.loads(json_string)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((boolean) Contains the string "true" when an entry is deleted successfully.)
"""
return self._output.get('Response', None)
class DeleteEntryChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteEntryResultSet(response, path)
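# Editorial usage sketch (not part of the generated module); assumes an
# existing, authenticated TembooSession object named "session".
#
#   choreo = DeleteEntry(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken('<oauth-access-token>')
#   inputs.set_EntryID('/nutrition/-12985593-1350864000000')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())  # "true" on success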
|
tltuan/coala
|
refs/heads/master
|
coalib/bearlib/abstractions/SectionCreatable.py
|
25
|
from coalib.settings.FunctionMetadata import FunctionMetadata
class SectionCreatable:
"""
    A SectionCreatable is an object that can be created from a section object.
    Thus this is the class for many helper objects provided by the bearlib.
    If you want to use an object that inherits from this class, the following
    approach is recommended: instantiate it via the from_section method. You
    can provide default arguments via the lower-case keyword arguments.
Example:
::
SpacingHelper.from_section(section, tabwidth=8)
    creates a SpacingHelper; if the "tabwidth" setting is needed but not
    contained in the section, 8 will be used.
It is recommended to write the prototype of the __init__ method according
to this example:
::
def __init__(self, setting_one: int, setting_two: bool=False):
pass # Implementation
    This way the get_optional_settings and get_non_optional_settings methods
    will automatically extract that:
- setting_one should be an integer
- setting_two should be a bool and defaults to False
    If you write a documentation comment, you can use :param to add
    descriptions to your parameters. These will be made available
    automatically as well.
"""
def __init__(self):
pass # Method needs to be available
@classmethod
def from_section(cls, section, **kwargs):
"""
Creates the object from a section object.
:param section: A section object containing at least the settings
specified by get_non_optional_settings()
:param kwargs: Additional keyword arguments
"""
kwargs.update(cls.get_metadata().create_params_from_section(section))
return cls(**kwargs)
@classmethod
def get_metadata(cls):
return FunctionMetadata.from_function(cls.__init__, omit={"self"})
@classmethod
def get_non_optional_settings(cls):
"""
Retrieves the minimal set of settings that need to be defined in order
to use this object.
:return: a dictionary of needed settings as keys and help texts as
values
"""
return cls.get_metadata().non_optional_params
@classmethod
def get_optional_settings(cls):
"""
        Retrieves the settings needed IN ADDITION to the ones from
        get_non_optional_settings to use this object without relying on
        internal defaults.
:return: a dictionary of needed settings as keys and help texts as
values
"""
return cls.get_metadata().optional_params
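# Editorial sketch (hypothetical subclass, not part of coala): defining a
# helper on top of SectionCreatable and instantiating it from a section, as
# the class docstring describes.
#
#   class SpacingHelper(SectionCreatable):
#       def __init__(self, tabwidth: int=4):
#           self.tabwidth = tabwidth
#
#   helper = SpacingHelper.from_section(section, tabwidth=8)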
|
w1ll1am23/home-assistant
|
refs/heads/dev
|
homeassistant/components/syslog/__init__.py
|
36
|
"""The syslog component."""
|
graingert/pip
|
refs/heads/develop
|
tests/lib/scripttest.py
|
62
|
from __future__ import absolute_import
from . import PipTestEnvironment # noqa
|
yjmade/odoo
|
refs/heads/8.0
|
addons/website_event_track/controllers/event.py
|
332
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import collections
import datetime
import re
import pytz
import openerp
import openerp.tools
from openerp.addons.web import http
from openerp.addons.web.http import request
class website_event(http.Controller):
@http.route(['''/event/<model("event.event"):event>/track/<model("event.track", "[('event_id','=',event[0])]"):track>'''], type='http', auth="public", website=True)
def event_track_view(self, event, track, **post):
track_obj = request.registry.get('event.track')
track = track_obj.browse(request.cr, openerp.SUPERUSER_ID, track.id, context=request.context)
        values = {'track': track, 'event': track.event_id, 'main_object': track}
return request.website.render("website_event_track.track_view", values)
def _prepare_calendar(self, event, event_track_ids):
local_tz = pytz.timezone(event.timezone_of_event or 'UTC')
locations = {} # { location: [track, start_date, end_date, rowspan]}
dates = [] # [ (date, {}) ]
for track in event_track_ids:
locations.setdefault(track.location_id or False, [])
forcetr = True
for track in event_track_ids:
start_date = (datetime.datetime.strptime(track.date, '%Y-%m-%d %H:%M:%S')).replace(tzinfo=pytz.utc).astimezone(local_tz)
            end_date = start_date + datetime.timedelta(hours=(track.duration or 0.5))
location = track.location_id or False
locations.setdefault(location, [])
            # New table row (<tr>): align all events.
            if forcetr or (start_date > dates[-1][0]) or not location:
dates.append((start_date, {}, bool(location)))
for loc in locations.keys():
if locations[loc] and (locations[loc][-1][2] > start_date):
locations[loc][-1][3] += 1
elif not locations[loc] or locations[loc][-1][2] < start_date:
locations[loc].append([False, locations[loc] and locations[loc][-1][2] or dates[0][0], start_date, 1])
dates[-1][1][loc] = locations[loc][-1]
forcetr = not bool(location)
# Add event
if locations[location] and locations[location][-1][1] > start_date:
locations[location][-1][3] -= 1
locations[location].append([track, start_date, end_date, 1])
dates[-1][1][location] = locations[location][-1]
return {
'locations': locations,
'dates': dates
}
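    # Editorial note (shape inferred from the code above, not part of the
    # original module): _prepare_calendar returns
    #   {'locations': {location_or_False: [[track_or_False, start, end, rowspan], ...]},
    #    'dates': [(start_datetime, {location: cell}, align_flag), ...]}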
# TODO: not implemented
@http.route(['''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/agenda'''], type='http', auth="public", website=True)
def event_agenda(self, event, tag=None, **post):
days_tracks = collections.defaultdict(lambda: [])
for track in sorted(event.track_ids, key=lambda x: (x.date, bool(x.location_id))):
            if not track.date:
                continue
days_tracks[track.date[:10]].append(track)
days = {}
days_tracks_count = {}
for day, tracks in days_tracks.iteritems():
days_tracks_count[day] = len(tracks)
days[day] = self._prepare_calendar(event, tracks)
cr, uid, context = request.cr, request.uid, request.context
track_obj = request.registry['event.track']
tracks_ids = track_obj.search(cr, openerp.SUPERUSER_ID, [('event_id', '=', event.id)], context=context)
speakers = dict()
for t in track_obj.browse(cr, openerp.SUPERUSER_ID, tracks_ids, context=context):
acc = ""
for speaker in t.speaker_ids:
acc = speaker.name + u" – " + acc if acc else speaker.name
speakers[t.id] = acc
return request.website.render("website_event_track.agenda", {
'event': event,
'days': days,
'days_nbr': days_tracks_count,
'speakers': speakers,
'tag': tag
})
@http.route([
'''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/track''',
'''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/track/tag/<model("event.track.tag"):tag>'''
], type='http', auth="public", website=True)
def event_tracks(self, event, tag=None, **post):
searches = {}
if tag:
searches.update(tag=tag.id)
track_obj = request.registry.get('event.track')
track_ids = track_obj.search(request.cr, request.uid,
[("id", "in", [track.id for track in event.track_ids]), ("tag_ids", "=", tag.id)], context=request.context)
tracks = track_obj.browse(request.cr, request.uid, track_ids, context=request.context)
else:
tracks = event.track_ids
def html2text(html):
return re.sub(r'<[^>]+>', "", html)
values = {
'event': event,
'main_object': event,
'tracks': tracks,
'tags': event.tracks_tag_ids,
'searches': searches,
'html2text': html2text
}
return request.website.render("website_event_track.tracks", values)
@http.route(['''/event/<model("event.event", "[('show_track_proposal','=',1)]"):event>/track_proposal'''], type='http', auth="public", website=True)
def event_track_proposal(self, event, **post):
values = { 'event': event }
return request.website.render("website_event_track.event_track_proposal", values)
@http.route(['/event/<model("event.event"):event>/track_proposal/post'], type='http', auth="public", methods=['POST'], website=True)
def event_track_proposal_post(self, event, **post):
cr, uid, context = request.cr, request.uid, request.context
tobj = request.registry['event.track']
tags = []
for tag in event.allowed_track_tag_ids:
if post.get('tag_'+str(tag.id)):
tags.append(tag.id)
e = openerp.tools.escape
track_description = '''<section data-snippet-id="text-block">
<div class="container">
<div class="row">
<div class="col-md-12 text-center">
<h2>%s</h2>
</div>
<div class="col-md-12">
<p>%s</p>
</div>
<div class="col-md-12">
<h3>About The Author</h3>
<p>%s</p>
</div>
</div>
</div>
</section>''' % (e(post['track_name']),
e(post['description']), e(post['biography']))
track_id = tobj.create(cr, openerp.SUPERUSER_ID, {
'name': post['track_name'],
'event_id': event.id,
'tag_ids': [(6, 0, tags)],
'user_id': False,
'description': track_description
}, context=context)
tobj.message_post(cr, openerp.SUPERUSER_ID, [track_id], body="""Proposed By: %s<br/>
Mail: <a href="mailto:%s">%s</a><br/>
Phone: %s""" % (e(post['partner_name']), e(post['email_from']),
e(post['email_from']), e(post['phone'])), context=context)
track = tobj.browse(cr, uid, track_id, context=context)
values = {'track': track, 'event':event}
return request.website.render("website_event_track.event_track_proposal_success", values)
|
baloo/shinken
|
refs/heads/debian/master
|
shinken/modules/livestatus_broker/livestatus_stack.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import Queue
class MyLifoQueue(Queue.Queue):
"""A class that implements a Fifo.
Python versions < 2.5 do not have the Queue.LifoQueue class.
MyLifoQueue overwrites methods of the Queue.Queue class and
then behaves like Queue.LifoQueue.
"""
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
class TopBaseLiveStatusStack(object):
pass
class LiveStatusStack(TopBaseLiveStatusStack):
"""A Lifo queue for filter functions.
This class inherits either from MyLifoQueue or Queue.LifoQueue
whatever is available with the current python version.
Public functions:
and_elements -- takes a certain number (given as argument)
of filters from the stack, creates a new filter and puts
this filter on the stack. If these filters are lambda functions,
the new filter is a boolean and of the underlying filters.
If the filters are sql where-conditions, they are also concatenated
with and to form a new string containing a more complex where-condition.
or_elements --- the same, only that the single filters are
combined with a logical or.
"""
def __xinit__(self, *args, **kw):
self.type = 'lambda'
print "i am a", type(self)
print "my parents are", [c.__name__ for c in self.__class__.__bases__]
print "my first parent is", self.__class__.__bases__[0].__name__
if self.__class__.__name__ == 'LiveStatusStack':
self.__class__.__bases__[0].__init__(self, *args, **kw)
def not_elements(self):
top_filter = self.get_stack()
def negate_filter(ref):
return not top_filter(ref)
self.put_stack(negate_filter)
def and_elements(self, num):
"""Take num filters from the stack, and them and put the result back"""
if num > 1:
filters = []
for _ in range(num):
filters.append(self.get_stack())
# Take from the stack:
# Make a combined anded function
# Put it on the stack
# List of functions taking parameter ref
def and_filter(ref):
myfilters = filters
failed = False
for filt in myfilters:
if not filt(ref):
failed = True
break
else:
pass
return not failed
self.put_stack(and_filter)
def or_elements(self, num):
"""Take num filters from the stack, or them and put the result back"""
if num > 1:
filters = []
for _ in range(num):
filters.append(self.get_stack())
def or_filter(ref):
myfilters = filters
failed = True
# Applying the filters in reversed order is faster. (Shown by measuring runtime)
for filt in reversed(myfilters):
if filt(ref):
failed = False
break
else:
pass
return not failed
self.put_stack(or_filter)
def get_stack(self):
"""Return the top element from the stack or a filter which is always true"""
if self.qsize() == 0:
return lambda x : True
else:
return self.get()
def put_stack(self, element):
"""Wrapper for a stack put operation which corresponds to get_stack"""
self.put(element)
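# A minimal usage sketch (hypothetical filters; assumes the Lifo base class
# has been wired in by the try/except below):
#
#   stack = LiveStatusStack()
#   stack.put_stack(lambda ref: ref['state'] == 0)
#   stack.put_stack(lambda ref: ref['name'].startswith('web'))
#   stack.and_elements(2)
#   combined = stack.get_stack()
#   combined({'state': 0, 'name': 'web01'})  # -> True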
try:
Queue.LifoQueue
TopBaseLiveStatusStack.__bases__ = (Queue.LifoQueue, object)
#LiveStatusStack.__bases__ += (Queue.LifoQueue, )
except AttributeError:
    # Python 2.4 and 2.5 do not have it.
# Use our own implementation.
TopBaseLiveStatusStack.__bases__ = (MyLifoQueue, object)
#LiveStatusStack.__bases__ += (MyLifoQueue, )
|
vikas1885/test1
|
refs/heads/master
|
common/lib/xmodule/xmodule/annotator_token.py
|
211
|
"""
This file contains a function used to retrieve the token for the annotation backend
without having to create a view, but just returning a string instead.
It can be called from other files by using the following:
from xmodule.annotator_token import retrieve_token
"""
import datetime
from firebase_token_generator import create_token
def retrieve_token(userid, secret):
'''
Return a token for the backend of annotations.
It uses the course id to retrieve a variable that contains the secret
token found in inheritance.py. It also contains information of when
the token was issued. This will be stored with the user along with
the id for identification purposes in the backend.
'''
    # the following five lines of code allow you to include the default timezone in the iso format
# for more information: http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
newhour, newmin = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60)
newtime = "%s%+02d:%02d" % (dtnow.isoformat(), newhour, newmin)
# uses the issued time (UTC plus timezone), the consumer key and the user's email to maintain a
# federated system in the annotation backend server
custom_data = {"issuedAt": newtime, "consumerKey": secret, "userId": userid, "ttl": 86400}
newtoken = create_token(secret, custom_data)
return newtoken
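# A usage sketch (hypothetical values, for illustration only):
#   token = retrieve_token("student_42", "course-shared-secret")
#   # `token` is a signed string embedding issuedAt, consumerKey, userId and
#   # ttl, suitable for authenticating the user against the annotation backend.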
|
zubair-arbi/edx-platform
|
refs/heads/master
|
lms/djangoapps/verify_student/migrations/0013_auto__add_field_softwaresecurephotoverification_copy_id_photo_from.py
|
11
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SoftwareSecurePhotoVerification.copy_id_photo_from'
db.add_column('verify_student_softwaresecurephotoverification', 'copy_id_photo_from',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['verify_student.SoftwareSecurePhotoVerification'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SoftwareSecurePhotoVerification.copy_id_photo_from'
db.delete_column('verify_student_softwaresecurephotoverification', 'copy_id_photo_from_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'verify_student.historicalverificationdeadline': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalVerificationDeadline'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'verify_student.incoursereverificationconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'InCourseReverificationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'verify_student.skippedreverification': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'SkippedReverification'},
'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skipped_checkpoint'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'copy_id_photo_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['verify_student.SoftwareSecurePhotoVerification']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'28d8227e-6c75-418a-b1ae-46b6b8e7557f'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'verify_student.verificationcheckpoint': {
'Meta': {'unique_together': "(('course_id', 'checkpoint_location'),)", 'object_name': 'VerificationCheckpoint'},
'checkpoint_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo_verification': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['verify_student.SoftwareSecurePhotoVerification']", 'symmetrical': 'False'})
},
'verify_student.verificationdeadline': {
'Meta': {'object_name': 'VerificationDeadline'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
'verify_student.verificationstatus': {
'Meta': {'object_name': 'VerificationStatus'},
'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'checkpoint_status'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['verify_student']
|
JoeriHermans/tensorflow-scripts
|
refs/heads/master
|
scripts/adverserial-variational-optimization/avo-gp.py
|
2
|
# Adversarial Variational Optimization
import math
import numpy as np
import random
import sys
import torch
import torch.nn.functional as F
from sklearn.utils import check_random_state
from torch.autograd import Variable
def main():
# Assume there exists some true parameterization.
    # Beam Energy = 41 GeV, and Fermi's Constant is 0.9
theta_true = [41.0, 0.9]
# Assume there is an experiment drawing (real) samples from nature.
p_r = real_experiment(theta_true, 10000)
# Initialize the prior of theta, parameterized by a Gaussian.
proposal = {'mu': [], 'sigma': []}
# Check if a custom mu has been specified.
if '--mu' in sys.argv:
mu = sys.argv[sys.argv.index('--mu') + 1].split(",")
mu = [float(e) for e in mu]
proposal['mu'] = mu
proposal['sigma'] = [1., 1.]
else:
# Add random beam energy.
add_prior_beam_energy(proposal)
# Add random Fermi constant.
add_prior_fermi_constant(proposal)
# Check if a custom sigma has been specified.
if '--sigma' in sys.argv and '--mu' in sys.argv:
sigma = sys.argv[sys.argv.index('--sigma') + 1].split(",")
sigma = [float(e) for e in sigma]
proposal['sigma'] = sigma
# Convert the proposal lists to PyTorch Tensors.
proposal['mu'] = torch.FloatTensor(proposal['mu'])
proposal['sigma'] = torch.FloatTensor(proposal['sigma'])
    # Inference on theta is done using a critic network in an adversarial setting.
if '--sigmoid' in sys.argv:
critic = CriticWithSigmoid(num_hidden=50)
else:
critic = Critic(num_hidden=50)
# Obtain the batch size from the arguments.
if '--batch-size' in sys.argv:
batch_size = int(sys.argv[sys.argv.index('--batch-size') + 1])
else:
batch_size = 256
# Fit the proposal distribution to the real distribution using the critic.
    fit(proposal, p_r, critic, theta_true, batch_size=batch_size)
# Display the current parameterization of the proposal distribution.
print("\nProposal Distribution:")
print(" - Beam Energy:")
print(" mu: " + str(proposal['mu'][0]))
print(" sigma: " + str(proposal['sigma'][0]))
print(" - Fermi's Constant:")
print(" mu: " + str(proposal['mu'][1]))
print(" sigma: " + str(proposal['sigma'][1]))
print("\nTrue Distribution:")
print(" - Beam Energy: " + str(theta_true[0]))
print(" - Fermi's Constant: " + str(theta_true[1]))
def fit(proposal, p_r, critic, theta_true, num_iterations=1000, batch_size=256):
critic_optimizer = torch.optim.Adam(critic.parameters(), lr=0.001)
for iteration in range(0, num_iterations):
print("True Mu: " + str(theta_true))
print("Current Mu: " + str(proposal['mu']))
print("Current Sigma: " + str(proposal['sigma']))
# Fit the critic network.
fit_critic(proposal, p_r, critic, critic_optimizer, batch_size=batch_size, num_critic_iterations=1000)
# Fit the proposal distribution.
fit_proposal(proposal, p_r, critic, batch_size)
def fit_critic(proposal, p_r, critic, optimizer, num_critic_iterations=50000, batch_size=256):
# Fetch the data batches.
x_r = sample_real_data(p_r, batch_size)
x_g = sample_generated_data(proposal, batch_size)
# Fit the critic optimally.
for iteration in range(0, num_critic_iterations):
# Reset the gradients.
critic.zero_grad()
# Forward pass with real data.
y_r = critic(x_r).mean()
# Forward pass with generated data.
y_g = critic(x_g).mean()
# Obtain gradient penalty (GP).
gp = compute_gradient_penalty(critic, x_r.data, x_g.data).mean()
# Compute the loss, and the accompanying gradients.
loss = y_g - y_r + gp
loss.backward()
optimizer.step()
def fit_proposal(proposal, p_r, critic, batch_size=256):
# TODO Implement Gaussian Process
pass
def compute_gradient_penalty(critic, real, fake, l=5.0):
# Compute x_hat and its output.
epsilon = torch.rand(real.size())
x_hat = epsilon * real + ((1. - epsilon) * fake)
x_hat = torch.autograd.Variable(x_hat, requires_grad=True)
y_hat = critic(x_hat)
# Compute the associated gradients.
gradients = torch.autograd.grad(outputs=y_hat, inputs=x_hat,
grad_outputs=torch.ones(y_hat.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
# Prevent norm 0 causing NaN.
gradients = gradients + 1e-16
# Compute the gradient penalty.
gradient_penalty = l * ((gradients.norm(2, dim=1) - 1.) ** 2)
return gradient_penalty
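# For reference, the critic objective assembled in fit_critic() above is the
# WGAN-GP loss (sketched here in math form):
#   L = E[critic(x_g)] - E[critic(x_r)] + l * E[(||grad_{x_hat} critic(x_hat)||_2 - 1)^2]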
def sample_real_data(p_r, batch_size=256):
samples = torch.zeros((batch_size, 1))
num_samples_p_r = len(p_r)
for index in range(0, batch_size):
random_index = random.randint(0, num_samples_p_r - 1)
samples[index, :] = p_r[random_index]
return torch.autograd.Variable(samples)
def sample_generated_data(proposal, batch_size=256):
# Sample `batch_size` thetas according to our proposal distribution.
thetas = draw_gaussian(proposal, batch_size)
# Obtain the individual Gaussians.
theta_beam_energy = thetas[:, 0]
theta_fermi_constant = thetas[:, 1]
# Sample according to the proposal distribution.
samples = torch.zeros((batch_size, 1))
for sample_index, theta in enumerate(thetas):
samples[sample_index, :] = simulator(theta, 1)
return torch.autograd.Variable(samples)
def gaussian_logpdf(mu, sigma, theta):
    # log N(theta; mu, sigma) = -log(sigma) - log(sqrt(2*pi)) - (theta - mu)^2 / (2*sigma^2)
    logpdf = -(sigma.log() + np.log((2. * np.pi) ** .5) + (theta - mu) ** 2 / (2. * sigma ** 2))
    return logpdf
def gaussian_differential_entropy(sigma):
dentropy = (sigma * (2. * np.pi * np.e) ** .5).log()
return dentropy
def add_prior_beam_energy(prior):
g = random_gaussian(mu=[30, 60], sigma=1.0)
add_prior(prior, g['mu'], g['sigma'])
def add_prior_fermi_constant(prior):
g = random_gaussian(mu=[0, 2], sigma=1.0)
add_prior(prior, g['mu'], g['sigma'])
def add_prior(prior, mu, sigma):
prior['mu'].append(mu)
prior['sigma'].append(sigma)
def random_gaussian(mu=[-1, 1], sigma=5.0):
return {'mu': np.random.uniform(mu[0], mu[1]),
'sigma': np.random.uniform(0.0, sigma)}
def draw_gaussian(d, num_samples, random_state=None):
num_parameters = len(d['mu'])
thetas = torch.zeros((num_samples, num_parameters))
mu = d['mu']
sigma = d['sigma']
for i in range(0, num_samples):
gaussian = torch.normal(mu, sigma)
thetas[i, :] = gaussian
return thetas
def real_experiment(theta, n_samples):
return simulator(theta, n_samples)
def simulator(theta, n_samples, random_state=None):
rng = check_random_state(random_state)
samples = simulator_rej_sample_costheta(n_samples, theta, rng)
return torch.from_numpy(samples.reshape(-1, 1)).float()
def simulator_rej_sample_costheta(n_samples, theta, rng):
sqrtshalf = theta[0]
gf = theta[1]
ntrials = 0
samples = []
x = torch.linspace(-1, 1, steps=1000)
maxval = torch.max(simulator_diffxsec(x, sqrtshalf, gf))
while len(samples) < n_samples:
ntrials = ntrials + 1
xprop = rng.uniform(-1, 1)
ycut = rng.rand()
        yprop = (simulator_diffxsec(xprop, sqrtshalf, gf) / maxval)[0]
        # yprop is already normalized by maxval, so compare it to ycut directly.
        if yprop < ycut:
continue
samples.append(xprop)
return np.array(samples)
def simulator_diffxsec(costheta, sqrtshalf, gf):
norm = 2. * (1. + 1. / 3.)
return ((1 + costheta ** 2) + simulator_a_fb(sqrtshalf, gf) * costheta) / norm
def simulator_a_fb(sqrtshalf, gf):
mz = 90
gf_nom = 0.9
sqrts = sqrtshalf * 2.
x = torch.FloatTensor([(sqrts - mz) / mz * 10])
a_fb_en = torch.tanh(x)
a_fb_gf = gf / gf_nom
return 2 * a_fb_en * a_fb_gf
class Critic(torch.nn.Module):
def __init__(self, num_hidden):
super(Critic, self).__init__()
self.fc_1 = torch.nn.Linear(1, num_hidden)
self.fc_2 = torch.nn.Linear(num_hidden, num_hidden)
self.fc_3 = torch.nn.Linear(num_hidden, 1)
def forward(self, x):
x = F.relu(self.fc_1(x))
x = F.relu(self.fc_2(x))
x = (self.fc_3(x))
return x
class CriticWithSigmoid(torch.nn.Module):
def __init__(self, num_hidden):
super(CriticWithSigmoid, self).__init__()
self.fc_1 = torch.nn.Linear(1, num_hidden)
self.fc_2 = torch.nn.Linear(num_hidden, num_hidden)
self.fc_3 = torch.nn.Linear(num_hidden, 1)
def forward(self, x):
x = F.relu(self.fc_1(x))
x = F.relu(self.fc_2(x))
x = F.sigmoid(self.fc_3(x))
return x
if __name__ == '__main__':
main()
|
fighterCui/L4ReFiascoOC
|
refs/heads/master
|
l4/pkg/python/contrib/Tools/framer/framer/__init__.py
|
48
|
"""A tool to generate basic framework for C extension types.
The basic idea is the same as modulator, but the generated code
uses many of the new features introduced in Python 2.2. It also
takes a more declarative approach to generating code.
"""
|
fernandezcuesta/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/dimensiondata/__init__.py
|
12133432
| |
indashnet/InDashNet.Open.UN2000
|
refs/heads/master
|
android/external/markdown/markdown/inlinepatterns.py
|
107
|
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline patterns must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)!'. For the built-in expressions, the
Pattern class takes care of adding the "^(.*)" and "(.*)!".
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import markdown
import re
from urlparse import urlparse, urlunparse
import sys
if sys.version >= "3.0":
from html import entities as htmlentitydefs
else:
import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'(\*)([^\*]+)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.+?)\2' # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2' # ***strong***
if markdown.SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\w)(_)(\S.+?)\2(?!\w)' # _emphasis_
else:
EMPHASIS_2_RE = r'(_)(.+?)\2' # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12)?\)'''
# [text](url) or [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
#  or 
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
def dequote(string):
"""Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, text)
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
"""Base class that inline patterns subclass. """
def __init__ (self, pattern, markdown_instance=None):
"""
        Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp (self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
text = m.group(2)
if text == markdown.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class SimpleTagPattern (Pattern):
"""
    Return an element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = m.group(3)
return el
class SubstituteTagPattern (SimpleTagPattern):
""" Return a eLement of type `tag` with no children. """
def handleMatch (self, m):
return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = markdown.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern (SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = markdown.etree.Element(tag1)
el2 = markdown.etree.SubElement(el1, tag2)
el2.text = m.group(3)
return el1
class HtmlPattern (Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch (self, m):
rawhtml = m.group(2)
inline = True
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
class LinkPattern (Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.text = m.group(2)
title = m.group(11)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(href.strip()))
else:
el.set("href", "")
if title:
title = dequote(title) #.replace('"', """)
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
locless_schemes = ['', 'mailto', 'news']
scheme, netloc, path, params, query, fragment = url = urlparse(url)
safe_url = False
if netloc != '' or scheme in locless_schemes:
safe_url = True
for part in url[2:]:
if ":" in part:
safe_url = False
if self.markdown.safeMode and not safe_url:
return ''
else:
return urlunparse(url)
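    # Illustrative behaviour of the whitelist above (hypothetical inputs, safe_mode on):
    #   sanitize_url('http://example.com/a:b')  -> ''   (literal colon in the path)
    #   sanitize_url('mailto:me@example.com')   -> kept (locationless scheme)
    #   sanitize_url('javascript:alert(1)')     -> ''   (no netloc, scheme not whitelisted)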
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(src))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(" ".join(src_parts[1:])))
if markdown.ENABLE_ATTRIBUTES:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', truealt)
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
def handleMatch(self, m):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
        if id not in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = markdown.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern (ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = markdown.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
el.set("alt", text)
return el
class AutolinkPattern (Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.set('href', m.group(2))
el.text = markdown.AtomicString(m.group(2))
return el
class AutomailPattern (Pattern):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = markdown.etree.Element('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = htmlentitydefs.codepoint2name.get(code)
if entity:
return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = markdown.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
|
henriquegemignani/image2tiled
|
refs/heads/master
|
tests/test_tiled_generator.py
|
1
|
import json
import os
import pytest
import image2tiled.tiled_generator
_path = os.path.dirname(os.path.abspath(__file__))
def test_json(image_exporter, tile_extractor, reader_4x4, tiled_4x4):
rotation_results = tile_extractor.extract(reader_4x4)
new_image = image_exporter.create(rotation_results.unique_images, 20)
new_image.filename = "4x4_tiles.png"
with open(tiled_4x4) as tiled_4x4_file:
tiled_4x4_data = json.load(tiled_4x4_file)
tiled = image2tiled.tiled_generator.TiledGenerator(reader_4x4.tile_size,
reader_4x4.num_tiles)
tiled.add_layer(rotation_results, new_image, tiles_per_row=6)
assert tiled.json() == tiled_4x4_data
@pytest.mark.parametrize("filename", [
"example_4_small1",
"example_4_small2",
"example_4_original",
])
def test_json_full_16x16(image_exporter, tile_extractor, filename):
import image2tiled.image_reader
reader = image2tiled.image_reader.ImageReader(os.path.join(_path, "sample_files", filename + ".png"), 16)
results = tile_extractor.extract(reader)
images_per_row = 128
new_image = image_exporter.create(results.unique_images, images_per_row)
new_image.filename = filename + "-tilemap.png"
with open(os.path.join(_path, "sample_files", filename + ".json")) as tiled_file:
tiled_data = json.load(tiled_file)
tiled = image2tiled.tiled_generator.TiledGenerator(reader.tile_size,
reader.num_tiles)
tiled.add_layer(results, new_image, tiles_per_row=images_per_row)
assert tiled.json() == tiled_data
|
ThiagoGarciaAlves/intellij-community
|
refs/heads/master
|
python/testData/quickdoc/DirectClass.py
|
83
|
# direct class doc
class Foo(object):
"<the_doc>Doc of Foo."
pass
<the_ref>Foo
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/django-voting/voting/templatetags/voting_tags.py
|
32
|
from django import template
from django.utils.html import escape
from voting.models import Vote
register = template.Library()
# Tags
class ScoreForObjectNode(template.Node):
def __init__(self, object, context_var):
self.object = object
self.context_var = context_var
def render(self, context):
try:
object = template.resolve_variable(self.object, context)
except template.VariableDoesNotExist:
return ''
context[self.context_var] = Vote.objects.get_score(object)
return ''
class ScoresForObjectsNode(template.Node):
def __init__(self, objects, context_var):
self.objects = objects
self.context_var = context_var
def render(self, context):
try:
objects = template.resolve_variable(self.objects, context)
except template.VariableDoesNotExist:
return ''
context[self.context_var] = Vote.objects.get_scores_in_bulk(objects)
return ''
class VoteByUserNode(template.Node):
def __init__(self, user, object, context_var):
self.user = user
self.object = object
self.context_var = context_var
def render(self, context):
try:
user = template.resolve_variable(self.user, context)
object = template.resolve_variable(self.object, context)
except template.VariableDoesNotExist:
return ''
context[self.context_var] = Vote.objects.get_for_user(object, user)
return ''
class VotesByUserNode(template.Node):
def __init__(self, user, objects, context_var):
self.user = user
self.objects = objects
self.context_var = context_var
def render(self, context):
try:
user = template.resolve_variable(self.user, context)
objects = template.resolve_variable(self.objects, context)
except template.VariableDoesNotExist:
return ''
context[self.context_var] = Vote.objects.get_for_user_in_bulk(objects, user)
return ''
class DictEntryForItemNode(template.Node):
def __init__(self, item, dictionary, context_var):
self.item = item
self.dictionary = dictionary
self.context_var = context_var
def render(self, context):
try:
dictionary = template.resolve_variable(self.dictionary, context)
item = template.resolve_variable(self.item, context)
except template.VariableDoesNotExist:
return ''
context[self.context_var] = dictionary.get(item.id, None)
return ''
def do_score_for_object(parser, token):
"""
Retrieves the total score for an object and the number of votes
it's received and stores them in a context variable which has
``score`` and ``num_votes`` properties.
Example usage::
{% score_for_object widget as score %}
{{ score.score }}point{{ score.score|pluralize }}
after {{ score.num_votes }} vote{{ score.num_votes|pluralize }}
"""
bits = token.contents.split()
if len(bits) != 4:
raise template.TemplateSyntaxError("'%s' tag takes exactly three arguments" % bits[0])
if bits[2] != 'as':
raise template.TemplateSyntaxError("second argument to '%s' tag must be 'as'" % bits[0])
return ScoreForObjectNode(bits[1], bits[3])
def do_scores_for_objects(parser, token):
"""
Retrieves the total scores for a list of objects and the number of
votes they have received and stores them in a context variable.
Example usage::
{% scores_for_objects widget_list as score_dict %}
"""
bits = token.contents.split()
if len(bits) != 4:
raise template.TemplateSyntaxError("'%s' tag takes exactly three arguments" % bits[0])
if bits[2] != 'as':
raise template.TemplateSyntaxError("second argument to '%s' tag must be 'as'" % bits[0])
return ScoresForObjectsNode(bits[1], bits[3])
def do_vote_by_user(parser, token):
"""
Retrieves the ``Vote`` cast by a user on a particular object and
stores it in a context variable. If the user has not voted, the
context variable will be ``None``.
Example usage::
{% vote_by_user user on widget as vote %}
"""
bits = token.contents.split()
if len(bits) != 6:
raise template.TemplateSyntaxError("'%s' tag takes exactly five arguments" % bits[0])
if bits[2] != 'on':
raise template.TemplateSyntaxError("second argument to '%s' tag must be 'on'" % bits[0])
if bits[4] != 'as':
raise template.TemplateSyntaxError("fourth argument to '%s' tag must be 'as'" % bits[0])
return VoteByUserNode(bits[1], bits[3], bits[5])
def do_votes_by_user(parser, token):
"""
Retrieves the votes cast by a user on a list of objects as a
dictionary keyed with object ids and stores it in a context
variable.
Example usage::
{% votes_by_user user on widget_list as vote_dict %}
"""
bits = token.contents.split()
if len(bits) != 6:
        raise template.TemplateSyntaxError("'%s' tag takes exactly five arguments" % bits[0])
if bits[2] != 'on':
raise template.TemplateSyntaxError("second argument to '%s' tag must be 'on'" % bits[0])
if bits[4] != 'as':
raise template.TemplateSyntaxError("fourth argument to '%s' tag must be 'as'" % bits[0])
return VotesByUserNode(bits[1], bits[3], bits[5])
def do_dict_entry_for_item(parser, token):
"""
Given an object and a dictionary keyed with object ids - as
returned by the ``votes_by_user`` and ``scores_for_objects``
template tags - retrieves the value for the given object and
stores it in a context variable, storing ``None`` if no value
exists for the given object.
Example usage::
{% dict_entry_for_item widget from vote_dict as vote %}
"""
bits = token.contents.split()
if len(bits) != 6:
raise template.TemplateSyntaxError("'%s' tag takes exactly five arguments" % bits[0])
if bits[2] != 'from':
raise template.TemplateSyntaxError("second argument to '%s' tag must be 'from'" % bits[0])
if bits[4] != 'as':
raise template.TemplateSyntaxError("fourth argument to '%s' tag must be 'as'" % bits[0])
return DictEntryForItemNode(bits[1], bits[3], bits[5])
register.tag('score_for_object', do_score_for_object)
register.tag('scores_for_objects', do_scores_for_objects)
register.tag('vote_by_user', do_vote_by_user)
register.tag('votes_by_user', do_votes_by_user)
register.tag('dict_entry_for_item', do_dict_entry_for_item)
# Simple Tags
def confirm_vote_message(object_description, vote_direction):
"""
Creates an appropriate message asking the user to confirm the given vote
for the given object description.
Example usage::
{% confirm_vote_message widget.title direction %}
"""
if vote_direction == 'clear':
message = 'Confirm clearing your vote for <strong>%s</strong>.'
else:
message = 'Confirm <strong>%s</strong> vote for <strong>%%s</strong>.' % vote_direction
return message % (escape(object_description),)
register.simple_tag(confirm_vote_message)
# Filters
def vote_display(vote, arg=None):
"""
Given a string mapping values for up and down votes, returns one
of the strings according to the given ``Vote``:
========= ===================== =============
Vote type Argument Outputs
========= ===================== =============
``+1`` ``"Bodacious,Bogus"`` ``Bodacious``
``-1`` ``"Bodacious,Bogus"`` ``Bogus``
========= ===================== =============
If no string mapping is given, "Up" and "Down" will be used.
Example usage::
{{ vote|vote_display:"Bodacious,Bogus" }}
"""
if arg is None:
arg = 'Up,Down'
bits = arg.split(',')
if len(bits) != 2:
return vote.vote # Invalid arg
up, down = bits
if vote.vote == 1:
return up
return down
register.filter(vote_display)
|
tanderegg/django-cmsplugin-twitter
|
refs/heads/master
|
cmsplugin_twitter/cms_plugins.py
|
1
|
import twitter
from dateutil.parser import parse
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import TwitterConfig
class TwitterCredentialsNotSupplied(Exception):
pass
class TwitterPlugin(CMSPluginBase):
model = TwitterConfig
name = _("Twitter Feed")
render_template = "cmsplugin_twitter/feed.html"
def render(self, context, instance, placeholder):
try:
consumer_key = settings.TWITTER_CONSUMER_KEY
consumer_secret = settings.TWITTER_CONSUMER_SECRET
access_token_key = settings.TWITTER_ACCESS_TOKEN_KEY
access_token_secret = settings.TWITTER_ACCESS_TOKEN_SECRET
except AttributeError:
raise TwitterCredentialsNotSupplied("""
Twitter credentials have not been supplied in your
settings, please provide them in the format of
TWITTER_{credential}, such as TWITTER_CONSUMER_KEY
""")
api = twitter.Api(
consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=access_token_key,
access_token_secret=access_token_secret
)
        feed = api.GetUserTimeline(
            screen_name='rootscamp',  # NOTE: the screen name is hardcoded rather than read from the plugin instance
            count=instance.number_of_tweets
        )
formatted_feed = []
for tweet in feed:
tweet.created_at = parse(tweet.created_at)
formatted_feed.append(tweet)
context.update({
'object': instance,
'placeholder': placeholder,
'feed': formatted_feed
})
return context
plugin_pool.register_plugin(TwitterPlugin)
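# Settings sketch (hypothetical credentials; these are the exact names that
# render() above reads from django.conf.settings):
#   TWITTER_CONSUMER_KEY = '...'
#   TWITTER_CONSUMER_SECRET = '...'
#   TWITTER_ACCESS_TOKEN_KEY = '...'
#   TWITTER_ACCESS_TOKEN_SECRET = '...'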
|
jesseengel/magenta
|
refs/heads/master
|
magenta/models/image_stylization/image_stylization_evaluate.py
|
2
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates the N-styles style transfer model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
from magenta.models.image_stylization import image_utils
from magenta.models.image_stylization import learning
from magenta.models.image_stylization import model
import tensorflow as tf
slim = tf.contrib.slim
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1.0}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 1e-4, "vgg_16/conv2": 1e-4,'
' "vgg_16/conv3": 1e-4, "vgg_16/conv4": 1e-4}')
flags = tf.app.flags
flags.DEFINE_boolean('style_grid', False,
'Whether to generate the style grid.')
flags.DEFINE_boolean('style_crossover', False,
'Whether to do a style crossover in the style grid.')
flags.DEFINE_boolean('learning_curves', True,
'Whether to evaluate learning curves for all styles.')
flags.DEFINE_integer('batch_size', 16, 'Batch size')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_integer('eval_interval_secs', 60,
'Frequency, in seconds, at which evaluation is run.')
flags.DEFINE_integer('num_evals', 32, 'Number of evaluations of the losses.')
flags.DEFINE_integer('num_styles', None, 'Number of styles.')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
'Content weights')
flags.DEFINE_string('eval_dir', None,
'Directory where the results are saved to.')
flags.DEFINE_string('train_dir', None,
'Directory for checkpoints and summaries')
flags.DEFINE_string('master', '',
'Name of the TensorFlow master to use.')
flags.DEFINE_string('style_coefficients', None,
'Scales the style weights conditioned on the style image.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS,
'Style weights')
FLAGS = flags.FLAGS
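# Example invocation (hypothetical paths; flags as defined above):
#   python image_stylization_evaluate.py --num_styles=32 \
#       --train_dir=/tmp/image_stylization/train \
#       --eval_dir=/tmp/image_stylization/eval \
#       --style_dataset_file=/tmp/styles.tfrecord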
def main(_):
with tf.Graph().as_default():
# Create inputs in [0, 1], as expected by vgg_16.
inputs, _ = image_utils.imagenet_inputs(
FLAGS.batch_size, FLAGS.image_size)
evaluation_images = image_utils.load_evaluation_images(FLAGS.image_size)
# Process style and weight flags
if FLAGS.style_coefficients is None:
style_coefficients = [1.0 for _ in range(FLAGS.num_styles)]
else:
style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
if len(style_coefficients) != FLAGS.num_styles:
raise ValueError(
'number of style coefficients differs from number of styles')
content_weights = ast.literal_eval(FLAGS.content_weights)
style_weights = ast.literal_eval(FLAGS.style_weights)
# Load style images.
style_images, labels, style_gram_matrices = image_utils.style_image_inputs(
os.path.expanduser(FLAGS.style_dataset_file),
batch_size=FLAGS.num_styles, image_size=FLAGS.image_size,
square_crop=True, shuffle=False)
labels = tf.unstack(labels)
def _create_normalizer_params(style_label):
"""Creates normalizer parameters from a style label."""
return {'labels': tf.expand_dims(style_label, 0),
'num_categories': FLAGS.num_styles,
'center': True,
'scale': True}
# Dummy call to simplify the reuse logic
model.transform(inputs, reuse=False,
normalizer_params=_create_normalizer_params(labels[0]))
def _style_sweep(inputs):
"""Transfers all styles onto the input one at a time."""
inputs = tf.expand_dims(inputs, 0)
stylized_inputs = [
model.transform(
inputs,
reuse=True,
normalizer_params=_create_normalizer_params(style_label))
for _, style_label in enumerate(labels)]
return tf.concat([inputs] + stylized_inputs, 0)
if FLAGS.style_grid:
style_row = tf.concat(
[tf.ones([1, FLAGS.image_size, FLAGS.image_size, 3]), style_images],
0)
stylized_training_example = _style_sweep(inputs[0])
stylized_evaluation_images = [
_style_sweep(image) for image in tf.unstack(evaluation_images)]
stylized_noise = _style_sweep(
tf.random_uniform([FLAGS.image_size, FLAGS.image_size, 3]))
stylized_style_images = [
_style_sweep(image) for image in tf.unstack(style_images)]
if FLAGS.style_crossover:
grid = tf.concat(
[style_row, stylized_training_example, stylized_noise] +
stylized_evaluation_images + stylized_style_images,
0)
else:
grid = tf.concat(
[style_row, stylized_training_example, stylized_noise] +
stylized_evaluation_images,
0)
if FLAGS.style_crossover:
grid_shape = [
3 + evaluation_images.get_shape().as_list()[0] + FLAGS.num_styles,
1 + FLAGS.num_styles]
else:
grid_shape = [
3 + evaluation_images.get_shape().as_list()[0],
1 + FLAGS.num_styles]
tf.summary.image(
'Style Grid',
tf.cast(
image_utils.form_image_grid(
grid,
grid_shape,
[FLAGS.image_size, FLAGS.image_size],
3) * 255.0,
tf.uint8))
if FLAGS.learning_curves:
metrics = {}
for i, label in enumerate(labels):
gram_matrices = dict(
(key, value[i: i + 1])
for key, value in style_gram_matrices.items())
stylized_inputs = model.transform(
inputs,
reuse=True,
normalizer_params=_create_normalizer_params(label))
_, loss_dict = learning.total_loss(
inputs, stylized_inputs, gram_matrices, content_weights,
style_weights, reuse=i > 0)
for key, value in loss_dict.items():
metrics['{}_style_{}'.format(key, i)] = slim.metrics.streaming_mean(
value)
names_values, names_updates = slim.metrics.aggregate_metric_map(metrics)
for name, value in names_values.items():
summary_op = tf.summary.scalar(name, value, [])
print_op = tf.Print(summary_op, [value], name)
tf.add_to_collection(tf.GraphKeys.SUMMARIES, print_op)
eval_op = names_updates.values()
num_evals = FLAGS.num_evals
else:
eval_op = None
num_evals = 1
slim.evaluation.evaluation_loop(
master=FLAGS.master,
checkpoint_dir=os.path.expanduser(FLAGS.train_dir),
logdir=os.path.expanduser(FLAGS.eval_dir),
eval_op=eval_op,
num_evals=num_evals,
eval_interval_secs=FLAGS.eval_interval_secs)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
|
CiscoSystems/os-sqe
|
refs/heads/master
|
lab/nodes/xrvr.py
|
1
|
from lab.nodes.virtual_server import VirtualServer
# use telnet 0 5087 from xrnc to see the bootstrap process just after spinning up XRNC VM
# configuration is kept in sudo cat /etc/vpe/vsocsr/dl_server.ini on cisco@xrnc
# errors are logged in /var/log/sr/dl_registration_errors.log
class Xrvr(VirtualServer):
def __init__(self, **kwargs):
super(Xrvr, self).__init__(**kwargs)
self._expect_commands = {}
def disrupt(self, method_to_disrupt, downtime):
import time
vts_host = self.get_hardware_server()
if method_to_disrupt == 'libvirt-suspend':
# self.get_id()[-1] if id is "xrnc1" => 1, "xrnc2" => 2
vts_host.exe(command='virsh suspend xrnc{}'.format(self.get_node_id()[-1]))
time.sleep(downtime)
vts_host.exe(command='virsh resume xrnc{}'.format(self.get_node_id()[-1]))
elif method_to_disrupt == 'corosync-stop':
self.cmd('sudo service corosync stop', is_xrvr=False)
time.sleep(downtime)
self.cmd('sudo service corosync start', is_xrvr=False)
elif method_to_disrupt == 'ncs-stop':
self.cmd('sudo service ncs stop', is_xrvr=False)
time.sleep(downtime)
self.cmd('sudo service ncs start', is_xrvr=False)
elif method_to_disrupt == 'vm-reboot':
self.exe('set -m; sudo bash -c "ip link set dev eth0 down && ip link set dev eth1 down && sleep {0} && shutdown -r now" 2>/dev/null >/dev/null &'.format(downtime), is_warn_only=True)
time.sleep(downtime)
elif method_to_disrupt == 'isolate-from-mx':
# self.get_id()[-1] if id is "xrnc1" => 1, "xrnc2" => 2
ans = vts_host.exe('ip l | grep mgmt | grep xrnc{}'.format(self.get_node_id()[-1]))
if_name = ans.split()[1][:-1]
vts_host.exe('ip l s dev {} down'.format(if_name))
time.sleep(downtime)
vts_host.exe('ip l s dev {} up'.format(if_name))
elif method_to_disrupt == 'isolate-from-t':
# self.get_id()[-1] if id is "xrnc1" => 1, "xrnc2" => 2
            ans = vts_host.exe('ip l | grep tenant | grep xrnc{}'.format(self.get_node_id()[-1]))
if_name = ans.split()[1][:-1]
vts_host.exe('ip l s dev {} down'.format(if_name))
time.sleep(downtime)
vts_host.exe('ip l s dev {} up'.format(if_name))
def get_xrvr_ip_user_pass(self):
_, u, p = self.get_oob()
return self.get_ip_mx(), u, p
def get_xrnc_ip_user_pass(self):
return self._server.get_ssh()
def get_ip_t(self):
return self.get_nic('t').get_ip_and_mask()[0]
# noinspection PyMethodOverriding
def cmd(self, cmd, is_xrvr, is_warn_only=False): # XRVR uses redirection: ssh_username goes to DL while oob_username goes to XRVR, ip and password are the same for both
ip = self.get_ip_mx()
if is_xrvr:
_, username, password = self.get_oob()
if cmd not in self._expect_commands:
self.create_expect_command_file(cmd=cmd, ip=ip, username=username, password=password, is_xrvr=True)
ans = self._proxy.exe(command='expect {0}'.format(self._expect_commands[cmd]), is_warn_only=is_warn_only)
else:
ans = self.exe(command=cmd, is_warn_only=is_warn_only)
return ans
def create_expect_command_file(self, cmd, ip, username, password, is_xrvr):
import inspect
sudo_tmpl = '''spawn sshpass -p {p} ssh -o StrictHostKeyChecking=no {u}@{ip}
set timeout 20
expect {{
"$ "
}}
send "{cmd}\\r"
expect {{
": "
}}
send "{p}\\r"
sleep 1
expect {{
"$ " exit
}}
'''
xrvr_tmpl = '''spawn sshpass -p {p} ssh -o StrictHostKeyChecking=no {u}@{ip}
set timeout 20
expect {{
"#"
}}
send "terminal length 0\\r"
send "{cmd}\\r"
sleep 1
expect {{
"#" exit
}}
'''
tmpl = xrvr_tmpl if is_xrvr else sudo_tmpl
s = tmpl.format(p=password, u=username, ip=ip, cmd=cmd)
stack = inspect.stack()
cmd_name = stack[2][3]
expect_scripts_dir = 'expect-scripts'
file_name = 'expect-{}-{}'.format(self.get_node_id(), cmd_name)
        self._proxy.r_put_string_as_file_in_dir(string_to_put=s, file_name=file_name, in_directory=expect_scripts_dir)
self._expect_commands[cmd] = expect_scripts_dir + '/' + file_name
@staticmethod
def _get(raw, key):
"""
Return values of a key element found in raw text.
:param raw: looks like :
evpn
evi 10000
network-controller
host mac fa16.3e5b.9162
ipv4 address 10.23.23.2
switch 11.12.13.9
gateway 10.23.23.1 255.255.255.0
vlan 1002
:param key: A key string. Ex: vlan, gateway
:return: Value of a key parameter
"""
import re
try:
# First 2 lines are the called command
# The last line is a prompt
for line in raw.split('\r\n')[2:-1]:
if line.startswith('#'):
continue
m = re.search('\s*(?<={0} )(.*?)\r'.format(key), line)
if m:
return m.group(1)
except AttributeError:
return None
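    # Illustrative calls against the raw sample in the docstring above (the
    # regex relies on terminal output keeping a trailing '\r' on each line):
    #   _get(raw, 'vlan')   -> '1002'
    #   _get(raw, 'switch') -> '11.12.13.9'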
def xrvr_show_running_config(self):
return self.cmd('show running-config', is_xrvr=True)
def r_xrvr_day0_config(self):
import jinja2
template = jinja2.Template('''configure
router ospf 100
router-id 99.99.99.99
address-family ipv4 unicast
area 0.0.0.0
default-cost 10
interface Loopback0
interface GigabitEthernet0/0/0/0
commit
exit
exit
interface Loopback0
ipv4 address 99.99.99.99 255.255.255.255
exit
router bgp {{ bgp_asn }}
bgp router-id 99.99.99.99
address-family ipv4 unicast
address-family l2vpn evpn
retain route-target all
exit
exit
''')
command = template.render(bgp_asn=23)
self.cmd(command, is_xrvr=True)
def xrvr_show_host(self, evi, mac):
# mac should look like 0010.1000.2243
mac = mac.replace(':', '').lower()
        mac = '.'.join([mac[0:4], mac[4:8], mac[8:12]])
cmd = 'show running-config evpn evi {0} network-control host mac {1}'.format(evi, mac)
raw = self.cmd(cmd, is_xrvr=True)
if 'No such configuration item' not in raw:
return {
'ipv4_address': self._get(raw, 'ipv4 address'),
'switch': self._get(raw, 'switch'),
'mac': self._get(raw, 'host mac'),
'evi': self._get(raw, 'evi')
}
return None
def r_xrvr_show_evpn(self):
return self.cmd('show running-config evpn', is_xrvr=True)
def r_xrvr_show_bgp_l2vpn_evpn(self):
return self.cmd('sh bgp l2vpn evpn', is_xrvr=True)
def xrvr_show_connections_xrvr_vtf(self):
return self.exe('netstat -ant |grep 21345')
def get_config_and_net_part_bodies(self):
""" Deduce XRVR VM iso file configuration and net part for the libvirt domain
:return: tuple with (config body, net part body)
"""
cfg_tmpl = self.read_config_from_file(config_path='xrnc-vm-config.txt', directory='vts', is_as_string=True)
net_part_tmpl = self.read_config_from_file(config_path='xrnc-net-part-of-libvirt-domain.template', directory='vts', is_as_string=True)
dns_ip, ntp_ip = self.lab().get_dns()[0], self.lab().get_ntp()[0]
xrvr_name = self.get_node_id()
xrnc_name = xrvr_name.replace('xrvr', 'xrnc')
_, vtc_username, vtc_password = self.lab().get_node_by_id('vtc1').get_oob()
_, username, password = self._server.get_ssh()
_, oob_username, oob_password = self.get_oob()
mx_nic = self.get_nic('mx') # XRNC sits on mx and t nets
te_nic = self.get_nic('t')
vtc_mx_vip = mx_nic.get_net().get_ip_for_index(150)
dl_mx_ip, mx_net_mask = mx_nic.get_ip_and_mask()
mx_gw, mx_net_len = mx_nic.get_net().get_gw(), mx_nic.get_net().get_prefix_len()
xrvr_mx_ip = mx_nic.get_net().get_ip_for_index(200 + int(self.get_node_id()[-1]))
dl_te_ip, te_net_mask = te_nic.get_ip_and_mask()
te_vlan = te_nic.get_net().get_vlan_id()
te_gw, te_net_len = te_nic.get_net().get_gw(), te_nic.get_net().get_prefix_len()
xrvr_te_ip = te_nic.get_net().get_ip_for_index(200 + int(self.get_node_id()[-1]))
        # XRVR is a VM nested inside another VM running on the vts-host: the outer VM is called DL, the inner one XRVR, so two IPs are needed on the ssh and vts networks
cfg_body = cfg_tmpl.format(dl_mx_ip=dl_mx_ip, xrvr_mx_ip=xrvr_mx_ip, mx_net_mask=mx_net_mask, mx_net_len=mx_net_len, mx_gw=mx_gw,
dl_te_ip=dl_te_ip, xrvr_te_ip=xrvr_te_ip, te_net_mask=te_net_mask, te_net_len=te_net_len, te_gw=te_gw,
dns_ip=dns_ip, ntp_ip=ntp_ip, vtc_mx_ip=vtc_mx_vip,
xrnc_username=username, xrvr_username=oob_username, xrvr_password=oob_password, vtc_username=vtc_username, vtc_password=vtc_password,
xrnc_name=xrnc_name, xrvr_name=xrvr_name)
with self.open_artifact(xrnc_name, 'w') as f:
f.write(cfg_body)
net_part = net_part_tmpl.format(mx_nic_name='mx', t_nic_name='t', t_vlan=te_vlan)
return cfg_body, net_part
# def r_collect_info(self, regex):
# logs = ''
# for cmd in [self._form_log_grep_cmd(log_files='/var/log/sr/*err*.log', regex=regex)]:
# ans = self.cmd(cmd=cmd, is_xrvr=False, is_warn_only=True)
# logs += self._format_single_cmd_output(cmd=cmd, ans=ans)
# return logs
#
# return self._format_single_cmd_output(cmd='show running config', ans=self.xrvr_show_running_config())
def r_xrnc_set_mtu(self):
self.cmd('sudo ip l s dev br-underlay mtu 1400', is_xrvr=False) # https://cisco.jiveon.com/docs/DOC-1455175 step 12 about MTU
def r_xrnc_start_dl(self):
own_ip = self.get_nic('t').get_ip_and_mask()[0]
ips = [x.get_nic('t').get_ip_and_mask()[0] for x in self.lab().get_nodes_by_class(Xrvr)]
opposite_ip = next(iter(set(ips) - {own_ip}))
ans = self.cmd('sudo /opt/cisco/package/sr/bin/setupXRNC_HA.sh {}'.format(opposite_ip), is_xrvr=False) # https://cisco.jiveon.com/docs/DOC-1455175 Step 11
if 'please re-run this script with the -s flag' in ans:
self.cmd('sudo /opt/cisco/package/sr/bin/setupXRNC_HA.sh -s {}'.format(opposite_ip), is_xrvr=False) # https://cisco.jiveon.com/docs/DOC-1455175 Step 11
return True
def r_xrnc_restart_dl(self):
return self.cmd('sudo crm resource restart dl_server', is_xrvr=False)
def xrnc_get_interfaces_config(self):
import re
interfaces_text = self.cmd('cat /etc/network/interfaces', is_xrvr=False)
config = {}
interface_name = ''
for line in interfaces_text.split('\r\n'):
sr = re.search('iface (?P<name>.*?) inet', line)
if sr:
interface_name = sr.group('name')
config[interface_name] = ''
if interface_name:
config[interface_name] += line + '\r\n'
return config
    def r_edit_etc_hosts(self):
        # /etc/hosts expects '<ip> <name>'; quote the echo so the shell command stays on one line
        self.exe('grep -q {n} /etc/hosts || echo "{ip} {n}" >> /etc/hosts'.format(n=self.get_node_id(), ip=self.get_ip_mx()))
def r_border_leaf(self):
self.cmd(cmd='conf t interface Loopback0', is_xrvr=True)
    def r_xrnc_wait_all_online(self, n_retries=1):
        import time

        initial_retries = n_retries
        nodes = self.lab().get_xrvr()
        while True:
            if all([node.r_is_online() for node in nodes]):
                return True
            n_retries -= 1
            if n_retries == 0:
                raise RuntimeError('After {} retries some XRVR are not up'.format(initial_retries))
            time.sleep(30)
|
dsajkl/reqiop
|
refs/heads/master
|
pavelib/quality.py
|
2
|
"""
Check code quality using pep8, pylint, and diff_quality.
"""
from paver.easy import sh, task, cmdopts, needs, BuildFailure
import os
import re
from .utils.envs import Env
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
("system=", "s", "System to act on"),
("errors", "e", "Check for errors only"),
("limit=", "l", "limit for number of acceptable violations"),
])
def run_pylint(options):
"""
Run pylint on system code. When violations limit is passed in,
fail the task if too many violations are found.
"""
num_violations = 0
violations_limit = int(getattr(options, 'limit', -1))
errors = getattr(options, 'errors', False)
systems = getattr(options, 'system', 'lms,cms,common').split(',')
for system in systems:
# Directory to put the pylint report in.
# This makes the folder if it doesn't already exist.
report_dir = (Env.REPORT_DIR / system).makedirs_p()
flags = '-E' if errors else ''
apps = [system]
for directory in ['djangoapps', 'lib']:
dirs = os.listdir(os.path.join(system, directory))
apps.extend([d for d in dirs if os.path.isdir(os.path.join(system, directory, d))])
apps_list = ' '.join(apps)
pythonpath_prefix = (
"PYTHONPATH={system}:{system}/djangoapps:{system}/"
"lib:common/djangoapps:common/lib".format(
system=system
)
)
sh(
"{pythonpath_prefix} pylint {flags} -f parseable {apps} | "
"tee {report_dir}/pylint.report".format(
pythonpath_prefix=pythonpath_prefix,
flags=flags,
apps=apps_list,
report_dir=report_dir
)
)
num_violations += _count_pylint_violations(
"{report_dir}/pylint.report".format(report_dir=report_dir))
print("Number of pylint violations: " + str(num_violations))
if num_violations > violations_limit > -1:
raise Exception("Failed. Too many pylint violations. "
"The limit is {violations_limit}.".format(violations_limit=violations_limit))
def _count_pylint_violations(report_file):
"""
Parses a pylint report line-by-line and determines the number of violations reported
"""
num_violations_report = 0
# An example string:
# common/lib/xmodule/xmodule/tests/test_conditional.py:21: [C0111(missing-docstring), DummySystem] Missing docstring
# More examples can be found in the unit tests for this method
    pylint_pattern = re.compile(r".(\d+):\ \[(\D\d+.+\]).")
for line in open(report_file):
violation_list_for_line = pylint_pattern.split(line)
# If the string is parsed into four parts, then we've found a violation. Example of split parts:
# test file, line number, violation name, violation details
if len(violation_list_for_line) == 4:
num_violations_report += 1
return num_violations_report
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
("system=", "s", "System to act on"),
("limit=", "l", "limit for number of acceptable violations"),
])
def run_pep8(options):
"""
Run pep8 on system code. When violations limit is passed in,
fail the task if too many violations are found.
"""
num_violations = 0
systems = getattr(options, 'system', 'lms,cms,common').split(',')
violations_limit = int(getattr(options, 'limit', -1))
for system in systems:
# Directory to put the pep8 report in.
# This makes the folder if it doesn't already exist.
report_dir = (Env.REPORT_DIR / system).makedirs_p()
sh('pep8 {system} | tee {report_dir}/pep8.report'.format(system=system, report_dir=report_dir))
num_violations = num_violations + _count_pep8_violations(
"{report_dir}/pep8.report".format(report_dir=report_dir))
print("Number of pep8 violations: " + str(num_violations))
# Fail the task if the violations limit has been reached
if num_violations > violations_limit > -1:
raise Exception("Failed. Too many pep8 violations. "
"The limit is {violations_limit}.".format(violations_limit=violations_limit))
def _count_pep8_violations(report_file):
num_lines = sum(1 for line in open(report_file))
return num_lines
@task
@needs('pavelib.prereqs.install_python_prereqs')
@cmdopts([
("percentage=", "p", "fail if diff-quality is below this percentage"),
])
def run_quality(options):
"""
Build the html diff quality reports, and print the reports to the console.
:param: p, diff-quality will fail if the quality percentage calculated is
below this percentage. For example, if p is set to 80, and diff-quality finds
quality of the branch vs master is less than 80%, then this task will fail.
This threshold would be applied to both pep8 and pylint.
"""
# Directory to put the diff reports in.
# This makes the folder if it doesn't already exist.
dquality_dir = (Env.REPORT_DIR / "diff_quality").makedirs_p()
diff_quality_percentage_failure = False
# Set the string, if needed, to be used for the diff-quality --fail-under switch.
diff_threshold = int(getattr(options, 'percentage', -1))
percentage_string = ''
if diff_threshold > -1:
percentage_string = '--fail-under={0}'.format(diff_threshold)
# Generate diff-quality html report for pep8, and print to console
# If pep8 reports exist, use those
# Otherwise, `diff-quality` will call pep8 itself
pep8_files = get_violations_reports("pep8")
pep8_reports = u' '.join(pep8_files)
try:
sh(
"diff-quality --violations=pep8 {pep8_reports} {percentage_string} "
"--html-report {dquality_dir}/diff_quality_pep8.html".format(
pep8_reports=pep8_reports,
percentage_string=percentage_string,
dquality_dir=dquality_dir
)
)
except BuildFailure, error_message:
if is_percentage_failure(error_message):
diff_quality_percentage_failure = True
else:
raise BuildFailure(error_message)
# Generate diff-quality html report for pylint, and print to console
# If pylint reports exist, use those
# Otherwise, `diff-quality` will call pylint itself
pylint_files = get_violations_reports("pylint")
pylint_reports = u' '.join(pylint_files)
pythonpath_prefix = (
"PYTHONPATH=$PYTHONPATH:lms:lms/djangoapps:lms/lib:cms:cms/djangoapps:cms/lib:"
"common:common/djangoapps:common/lib"
)
try:
sh(
"{pythonpath_prefix} diff-quality --violations=pylint {pylint_reports} {percentage_string} "
"--html-report {dquality_dir}/diff_quality_pylint.html".format(
pythonpath_prefix=pythonpath_prefix,
pylint_reports=pylint_reports,
percentage_string=percentage_string,
dquality_dir=dquality_dir
)
)
except BuildFailure, error_message:
if is_percentage_failure(error_message):
diff_quality_percentage_failure = True
else:
raise BuildFailure(error_message)
# If one of the diff-quality runs fails, then paver exits with an error when it is finished
if diff_quality_percentage_failure:
raise BuildFailure("Diff-quality failure(s).")
def is_percentage_failure(error_message):
    """
    When diff-quality is run with a threshold percentage, it ends with an exit code of 1.
    This bubbles up to paver as a subprocess return code error. Return True if the error
    indicates such a threshold failure; the caller re-raises anything else.
    """
    return "Subprocess return code: 1" in error_message
def get_violations_reports(violations_type):
"""
Finds violations reports files by naming convention (e.g., all "pep8.report" files)
"""
violations_files = []
for subdir, _dirs, files in os.walk(os.path.join(Env.REPORT_DIR)):
for f in files:
if f == "{violations_type}.report".format(violations_type=violations_type):
violations_files.append(os.path.join(subdir, f))
return violations_files
|
aerophile/django
|
refs/heads/master
|
tests/sessions_tests/tests.py
|
8
|
import base64
import os
import shutil
import string
import tempfile
import unittest
from datetime import timedelta
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import \
SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import \
SessionStore as CookieSession
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import patch_logger
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.six.moves import http_cookies
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
        # Need to reset these on the session itself to pretend we haven't accessed it:
        self.session.accessed = False
        self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
if (hasattr(self.session, '_cache') and 'DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail(
"The session object did not save properly. "
"Middleware may be saving cache items without namespaces."
)
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
        # A normal session has a max age equal to settings.SESSION_COOKIE_AGE
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf')
with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
self.assertEqual({}, self.session.decode(bad_encode))
# check that the failed decode is logged
self.assertEqual(len(calls), 1)
self.assertIn('corrupted', calls[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
def test_session_str(self):
"Session repr should be the session key."
self.session['x'] = 1
self.session.save()
session_key = self.session.session_key
s = Session.objects.get(session_key=session_key)
self.assertEqual(force_text(s), session_key)
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in normal way
"""
self.session['x'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
# Change it
Session.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, Session.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, Session.objects.count())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, Session.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
        # Ticket #21000: the CacheDB backend should respect SESSION_CACHE_ALIAS.
self.assertRaises(InvalidCacheBackendError, self.backend)
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a/b/c")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(caches['default'].get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'session',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertEqual(caches['default'].get(self.session.cache_key), None)
self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None)
class SessionMiddlewareTests(TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
def test_session_delete_on_end(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header looks like:
# Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}=; expires=Thu, 01-Jan-1970 00:00:00 GMT; '
'Max-Age=0; Path=/'.format(settings.SESSION_COOKIE_NAME),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
self.session.serializer = PickleSerializer
self.session.load()
|
Aloomaio/googleads-python-lib
|
refs/heads/master
|
examples/adwords/authentication/create_adwords_client_with_refresh_token.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initializes an AdWordsClient without using yaml-cached credentials.
While our LoadFromStorage method provides a useful shortcut to instantiate a
client if you regularly use just one set of credentials, production applications
may need to swap out users. This example shows you how to create an OAuth2
client and an AdWordsClient without relying on a yaml file.
"""
from googleads import adwords
from googleads import oauth2
# OAuth2 credential information. In a real application, you'd probably be
# pulling these values from a credential storage.
CLIENT_ID = 'INSERT_CLIENT_ID_HERE'
CLIENT_SECRET = 'INSERT_CLIENT_SECRET_HERE'
REFRESH_TOKEN = 'INSERT_REFRESH_TOKEN_HERE'
# AdWords API information.
DEVELOPER_TOKEN = 'INSERT_DEVELOPER_TOKEN_HERE'
USER_AGENT = 'INSERT_USER_AGENT_HERE'
CLIENT_CUSTOMER_ID = 'INSERT_CLIENT_CUSTOMER_ID_HERE'
def main(client_id, client_secret, refresh_token, developer_token, user_agent,
client_customer_id):
oauth2_client = oauth2.GoogleRefreshTokenClient(
client_id, client_secret, refresh_token)
adwords_client = adwords.AdWordsClient(
developer_token, oauth2_client, user_agent,
client_customer_id=client_customer_id)
customer_service = adwords_client.GetService('CustomerService',
version='v201702')
customers = customer_service.getCustomers()
print 'You are logged in as a user with access to the following customers:'
for customer in customers:
print '\t%s' % customer['customerId']
if __name__ == '__main__':
main(CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, DEVELOPER_TOKEN, USER_AGENT,
CLIENT_CUSTOMER_ID)
|
eXistenZNL/SickRage
|
refs/heads/master
|
lib/unidecode/x072.py
|
252
|
data = (
'He ', # 0x00
'Lan ', # 0x01
'Biao ', # 0x02
'Rong ', # 0x03
'Li ', # 0x04
'Mo ', # 0x05
'Bao ', # 0x06
'Ruo ', # 0x07
'Lu ', # 0x08
'La ', # 0x09
'Ao ', # 0x0a
'Xun ', # 0x0b
'Kuang ', # 0x0c
'Shuo ', # 0x0d
'[?] ', # 0x0e
'Li ', # 0x0f
'Lu ', # 0x10
'Jue ', # 0x11
'Liao ', # 0x12
'Yan ', # 0x13
'Xi ', # 0x14
'Xie ', # 0x15
'Long ', # 0x16
'Ye ', # 0x17
'[?] ', # 0x18
'Rang ', # 0x19
'Yue ', # 0x1a
'Lan ', # 0x1b
'Cong ', # 0x1c
'Jue ', # 0x1d
'Tong ', # 0x1e
'Guan ', # 0x1f
'[?] ', # 0x20
'Che ', # 0x21
'Mi ', # 0x22
'Tang ', # 0x23
'Lan ', # 0x24
'Zhu ', # 0x25
'[?] ', # 0x26
'Ling ', # 0x27
'Cuan ', # 0x28
'Yu ', # 0x29
'Zhua ', # 0x2a
'Tsumekanmuri ', # 0x2b
'Pa ', # 0x2c
'Zheng ', # 0x2d
'Pao ', # 0x2e
'Cheng ', # 0x2f
'Yuan ', # 0x30
'Ai ', # 0x31
'Wei ', # 0x32
'[?] ', # 0x33
'Jue ', # 0x34
'Jue ', # 0x35
'Fu ', # 0x36
'Ye ', # 0x37
'Ba ', # 0x38
'Die ', # 0x39
'Ye ', # 0x3a
'Yao ', # 0x3b
'Zu ', # 0x3c
'Shuang ', # 0x3d
'Er ', # 0x3e
'Qiang ', # 0x3f
'Chuang ', # 0x40
'Ge ', # 0x41
'Zang ', # 0x42
'Die ', # 0x43
'Qiang ', # 0x44
'Yong ', # 0x45
'Qiang ', # 0x46
'Pian ', # 0x47
'Ban ', # 0x48
'Pan ', # 0x49
'Shao ', # 0x4a
'Jian ', # 0x4b
'Pai ', # 0x4c
'Du ', # 0x4d
'Chuang ', # 0x4e
'Tou ', # 0x4f
'Zha ', # 0x50
'Bian ', # 0x51
'Die ', # 0x52
'Bang ', # 0x53
'Bo ', # 0x54
'Chuang ', # 0x55
'You ', # 0x56
'[?] ', # 0x57
'Du ', # 0x58
'Ya ', # 0x59
'Cheng ', # 0x5a
'Niu ', # 0x5b
'Ushihen ', # 0x5c
'Pin ', # 0x5d
'Jiu ', # 0x5e
'Mou ', # 0x5f
'Tuo ', # 0x60
'Mu ', # 0x61
'Lao ', # 0x62
'Ren ', # 0x63
'Mang ', # 0x64
'Fang ', # 0x65
'Mao ', # 0x66
'Mu ', # 0x67
'Gang ', # 0x68
'Wu ', # 0x69
'Yan ', # 0x6a
'Ge ', # 0x6b
'Bei ', # 0x6c
'Si ', # 0x6d
'Jian ', # 0x6e
'Gu ', # 0x6f
'You ', # 0x70
'Ge ', # 0x71
'Sheng ', # 0x72
'Mu ', # 0x73
'Di ', # 0x74
'Qian ', # 0x75
'Quan ', # 0x76
'Quan ', # 0x77
'Zi ', # 0x78
'Te ', # 0x79
'Xi ', # 0x7a
'Mang ', # 0x7b
'Keng ', # 0x7c
'Qian ', # 0x7d
'Wu ', # 0x7e
'Gu ', # 0x7f
'Xi ', # 0x80
'Li ', # 0x81
'Li ', # 0x82
'Pou ', # 0x83
'Ji ', # 0x84
'Gang ', # 0x85
'Zhi ', # 0x86
'Ben ', # 0x87
'Quan ', # 0x88
'Run ', # 0x89
'Du ', # 0x8a
'Ju ', # 0x8b
'Jia ', # 0x8c
'Jian ', # 0x8d
'Feng ', # 0x8e
'Pian ', # 0x8f
'Ke ', # 0x90
'Ju ', # 0x91
'Kao ', # 0x92
'Chu ', # 0x93
'Xi ', # 0x94
'Bei ', # 0x95
'Luo ', # 0x96
'Jie ', # 0x97
'Ma ', # 0x98
'San ', # 0x99
'Wei ', # 0x9a
'Li ', # 0x9b
'Dun ', # 0x9c
'Tong ', # 0x9d
'[?] ', # 0x9e
'Jiang ', # 0x9f
'Ikenie ', # 0xa0
'Li ', # 0xa1
'Du ', # 0xa2
'Lie ', # 0xa3
'Pi ', # 0xa4
'Piao ', # 0xa5
'Bao ', # 0xa6
'Xi ', # 0xa7
'Chou ', # 0xa8
'Wei ', # 0xa9
'Kui ', # 0xaa
'Chou ', # 0xab
'Quan ', # 0xac
'Fan ', # 0xad
'Ba ', # 0xae
'Fan ', # 0xaf
'Qiu ', # 0xb0
'Ji ', # 0xb1
'Cai ', # 0xb2
'Chuo ', # 0xb3
'An ', # 0xb4
'Jie ', # 0xb5
'Zhuang ', # 0xb6
'Guang ', # 0xb7
'Ma ', # 0xb8
'You ', # 0xb9
'Kang ', # 0xba
'Bo ', # 0xbb
'Hou ', # 0xbc
'Ya ', # 0xbd
'Yin ', # 0xbe
'Huan ', # 0xbf
'Zhuang ', # 0xc0
'Yun ', # 0xc1
'Kuang ', # 0xc2
'Niu ', # 0xc3
'Di ', # 0xc4
'Qing ', # 0xc5
'Zhong ', # 0xc6
'Mu ', # 0xc7
'Bei ', # 0xc8
'Pi ', # 0xc9
'Ju ', # 0xca
'Ni ', # 0xcb
'Sheng ', # 0xcc
'Pao ', # 0xcd
'Xia ', # 0xce
'Tuo ', # 0xcf
'Hu ', # 0xd0
'Ling ', # 0xd1
'Fei ', # 0xd2
'Pi ', # 0xd3
'Ni ', # 0xd4
'Ao ', # 0xd5
'You ', # 0xd6
'Gou ', # 0xd7
'Yue ', # 0xd8
'Ju ', # 0xd9
'Dan ', # 0xda
'Po ', # 0xdb
'Gu ', # 0xdc
'Xian ', # 0xdd
'Ning ', # 0xde
'Huan ', # 0xdf
'Hen ', # 0xe0
'Jiao ', # 0xe1
'He ', # 0xe2
'Zhao ', # 0xe3
'Ji ', # 0xe4
'Xun ', # 0xe5
'Shan ', # 0xe6
'Ta ', # 0xe7
'Rong ', # 0xe8
'Shou ', # 0xe9
'Tong ', # 0xea
'Lao ', # 0xeb
'Du ', # 0xec
'Xia ', # 0xed
'Shi ', # 0xee
'Hua ', # 0xef
'Zheng ', # 0xf0
'Yu ', # 0xf1
'Sun ', # 0xf2
'Yu ', # 0xf3
'Bi ', # 0xf4
'Mang ', # 0xf5
'Xi ', # 0xf6
'Juan ', # 0xf7
'Li ', # 0xf8
'Xia ', # 0xf9
'Yin ', # 0xfa
'Suan ', # 0xfb
'Lang ', # 0xfc
'Bei ', # 0xfd
'Zhi ', # 0xfe
'Yan ', # 0xff
)
|
prefetchnta/questlab
|
refs/heads/master
|
bin/x64bin/python/36/Lib/email/headerregistry.py
|
1
|
"""Representing and manipulating email headers via custom objects.
This module provides an implementation of the HeaderRegistry API.
The implementation is designed to flexibly follow RFC5322 rules.
Eventually HeaderRegistry will be a public API, but it isn't yet,
and will probably change some before that happens.
"""
from types import MappingProxyType
from email import utils
from email import errors
from email import _header_value_parser as parser
class Address:
def __init__(self, display_name='', username='', domain='', addr_spec=None):
"""Create an object representing a full email address.
An address can have a 'display_name', a 'username', and a 'domain'. In
addition to specifying the username and domain separately, they may be
specified together by using the addr_spec keyword *instead of* the
username and domain keywords. If an addr_spec string is specified it
must be properly quoted according to RFC 5322 rules; an error will be
raised if it is not.
An Address object has display_name, username, domain, and addr_spec
attributes, all of which are read-only. The addr_spec and the string
value of the object are both quoted according to RFC5322 rules, but
without any Content Transfer Encoding.
"""
inputs = ''.join(filter(None, (display_name, username, domain, addr_spec)))
if '\r' in inputs or '\n' in inputs:
raise ValueError("invalid arguments; address parts cannot contain CR or LF")
# This clause with its potential 'raise' may only happen when an
# application program creates an Address object using an addr_spec
# keyword. The email library code itself must always supply username
# and domain.
if addr_spec is not None:
if username or domain:
raise TypeError("addrspec specified when username and/or "
"domain also specified")
a_s, rest = parser.get_addr_spec(addr_spec)
if rest:
raise ValueError("Invalid addr_spec; only '{}' "
"could be parsed from '{}'".format(
a_s, addr_spec))
if a_s.all_defects:
raise a_s.all_defects[0]
username = a_s.local_part
domain = a_s.domain
self._display_name = display_name
self._username = username
self._domain = domain
@property
def display_name(self):
return self._display_name
@property
def username(self):
return self._username
@property
def domain(self):
return self._domain
@property
def addr_spec(self):
"""The addr_spec (username@domain) portion of the address, quoted
according to RFC 5322 rules, but with no Content Transfer Encoding.
"""
nameset = set(self.username)
if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS):
lp = parser.quote_string(self.username)
else:
lp = self.username
if self.domain:
return lp + '@' + self.domain
if not lp:
return '<>'
return lp
def __repr__(self):
return "{}(display_name={!r}, username={!r}, domain={!r})".format(
self.__class__.__name__,
self.display_name, self.username, self.domain)
def __str__(self):
nameset = set(self.display_name)
if len(nameset) > len(nameset-parser.SPECIALS):
disp = parser.quote_string(self.display_name)
else:
disp = self.display_name
if disp:
addr_spec = '' if self.addr_spec=='<>' else self.addr_spec
return "{} <{}>".format(disp, addr_spec)
return self.addr_spec
def __eq__(self, other):
if type(other) != type(self):
return False
return (self.display_name == other.display_name and
self.username == other.username and
self.domain == other.domain)
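# Usage sketch (illustrative, not part of the original module):
#
#   Address(display_name='Ann', username='ann', domain='example.com')
#   Address(addr_spec='ann@example.com')  # username/domain parsed from addr_spec
#   str(Address('Ann', 'ann', 'example.com'))  # -> 'Ann <ann@example.com>'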
class Group:
def __init__(self, display_name=None, addresses=None):
"""Create an object representing an address group.
An address group consists of a display_name followed by colon and a
list of addresses (see Address) terminated by a semi-colon. The Group
is created by specifying a display_name and a possibly empty list of
Address objects. A Group can also be used to represent a single
address that is not in a group, which is convenient when manipulating
lists that are a combination of Groups and individual Addresses. In
this case the display_name should be set to None. In particular, the
string representation of a Group whose display_name is None is the same
as the Address object, if there is one and only one Address object in
the addresses list.
"""
self._display_name = display_name
self._addresses = tuple(addresses) if addresses else tuple()
@property
def display_name(self):
return self._display_name
@property
def addresses(self):
return self._addresses
def __repr__(self):
return "{}(display_name={!r}, addresses={!r}".format(
self.__class__.__name__,
self.display_name, self.addresses)
def __str__(self):
if self.display_name is None and len(self.addresses)==1:
return str(self.addresses[0])
disp = self.display_name
if disp is not None:
nameset = set(disp)
if len(nameset) > len(nameset-parser.SPECIALS):
disp = parser.quote_string(disp)
adrstr = ", ".join(str(x) for x in self.addresses)
adrstr = ' ' + adrstr if adrstr else adrstr
return "{}:{};".format(disp, adrstr)
def __eq__(self, other):
if type(other) != type(self):
return False
return (self.display_name == other.display_name and
self.addresses == other.addresses)
# Header Classes #
class BaseHeader(str):
"""Base class for message headers.
Implements generic behavior and provides tools for subclasses.
A subclass must define a classmethod named 'parse' that takes an unfolded
value string and a dictionary as its arguments. The dictionary will
contain one key, 'defects', initialized to an empty list. After the call
the dictionary must contain two additional keys: parse_tree, set to the
parse tree obtained from parsing the header, and 'decoded', set to the
string value of the idealized representation of the data from the value.
(That is, encoded words are decoded, and values that have canonical
representations are so represented.)
The defects key is intended to collect parsing defects, which the message
parser will subsequently dispose of as appropriate. The parser should not,
insofar as practical, raise any errors. Defects should be added to the
list instead. The standard header parsers register defects for RFC
compliance issues, for obsolete RFC syntax, and for unrecoverable parsing
errors.
The parse method may add additional keys to the dictionary. In this case
the subclass must define an 'init' method, which will be passed the
dictionary as its keyword arguments. The method should use (usually by
setting them as the value of similarly named attributes) and remove all the
extra keys added by its parse method, and then use super to call its parent
class with the remaining arguments and keywords.
The subclass should also make sure that a 'max_count' attribute is defined
that is either None or 1. XXX: need to better define this API.
"""
def __new__(cls, name, value):
kwds = {'defects': []}
cls.parse(value, kwds)
if utils._has_surrogates(kwds['decoded']):
kwds['decoded'] = utils._sanitize(kwds['decoded'])
self = str.__new__(cls, kwds['decoded'])
del kwds['decoded']
self.init(name, **kwds)
return self
def init(self, name, *, parse_tree, defects):
self._name = name
self._parse_tree = parse_tree
self._defects = defects
@property
def name(self):
return self._name
@property
def defects(self):
return tuple(self._defects)
def __reduce__(self):
return (
_reconstruct_header,
(
self.__class__.__name__,
self.__class__.__bases__,
str(self),
),
self.__dict__)
@classmethod
def _reconstruct(cls, value):
return str.__new__(cls, value)
def fold(self, *, policy):
"""Fold header according to policy.
The parsed representation of the header is folded according to
RFC5322 rules, as modified by the policy. If the parse tree
contains surrogateescaped bytes, the bytes are CTE encoded using
the charset 'unknown-8bit".
Any non-ASCII characters in the parse tree are CTE encoded using
charset utf-8. XXX: make this a policy setting.
The returned value is an ASCII-only string possibly containing linesep
characters, and ending with a linesep character. The string includes
the header name and the ': ' separator.
"""
        # At some point we need to put fws here if it was in the source.
header = parser.Header([
parser.HeaderLabel([
parser.ValueTerminal(self.name, 'header-name'),
parser.ValueTerminal(':', 'header-sep')]),
])
if self._parse_tree:
header.append(
parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]))
header.append(self._parse_tree)
return header.fold(policy=policy)
def _reconstruct_header(cls_name, bases, value):
return type(cls_name, bases, {})._reconstruct(value)
class UnstructuredHeader:
max_count = None
value_parser = staticmethod(parser.get_unstructured)
@classmethod
def parse(cls, value, kwds):
kwds['parse_tree'] = cls.value_parser(value)
kwds['decoded'] = str(kwds['parse_tree'])
class UniqueUnstructuredHeader(UnstructuredHeader):
max_count = 1
class DateHeader:
"""Header whose value consists of a single timestamp.
Provides an additional attribute, datetime, which is either an aware
datetime using a timezone, or a naive datetime if the timezone
in the input string is -0000. Also accepts a datetime as input.
The 'value' attribute is the normalized form of the timestamp,
which means it is the output of format_datetime on the datetime.
"""
max_count = None
# This is used only for folding, not for creating 'decoded'.
value_parser = staticmethod(parser.get_unstructured)
@classmethod
def parse(cls, value, kwds):
if not value:
kwds['defects'].append(errors.HeaderMissingRequiredValue())
kwds['datetime'] = None
kwds['decoded'] = ''
kwds['parse_tree'] = parser.TokenList()
return
if isinstance(value, str):
value = utils.parsedate_to_datetime(value)
kwds['datetime'] = value
kwds['decoded'] = utils.format_datetime(kwds['datetime'])
kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
def init(self, *args, **kw):
self._datetime = kw.pop('datetime')
super().init(*args, **kw)
@property
def datetime(self):
return self._datetime
class UniqueDateHeader(DateHeader):
max_count = 1
class AddressHeader:
max_count = None
@staticmethod
def value_parser(value):
address_list, value = parser.get_address_list(value)
assert not value, 'this should not happen'
return address_list
@classmethod
def parse(cls, value, kwds):
if isinstance(value, str):
# We are translating here from the RFC language (address/mailbox)
# to our API language (group/address).
kwds['parse_tree'] = address_list = cls.value_parser(value)
groups = []
for addr in address_list.addresses:
groups.append(Group(addr.display_name,
[Address(mb.display_name or '',
mb.local_part or '',
mb.domain or '')
for mb in addr.all_mailboxes]))
defects = list(address_list.all_defects)
else:
# Assume it is Address/Group stuff
if not hasattr(value, '__iter__'):
value = [value]
groups = [Group(None, [item]) if not hasattr(item, 'addresses')
else item
for item in value]
defects = []
kwds['groups'] = groups
kwds['defects'] = defects
kwds['decoded'] = ', '.join([str(item) for item in groups])
if 'parse_tree' not in kwds:
kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
def init(self, *args, **kw):
self._groups = tuple(kw.pop('groups'))
self._addresses = None
super().init(*args, **kw)
@property
def groups(self):
return self._groups
@property
def addresses(self):
if self._addresses is None:
self._addresses = tuple([address for group in self._groups
for address in group.addresses])
return self._addresses
class UniqueAddressHeader(AddressHeader):
max_count = 1
class SingleAddressHeader(AddressHeader):
@property
def address(self):
if len(self.addresses)!=1:
raise ValueError(("value of single address header {} is not "
"a single address").format(self.name))
return self.addresses[0]
class UniqueSingleAddressHeader(SingleAddressHeader):
max_count = 1
class MIMEVersionHeader:
max_count = 1
value_parser = staticmethod(parser.parse_mime_version)
@classmethod
def parse(cls, value, kwds):
kwds['parse_tree'] = parse_tree = cls.value_parser(value)
kwds['decoded'] = str(parse_tree)
kwds['defects'].extend(parse_tree.all_defects)
kwds['major'] = None if parse_tree.minor is None else parse_tree.major
kwds['minor'] = parse_tree.minor
if parse_tree.minor is not None:
kwds['version'] = '{}.{}'.format(kwds['major'], kwds['minor'])
else:
kwds['version'] = None
def init(self, *args, **kw):
self._version = kw.pop('version')
self._major = kw.pop('major')
self._minor = kw.pop('minor')
super().init(*args, **kw)
@property
def major(self):
return self._major
@property
def minor(self):
return self._minor
@property
def version(self):
return self._version
class ParameterizedMIMEHeader:
    # Mixin that handles the params dict. Must be subclassed, and a
    # value_parser for the specific header must be provided.
max_count = 1
@classmethod
def parse(cls, value, kwds):
kwds['parse_tree'] = parse_tree = cls.value_parser(value)
kwds['decoded'] = str(parse_tree)
kwds['defects'].extend(parse_tree.all_defects)
if parse_tree.params is None:
kwds['params'] = {}
else:
# The MIME RFCs specify that parameter ordering is arbitrary.
kwds['params'] = {utils._sanitize(name).lower():
utils._sanitize(value)
for name, value in parse_tree.params}
def init(self, *args, **kw):
self._params = kw.pop('params')
super().init(*args, **kw)
@property
def params(self):
return MappingProxyType(self._params)
class ContentTypeHeader(ParameterizedMIMEHeader):
value_parser = staticmethod(parser.parse_content_type_header)
def init(self, *args, **kw):
super().init(*args, **kw)
self._maintype = utils._sanitize(self._parse_tree.maintype)
self._subtype = utils._sanitize(self._parse_tree.subtype)
@property
def maintype(self):
return self._maintype
@property
def subtype(self):
return self._subtype
@property
def content_type(self):
return self.maintype + '/' + self.subtype
class ContentDispositionHeader(ParameterizedMIMEHeader):
value_parser = staticmethod(parser.parse_content_disposition_header)
def init(self, *args, **kw):
super().init(*args, **kw)
cd = self._parse_tree.content_disposition
self._content_disposition = cd if cd is None else utils._sanitize(cd)
@property
def content_disposition(self):
return self._content_disposition
class ContentTransferEncodingHeader:
max_count = 1
value_parser = staticmethod(parser.parse_content_transfer_encoding_header)
@classmethod
def parse(cls, value, kwds):
kwds['parse_tree'] = parse_tree = cls.value_parser(value)
kwds['decoded'] = str(parse_tree)
kwds['defects'].extend(parse_tree.all_defects)
def init(self, *args, **kw):
super().init(*args, **kw)
self._cte = utils._sanitize(self._parse_tree.cte)
@property
def cte(self):
return self._cte
# The header factory #
_default_header_map = {
'subject': UniqueUnstructuredHeader,
'date': UniqueDateHeader,
'resent-date': DateHeader,
'orig-date': UniqueDateHeader,
'sender': UniqueSingleAddressHeader,
'resent-sender': SingleAddressHeader,
'to': UniqueAddressHeader,
'resent-to': AddressHeader,
'cc': UniqueAddressHeader,
'resent-cc': AddressHeader,
'bcc': UniqueAddressHeader,
'resent-bcc': AddressHeader,
'from': UniqueAddressHeader,
'resent-from': AddressHeader,
'reply-to': UniqueAddressHeader,
'mime-version': MIMEVersionHeader,
'content-type': ContentTypeHeader,
'content-disposition': ContentDispositionHeader,
'content-transfer-encoding': ContentTransferEncodingHeader,
}
class HeaderRegistry:
"""A header_factory and header registry."""
def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader,
use_default_map=True):
"""Create a header_factory that works with the Policy API.
base_class is the class that will be the last class in the created
header class's __bases__ list. default_class is the class that will be
used if "name" (see __call__) does not appear in the registry.
use_default_map controls whether or not the default mapping of names to
specialized classes is copied in to the registry when the factory is
created. The default is True.
"""
self.registry = {}
self.base_class = base_class
self.default_class = default_class
if use_default_map:
self.registry.update(_default_header_map)
def map_to_type(self, name, cls):
"""Register cls as the specialized class for handling "name" headers.
"""
self.registry[name.lower()] = cls
def __getitem__(self, name):
cls = self.registry.get(name.lower(), self.default_class)
return type('_'+cls.__name__, (cls, self.base_class), {})
def __call__(self, name, value):
"""Create a header instance for header 'name' from 'value'.
Creates a header instance by creating a specialized class for parsing
and representing the specified header by combining the factory
base_class with a specialized class from the registry or the
default_class, and passing the name and value to the constructed
class's constructor.
"""
return self[name](name, value)
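# Usage sketch (illustrative): the registry acts as a header factory for the
# policy machinery, and can also be exercised directly:
#
#   _registry = HeaderRegistry()
#   _to = _registry('To', 'Ann <ann@example.com>')
#   _to.name                    # -> 'To'
#   _to.addresses[0].addr_spec  # -> 'ann@example.com'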
|
1013553207/django
|
refs/heads/master
|
django/contrib/admin/templatetags/admin_urls.py
|
553
|
from django import template
from django.contrib.admin.utils import quote
from django.core.urlresolvers import Resolver404, get_script_prefix, resolve
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse
register = template.Library()
@register.filter
def admin_urlname(value, arg):
return 'admin:%s_%s_%s' % (value.app_label, value.model_name, arg)
@register.filter
def admin_urlquote(value):
return quote(value)
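# Template usage sketch (illustrative): the admin templates combine these as
#   {% url opts|admin_urlname:'change' original.pk|admin_urlquote %}
# which reverses e.g. 'admin:myapp_mymodel_change' with a safely quoted pk.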
@register.simple_tag(takes_context=True)
def add_preserved_filters(context, url, popup=False, to_field=None):
opts = context.get('opts')
preserved_filters = context.get('preserved_filters')
parsed_url = list(urlparse(url))
parsed_qs = dict(parse_qsl(parsed_url[4]))
merged_qs = dict()
if opts and preserved_filters:
preserved_filters = dict(parse_qsl(preserved_filters))
match_url = '/%s' % url.partition(get_script_prefix())[2]
try:
match = resolve(match_url)
except Resolver404:
pass
else:
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if changelist_url == current_url and '_changelist_filters' in preserved_filters:
preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))
merged_qs.update(preserved_filters)
if popup:
from django.contrib.admin.options import IS_POPUP_VAR
merged_qs[IS_POPUP_VAR] = 1
if to_field:
from django.contrib.admin.options import TO_FIELD_VAR
merged_qs[TO_FIELD_VAR] = to_field
merged_qs.update(parsed_qs)
parsed_url[4] = urlencode(merged_qs)
return urlunparse(parsed_url)
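# Usage sketch: in admin templates, {% add_preserved_filters some_url %}
# re-appends the active changelist querystring (e.g. _changelist_filters)
# to some_url, so links and redirects keep the current list filters.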
|
xianjunzhengbackup/Cloud-Native-Python
|
refs/heads/master
|
env/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/contrib/__init__.py
|
12133432
| |
hinrek/Suvepraktika
|
refs/heads/Django-working
|
mysite/formats/et/__init__.py
|
12133432
| |
david415/tahoe-lafs
|
refs/heads/master
|
src/allmydata/immutable/downloader/__init__.py
|
12133432
| |
klahnakoski/ActiveData-ETL
|
refs/heads/dev
|
vendor/mo_math/vendor/__init__.py
|
12133432
| |
liavkoren/djangoDev
|
refs/heads/master
|
django/conf/locale/es/__init__.py
|
12133432
| |
niteoweb/libcloud
|
refs/heads/niteoweb_internal_release
|
docs/examples/dns/list_zone_records.py
|
64
|
from libcloud.dns.providers import get_driver
from libcloud.dns.types import Provider
CREDENTIALS_ZERIGO = ('email', 'api key')
ZONE_ID = 'example.myzone.com'
Cls = get_driver(Provider.ZERIGO)
driver = Cls(*CREDENTIALS_ZERIGO)
zone = driver.get_zone(zone_id=ZONE_ID)
records = driver.list_records(zone=zone)
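# Illustrative continuation (not part of the original example): each Record
# exposes id, name, type and data.
# for record in records:
#     print(record.id, record.name, record.type, record.data)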
|
zachwick/linux
|
refs/heads/master
|
Documentation/target/tcm_mod_builder.py
|
215
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
buf += " &tpg->se_tpg, tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = " + fabric_mod_name + ",\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
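# After the scan, fabric_ops holds the raw "(*func)(...)" prototype lines
# lifted from target_core_fabric.h; tcm_mod_dump_fabric_ops() below matches
# on those names to emit the corresponding stub definitions and prototypes.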
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
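# Example invocation, per Documentation/target/tcm_mod_builder.txt:
#   ./tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
# which creates drivers/target/tcm_nab5000/ with the *_base.h, *_fabric.[ch],
# *_configfs.c, Kconfig and Makefile skeletons generated above.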
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
MillerDix/NEChromeX
|
refs/heads/master
|
flaskTest/venv/lib/python2.7/site-packages/werkzeug/routing.py
|
87
|
# -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that then return the value.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here is a simple example that creates a URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
Per default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible to the given
method (for example there were only rules for `GET` and `HEAD` and
routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import difflib
import re
import uuid
import posixpath
from pprint import pformat
from threading import Lock
from werkzeug.urls import url_encode, url_quote, url_join
from werkzeug.utils import redirect, format_string
from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed, \
BadHost
from werkzeug._internal import _get_environ, _encode_idna
from werkzeug._compat import itervalues, iteritems, to_unicode, to_bytes, \
text_type, string_types, native_string_result, \
implements_to_string, wsgi_decoding_dance
from werkzeug.datastructures import ImmutableDict, MultiDict
from werkzeug.utils import cached_property
_rule_re = re.compile(r'''
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
''', re.VERBOSE)
_simple_rule_re = re.compile(r'<([^>]+)>')
_converter_args_re = re.compile(r'''
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
\w+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
''', re.VERBOSE | re.UNICODE)
_PYTHON_CONSTANTS = {
'None': None,
'True': True,
'False': False
}
def _pythonize(value):
if value in _PYTHON_CONSTANTS:
return _PYTHON_CONSTANTS[value]
for convert in int, float:
try:
return convert(value)
except ValueError:
pass
if value[:1] == value[-1:] and value[0] in '"\'':
value = value[1:-1]
return text_type(value)
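# e.g. (illustrative): _pythonize('None') -> None, _pythonize('42') -> 42,
# _pythonize("'foo'") -> u'foo'; anything else falls through as text.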
def parse_converter_args(argstr):
argstr += ','
args = []
kwargs = {}
for item in _converter_args_re.finditer(argstr):
value = item.group('stringval')
if value is None:
value = item.group('value')
value = _pythonize(value)
if not item.group('name'):
args.append(value)
else:
name = item.group('name')
kwargs[name] = value
return tuple(args), kwargs
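# Illustrative: parse_converter_args("2, True, name='foo'")
# -> ((2, True), {'name': 'foo'})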
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data['static']:
yield None, None, data['static']
variable = data['variable']
converter = data['converter'] or 'default'
if variable in used_names:
raise ValueError('variable name %r used twice.' % variable)
used_names.add(variable)
yield converter, data['args'] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if '>' in remaining or '<' in remaining:
raise ValueError('malformed url rule: %r' % rule)
yield None, None, remaining
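# Illustrative trace: parse_rule('/browse/<int:id>/') yields
#   (None, None, '/browse/'), ('int', None, 'id'), (None, None, '/')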
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
`strict_slashes` are activated and an url that requires a trailing slash.
The attribute `new_url` contains the absolute destination url.
"""
code = 301
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
"""Internal exception."""
class RequestAliasRedirect(RoutingException):
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values):
self.matched_values = matched_values
@implements_to_string
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method, adapter=None):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
self.adapter = adapter
@cached_property
def suggested(self):
return self.closest_rule(self.adapter)
def closest_rule(self, adapter):
def _score_rule(rule):
return sum([
0.98 * difflib.SequenceMatcher(
None, rule.endpoint, self.endpoint
).ratio(),
0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
0.01 * bool(rule.methods and self.method in rule.methods)
])
if adapter and adapter.map._rules:
return max(adapter.map._rules, key=_score_rule)
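    # The weights above let endpoint-name similarity dominate (0.98), with
    # two small tie-breakers (0.01 each) for rules that accept the given
    # values and support the requested method.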
def __str__(self):
message = []
message.append('Could not build url for endpoint %r' % self.endpoint)
if self.method:
message.append(' (%r)' % self.method)
if self.values:
message.append(' with values %r' % sorted(self.values.keys()))
message.append('.')
if self.suggested:
if self.endpoint == self.suggested.endpoint:
if self.method and self.method not in self.suggested.methods:
message.append(' Did you mean to use methods %r?' % sorted(
self.suggested.methods
))
missing_values = self.suggested.arguments.union(
set(self.suggested.defaults or ())
) - set(self.values.keys())
if missing_values:
message.append(
' Did you forget to specify values %r?' %
sorted(missing_values)
)
else:
message.append(
' Did you mean %r instead?' % self.suggested.endpoint
)
return u''.join(message)
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
Here is a small example of such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults:
new_defaults = {}
for key, value in iteritems(rule.defaults):
if isinstance(value, string_types):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, string_types):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes
)
@implements_to_string
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
Set this to True and the rule will never match but will create a URL
that can be built. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data).
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
.. versionadded:: 0.7
The `alias` and `host` parameters were added.
"""
def __init__(self, string, defaults=None, subdomain=None, methods=None,
build_only=False, endpoint=None, strict_slashes=None,
redirect_to=None, alias=False, host=None):
if not string.startswith('/'):
raise ValueError('urls must start with a leading slash')
self.rule = string
self.is_leaf = not string.endswith('/')
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.host = host
self.defaults = defaults
self.build_only = build_only
self.alias = alias
if methods is None:
self.methods = None
else:
if isinstance(methods, str):
raise TypeError('param `methods` should be `Iterable[str]`, not `str`')
self.methods = set([x.upper() for x in methods])
if 'HEAD' not in self.methods and 'GET' in self.methods:
self.methods.add('HEAD')
self.endpoint = endpoint
self.redirect_to = redirect_to
if defaults:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._weights = None
def empty(self):
"""
Return an unbound copy of this rule.
This can be useful if you want to reuse an already bound URL for another
map. See ``get_empty_kwargs`` to override what keyword arguments are
provided to the new copy.
"""
return type(self)(self.rule, **self.get_empty_kwargs())
def get_empty_kwargs(self):
"""
Provides kwargs for instantiating empty copy with empty()
Use this method to provide custom keyword arguments to the subclass of
``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
has custom keyword arguments that are needed at instantiation.
Must return a ``dict`` that will be provided as kwargs to the new
instance of ``Rule``, following the initial ``self.rule`` value which
is always provided as the first, required positional argument.
"""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return dict(defaults=defaults, subdomain=self.subdomain,
methods=self.methods, build_only=self.build_only,
endpoint=self.endpoint, strict_slashes=self.strict_slashes,
redirect_to=self.redirect_to, alias=self.alias,
host=self.host)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError('url rule %r already bound to map %r' %
(self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def get_converter(self, variable_name, converter_name, args, kwargs):
"""Looks up the converter for the given parameter.
.. versionadded:: 0.9
"""
if converter_name not in self.map.converters:
raise LookupError('the converter %r does not exist' % converter_name)
return self.map.converters[converter_name](self.map, *args, **kwargs)
def compile(self):
"""Compiles the regular expression and stores it."""
assert self.map is not None, 'rule not bound'
if self.map.host_matching:
domain_rule = self.host or ''
else:
domain_rule = self.subdomain or ''
self._trace = []
self._converters = {}
self._weights = []
regex_parts = []
def _build_regex(rule):
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
for part in variable.split('/'):
if part:
self._weights.append((0, -len(part)))
else:
if arguments:
c_args, c_kwargs = parse_converter_args(arguments)
else:
c_args = ()
c_kwargs = {}
convobj = self.get_converter(
variable, converter, c_args, c_kwargs)
regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._weights.append((1, convobj.weight))
self.arguments.add(str(variable))
_build_regex(domain_rule)
regex_parts.append('\\|')
self._trace.append((False, '|'))
_build_regex(self.is_leaf and self.rule or self.rule.rstrip('/'))
if not self.is_leaf:
self._trace.append((False, '/'))
if self.build_only:
return
regex = r'^%s%s$' % (
u''.join(regex_parts),
(not self.is_leaf or not self.strict_slashes) and
'(?<!/)(?P<__suffix__>/?)' or ''
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path, method=None):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path"`` and is assembled by the map. If
the map is doing host matching the subdomain part will be the host
instead.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if self.strict_slashes and not self.is_leaf and \
not groups.pop('__suffix__') and \
(method is None or self.methods is None or
method in self.methods):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups['__suffix__']
result = {}
for name, value in iteritems(groups):
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults:
result.update(self.defaults)
if self.alias and self.map.redirect_defaults:
raise RequestAliasRedirect(result)
return result
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reasons `None` is returned.
:internal:
"""
tmp = []
add = tmp.append
processed = set(self.arguments)
for is_dynamic, data in self._trace:
if is_dynamic:
try:
add(self._converters[data].to_url(values[data]))
except ValidationError:
return
processed.add(data)
else:
add(url_quote(to_bytes(data, self.map.charset), safe='/:|+'))
domain_part, url = (u''.join(tmp)).split(u'|', 1)
if append_unknown:
query_vars = MultiDict(values)
for key in processed:
if key in query_vars:
del query_vars[key]
if query_vars:
url += u'?' + url_encode(query_vars, charset=self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key)
return domain_part, url
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return not self.build_only and self.defaults and \
self.endpoint == rule.endpoint and self != rule and \
self.arguments == rule.arguments
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if method is not None and self.methods is not None \
and method not in self.methods:
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
# in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in iteritems(defaults):
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. The more complex rules come first so the second argument is the
negative length of the number of weights.
3. lastly we order by the actual weights.
:internal:
"""
return bool(self.arguments), -len(self._weights), self._weights
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return self.alias and 1 or 0, -len(self.arguments), \
-len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and \
self._trace == other._trace
__hash__ = None
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.rule
@native_string_result
def __repr__(self):
if self.map is None:
return u'<%s (unbound)>' % self.__class__.__name__
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append(u'<%s>' % data)
else:
tmp.append(data)
return u'<%s %s%s -> %s>' % (
self.__class__.__name__,
repr((u''.join(tmp)).lstrip(u'|')).lstrip(u'u'),
self.methods is not None
and u' (%s)' % u', '.join(self.methods)
or u'',
self.endpoint
)
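# Illustrative sketch (not part of werkzeug): reusing an already bound
# rule on a second map via ``Rule.empty()``. The rule and endpoint names
# below are made up for the example.
#
#   rule = Rule('/about', endpoint='about')
#   primary = Map([rule])              # binds the rule to ``primary``
#   secondary = Map([rule.empty()])    # unbound copy, safe to rebind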
class BaseConverter(object):
"""Base class for all converters."""
regex = '[^/]+'
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
return url_quote(value, charset=self.map.charset)
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string can not include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
or equal 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = '{%d}' % int(length)
else:
if maxlength is None:
maxlength = ''
else:
maxlength = int(maxlength)
length = '{%s,%s}' % (
int(minlength),
maxlength
)
self.regex = '[^/]' + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = '[^/].*?'
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None):
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
def to_python(self, value):
if (self.fixed_digits and len(value) != self.fixed_digits):
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or \
(self.max is not None and value > self.max):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ('%%0%sd' % self.fixed_digits) % value
return str(value)
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule('/page/<int:page>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param fixed_digits: the number of fixed digits in the URL. If you set
this to ``4`` for example, the application will
only match if the url looks like ``/0001/``. The
default is variable length.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+'
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule('/probability/<float:probability>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+\.\d+'
num_convert = float
def __init__(self, map, min=None, max=None):
NumberConverter.__init__(self, map, 0, min, max)
class UUIDConverter(BaseConverter):
"""This converter only accepts UUID strings::
Rule('/object/<uuid:identifier>')
.. versionadded:: 0.10
:param map: the :class:`Map`.
"""
regex = r'[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-' \
r'[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}'
def to_python(self, value):
return uuid.UUID(value)
def to_url(self, value):
return str(value)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
'default': UnicodeConverter,
'string': UnicodeConverter,
'any': AnyConverter,
'path': PathConverter,
'int': IntegerConverter,
'float': FloatConverter,
'uuid': UUIDConverter,
}
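# Illustrative sketch (not part of werkzeug): adding a custom converter
# on top of the defaults. ``ListConverter`` and the ``+`` separator are
# made up for the example.
#
#   class ListConverter(BaseConverter):
#       def to_python(self, value):
#           return value.split('+')
#       def to_url(self, value):
#           return '+'.join(BaseConverter.to_url(self, v) for v in value)
#
#   url_map = Map([Rule('/tags/<list:tags>', endpoint='tags')],
#                 converters={'list': ListConverter})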
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps create
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
.. versionadded:: 0.5
`sort_parameters` and `sort_key` were added.
.. versionadded:: 0.7
`encoding_errors` and `host_matching` were added.
"""
#: .. versionadded:: 0.6
#: a dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
strict_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None,
encoding_errors='replace', host_matching=False):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self._remap_lock = Lock()
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
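# Illustrative sketch (not part of werkzeug): injecting a language code
# only for endpoints that expect one; all names here are made up.
#
#   if url_map.is_endpoint_expecting(endpoint, 'lang_code'):
#       values.setdefault('lang_code', current_language)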
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
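# Illustrative sketch (not part of werkzeug): rules can also be added
# after the map has been created; the endpoint name is made up.
#
#   url_map = Map()
#   url_map.add(Rule('/ping', endpoint='ping'))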
def bind(self, server_name, script_name=None, subdomain=None,
url_scheme='http', default_method='GET', path_info=None,
query_args=None):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
none is defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionadded:: 0.7
`query_args` added
.. versionadded:: 0.8
`query_args` can now also be a string.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError('host matching enabled and a '
'subdomain was provided')
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = '/'
try:
server_name = _encode_idna(server_name)
except UnicodeError:
raise BadHost()
return MapAdapter(self, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args)
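# Illustrative sketch (not part of werkzeug): binding a map manually for
# a subdomain; the host and subdomain below are made up.
#
#   adapter = url_map.bind('example.com', '/', subdomain='blog',
#                          url_scheme='https')
#   adapter.match('/post/42')    # matches against blog.example.com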
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
`HTTP_HOST` if provided) as the `server_name`, with the subdomain
feature disabled.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
Example: if `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the wsgi `environ` is ``'staging.dev.example.com'``, the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
objects. Additionally `PATH_INFO` is added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
if 'HTTP_HOST' in environ:
wsgi_server_name = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' \
and wsgi_server_name.endswith(':80'):
wsgi_server_name = wsgi_server_name[:-3]
elif environ['wsgi.url_scheme'] == 'https' \
and wsgi_server_name.endswith(':443'):
wsgi_server_name = wsgi_server_name[:-4]
else:
wsgi_server_name = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + environ['SERVER_PORT']
wsgi_server_name = wsgi_server_name.lower()
if server_name is None:
server_name = wsgi_server_name
else:
server_name = server_name.lower()
if subdomain is None and not self.host_matching:
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accessed directly by IP address in some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, self.charset)
script_name = _get_wsgi_string('SCRIPT_NAME')
path_info = _get_wsgi_string('PATH_INFO')
query_args = _get_wsgi_string('QUERY_STRING')
return Map.bind(self, server_name, script_name,
subdomain, environ['wsgi.url_scheme'],
environ['REQUEST_METHOD'], path_info,
query_args=query_args)
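# Illustrative sketch (not part of werkzeug): binding against a minimal
# hand-built WSGI environ; all values below are made up.
#
#   environ = {'HTTP_HOST': 'staging.example.com', 'SCRIPT_NAME': '',
#              'PATH_INFO': '/downloads/42', 'QUERY_STRING': '',
#              'REQUEST_METHOD': 'GET', 'wsgi.url_scheme': 'http',
#              'SERVER_NAME': 'example.com', 'SERVER_PORT': '80'}
#   adapter = url_map.bind_to_environ(environ, server_name='example.com')
#   # adapter.subdomain is now 'staging'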
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if not self._remap:
return
with self._remap_lock:
if not self._remap:
return
self._rules.sort(key=lambda x: x.match_compare_key())
for rules in itervalues(self._rules_by_endpoint):
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return '%s(%s)' % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(self, map, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args=None):
self.map = map
self.server_name = to_unicode(server_name)
script_name = to_unicode(script_name)
if not script_name.endswith(u'/'):
script_name += u'/'
self.script_name = script_name
self.subdomain = to_unicode(subdomain)
self.url_scheme = to_unicode(url_scheme)
self.path_info = to_unicode(path_info)
self.default_method = to_unicode(default_method)
self.query_args = query_args
def dispatch(self, view_func, path_info=None, method=None,
catch_http_exceptions=False):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
Here is a small example of dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect as e:
return e
return view_func(endpoint, args)
except HTTPException as e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False,
query_args=None):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL is
  matching. A `NotFound` exception is also a WSGI application you
  can call to get a default "page not found" page (it happens to be
  the same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
  attribute. This exception is used to notify you about a redirect
  that Werkzeug requests from your WSGI application. This is for
  example the case if you request ``/foo`` although the correct URL
  is ``/foo/``. You can use the `RequestRedirect` instance as a
  response-like object, similar to all other subclasses of
  `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
.. versionadded:: 0.6
`return_rule` was added.
.. versionadded:: 0.7
`query_args` was added.
.. versionchanged:: 0.8
`query_args` can now also be a string.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
else:
path_info = to_unicode(path_info, self.map.charset)
if query_args is None:
query_args = self.query_args
method = (method or self.default_method).upper()
path = u'%s|%s' % (
self.map.host_matching and self.server_name or self.subdomain,
path_info and '/%s' % path_info.lstrip('/')
)
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path, method)
except RequestSlash:
raise RequestRedirect(self.make_redirect_url(
url_quote(path_info, self.map.charset,
safe='/:|+') + '/', query_args))
except RequestAliasRedirect as e:
raise RequestRedirect(self.make_alias_redirect_url(
path, rule.endpoint, e.matched_values, method, query_args))
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
redirect_url = self.get_default_redirect(rule, method, rv,
query_args)
if redirect_url is not None:
raise RequestRedirect(redirect_url)
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, string_types):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match,
rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(str(url_join('%s://%s%s%s' % (
self.url_scheme or 'http',
self.subdomain and self.subdomain + '.' or '',
self.server_name,
self.script_name
), redirect_url)))
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
if the URL matches, or `False` if it does not.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method='--')
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException:
pass
return []
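# Illustrative sketch (not part of werkzeug): probing a bound map
# without raising; the endpoint and methods below are made up.
#
#   m = Map([Rule('/items', endpoint='items', methods=['GET', 'POST'])])
#   adapter = m.bind('example.com')
#   adapter.test('/items')              # -> True
#   adapter.allowed_methods('/items')   # -> GET, POST and the implied
#                                       #    HEAD, in unspecified order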
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
domain part is a subdomain when host matching is disabled, or a
full host name otherwise.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, 'ascii')
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, 'ascii')
return (subdomain and subdomain + u'.' or u'') + self.server_name
def get_default_redirect(self, rule, method, values, query_args):
"""A helper that returns the URL to redirect to if it finds one.
This is used for default redirecting only.
:internal:
"""
assert self.map.redirect_defaults
for r in self.map._rules_by_endpoint[rule.endpoint]:
# every rule that comes after this one, including ourself
# has a lower priority for the defaults. We order the ones
# with the highest priority up for building.
if r is rule:
break
if r.provides_defaults_for(rule) and \
r.suitable_for(values, method):
values.update(r.defaults)
domain_part, path = r.build(values)
return self.make_redirect_url(
path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, string_types):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ''
if query_args:
suffix = '?' + self.encode_query_args(query_args)
return str('%s://%s/%s%s' % (
self.url_scheme or 'http',
self.get_host(domain_part),
posixpath.join(self.script_name[:-1].lstrip('/'),
path_info.lstrip('/')),
suffix
))
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
"""Internally called to make an alias redirect URL."""
url = self.build(endpoint, values, method, append_unknown=False,
force_external=True)
if query_args:
url += '?' + self.encode_query_args(query_args)
assert url != path, 'detected invalid alias setting. No canonical ' \
'URL found'
return url
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(endpoint, values, self.default_method,
append_unknown)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(self, endpoint, values=None, method=None, force_external=False,
append_unknown=True):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if set to `True`, will force external URLs. By default,
external URLs (including the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non-ASCII data you will always get
bytestrings back. Non-ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
If a rule does not exist when building a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have a URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
valueiter = iteritems(values, multi=True)
else:
valueiter = iteritems(values)
values = dict((k, v) for k, v in valueiter if v is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method, self)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name) or
(not self.map.host_matching and domain_part == self.subdomain)
):
return str(url_join(self.script_name, './' + path.lstrip('/')))
return str('%s//%s%s/%s' % (
self.url_scheme + ':' if self.url_scheme else '',
host,
self.script_name[:-1],
path.lstrip('/')
))
|
tornadozou/tensorflow
|
refs/heads/master
|
tensorflow/contrib/rnn/__init__.py
|
34
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN Cells and additional RNN operations.
See @{$python/contrib.rnn} guide.
<!--From core-->
@@RNNCell
@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell
@@LSTMStateTuple
@@DropoutWrapper
@@MultiRNNCell
@@DeviceWrapper
@@ResidualWrapper
<!--Used to be in core, but kept in contrib.-->
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
<!--Created in contrib, eventual plans to move to core.-->
@@LayerNormBasicLSTMCell
@@LSTMBlockWrapper
@@LSTMBlockCell
@@GRUBlockCell
@@GRUBlockCellV2
@@FusedRNNCell
@@FusedRNNCellAdaptor
@@TimeReversedFusedRNN
@@LSTMBlockFusedCell
@@CoupledInputForgetGateLSTMCell
@@TimeFreqLSTMCell
@@GridLSTMCell
@@BidirectionalGridLSTMCell
@@NASCell
@@UGRNNCell
@@IntersectionRNNCell
@@PhasedLSTMCell
@@ConvLSTMCell
@@Conv1DLSTMCell
@@Conv2DLSTMCell
@@Conv3DLSTMCell
@@HighwayWrapper
@@GLSTMCell
<!--RNNCell wrappers-->
@@AttentionCellWrapper
@@CompiledWrapper
<!--RNN functions-->
@@static_rnn
@@static_state_saving_rnn
@@static_bidirectional_rnn
@@stack_bidirectional_dynamic_rnn
@@stack_bidirectional_rnn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import EmbeddingWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import InputProjectionWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import OutputProjectionWrapper
from tensorflow.contrib.rnn.python.ops.fused_rnn_cell import *
from tensorflow.contrib.rnn.python.ops.gru_ops import *
from tensorflow.contrib.rnn.python.ops.lstm_ops import *
from tensorflow.contrib.rnn.python.ops.rnn import *
from tensorflow.contrib.rnn.python.ops.rnn_cell import *
from tensorflow.python.ops.rnn import static_bidirectional_rnn
from tensorflow.python.ops.rnn import static_rnn
from tensorflow.python.ops.rnn import static_state_saving_rnn
from tensorflow.python.ops.rnn_cell import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
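# Illustrative sketch (not part of this module): stacking cells exported
# here with the core dynamic RNN. The layer count, unit sizes and input
# shape below are made up.
#
#   import tensorflow as tf
#   cells = [tf.contrib.rnn.BasicLSTMCell(128) for _ in range(2)]
#   stacked = tf.contrib.rnn.MultiRNNCell(cells)
#   inputs = tf.placeholder(tf.float32, [None, 20, 64])  # batch, time, depth
#   outputs, state = tf.nn.dynamic_rnn(stacked, inputs, dtype=tf.float32)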
|
MyAOSP/external_chromium_org
|
refs/heads/kk-4.4
|
tools/generate_stubs/generate_stubs_unittest.py
|
69
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for the generate_stubs.py.
Since generate_stubs.py is a code generator, it is hard to do a very good
test. Instead of creating a golden-file test, which might be flaky, this
test elects to verify that various components "exist" within the
generated file as a sanity check. In particular, there is a simple hit
test to make sure that umbrella functions, etc., do try to include every
function they are responsible for invoking. A missing invocation is
otherwise quite easy to overlook.
There is no attempt to verify ordering of different components, or whether
or not those components are going to parse incorrectly because of prior
errors or positioning. Most of that should be caught really fast anyway
during any attempt to use a badly behaving script.
"""
import generate_stubs as gs
import re
import StringIO
import sys
import unittest
def _MakeSignature(return_type, name, params):
return {'return_type': return_type,
'name': name,
'params': params}
SIMPLE_SIGNATURES = [
('int foo(int a)', _MakeSignature('int', 'foo', ['int a'])),
('int bar(int a, double b)', _MakeSignature('int', 'bar',
['int a', 'double b'])),
('int baz(void)', _MakeSignature('int', 'baz', ['void'])),
('void quux(void)', _MakeSignature('void', 'quux', ['void'])),
('void waldo(void);', _MakeSignature('void', 'waldo', ['void'])),
('int corge(void);', _MakeSignature('int', 'corge', ['void'])),
]
TRICKY_SIGNATURES = [
('const struct name *foo(int a, struct Test* b); ',
_MakeSignature('const struct name *',
'foo',
['int a', 'struct Test* b'])),
('const struct name &foo(int a, struct Test* b);',
_MakeSignature('const struct name &',
'foo',
['int a', 'struct Test* b'])),
('const struct name &_foo(int a, struct Test* b);',
_MakeSignature('const struct name &',
'_foo',
['int a', 'struct Test* b'])),
('struct name const * const _foo(int a, struct Test* b) '
'__attribute__((inline));',
_MakeSignature('struct name const * const',
'_foo',
['int a', 'struct Test* b']))
]
INVALID_SIGNATURES = ['I am bad', 'Seriously bad(', ';;;']
class GenerateStubModuleFunctionsUnittest(unittest.TestCase):
def testExtractModuleName(self):
self.assertEqual('somefile-2', gs.ExtractModuleName('somefile-2.ext'))
def testParseSignatures_EmptyFile(self):
# Empty file just generates empty signatures.
infile = StringIO.StringIO()
signatures = gs.ParseSignatures(infile)
self.assertEqual(0, len(signatures))
def testParseSignatures_SimpleSignatures(self):
file_contents = '\n'.join([x[0] for x in SIMPLE_SIGNATURES])
infile = StringIO.StringIO(file_contents)
signatures = gs.ParseSignatures(infile)
self.assertEqual(len(SIMPLE_SIGNATURES), len(signatures))
# We assume signatures are in order.
for i in xrange(len(SIMPLE_SIGNATURES)):
self.assertEqual(SIMPLE_SIGNATURES[i][1], signatures[i],
msg='Expected %s\nActual %s\nFor %s' %
(SIMPLE_SIGNATURES[i][1],
signatures[i],
SIMPLE_SIGNATURES[i][0]))
def testParseSignatures_TrickySignatures(self):
file_contents = '\n'.join([x[0] for x in TRICKY_SIGNATURES])
infile = StringIO.StringIO(file_contents)
signatures = gs.ParseSignatures(infile)
self.assertEqual(len(TRICKY_SIGNATURES), len(signatures))
# We assume signatures are in order.
for i in xrange(len(TRICKY_SIGNATURES)):
self.assertEqual(TRICKY_SIGNATURES[i][1], signatures[i],
msg='Expected %s\nActual %s\nFor %s' %
(TRICKY_SIGNATURES[i][1],
signatures[i],
TRICKY_SIGNATURES[i][0]))
def testParseSignatures_InvalidSignatures(self):
for i in INVALID_SIGNATURES:
infile = StringIO.StringIO(i)
self.assertRaises(gs.BadSignatureError, gs.ParseSignatures, infile)
def testParseSignatures_CommentsIgnored(self):
my_sigs = []
my_sigs.append('# a comment')
my_sigs.append(SIMPLE_SIGNATURES[0][0])
my_sigs.append('# another comment')
my_sigs.append(SIMPLE_SIGNATURES[0][0])
my_sigs.append('# a third comment')
my_sigs.append(SIMPLE_SIGNATURES[0][0])
file_contents = '\n'.join(my_sigs)
infile = StringIO.StringIO(file_contents)
signatures = gs.ParseSignatures(infile)
self.assertEqual(3, len(signatures))
class WindowsLibUnittest(unittest.TestCase):
def testWriteWindowsDefFile(self):
module_name = 'my_module-1'
signatures = [sig[1] for sig in SIMPLE_SIGNATURES]
outfile = StringIO.StringIO()
gs.WriteWindowsDefFile(module_name, signatures, outfile)
contents = outfile.getvalue()
# Check that the file header is correct.
self.assertTrue(contents.startswith("""LIBRARY %s
EXPORTS
""" % module_name))
# Check that the signatures were exported.
for sig in signatures:
pattern = '\n %s\n' % sig['name']
self.assertTrue(re.search(pattern, contents),
msg='Expected match of "%s" in %s' % (pattern, contents))
def testQuietRun(self):
output = StringIO.StringIO()
gs.QuietRun([sys.executable,
'-c', 'print "line 1 and suffix\\nline 2"'],
write_to=output)
self.assertEqual('line 1 and suffix\nline 2\n', output.getvalue())
output = StringIO.StringIO()
gs.QuietRun([sys.executable,
'-c', 'print "line 1 and suffix\\nline 2"'],
filter='line 1', write_to=output)
self.assertEqual('line 2\n', output.getvalue())
class PosixStubWriterUnittest(unittest.TestCase):
def setUp(self):
self.module_name = 'my_module-1'
self.signatures = [sig[1] for sig in SIMPLE_SIGNATURES]
self.out_dir = 'out_dir'
self.writer = gs.PosixStubWriter(self.module_name, self.signatures)
def testEnumName(self):
self.assertEqual('kModuleMy_module1',
gs.PosixStubWriter.EnumName(self.module_name))
def testIsInitializedName(self):
self.assertEqual('IsMy_module1Initialized',
gs.PosixStubWriter.IsInitializedName(self.module_name))
def testInitializeModuleName(self):
self.assertEqual(
'InitializeMy_module1',
gs.PosixStubWriter.InitializeModuleName(self.module_name))
def testUninitializeModuleName(self):
self.assertEqual(
'UninitializeMy_module1',
gs.PosixStubWriter.UninitializeModuleName(self.module_name))
def testStubFunctionPointer(self):
self.assertEqual(
'static int (*foo_ptr)(int a) = NULL;',
gs.PosixStubWriter.StubFunctionPointer(SIMPLE_SIGNATURES[0][1]))
def testStubFunction(self):
# Test for a signature with a return value and a parameter.
self.assertEqual("""extern int foo(int a) __attribute__((weak));
int foo(int a) {
return foo_ptr(a);
}""", gs.PosixStubWriter.StubFunction(SIMPLE_SIGNATURES[0][1]))
# Test for a signature with a void return value and no parameters.
self.assertEqual("""extern void waldo(void) __attribute__((weak));
void waldo(void) {
waldo_ptr();
}""", gs.PosixStubWriter.StubFunction(SIMPLE_SIGNATURES[4][1]))
def testWriteImplementationContents(self):
outfile = StringIO.StringIO()
self.writer.WriteImplementationContents('my_namespace', outfile)
contents = outfile.getvalue()
# Verify namespace exists somewhere.
self.assertTrue(contents.find('namespace my_namespace {') != -1)
# Verify that each signature has an _ptr and a function call in the file.
# Check that the signatures were exported.
for sig in self.signatures:
decl = gs.PosixStubWriter.StubFunctionPointer(sig)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
# Verify that each signature has a stub function generated for it.
for sig in self.signatures:
decl = gs.PosixStubWriter.StubFunction(sig)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
# Find module initializer functions. Make sure all 3 exist.
decl = gs.PosixStubWriter.InitializeModuleName(self.module_name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
decl = gs.PosixStubWriter.UninitializeModuleName(self.module_name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
decl = gs.PosixStubWriter.IsInitializedName(self.module_name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
def testWriteHeaderContents(self):
# Data for header generation.
module_names = ['oneModule', 'twoModule']
# Make the header.
outfile = StringIO.StringIO()
self.writer.WriteHeaderContents(module_names, 'my_namespace', 'GUARD_',
outfile)
contents = outfile.getvalue()
# Check for namespace and header guard.
self.assertTrue(contents.find('namespace my_namespace {') != -1)
self.assertTrue(contents.find('#ifndef GUARD_') != -1)
# Check for umbrella initializer.
self.assertTrue(contents.find('InitializeStubs(') != -1)
# Check per-module declarations.
for name in module_names:
# Check for enums.
decl = gs.PosixStubWriter.EnumName(name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
# Check for module initializer functions.
decl = gs.PosixStubWriter.IsInitializedName(name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
decl = gs.PosixStubWriter.InitializeModuleName(name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
decl = gs.PosixStubWriter.UninitializeModuleName(name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
def testWriteUmbrellaInitializer(self):
# Data for header generation.
module_names = ['oneModule', 'twoModule']
# Make the header.
outfile = StringIO.StringIO()
self.writer.WriteUmbrellaInitializer(module_names, 'my_namespace', outfile)
contents = outfile.getvalue()
# Check for umbrella initializer declaration.
self.assertTrue(contents.find('bool InitializeStubs(') != -1)
# If the umbrella initializer is correctly written, each module will have
# its initializer called, checked, and uninitialized on failure. Sanity
# check that here.
for name in module_names:
# Check for module initializer functions.
decl = gs.PosixStubWriter.IsInitializedName(name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
decl = gs.PosixStubWriter.InitializeModuleName(name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
decl = gs.PosixStubWriter.UninitializeModuleName(name)
self.assertTrue(contents.find(decl) != -1,
msg='Expected "%s" in %s' % (decl, contents))
if __name__ == '__main__':
unittest.main()
|
silizium/ardupilot
|
refs/heads/master
|
mk/PX4/Tools/genmsg/test/test_genmsg_command_line.py
|
216
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def test_includepath_to_dict():
from genmsg.command_line import includepath_to_dict
assert {} == includepath_to_dict([])
assert {'std_msgs': [ 'foo' ]} == includepath_to_dict(['std_msgs:foo'])
assert {'std_msgs': [ 'foo' ], 'bar_msgs': [ 'baz:colon' ]} == includepath_to_dict(['std_msgs:foo', 'bar_msgs:baz:colon'])
|
da1z/intellij-community
|
refs/heads/master
|
python/lib/Lib/encodings/zlib_codec.py
|
533
|
""" Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
def zlib_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.decompress(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
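# Illustrative round-trip sketch (not part of the codec), using the
# module-level helpers defined above with Python 2 byte-string semantics:
#
#   data = 'spam and eggs' * 100
#   compressed, consumed = zlib_encode(data)
#   restored, _ = zlib_decode(compressed)
#   assert restored == data and consumed == len(data)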
|
kevclarx/ansible
|
refs/heads/devel
|
test/units/module_utils/basic/test_heuristic_log_sanitize.py
|
99
|
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import sys
import syslog
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils.basic import heuristic_log_sanitize
class TestHeuristicLogSanitize(unittest.TestCase):
def setUp(self):
self.URL_SECRET = 'http://username:pas:word@foo.com/data'
self.SSH_SECRET = 'username:pas:word@foo.com/data'
self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here'))
self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
def _gen_data(self, records, per_rec, top_level, secret_text):
hostvars = {'hostvars': {}}
for i in range(1, records, 1):
host_facts = {'host%s' % i:
{'pstack':
{'running': '875.1',
'symlinked': '880.0',
'tars': [],
'versions': ['885.0']},
}}
if per_rec:
host_facts['host%s' % i]['secret'] = secret_text
hostvars['hostvars'].update(host_facts)
if top_level:
hostvars['secret'] = secret_text
return hostvars
def test_did_not_hide_too_much(self):
self.assertEqual(heuristic_log_sanitize(self.clean_data), self.clean_data)
def test_hides_url_secrets(self):
url_output = heuristic_log_sanitize(self.url_data)
# Basic functionality: Successfully hid the password
self.assertNotIn('pas:word', url_output)
# Slightly more advanced, we hid all of the password despite the ":"
self.assertNotIn('pas', url_output)
# In this implementation we replace the password with 8 "*" which is
# also the length of our password. The url fields should be able to
# accurately detect where the password ends so the length should be
# the same:
self.assertEqual(len(url_output), len(self.url_data))
def test_hides_ssh_secrets(self):
ssh_output = heuristic_log_sanitize(self.ssh_data)
self.assertNotIn('pas:word', ssh_output)
# Slightly more advanced, we hid all of the password despite the ":"
self.assertNotIn('pas', ssh_output)
# ssh checking is harder as the heuristic is overzealous in many
# cases. Since the input will have at least one ":" present before
# the password we can tell some things about the beginning and end of
# the data, though:
self.assertTrue(ssh_output.startswith("{'"))
self.assertTrue(ssh_output.endswith("}"))
self.assertIn(":********@foo.com/data'", ssh_output)
def test_hides_parameter_secrets(self):
output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret']))
self.assertNotIn('secret', output)
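# Illustrative sketch (not part of the test suite): the sanitizer can
# also be called directly; the URL below is made up.
#
#   from ansible.module_utils.basic import heuristic_log_sanitize
#   heuristic_log_sanitize('http://user:secret@example.com/data')
#   # -> the password portion comes back replaced with '********'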
|
dezynetechnologies/odoo
|
refs/heads/8.0
|
addons/account/tests/test_search.py
|
225
|
from openerp.tests.common import TransactionCase
class TestSearch(TransactionCase):
"""Tests for search on name_search (account.account)
The name search on account.account is quite complex; make sure
we get all the correct results.
"""
def setUp(self):
super(TestSearch, self).setUp()
cr, uid = self.cr, self.uid
self.account_model = self.registry('account.account')
self.account_type_model = self.registry('account.account.type')
self.res_partner_model = self.registry('res.partner')
self.account_payment_term_model = self.registry('account.payment.term')
ac_ids = self.account_type_model.search(cr, uid, [], limit=1)
self.atax = (int(self.account_model.create(cr, uid, dict(
name="Tax Received",
code="121",
user_type=ac_ids[0],
))), "121 Tax Received")
self.apurchase = (int(self.account_model.create(cr, uid, dict(
name="Purchased Stocks",
code="1101",
user_type=ac_ids[0],
))), "1101 Purchased Stocks")
self.asale = (int(self.account_model.create(cr, uid, dict(
name="Product Sales",
code="200",
user_type=ac_ids[0],
))), "200 Product Sales")
self.all_ids = [self.atax[0], self.apurchase[0], self.asale[0]]
self.a_partner = self.res_partner_model.create(cr, uid, {'name':'test partner'})
self.a_payment_term = self.account_payment_term_model.create(cr, uid, {'name':'test payment term'})
def test_name_search(self):
cr, uid = self.cr, self.uid
atax_ids = self.account_model.name_search(cr, uid, name="Tax", operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0]]), set([a[0] for a in atax_ids]), "name_search 'ilike Tax' should have returned Tax Received account only")
atax_ids = self.account_model.name_search(cr, uid, name="Tax", operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.apurchase[0], self.asale[0]]), set([a[0] for a in atax_ids]), "name_search 'not ilike Tax' should have returned all but Tax Received account")
apur_ids = self.account_model.name_search(cr, uid, name='1101', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.apurchase[0]]), set([a[0] for a in apur_ids]), "name_search 'ilike 1101' should have returned Purchased Stocks account only")
apur_ids = self.account_model.name_search(cr, uid, name='1101', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.asale[0]]), set([a[0] for a in apur_ids]), "name_search 'not ilike 1101' should have returned all but Purchased Stocks account")
asale_ids = self.account_model.name_search(cr, uid, name='200 Sales', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.asale[0]]), set([a[0] for a in asale_ids]), "name_search 'ilike 200 Sales' should have returned Product Sales account only")
asale_ids = self.account_model.name_search(cr, uid, name='200 Sales', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.apurchase[0]]), set([a[0] for a in asale_ids]), "name_search 'not ilike 200 Sales' should have returned all but Product Sales account")
asale_ids = self.account_model.name_search(cr, uid, name='Product Sales', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.asale[0]]), set([a[0] for a in asale_ids]), "name_search 'ilike Product Sales' should have returned Product Sales account only")
asale_ids = self.account_model.name_search(cr, uid, name='Product Sales', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.apurchase[0]]), set([a[0] for a in asale_ids]), "name_search 'not ilike Product Sales' should have returned all but Product Sales account")
def test_property_unset_search(self):
cr, uid = self.cr, self.uid
partner_ids = self.res_partner_model.search(cr, uid, [('property_payment_term', '=', False), ('id', '=', self.a_partner)])
self.assertTrue(partner_ids, "unset property field 'propety_payment_term' should have been found")
self.res_partner_model.write(cr, uid, [self.a_partner], {'property_payment_term': self.a_payment_term})
partner_ids = self.res_partner_model.search(cr, uid, [('property_payment_term', '=', False), ('id', '=', self.a_partner)])
self.assertFalse(partner_ids, "set property field 'propety_payment_term' should not have been found")
|
anryko/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aci/aci_bd.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_bd
short_description: Manage Bridge Domains (BD) objects (fv:BD)
description:
- Manages Bridge Domains (BD) on Cisco ACI fabrics.
version_added: '2.4'
options:
arp_flooding:
description:
- Determines if the Bridge Domain should flood ARP traffic.
- The APIC defaults to C(no) when unset during creation.
type: bool
bd:
description:
- The name of the Bridge Domain.
type: str
aliases: [ bd_name, name ]
bd_type:
description:
- The type of traffic on the Bridge Domain.
- The APIC defaults to C(ethernet) when unset during creation.
type: str
choices: [ ethernet, fc ]
description:
description:
- Description for the Bridge Domain.
type: str
enable_multicast:
description:
- Determines if PIM is enabled.
- The APIC defaults to C(no) when unset during creation.
type: bool
enable_routing:
description:
- Determines if IP forwarding should be allowed.
- The APIC defaults to C(yes) when unset during creation.
type: bool
endpoint_clear:
description:
- Clears all End Points in all Leaves when C(yes).
- The value is not reset to disabled once End Points have been cleared; that requires a second task.
- The APIC defaults to C(no) when unset during creation.
type: bool
endpoint_move_detect:
description:
- Determines if GARP should be enabled to detect when End Points move.
- The APIC defaults to C(garp) when unset during creation.
type: str
choices: [ default, garp ]
endpoint_retention_action:
description:
- Determines if the Bridge Domain should inherit or resolve the End Point Retention Policy.
- The APIC defaults to C(resolve) when unset during creation.
type: str
choices: [ inherit, resolve ]
endpoint_retention_policy:
description:
- The name of the End Point Retention Policy the Bridge Domain should use when
overriding the default End Point Retention Policy.
type: str
igmp_snoop_policy:
description:
- The name of the IGMP Snooping Policy the Bridge Domain should use when
overriding the default IGMP Snooping Policy.
type: str
ip_learning:
description:
- Determines if the Bridge Domain should learn End Point IPs.
- The APIC defaults to C(yes) when unset during creation.
type: bool
ipv6_nd_policy:
description:
- The name of the IPv6 Neighbor Discovery Policy the Bridge Domain should use when
      overriding the default IPv6 ND Policy.
type: str
l2_unknown_unicast:
description:
    - Determines the forwarding method to use for unknown L2 destinations.
- The APIC defaults to C(proxy) when unset during creation.
type: str
choices: [ proxy, flood ]
l3_unknown_multicast:
description:
- Determines the forwarding method to use for unknown multicast destinations.
- The APIC defaults to C(flood) when unset during creation.
type: str
choices: [ flood, opt-flood ]
limit_ip_learn:
description:
- Determines if the BD should limit IP learning to only subnets owned by the Bridge Domain.
- The APIC defaults to C(yes) when unset during creation.
type: bool
mac_address:
description:
- The MAC Address to assign to the C(bd) instead of using the default.
- The APIC defaults to C(00:22:BD:F8:19:FF) when unset during creation.
type: str
aliases: [ mac ]
version_added: '2.5'
multi_dest:
description:
- Determines the forwarding method for L2 multicast, broadcast, and link layer traffic.
- The APIC defaults to C(bd-flood) when unset during creation.
type: str
choices: [ bd-flood, drop, encap-flood ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the Tenant.
type: str
aliases: [ tenant_name ]
vrf:
description:
- The name of the VRF.
type: str
aliases: [ vrf_name ]
extends_documentation_fragment: aci
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
seealso:
- module: aci_tenant
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(fv:BD).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
'''
EXAMPLES = r'''
- name: Add Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: no
tenant: prod
bd: web_servers
mac_address: 00:22:BD:F8:19:FE
vrf: prod_vrf
state: present
delegate_to: localhost
- name: Add an FC Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: no
tenant: prod
bd: storage
bd_type: fc
vrf: fc_vrf
enable_routing: no
state: present
delegate_to: localhost
- name: Modify a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: yes
tenant: prod
bd: web_servers
arp_flooding: yes
l2_unknown_unicast: flood
state: present
delegate_to: localhost
- name: Query All Bridge Domains
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: yes
state: query
delegate_to: localhost
register: query_result
- name: Query a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: yes
tenant: prod
bd: web_servers
state: query
delegate_to: localhost
register: query_result
- name: Delete a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: yes
tenant: prod
bd: web_servers
state: absent
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
arp_flooding=dict(type='bool'),
bd=dict(type='str', aliases=['bd_name', 'name']), # Not required for querying all objects
bd_type=dict(type='str', choices=['ethernet', 'fc']),
description=dict(type='str'),
enable_multicast=dict(type='bool'),
enable_routing=dict(type='bool'),
endpoint_clear=dict(type='bool'),
endpoint_move_detect=dict(type='str', choices=['default', 'garp']),
endpoint_retention_action=dict(type='str', choices=['inherit', 'resolve']),
endpoint_retention_policy=dict(type='str'),
igmp_snoop_policy=dict(type='str'),
ip_learning=dict(type='bool'),
ipv6_nd_policy=dict(type='str'),
l2_unknown_unicast=dict(type='str', choices=['proxy', 'flood']),
l3_unknown_multicast=dict(type='str', choices=['flood', 'opt-flood']),
limit_ip_learn=dict(type='bool'),
mac_address=dict(type='str', aliases=['mac']),
multi_dest=dict(type='str', choices=['bd-flood', 'drop', 'encap-flood']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
vrf=dict(type='str', aliases=['vrf_name']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['bd', 'tenant']],
['state', 'present', ['bd', 'tenant']],
],
)
aci = ACIModule(module)
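    # Note: aci.boolean() maps Python booleans onto the 'yes'/'no' strings the
    # APIC expects, passing None through untouched for unset parameters.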
arp_flooding = aci.boolean(module.params.get('arp_flooding'))
bd = module.params.get('bd')
bd_type = module.params.get('bd_type')
if bd_type == 'ethernet':
        # the ethernet type is stored as 'regular' on the APIC, which is not obvious to users
bd_type = 'regular'
description = module.params.get('description')
enable_multicast = aci.boolean(module.params.get('enable_multicast'))
enable_routing = aci.boolean(module.params.get('enable_routing'))
endpoint_clear = aci.boolean(module.params.get('endpoint_clear'))
endpoint_move_detect = module.params.get('endpoint_move_detect')
if endpoint_move_detect == 'default':
# the ACI default setting is an empty string, but that is not a good input value
endpoint_move_detect = ''
endpoint_retention_action = module.params.get('endpoint_retention_action')
endpoint_retention_policy = module.params.get('endpoint_retention_policy')
igmp_snoop_policy = module.params.get('igmp_snoop_policy')
ip_learning = aci.boolean(module.params.get('ip_learning'))
ipv6_nd_policy = module.params.get('ipv6_nd_policy')
l2_unknown_unicast = module.params.get('l2_unknown_unicast')
l3_unknown_multicast = module.params.get('l3_unknown_multicast')
limit_ip_learn = aci.boolean(module.params.get('limit_ip_learn'))
mac_address = module.params.get('mac_address')
multi_dest = module.params.get('multi_dest')
state = module.params.get('state')
tenant = module.params.get('tenant')
vrf = module.params.get('vrf')
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='fvBD',
aci_rn='BD-{0}'.format(bd),
module_object=bd,
target_filter={'name': bd},
),
child_classes=['fvRsCtx', 'fvRsIgmpsn', 'fvRsBDToNdP', 'fvRsBdToEpRet'],
)
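    # For illustration: tenant='prod' and bd='web_servers' resolve to the DN
    # uni/tn-prod/BD-web_servers (following the rn patterns built above).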
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fvBD',
class_config=dict(
arpFlood=arp_flooding,
descr=description,
epClear=endpoint_clear,
epMoveDetectMode=endpoint_move_detect,
ipLearning=ip_learning,
limitIpLearnToSubnets=limit_ip_learn,
mac=mac_address,
mcastAllow=enable_multicast,
multiDstPktAct=multi_dest,
name=bd,
type=bd_type,
unicastRoute=enable_routing,
unkMacUcastAct=l2_unknown_unicast,
unkMcastAct=l3_unknown_multicast,
),
child_configs=[
{'fvRsCtx': {'attributes': {'tnFvCtxName': vrf}}},
{'fvRsIgmpsn': {'attributes': {'tnIgmpSnoopPolName': igmp_snoop_policy}}},
{'fvRsBDToNdP': {'attributes': {'tnNdIfPolName': ipv6_nd_policy}}},
{'fvRsBdToEpRet': {'attributes': {'resolveAct': endpoint_retention_action, 'tnFvEpRetPolName': endpoint_retention_policy}}},
],
)
aci.get_diff(aci_class='fvBD')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
LingxiaoJIA/gem5
|
refs/heads/master
|
src/arch/x86/isa/insts/simd64/floating_point/arithmetic/addition.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# PFADD
'''
|
bhargav2408/kivy
|
refs/heads/master
|
kivy/core/clipboard/clipboard_xsel.py
|
36
|
'''
Clipboard xsel: an implementation of the Clipboard using the xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard._clipboard_ext import ClipboardExternalBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
# Probe that the xsel binary is present and runnable; any failure
# propagates to the caller.
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
class ClipboardXsel(ClipboardExternalBase):
@staticmethod
def _clip(inout, selection):
pipe = {'std' + inout: subprocess.PIPE}
sel = 'b' if selection == 'clipboard' else selection[0]
io = inout[0]
return subprocess.Popen(
['xsel', '-' + sel + io], **pipe)
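# For reference, derived from the code above: _clip('in', 'clipboard') spawns
# `xsel -bi` with a stdin pipe (write the CLIPBOARD selection), while
# _clip('out', 'primary') spawns `xsel -po` (read the PRIMARY selection).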
|
dna2github/dna2sevord
|
refs/heads/main
|
computer/algorithm/py/disjointset.py
|
1
|
class DisjointSet(object):
    def __init__(self, array=None):
        # per-instance state; class-level dicts would be shared between
        # all DisjointSet instances
        self.array = None
        self.parent = {}
        self.rank = {}
        if array:
            self.build(array)
    def build(self, array):
        self.array = array[:]
        for i, v in enumerate(array):
            self.parent[v] = i
            self.rank[v] = 0
    def find(self, val):
        # follow parent links until a value is its own parent (the root)
        t = val
        p = self.parent[val]
        v = self.array[p]
        while t != v:
            p = self.parent[v]
            t = v
            v = self.array[p]
        root_index, root_value = p, v
        # path compression: point every value on the walked path at the root
        p = self.parent[val]
        self.parent[val] = root_index
        while p != root_index:
            v = self.array[p]
            p = self.parent[v]
            self.parent[v] = root_index
        return root_index, root_value
    def union(self, val1, val2):
        p1, v1 = self.find(val1)
        p2, v2 = self.find(val2)
        if p1 == p2:
            return  # already in the same set
        # union by rank: attach the shallower tree under the deeper root
        if self.rank[v1] < self.rank[v2]:
            self.parent[v1] = p2
        else:
            self.parent[v2] = p1
            if self.rank[v1] == self.rank[v2]:
                self.rank[v1] += 1
if __name__ == '__main__':
s = DisjointSet([1,2,3,4,5,6,7,8,9])
s.union(1, 2)
s.union(3, 4)
s.union(5, 6)
s.union(7, 8)
s.union(2, 4)
s.union(8, 9)
s.union(6, 8)
s.find(5)
s.union(4, 8)
s.find(1)
print(s.rank, s.parent, s.array)
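    # A quick sanity check (assuming the union-by-rank fixes above): after the
    # unions every element should resolve to the same root.
    root_index, _ = s.find(1)
    assert all(s.find(v)[0] == root_index for v in [1, 2, 3, 4, 5, 6, 7, 8, 9])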
|
rychipman/mongo-python-driver
|
refs/heads/master
|
test/test_client.py
|
8
|
# Copyright 2013-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the mongo_client module."""
import contextlib
import datetime
import os
import socket
import struct
import sys
import time
import traceback
import warnings
sys.path[0:0] = [""]
from bson import BSON
from bson.codec_options import CodecOptions
from bson.py3compat import thread, u
from bson.son import SON
from bson.tz_util import utc
from pymongo import auth, message
from pymongo.cursor import CursorType
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure,
CursorNotFound,
NetworkTimeout,
InvalidURI)
from pymongo.mongo_client import MongoClient
from pymongo.pool import SocketInfo
from pymongo.read_preferences import ReadPreference
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.server_type import SERVER_TYPE
from pymongo.write_concern import WriteConcern
from test import (client_context,
client_knobs,
host,
pair,
port,
SkipTest,
unittest,
IntegrationTest,
db_pwd,
db_user,
MockClientTest)
from test.pymongo_mocks import MockClient
from test.utils import (assertRaisesExactly,
delay,
remove_all_users,
server_is_master_with_slave,
get_pool,
one,
connected,
wait_until,
rs_or_single_client,
rs_or_single_client_noauth,
lazy_client_trial,
NTHREADS)
class ClientUnitTest(unittest.TestCase):
"""MongoClient tests that don't require a server."""
@classmethod
def setUpClass(cls):
cls.client = MongoClient(host, port, connect=False,
serverSelectionTimeoutMS=100)
def test_keyword_arg_defaults(self):
client = MongoClient(socketTimeoutMS=None,
connectTimeoutMS=20000,
waitQueueTimeoutMS=None,
waitQueueMultiple=None,
socketKeepAlive=False,
replicaSet=None,
read_preference=ReadPreference.PRIMARY,
ssl=False,
ssl_keyfile=None,
ssl_certfile=None,
ssl_cert_reqs=0, # ssl.CERT_NONE
ssl_ca_certs=None,
connect=False,
serverSelectionTimeoutMS=12000)
options = client._MongoClient__options
pool_opts = options.pool_options
self.assertEqual(None, pool_opts.socket_timeout)
# socket.Socket.settimeout takes a float in seconds
self.assertEqual(20.0, pool_opts.connect_timeout)
self.assertEqual(None, pool_opts.wait_queue_timeout)
self.assertEqual(None, pool_opts.wait_queue_multiple)
self.assertFalse(pool_opts.socket_keepalive)
self.assertEqual(None, pool_opts.ssl_context)
self.assertEqual(None, options.replica_set_name)
self.assertEqual(ReadPreference.PRIMARY, client.read_preference)
self.assertAlmostEqual(12, client.server_selection_timeout)
def test_types(self):
self.assertRaises(TypeError, MongoClient, 1)
self.assertRaises(TypeError, MongoClient, 1.14)
self.assertRaises(TypeError, MongoClient, "localhost", "27017")
self.assertRaises(TypeError, MongoClient, "localhost", 1.14)
self.assertRaises(TypeError, MongoClient, "localhost", [])
self.assertRaises(ConfigurationError, MongoClient, [])
def test_max_pool_size_zero(self):
with self.assertRaises(ValueError):
MongoClient(maxPoolSize=0)
def test_get_db(self):
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, self.client, "")
self.assertRaises(InvalidName, make_db, self.client, "te$t")
self.assertRaises(InvalidName, make_db, self.client, "te.t")
self.assertRaises(InvalidName, make_db, self.client, "te\\t")
self.assertRaises(InvalidName, make_db, self.client, "te/t")
self.assertRaises(InvalidName, make_db, self.client, "te st")
self.assertTrue(isinstance(self.client.test, Database))
self.assertEqual(self.client.test, self.client["test"])
self.assertEqual(self.client.test, Database(self.client, "test"))
def test_get_database(self):
codec_options = CodecOptions(tz_aware=True)
write_concern = WriteConcern(w=2, j=True)
db = self.client.get_database(
'foo', codec_options, ReadPreference.SECONDARY, write_concern)
self.assertEqual('foo', db.name)
self.assertEqual(codec_options, db.codec_options)
self.assertEqual(ReadPreference.SECONDARY, db.read_preference)
self.assertEqual(write_concern, db.write_concern)
def test_getattr(self):
self.assertTrue(isinstance(self.client['_does_not_exist'], Database))
with self.assertRaises(AttributeError) as context:
self.client._does_not_exist
# Message should be:
# "AttributeError: MongoClient has no attribute '_does_not_exist'. To
# access the _does_not_exist database, use client['_does_not_exist']".
self.assertIn("has no attribute '_does_not_exist'",
str(context.exception))
def test_iteration(self):
def iterate():
[a for a in self.client]
self.assertRaises(TypeError, iterate)
def test_get_default_database(self):
c = MongoClient("mongodb://%s:%d/foo" % (host, port), connect=False)
self.assertEqual(Database(c, 'foo'), c.get_default_database())
def test_get_default_database_error(self):
# URI with no database.
c = MongoClient("mongodb://%s:%d/" % (host, port), connect=False)
self.assertRaises(ConfigurationError, c.get_default_database)
def test_get_default_database_with_authsource(self):
# Ensure we distinguish database name from authSource.
uri = "mongodb://%s:%d/foo?authSource=src" % (host, port)
c = MongoClient(uri, connect=False)
self.assertEqual(Database(c, 'foo'), c.get_default_database())
class TestClient(IntegrationTest):
def test_constants(self):
# Set bad defaults.
MongoClient.HOST = "somedomainthatdoesntexist.org"
MongoClient.PORT = 123456789
with self.assertRaises(AutoReconnect):
connected(MongoClient(serverSelectionTimeoutMS=10))
# Override the defaults. No error.
connected(MongoClient(host, port))
# Set good defaults.
MongoClient.HOST = host
MongoClient.PORT = port
# No error.
connected(MongoClient())
def test_init_disconnected(self):
c = rs_or_single_client(connect=False)
self.assertIsInstance(c.is_primary, bool)
self.assertIsInstance(c.is_mongos, bool)
self.assertIsInstance(c.max_pool_size, int)
self.assertIsInstance(c.nodes, frozenset)
self.assertEqual(c.codec_options, CodecOptions())
self.assertIsInstance(c.max_bson_size, int)
self.assertIsInstance(c.max_write_batch_size, int)
self.assertFalse(c.primary)
self.assertFalse(c.secondaries)
c.pymongo_test.command('ismaster') # Auto-connect.
if client_context.is_rs:
# The primary's host and port are from the replica set config.
self.assertIsNotNone(c.address)
else:
self.assertEqual(c.address, (host, port))
bad_host = "somedomainthatdoesntexist.org"
c = MongoClient(bad_host, port, connectTimeoutMS=1,
serverSelectionTimeoutMS=10)
self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one)
def test_init_disconnected_with_auth(self):
uri = "mongodb://user:pass@somedomainthatdoesntexist"
c = MongoClient(uri, connectTimeoutMS=1,
serverSelectionTimeoutMS=10)
self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one)
def test_equality(self):
c = connected(rs_or_single_client())
self.assertEqual(client_context.rs_or_standalone_client, c)
# Explicitly test inequality
self.assertFalse(client_context.rs_or_standalone_client != c)
def test_host_w_port(self):
with self.assertRaises(ValueError):
connected(MongoClient("%s:1234567" % host, connectTimeoutMS=1,
serverSelectionTimeoutMS=10))
def test_repr(self):
# Used to test 'eval' below.
import bson
client = MongoClient(
'mongodb://localhost:27017,localhost:27018/?replicaSet=replset'
'&connectTimeoutMS=12345',
connect=False, document_class=SON)
the_repr = repr(client)
self.assertIn('MongoClient(host=', the_repr)
self.assertIn(
"document_class=bson.son.SON, "
"tz_aware=False, "
"connect=False, ",
the_repr)
self.assertIn("connecttimeoutms='12345'", the_repr)
self.assertIn("replicaset=", the_repr)
self.assertEqual(eval(the_repr), client)
@client_context.require_replica_set
def test_repr_replica_set(self):
self.assertIn("MongoClient(host=[", repr(self.client))
self.assertIn(pair, repr(self.client))
def test_getters(self):
self.assertEqual(client_context.client.address, (host, port))
self.assertEqual(client_context.nodes, self.client.nodes)
def test_database_names(self):
self.client.pymongo_test.test.insert_one({"dummy": u("object")})
self.client.pymongo_test_mike.test.insert_one({"dummy": u("object")})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
def test_drop_database(self):
self.assertRaises(TypeError, self.client.drop_database, 5)
self.assertRaises(TypeError, self.client.drop_database, None)
self.client.pymongo_test.test.insert_one({"dummy": u("object")})
self.client.pymongo_test2.test.insert_one({"dummy": u("object")})
dbs = self.client.database_names()
self.assertIn("pymongo_test", dbs)
self.assertIn("pymongo_test2", dbs)
self.client.drop_database("pymongo_test")
self.client.drop_database(self.client.pymongo_test2)
raise SkipTest("This test often fails due to SERVER-2329")
dbs = self.client.database_names()
self.assertNotIn("pymongo_test", dbs)
self.assertNotIn("pymongo_test2", dbs)
def test_close(self):
coll = self.client.pymongo_test.bar
self.client.close()
self.client.close()
coll.count()
self.client.close()
self.client.close()
coll.count()
def test_bad_uri(self):
with self.assertRaises(InvalidURI):
MongoClient("http://localhost")
@client_context.require_auth
def test_auth_from_uri(self):
self.client.admin.add_user("admin", "pass", roles=["root"])
self.addCleanup(self.client.admin.remove_user, 'admin')
self.addCleanup(remove_all_users, self.client.pymongo_test)
self.client.pymongo_test.add_user(
"user", "pass", roles=['userAdmin', 'readWrite'])
with self.assertRaises(OperationFailure):
connected(rs_or_single_client(
"mongodb://a:b@%s:%d" % (host, port)))
# No error.
connected(rs_or_single_client_noauth(
"mongodb://admin:pass@%s:%d" % (host, port)))
# Wrong database.
uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port)
with self.assertRaises(OperationFailure):
connected(rs_or_single_client(uri))
# No error.
connected(rs_or_single_client_noauth(
"mongodb://user:pass@%s:%d/pymongo_test" % (host, port)))
# Auth with lazy connection.
rs_or_single_client(
"mongodb://user:pass@%s:%d/pymongo_test" % (host, port),
connect=False).pymongo_test.test.find_one()
# Wrong password.
bad_client = rs_or_single_client(
"mongodb://user:wrong@%s:%d/pymongo_test" % (host, port),
connect=False)
self.assertRaises(OperationFailure,
bad_client.pymongo_test.test.find_one)
@client_context.require_auth
def test_multiple_logins(self):
self.client.pymongo_test.add_user('user1', 'pass', roles=['readWrite'])
self.client.pymongo_test.add_user('user2', 'pass', roles=['readWrite'])
self.addCleanup(remove_all_users, self.client.pymongo_test)
client = rs_or_single_client_noauth(
"mongodb://user1:pass@%s:%d/pymongo_test" % (host, port))
client.pymongo_test.test.find_one()
with self.assertRaises(OperationFailure):
# Can't log in to the same database with multiple users.
client.pymongo_test.authenticate('user2', 'pass')
client.pymongo_test.test.find_one()
client.pymongo_test.logout()
with self.assertRaises(OperationFailure):
client.pymongo_test.test.find_one()
client.pymongo_test.authenticate('user2', 'pass')
client.pymongo_test.test.find_one()
with self.assertRaises(OperationFailure):
client.pymongo_test.authenticate('user1', 'pass')
client.pymongo_test.test.find_one()
@client_context.require_auth
def test_lazy_auth_raises_operation_failure(self):
lazy_client = rs_or_single_client(
"mongodb://user:wrong@%s/pymongo_test" % host, connect=False)
assertRaisesExactly(
OperationFailure, lazy_client.test.collection.find_one)
def test_unix_socket(self):
if not hasattr(socket, "AF_UNIX"):
raise SkipTest("UNIX-sockets are not supported on this system")
mongodb_socket = '/tmp/mongodb-27017.sock'
encoded_socket = '%2Ftmp%2Fmongodb-27017.sock'
if not os.access(mongodb_socket, os.R_OK):
raise SkipTest("Socket file is not accessible")
if client_context.auth_enabled:
uri = "mongodb://%s:%s@%s" % (db_user, db_pwd, encoded_socket)
else:
uri = "mongodb://%s" % encoded_socket
# Confirm we can do operations via the socket.
client = MongoClient(uri)
client.pymongo_test.test.insert_one({"dummy": "object"})
dbs = client.database_names()
self.assertTrue("pymongo_test" in dbs)
# Confirm it fails with a missing socket.
self.assertRaises(
ConnectionFailure,
connected, MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock",
serverSelectionTimeoutMS=100))
def test_fork(self):
# Test using a client before and after a fork.
if sys.platform == "win32":
raise SkipTest("Can't fork on windows")
try:
import multiprocessing
except ImportError:
raise SkipTest("No multiprocessing module")
db = self.client.pymongo_test
# Ensure a socket is opened before the fork.
db.test.find_one()
def f(pipe):
try:
kill_cursors_executor = self.client._kill_cursors_executor
servers = self.client._topology.select_servers(
any_server_selector)
# In child, only the thread that called fork() is alive.
# The first operation should revive the rest.
db.test.find_one()
wait_until(
lambda: all(s._monitor._executor._thread.is_alive()
for s in servers),
"restart monitor threads")
wait_until(lambda: kill_cursors_executor._thread.is_alive(),
"restart kill-cursors executor")
except:
traceback.print_exc() # Aid debugging.
pipe.send(True)
parent_pipe, child_pipe = multiprocessing.Pipe()
p = multiprocessing.Process(target=f, args=(child_pipe,))
p.start()
p.join(10)
child_pipe.close()
# Pipe will only have data if the child process failed.
try:
parent_pipe.recv()
self.fail()
except EOFError:
pass
def test_document_class(self):
c = self.client
db = c.pymongo_test
db.test.insert_one({"x": 1})
self.assertEqual(dict, c.codec_options.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c = rs_or_single_client(document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.codec_options.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
def test_timeouts(self):
client = rs_or_single_client(connectTimeoutMS=10500)
self.assertEqual(10.5, get_pool(client).opts.connect_timeout)
client = rs_or_single_client(socketTimeoutMS=10500)
self.assertEqual(10.5, get_pool(client).opts.socket_timeout)
def test_socket_timeout_ms_validation(self):
c = rs_or_single_client(socketTimeoutMS=10 * 1000)
self.assertEqual(10, get_pool(c).opts.socket_timeout)
c = connected(rs_or_single_client(socketTimeoutMS=None))
self.assertEqual(None, get_pool(c).opts.socket_timeout)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=0)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=-1)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=1e10)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS='foo')
def test_socket_timeout(self):
no_timeout = self.client
timeout_sec = 1
timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert_one({"x": 1})
# A $where clause that takes a second longer than the timeout
where_func = delay(timeout_sec + 1)
def get_x(db):
doc = next(db.test.find().where(where_func))
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test)
def test_server_selection_timeout(self):
client = MongoClient(serverSelectionTimeoutMS=100, connect=False)
self.assertAlmostEqual(0.1, client.server_selection_timeout)
client = MongoClient(serverSelectionTimeoutMS=0, connect=False)
self.assertAlmostEqual(0, client.server_selection_timeout)
self.assertRaises(ValueError, MongoClient,
serverSelectionTimeoutMS="foo", connect=False)
self.assertRaises(ValueError, MongoClient,
serverSelectionTimeoutMS=-1, connect=False)
self.assertRaises(ConfigurationError, MongoClient,
serverSelectionTimeoutMS=None, connect=False)
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=100', connect=False)
self.assertAlmostEqual(0.1, client.server_selection_timeout)
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=0', connect=False)
self.assertAlmostEqual(0, client.server_selection_timeout)
# Test invalid timeout in URI ignored and set to default.
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=-1', connect=False)
self.assertAlmostEqual(30, client.server_selection_timeout)
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=', connect=False)
self.assertAlmostEqual(30, client.server_selection_timeout)
def test_waitQueueTimeoutMS(self):
client = rs_or_single_client(waitQueueTimeoutMS=2000)
self.assertEqual(get_pool(client).opts.wait_queue_timeout, 2)
def test_waitQueueMultiple(self):
client = rs_or_single_client(maxPoolSize=3, waitQueueMultiple=2)
pool = get_pool(client)
self.assertEqual(pool.opts.wait_queue_multiple, 2)
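        # max waiters = maxPoolSize * waitQueueMultiple = 3 * 2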
self.assertEqual(pool._socket_semaphore.waiter_semaphore.counter, 6)
def test_socketKeepAlive(self):
client = rs_or_single_client(socketKeepAlive=True)
self.assertTrue(get_pool(client).opts.socket_keepalive)
def test_tz_aware(self):
self.assertRaises(ValueError, MongoClient, tz_aware='foo')
aware = rs_or_single_client(tz_aware=True)
naive = self.client
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert_one({"x": now})
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
@client_context.require_ipv6
def test_ipv6(self):
if client_context.auth_enabled:
auth_str = "%s:%s@" % (db_user, db_pwd)
else:
auth_str = ""
uri = "mongodb://%s[::1]:%d" % (auth_str, port)
if client_context.is_rs:
uri += '/?replicaSet=' + client_context.replica_set_name
client = rs_or_single_client_noauth(uri)
client.pymongo_test.test.insert_one({"dummy": u("object")})
client.pymongo_test_bernie.test.insert_one({"dummy": u("object")})
dbs = client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
@client_context.require_no_mongos
def test_fsync_lock_unlock(self):
if (server_is_master_with_slave(client_context.client) and
client_context.version.at_least(2, 3, 0)):
raise SkipTest('SERVER-7714')
self.assertFalse(self.client.is_locked)
# async flushing not supported on windows...
if sys.platform not in ('cygwin', 'win32'):
self.client.fsync(async=True)
self.assertFalse(self.client.is_locked)
self.client.fsync(lock=True)
self.assertTrue(self.client.is_locked)
locked = True
self.client.unlock()
for _ in range(5):
locked = self.client.is_locked
if not locked:
break
time.sleep(1)
self.assertFalse(locked)
def test_contextlib(self):
client = rs_or_single_client()
client.pymongo_test.drop_collection("test")
client.pymongo_test.test.insert_one({"foo": "bar"})
# The socket used for the previous commands has been returned to the
# pool
self.assertEqual(1, len(get_pool(client).sockets))
with contextlib.closing(client):
self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"])
self.assertEqual(1, len(get_pool(client).sockets))
self.assertEqual(0, len(get_pool(client).sockets))
with client as client:
self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"])
self.assertEqual(0, len(get_pool(client).sockets))
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
# We can't figure out how to raise an exception on a thread that's
# blocked on a socket, whether that's the main thread or a worker,
# without simply killing the whole thread in Jython. This suggests
# PYTHON-294 can't actually occur in Jython.
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure MongoClient closes its
# socket if it gets an interrupt while waiting to recv() from it.
db = self.client.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert_one({'_id': 1})
def interrupter():
# Raises KeyboardInterrupt in the main thread
time.sleep(0.25)
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
next(db.foo.find({'$where': where}))
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")
# Raises AssertionError due to PYTHON-294 -- Mongo's response to the
# previous find() is still waiting to be read on the socket, so the
# request id's don't match.
self.assertEqual(
{'_id': 1},
next(db.foo.find())
)
def test_operation_failure(self):
# Ensure MongoClient doesn't close socket after it gets an error
# response to getLastError. PYTHON-395.
pool = get_pool(self.client)
socket_count = len(pool.sockets)
self.assertGreaterEqual(socket_count, 1)
old_sock_info = next(iter(pool.sockets))
self.client.pymongo_test.test.drop()
self.client.pymongo_test.test.insert_one({'_id': 'foo'})
self.assertRaises(
OperationFailure,
self.client.pymongo_test.test.insert_one, {'_id': 'foo'})
self.assertEqual(socket_count, len(pool.sockets))
new_sock_info = next(iter(pool.sockets))
self.assertEqual(old_sock_info, new_sock_info)
def test_kill_cursors(self):
if (client_context.is_mongos
and not client_context.version.at_least(2, 4, 7)):
            # Old mongos sends an incorrectly formatted error response when
            # the cursor isn't found, see SERVER-9738.
raise SkipTest("Can't test kill_cursors against old mongos")
self.collection = self.client.pymongo_test.test
self.collection.drop()
self.collection.insert_many([{'_id': i} for i in range(200)])
cursor = self.collection.find().batch_size(1)
next(cursor)
self.client.kill_cursors([cursor.cursor_id])
# Prevent killcursors from reaching the server while a getmore is in
# progress -- the server logs "Assertion: 16089:Cannot kill active
# cursor."
time.sleep(2)
def raises_cursor_not_found():
try:
next(cursor)
return False
except CursorNotFound:
return True
wait_until(raises_cursor_not_found, 'close cursor')
def test_kill_cursors_with_server_unavailable(self):
with client_knobs(kill_cursor_frequency=9999999):
client = MongoClient('doesnt exist', connect=False,
serverSelectionTimeoutMS=0)
# Wait for the first tick of the periodic kill-cursors to pass.
time.sleep(1)
# Enqueue a kill-cursors message.
client.close_cursor(1234, ('doesnt-exist', 27017))
with warnings.catch_warnings(record=True) as user_warnings:
client._process_kill_cursors_queue()
self.assertIn("couldn't close cursor on ('doesnt-exist', 27017)",
str(user_warnings[0].message))
def test_lazy_connect_w0(self):
# Ensure that connect-on-demand works when the first operation is
# an unacknowledged write. This exercises _writable_max_wire_version().
# Use a separate collection to avoid races where we're still
# completing an operation on a collection while the next test begins.
client = rs_or_single_client(connect=False, w=0)
client.test_lazy_connect_w0.test.insert_one({})
client = rs_or_single_client(connect=False)
client.test_lazy_connect_w0.test.update_one({}, {'$set': {'x': 1}})
client = rs_or_single_client(connect=False)
client.test_lazy_connect_w0.test.delete_one({})
@client_context.require_no_mongos
def test_exhaust_network_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
# Ensure a socket.
connected(client)
# Cause a network error.
sock_info = one(pool.sockets)
sock_info.sock.close()
cursor = collection.find(cursor_type=CursorType.EXHAUST)
with self.assertRaises(ConnectionFailure):
next(cursor)
self.assertTrue(sock_info.closed)
# The semaphore was decremented despite the error.
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
@client_context.require_auth
def test_auth_network_error(self):
# Make sure there's no semaphore leak if we get a network error
# when authenticating a new socket with cached credentials.
# Get a client with one socket so we detect if it's leaked.
c = connected(rs_or_single_client(maxPoolSize=1,
waitQueueTimeoutMS=1))
# Simulate an authenticate() call on a different socket.
credentials = auth._build_credentials_tuple(
'DEFAULT', 'admin', db_user, db_pwd, {})
c._cache_credentials('test', credentials, connect=False)
# Cause a network error on the actual socket.
pool = get_pool(c)
socket_info = one(pool.sockets)
socket_info.sock.close()
# SocketInfo.check_auth logs in with the new credential, but gets a
# socket.error. Should be reraised as AutoReconnect.
self.assertRaises(AutoReconnect, c.test.collection.find_one)
# No semaphore leak, the pool is allowed to make a new socket.
c.test.collection.find_one()
@client_context.require_no_replica_set
def test_connect_to_standalone_using_replica_set_name(self):
client = MongoClient(pair, replicaSet='anything',
serverSelectionTimeoutMS=100)
with self.assertRaises(AutoReconnect):
client.test.test.find_one()
@client_context.require_replica_set
def test_stale_getmore(self):
# A cursor is created, but its member goes down and is removed from
# the topology before the getMore message is sent. Test that
# MongoClient._send_message_with_response handles the error.
with self.assertRaises(AutoReconnect):
client = MongoClient(host, port, connect=False,
serverSelectionTimeoutMS=100,
replicaSet=client_context.replica_set_name)
client._send_message_with_response(
operation=message._GetMore('collection', 101, 1234),
address=('not-a-member', 27017))
class TestExhaustCursor(IntegrationTest):
"""Test that clients properly handle errors from exhaust cursors."""
def setUp(self):
super(TestExhaustCursor, self).setUp()
if client_context.is_mongos:
raise SkipTest("mongos doesn't support exhaust, SERVER-2627")
    # mongod < 2.2.0 closes the exhaust socket on error, so it behaves like
    # test_exhaust_query_network_error. Here we test that on a query error
# the client correctly keeps the socket *open* and checks it in.
@client_context.require_version_min(2, 2, 0)
def test_exhaust_query_server_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = connected(rs_or_single_client(maxPoolSize=1))
collection = client.pymongo_test.test
pool = get_pool(client)
sock_info = one(pool.sockets)
# This will cause OperationFailure in all mongo versions since
# the value for $orderby must be a document.
cursor = collection.find(
SON([('$query', {}), ('$orderby', True)]),
cursor_type=CursorType.EXHAUST)
self.assertRaises(OperationFailure, cursor.next)
self.assertFalse(sock_info.closed)
# The socket was checked in and the semaphore was decremented.
self.assertIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_getmore_server_error(self):
# When doing a getmore on an exhaust cursor, the socket stays checked
# out on success but it's checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
collection.drop()
collection.insert_many([{} for _ in range(200)])
self.addCleanup(client_context.client.pymongo_test.test.drop)
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
sock_info = one(pool.sockets)
cursor = collection.find(cursor_type=CursorType.EXHAUST)
# Initial query succeeds.
cursor.next()
# Cause a server error on getmore.
def receive_message(operation, request_id):
# Discard the actual server response.
SocketInfo.receive_message(sock_info, operation, request_id)
# responseFlags bit 1 is QueryFailure.
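            # ('<iiiii' packs a minimal OP_REPLY header: responseFlags, a
            # zeroed 64-bit cursorID as two int32s, startingFrom, numberReturned.)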
msg = struct.pack('<iiiii', 1 << 1, 0, 0, 0, 0)
msg += BSON.encode({'$err': 'mock err', 'code': 0})
return msg
saved = sock_info.receive_message
sock_info.receive_message = receive_message
self.assertRaises(OperationFailure, list, cursor)
sock_info.receive_message = saved
        # The socket is returned to the pool and it still works.
self.assertEqual(200, collection.count())
self.assertIn(sock_info, pool.sockets)
def test_exhaust_query_network_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = connected(rs_or_single_client(maxPoolSize=1))
collection = client.pymongo_test.test
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
# Cause a network error.
sock_info = one(pool.sockets)
sock_info.sock.close()
cursor = collection.find(cursor_type=CursorType.EXHAUST)
self.assertRaises(ConnectionFailure, cursor.next)
self.assertTrue(sock_info.closed)
# The socket was closed and the semaphore was decremented.
self.assertNotIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_getmore_network_error(self):
# When doing a getmore on an exhaust cursor, the socket stays checked
# out on success but it's checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
collection.drop()
collection.insert_many([{} for _ in range(200)]) # More than one batch.
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
cursor = collection.find(cursor_type=CursorType.EXHAUST)
# Initial query succeeds.
cursor.next()
# Cause a network error.
sock_info = cursor._Cursor__exhaust_mgr.sock
sock_info.sock.close()
# A getmore fails.
self.assertRaises(ConnectionFailure, list, cursor)
self.assertTrue(sock_info.closed)
# The socket was closed and the semaphore was decremented.
self.assertNotIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
class TestClientLazyConnect(IntegrationTest):
"""Test concurrent operations on a lazily-connecting MongoClient."""
def _get_client(self):
return rs_or_single_client(connect=False)
def test_insert_one(self):
def reset(collection):
collection.drop()
def insert_one(collection, _):
collection.insert_one({})
def test(collection):
self.assertEqual(NTHREADS, collection.count())
lazy_client_trial(reset, insert_one, test, self._get_client)
def test_update_one(self):
def reset(collection):
collection.drop()
collection.insert_one({'i': 0})
        # Update the doc NTHREADS times.
def update_one(collection, _):
collection.update_one({}, {'$inc': {'i': 1}})
def test(collection):
self.assertEqual(NTHREADS, collection.find_one()['i'])
lazy_client_trial(reset, update_one, test, self._get_client)
def test_delete_one(self):
def reset(collection):
collection.drop()
collection.insert_many([{'i': i} for i in range(NTHREADS)])
def delete_one(collection, i):
collection.delete_one({'i': i})
def test(collection):
self.assertEqual(0, collection.count())
lazy_client_trial(reset, delete_one, test, self._get_client)
def test_find_one(self):
results = []
def reset(collection):
collection.drop()
collection.insert_one({})
results[:] = []
def find_one(collection, _):
results.append(collection.find_one())
def test(collection):
self.assertEqual(NTHREADS, len(results))
lazy_client_trial(reset, find_one, test, self._get_client)
def test_max_bson_size(self):
# Client should have sane defaults before connecting, and should update
# its configuration once connected.
c = self._get_client()
self.assertEqual(16 * (1024 ** 2), c.max_bson_size)
self.assertEqual(2 * c.max_bson_size, c.max_message_size)
# Make the client connect, so that it sets its max_bson_size and
# max_message_size attributes.
ismaster = c.db.command('ismaster')
self.assertEqual(ismaster['maxBsonObjectSize'], c.max_bson_size)
if 'maxMessageSizeBytes' in ismaster:
self.assertEqual(
ismaster['maxMessageSizeBytes'],
c.max_message_size)
class TestMongoClientFailover(MockClientTest):
def test_discover_primary(self):
# Disable background refresh.
with client_knobs(heartbeat_frequency=999999):
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='b:2', # Pass a secondary.
replicaSet='rs')
wait_until(lambda: len(c.nodes) == 3, 'connect')
self.assertEqual(c.address, ('a', 1))
# Fail over.
c.kill_host('a:1')
c.mock_primary = 'b:2'
c.close()
self.assertEqual(0, len(c.nodes))
t = c._get_topology()
t.select_servers(writable_server_selector) # Reconnect.
self.assertEqual(c.address, ('b', 2))
            # a:1 is no longer in nodes.
self.assertLess(len(c.nodes), 3)
# c:3 is rediscovered.
t.select_server_by_address(('c', 3))
def test_reconnect(self):
# Verify the node list isn't forgotten during a network failure.
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='b:2', # Pass a secondary.
replicaSet='rs')
wait_until(lambda: len(c.nodes) == 3, 'connect')
# Total failure.
c.kill_host('a:1')
c.kill_host('b:2')
c.kill_host('c:3')
# MongoClient discovers it's alone.
self.assertRaises(AutoReconnect, c.db.collection.find_one)
# But it can reconnect.
c.revive_host('a:1')
c._get_topology().select_servers(writable_server_selector)
self.assertEqual(c.address, ('a', 1))
def _test_network_error(self, operation_callback):
# Verify only the disconnected server is reset by a network failure.
# Disable background refresh.
with client_knobs(heartbeat_frequency=999999):
c = MockClient(
standalones=[],
members=['a:1', 'b:2'],
mongoses=[],
host='a:1',
replicaSet='rs',
connect=False)
# Set host-specific information so we can test whether it is reset.
c.set_wire_version_range('a:1', 0, 1)
c.set_wire_version_range('b:2', 0, 2)
c._get_topology().select_servers(writable_server_selector)
wait_until(lambda: len(c.nodes) == 2, 'connect')
c.kill_host('a:1')
# MongoClient is disconnected from the primary.
self.assertRaises(AutoReconnect, operation_callback, c)
# The primary's description is reset.
server_a = c._get_topology().get_server_by_address(('a', 1))
sd_a = server_a.description
self.assertEqual(SERVER_TYPE.Unknown, sd_a.server_type)
self.assertEqual(0, sd_a.min_wire_version)
self.assertEqual(0, sd_a.max_wire_version)
# ...but not the secondary's.
server_b = c._get_topology().get_server_by_address(('b', 2))
sd_b = server_b.description
self.assertEqual(SERVER_TYPE.RSSecondary, sd_b.server_type)
self.assertEqual(0, sd_b.min_wire_version)
self.assertEqual(2, sd_b.max_wire_version)
def test_network_error_on_query(self):
callback = lambda client: client.db.collection.find_one()
self._test_network_error(callback)
def test_network_error_on_insert(self):
callback = lambda client: client.db.collection.insert_one({})
self._test_network_error(callback)
def test_network_error_on_update(self):
callback = lambda client: client.db.collection.update_one(
{}, {'$unset': 'x'})
self._test_network_error(callback)
def test_network_error_on_replace(self):
callback = lambda client: client.db.collection.replace_one({}, {})
self._test_network_error(callback)
def test_network_error_on_delete(self):
callback = lambda client: client.db.collection.delete_many({})
self._test_network_error(callback)
if __name__ == "__main__":
unittest.main()
|
takeshineshiro/django-cms
|
refs/heads/develop
|
cms/models/apphooks_reload.py
|
38
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.db import models
class UrlconfRevision(models.Model):
revision = models.CharField(max_length=255)
class Meta:
app_label = 'cms'
def save(self, *args, **kwargs):
"""
Simply forces this model to be a singleton.
"""
self.pk = 1
super(UrlconfRevision, self).save(*args, **kwargs)
@classmethod
def get_or_create_revision(cls, revision=None):
"""
Convenience method for getting or creating revision.
"""
if revision is None:
revision = str(uuid.uuid4())
obj, created = cls.objects.get_or_create(
pk=1, defaults=dict(revision=revision))
return obj.revision, created
@classmethod
def update_revision(cls, revision):
"""
Convenience method for updating the revision.
"""
obj, created = cls.objects.get_or_create(
pk=1, defaults=dict(revision=revision))
if not created:
obj.revision = revision
obj.save()
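# Minimal usage sketch (hypothetical call sites, not part of this module):
#   revision, created = UrlconfRevision.get_or_create_revision()
#   UrlconfRevision.update_revision(str(uuid.uuid4()))
# Both helpers pin pk=1, so the table never holds more than one row.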
|
jazkarta/edx-platform
|
refs/heads/master
|
lms/djangoapps/survey/utils.py
|
182
|
"""
Helper methods for Surveys
"""
from survey.models import SurveyForm, SurveyAnswer
from courseware.access import has_access
def is_survey_required_for_course(course_descriptor):
"""
Returns whether a Survey is required for this course
"""
# check to see that the Survey name has been defined in the CourseDescriptor
# and that the specified Survey exists
return course_descriptor.course_survey_required and \
SurveyForm.get(course_descriptor.course_survey_name, throw_if_not_found=False)
def must_answer_survey(course_descriptor, user):
"""
Returns whether a user needs to answer a required survey
"""
if not is_survey_required_for_course(course_descriptor):
return False
    # this will throw an exception if not found, but a non-existent survey name
    # will already have been caught by is_survey_required_for_course() above
survey = SurveyForm.get(course_descriptor.course_survey_name)
has_staff_access = has_access(user, 'staff', course_descriptor)
# survey is required and it exists, let's see if user has answered the survey
# course staff do not need to answer survey
answered_survey = SurveyAnswer.do_survey_answers_exist(survey, user)
return not answered_survey and not has_staff_access
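# Example (hypothetical view code; `course_descriptor` and `request` are
# assumed to come from the surrounding courseware view):
#   if must_answer_survey(course_descriptor, request.user):
#       return redirect_to_course_survey(course_descriptor)  # hypothetical helper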
|
lcpt/xc
|
refs/heads/master
|
verif/tests/materials/uniaxial/test_cable_material01.py
|
1
|
# -*- coding: utf-8 -*-
# Test from Ansys manual
# Reference: Strength of Material, Part I, Elementary Theory and Problems, pg. 26, problem 10
import xc_base
import geom
import xc
from solution import predefined_solutions
from model import predefined_spaces
from materials import typical_materials
__author__= "Luis C. Pérez Tato (LCPT) and Ana Ortega (AOO)"
__copyright__= "Copyright 2015, LCPT and AOO"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
E= 30e6 # Young modulus (psi)
l= 10 # Cable length in inches
sigmaPret= 1000 # Prestressing stress (psi)
area= 2 # Cross-sectional area (in2)
fPret= sigmaPret*area # Prestressing force (pounds)
# Model definition
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
# Problem type
modelSpace= predefined_spaces.SolidMechanics2D(nodes)
nodes.defaultTag= 1 #First node number.
nod= nodes.newNodeXY(0,0)
nod= nodes.newNodeXY(l,0.0)
# Materials definition
mat= typical_materials.defCableMaterial(preprocessor, "cable",E,sigmaPret,0.0)
''' We define nodes at the points where loads will be applied.
We will not compute stresses so we can use an arbitrary
cross section of unit area.'''
# Elements definition
elements= preprocessor.getElementHandler
elements.defaultMaterial= "cable"
elements.dimElem= 2 # Dimension of element space
# syntax: truss[<tag>]
elements.defaultTag= 1 #Tag for the next element.
truss= elements.newElement("Truss",xc.ID([1,2]))
truss.area= area
# Constraints
constraints= preprocessor.getBoundaryCondHandler
#
spc= constraints.newSPConstraint(1,0,0.0) # Node 1
spc= constraints.newSPConstraint(1,1,0.0)
spc= constraints.newSPConstraint(2,0,0.0) # Node 2
spc= constraints.newSPConstraint(2,1,0.0)
# Solution
analysis= predefined_solutions.simple_static_linear(feProblem)
result= analysis.analyze(1)
nodes.calculateNodalReactions(True,1e-7)
R1= nodes.getNode(2).getReaction[0]
R2= nodes.getNode(1).getReaction[0]
ratio1= ((R1-fPret)/fPret)
ratio2= ((R2+fPret)/fPret)
'''
print "R1= ",R1
print "R2= ",R2
print "ratio1= ",(ratio1)
print "ratio2= ",(ratio2)
'''
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(ratio1)<1e-5) & (abs(ratio2)<1e-5):
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
|
tersmitten/ansible
|
refs/heads/devel
|
test/integration/targets/get_url/files/testserver.py
|
251
|
import sys
if __name__ == '__main__':
if sys.version_info[0] >= 3:
import http.server
import socketserver
PORT = int(sys.argv[1])
class Handler(http.server.SimpleHTTPRequestHandler):
pass
Handler.extensions_map['.json'] = 'application/json'
httpd = socketserver.TCPServer(("", PORT), Handler)
httpd.serve_forever()
else:
import mimetypes
mimetypes.init()
mimetypes.add_type('application/json', '.json')
import SimpleHTTPServer
SimpleHTTPServer.test()
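# Usage (inferred from the argument handling above; both branches read the
# port from the command line):
#     python testserver.py 8000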
|
centrumholdings/buildbot
|
refs/heads/master
|
buildbot/process/subunitlogobserver.py
|
1
|
# -*- test-case-name: buildbot.test.test_buildstep -*-
from unittest import TestResult
from buildbot.process import buildstep
class DiscardStream:
"""A trivial thunk used to discard passthrough content."""
def write(self, bytes):
pass
class SubunitLogObserver(buildstep.LogLineObserver, TestResult):
"""Observe a log that may contain subunit output.
This class extends TestResult to receive the callbacks from the subunit
parser in the most direct fashion.
"""
def __init__(self):
buildstep.LogLineObserver.__init__(self)
TestResult.__init__(self)
try:
from subunit import TestProtocolServer
except ImportError:
raise ImportError("subunit is not importable, but is required for "
"SubunitLogObserver support.")
self.protocol = TestProtocolServer(self, DiscardStream())
def outLineReceived(self, line):
"""Process a received line."""
        # Impedance mismatch: subunit wants newline-terminated lines, but
        # observers receive lines with the trailing newline stripped.
self.protocol.lineReceived(line + '\n')
def startTest(self, test):
TestResult.startTest(self, test)
self.step.setProgress('tests', self.testsRun)
def addError(self, test, err):
TestResult.addError(self, test, err)
self.issue()
def addFailure(self, test, err):
TestResult.addFailure(self, test, err)
self.issue()
def issue(self):
"""An issue - failing, erroring etc test."""
self.step.setProgress('tests failed', len(self.failures) + len(self.errors))
# this used to be referenced here, so we keep a link for old time's sake
import buildbot.steps.subunit
SubunitShellCommand = buildbot.steps.subunit.SubunitShellCommand
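# Typical wiring (an assumption; not shown in this module): a shell-command
# build step attaches the observer so subunit output drives step progress:
#     step.addLogObserver('stdio', SubunitLogObserver())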
|
geekaia/edx-platform
|
refs/heads/master
|
common/djangoapps/cache_toolbox/__init__.py
|
261
|
"""
:mod:`cache_toolbox` --- Non-magical object caching tools for Django
====================================================================
Introduction
------------
``cache_toolbox`` is intended to be a lightweight series of independent tools
to leverage caching within Django projects.
The tools are deliberately `non-magical`. That is to say, instances are never
retrieved from caches behind your back and regular Django ``.filter()`` /
``.get()`` queries continue to work exactly as before.
Because of this, you can introduce ``cache_toolbox`` into your project slowly
when needed rather than "switching" to it with invasive changes.
Links
-----
View/download code
https://github.com/playfire/django-cache-toolbox
File a bug
https://github.com/playfire/django-cache-toolbox/issues
"""
from .model import cache_model
from .relation import cache_relation
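# A minimal usage sketch (``cache_model`` is imported above; the model and the
# cached accessor shown are assumptions for illustration):
#
#     from django.contrib.auth.models import User
#     from cache_toolbox import cache_model
#
#     cache_model(User)              # register User for by-primary-key caching
#     user = User.get_cached(pk=1)   # explicit cached lookup; plain
#                                    # User.objects.get(pk=1) is unaffected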
|
schoolie/bokeh
|
refs/heads/master
|
sphinx/source/docs/user_guide/examples/charts_histogram_color.py
|
8
|
from bokeh.charts import Histogram, output_file, show
from bokeh.sampledata.autompg import autompg as df
p = Histogram(df, values='hp', color='navy', title="HP Distribution")
output_file("histogram_color.html")
show(p)
|
Zhongqilong/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_urllibnet.py
|
75
|
import unittest
from test import support
import contextlib
import socket
import urllib.request
import sys
import os
import email.message
import time
support.requires('network')
class URLTimeoutTest(unittest.TestCase):
# XXX this test doesn't seem to test anything useful.
TIMEOUT = 30.0
def setUp(self):
socket.setdefaulttimeout(self.TIMEOUT)
def tearDown(self):
socket.setdefaulttimeout(None)
def testURLread(self):
with support.transient_internet("www.example.com"):
f = urllib.request.urlopen("http://www.example.com/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
"""Tests urllib.reqest.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
setUp is not used for always constructing a connection to
http://www.example.com/ since there a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
"""
@contextlib.contextmanager
def urlopen(self, *args, **kwargs):
resource = args[0]
with support.transient_internet(resource):
r = urllib.request.urlopen(*args, **kwargs)
try:
yield r
finally:
r.close()
def test_basic(self):
# Simple test expected to pass.
with self.urlopen("http://www.example.com/") as open_url:
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
self.assertTrue(open_url.read(), "calling 'read' failed")
def test_readlines(self):
# Test both readline and readlines.
with self.urlopen("http://www.example.com/") as open_url:
            self.assertIsInstance(open_url.readline(), bytes,
                                  "readline did not return bytes")
self.assertIsInstance(open_url.readlines(), list,
"readlines did not return a list")
def test_info(self):
# Test 'info'.
with self.urlopen("http://www.example.com/") as open_url:
info_obj = open_url.info()
self.assertIsInstance(info_obj, email.message.Message,
"object returned by 'info' is not an "
"instance of email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "html")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
URL = "http://www.example.com/"
with self.urlopen(URL) as open_url:
gotten_url = open_url.geturl()
self.assertEqual(gotten_url, URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
URL = "http://www.example.com/XXXinvalidXXX"
with support.transient_internet(URL):
open_url = urllib.request.FancyURLopener().open(URL)
try:
code = open_url.getcode()
finally:
open_url.close()
self.assertEqual(code, 404)
# On Windows, socket handles are not file descriptors; this
# test can't pass on Windows.
@unittest.skipIf(sys.platform in ('win32',), 'not appropriate for Windows')
def test_fileno(self):
# Make sure fd returned by fileno is valid.
with self.urlopen("http://www.google.com/", timeout=None) as open_url:
fd = open_url.fileno()
with os.fdopen(fd, 'rb') as f:
self.assertTrue(f.read(), "reading from file created using fd "
"returned by fileno failed")
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
bogus_domain = "sadflkjsasf.i.nvali.d"
try:
socket.gethostbyname(bogus_domain)
except OSError:
# socket.gaierror is too narrow, since getaddrinfo() may also
# fail with EAI_SYSTEM and ETIMEDOUT (seen on Ubuntu 13.04),
# i.e. Python's TimeoutError.
pass
else:
# This happens with some overzealous DNS providers such as OpenDNS
self.skipTest("%r should not resolve for test to work" % bogus_domain)
failure_explanation = ('opening an invalid URL did not raise OSError; '
'can be caused by a broken DNS server '
'(e.g. returns 404 or hijacks page)')
with self.assertRaises(OSError, msg=failure_explanation):
            # SF patch 809915: In Sep 2003, VeriSign started hijacking
# invalid .com and .net addresses to boost traffic to their own
# site. This test started failing then. One hopes the .invalid
# domain will be spared to serve its defined purpose.
urllib.request.urlopen("http://sadflkjsasf.i.nvali.d/")
class urlretrieveNetworkTests(unittest.TestCase):
"""Tests urllib.request.urlretrieve using the network."""
@contextlib.contextmanager
def urlretrieve(self, *args, **kwargs):
resource = args[0]
with support.transient_internet(resource):
file_location, info = urllib.request.urlretrieve(*args, **kwargs)
try:
yield file_location, info
finally:
support.unlink(file_location)
def test_basic(self):
# Test basic functionality.
with self.urlretrieve("http://www.example.com/") as (file_location, info):
self.assertTrue(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
with open(file_location, 'rb') as f:
self.assertTrue(f.read(), "reading from the file location returned"
" by urlretrieve failed")
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
with self.urlretrieve("http://www.example.com/",
support.TESTFN) as (file_location, info):
self.assertEqual(file_location, support.TESTFN)
self.assertTrue(os.path.exists(file_location))
with open(file_location, 'rb') as f:
self.assertTrue(f.read(), "reading from temporary file failed")
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
with self.urlretrieve("http://www.example.com/") as (file_location, info):
self.assertIsInstance(info, email.message.Message,
"info is not an instance of email.message.Message")
logo = "http://www.example.com/"
def test_data_header(self):
with self.urlretrieve(self.logo) as (file_location, fileheaders):
datevalue = fileheaders.get('Date')
dateformat = '%a, %d %b %Y %H:%M:%S GMT'
try:
time.strptime(datevalue, dateformat)
except ValueError:
                self.fail('Date value not in %r format' % dateformat)
def test_reporthook(self):
records = []
def recording_reporthook(blocks, block_size, total_size):
records.append((blocks, block_size, total_size))
with self.urlretrieve(self.logo, reporthook=recording_reporthook) as (
file_location, fileheaders):
expected_size = int(fileheaders['Content-Length'])
records_repr = repr(records) # For use in error messages.
        self.assertGreater(len(records), 1, msg="There should always be at "
                           "least two calls; the first one comes before "
                           "the transfer starts.")
self.assertEqual(records[0][0], 0)
self.assertGreater(records[0][1], 0,
msg="block size can't be 0 in %s" % records_repr)
self.assertEqual(records[0][2], expected_size)
self.assertEqual(records[-1][2], expected_size)
block_sizes = {block_size for _, block_size, _ in records}
self.assertEqual({records[0][1]}, block_sizes,
msg="block sizes in %s must be equal" % records_repr)
self.assertGreaterEqual(records[-1][0]*records[0][1], expected_size,
msg="number of blocks * block size must be"
" >= total size in %s" % records_repr)
if __name__ == "__main__":
unittest.main()
|
bcornwellmott/erpnext
|
refs/heads/develop
|
erpnext/accounts/report/general_ledger/__init__.py
|
12133432
| |
shenson/cobbler
|
refs/heads/master
|
cobbler/web/__init__.py
|
12133432
| |
irudayarajisawa/django-cms
|
refs/heads/develop
|
cms/test_utils/fixtures/__init__.py
|
12133432
| |
ahmadshahwan/cohorte-runtime
|
refs/heads/master
|
python/src/lib/python/unidecode/x073.py
|
252
|
data = (
'Sha ', # 0x00
'Li ', # 0x01
'Han ', # 0x02
'Xian ', # 0x03
'Jing ', # 0x04
'Pai ', # 0x05
'Fei ', # 0x06
'Yao ', # 0x07
'Ba ', # 0x08
'Qi ', # 0x09
'Ni ', # 0x0a
'Biao ', # 0x0b
'Yin ', # 0x0c
'Lai ', # 0x0d
'Xi ', # 0x0e
'Jian ', # 0x0f
'Qiang ', # 0x10
'Kun ', # 0x11
'Yan ', # 0x12
'Guo ', # 0x13
'Zong ', # 0x14
'Mi ', # 0x15
'Chang ', # 0x16
'Yi ', # 0x17
'Zhi ', # 0x18
'Zheng ', # 0x19
'Ya ', # 0x1a
'Meng ', # 0x1b
'Cai ', # 0x1c
'Cu ', # 0x1d
'She ', # 0x1e
'Kari ', # 0x1f
'Cen ', # 0x20
'Luo ', # 0x21
'Hu ', # 0x22
'Zong ', # 0x23
'Ji ', # 0x24
'Wei ', # 0x25
'Feng ', # 0x26
'Wo ', # 0x27
'Yuan ', # 0x28
'Xing ', # 0x29
'Zhu ', # 0x2a
'Mao ', # 0x2b
'Wei ', # 0x2c
'Yuan ', # 0x2d
'Xian ', # 0x2e
'Tuan ', # 0x2f
'Ya ', # 0x30
'Nao ', # 0x31
'Xie ', # 0x32
'Jia ', # 0x33
'Hou ', # 0x34
'Bian ', # 0x35
'You ', # 0x36
'You ', # 0x37
'Mei ', # 0x38
'Zha ', # 0x39
'Yao ', # 0x3a
'Sun ', # 0x3b
'Bo ', # 0x3c
'Ming ', # 0x3d
'Hua ', # 0x3e
'Yuan ', # 0x3f
'Sou ', # 0x40
'Ma ', # 0x41
'Yuan ', # 0x42
'Dai ', # 0x43
'Yu ', # 0x44
'Shi ', # 0x45
'Hao ', # 0x46
'[?] ', # 0x47
'Yi ', # 0x48
'Zhen ', # 0x49
'Chuang ', # 0x4a
'Hao ', # 0x4b
'Man ', # 0x4c
'Jing ', # 0x4d
'Jiang ', # 0x4e
'Mu ', # 0x4f
'Zhang ', # 0x50
'Chan ', # 0x51
'Ao ', # 0x52
'Ao ', # 0x53
'Hao ', # 0x54
'Cui ', # 0x55
'Fen ', # 0x56
'Jue ', # 0x57
'Bi ', # 0x58
'Bi ', # 0x59
'Huang ', # 0x5a
'Pu ', # 0x5b
'Lin ', # 0x5c
'Yu ', # 0x5d
'Tong ', # 0x5e
'Yao ', # 0x5f
'Liao ', # 0x60
'Shuo ', # 0x61
'Xiao ', # 0x62
'Swu ', # 0x63
'Ton ', # 0x64
'Xi ', # 0x65
'Ge ', # 0x66
'Juan ', # 0x67
'Du ', # 0x68
'Hui ', # 0x69
'Kuai ', # 0x6a
'Xian ', # 0x6b
'Xie ', # 0x6c
'Ta ', # 0x6d
'Xian ', # 0x6e
'Xun ', # 0x6f
'Ning ', # 0x70
'Pin ', # 0x71
'Huo ', # 0x72
'Nou ', # 0x73
'Meng ', # 0x74
'Lie ', # 0x75
'Nao ', # 0x76
'Guang ', # 0x77
'Shou ', # 0x78
'Lu ', # 0x79
'Ta ', # 0x7a
'Xian ', # 0x7b
'Mi ', # 0x7c
'Rang ', # 0x7d
'Huan ', # 0x7e
'Nao ', # 0x7f
'Luo ', # 0x80
'Xian ', # 0x81
'Qi ', # 0x82
'Jue ', # 0x83
'Xuan ', # 0x84
'Miao ', # 0x85
'Zi ', # 0x86
'Lu ', # 0x87
'Lu ', # 0x88
'Yu ', # 0x89
'Su ', # 0x8a
'Wang ', # 0x8b
'Qiu ', # 0x8c
'Ga ', # 0x8d
'Ding ', # 0x8e
'Le ', # 0x8f
'Ba ', # 0x90
'Ji ', # 0x91
'Hong ', # 0x92
'Di ', # 0x93
'Quan ', # 0x94
'Gan ', # 0x95
'Jiu ', # 0x96
'Yu ', # 0x97
'Ji ', # 0x98
'Yu ', # 0x99
'Yang ', # 0x9a
'Ma ', # 0x9b
'Gong ', # 0x9c
'Wu ', # 0x9d
'Fu ', # 0x9e
'Wen ', # 0x9f
'Jie ', # 0xa0
'Ya ', # 0xa1
'Fen ', # 0xa2
'Bian ', # 0xa3
'Beng ', # 0xa4
'Yue ', # 0xa5
'Jue ', # 0xa6
'Yun ', # 0xa7
'Jue ', # 0xa8
'Wan ', # 0xa9
'Jian ', # 0xaa
'Mei ', # 0xab
'Dan ', # 0xac
'Pi ', # 0xad
'Wei ', # 0xae
'Huan ', # 0xaf
'Xian ', # 0xb0
'Qiang ', # 0xb1
'Ling ', # 0xb2
'Dai ', # 0xb3
'Yi ', # 0xb4
'An ', # 0xb5
'Ping ', # 0xb6
'Dian ', # 0xb7
'Fu ', # 0xb8
'Xuan ', # 0xb9
'Xi ', # 0xba
'Bo ', # 0xbb
'Ci ', # 0xbc
'Gou ', # 0xbd
'Jia ', # 0xbe
'Shao ', # 0xbf
'Po ', # 0xc0
'Ci ', # 0xc1
'Ke ', # 0xc2
'Ran ', # 0xc3
'Sheng ', # 0xc4
'Shen ', # 0xc5
'Yi ', # 0xc6
'Zu ', # 0xc7
'Jia ', # 0xc8
'Min ', # 0xc9
'Shan ', # 0xca
'Liu ', # 0xcb
'Bi ', # 0xcc
'Zhen ', # 0xcd
'Zhen ', # 0xce
'Jue ', # 0xcf
'Fa ', # 0xd0
'Long ', # 0xd1
'Jin ', # 0xd2
'Jiao ', # 0xd3
'Jian ', # 0xd4
'Li ', # 0xd5
'Guang ', # 0xd6
'Xian ', # 0xd7
'Zhou ', # 0xd8
'Gong ', # 0xd9
'Yan ', # 0xda
'Xiu ', # 0xdb
'Yang ', # 0xdc
'Xu ', # 0xdd
'Luo ', # 0xde
'Su ', # 0xdf
'Zhu ', # 0xe0
'Qin ', # 0xe1
'Ken ', # 0xe2
'Xun ', # 0xe3
'Bao ', # 0xe4
'Er ', # 0xe5
'Xiang ', # 0xe6
'Yao ', # 0xe7
'Xia ', # 0xe8
'Heng ', # 0xe9
'Gui ', # 0xea
'Chong ', # 0xeb
'Xu ', # 0xec
'Ban ', # 0xed
'Pei ', # 0xee
'[?] ', # 0xef
'Dang ', # 0xf0
'Ei ', # 0xf1
'Hun ', # 0xf2
'Wen ', # 0xf3
'E ', # 0xf4
'Cheng ', # 0xf5
'Ti ', # 0xf6
'Wu ', # 0xf7
'Wu ', # 0xf8
'Cheng ', # 0xf9
'Jun ', # 0xfa
'Mei ', # 0xfb
'Bei ', # 0xfc
'Ting ', # 0xfd
'Xian ', # 0xfe
'Chuo ', # 0xff
)
|
sorl/django-mockups
|
refs/heads/master
|
mockups/tests/urls.py
|
2
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
from django.http import HttpResponse
admin.autodiscover()
def handle404(request):
return HttpResponse('404')
def handle500(request):
return HttpResponse('500')
handler404 = 'mockups_tests.urls.handle404'
handler500 = 'mockups_tests.urls.handle500'
urlpatterns = patterns('',
url(r'^media/(.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
url(r'^admin/', include(admin.site.urls), name="admin"),
)
|
shuggiefisher/django-on-google-app-engine-base
|
refs/heads/master
|
django/contrib/gis/tests/relatedapp/models.py
|
274
|
from django.contrib.gis.db import models
from django.contrib.localflavor.us.models import USStateField
class Location(models.Model):
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.point.wkt
class City(models.Model):
name = models.CharField(max_length=50)
state = USStateField()
location = models.ForeignKey(Location)
objects = models.GeoManager()
def __unicode__(self): return self.name
class AugmentedLocation(Location):
extra_text = models.TextField(blank=True)
objects = models.GeoManager()
class DirectoryEntry(models.Model):
listing_text = models.CharField(max_length=50)
location = models.ForeignKey(AugmentedLocation)
objects = models.GeoManager()
class Parcel(models.Model):
name = models.CharField(max_length=30)
city = models.ForeignKey(City)
center1 = models.PointField()
# Throwing a curveball w/`db_column` here.
center2 = models.PointField(srid=2276, db_column='mycenter')
border1 = models.PolygonField()
border2 = models.PolygonField(srid=2276)
objects = models.GeoManager()
def __unicode__(self): return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(models.Model):
name = models.CharField(max_length=100)
objects = models.GeoManager()
class Article(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, unique=True)
objects = models.GeoManager()
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, related_name='books', null=True)
objects = models.GeoManager()
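# Illustrative queries these models are meant to exercise (a sketch, not part
# of the original module):
#
#     City.objects.filter(location__point__contained=some_polygon)
#     Book.objects.filter(author__name='Trevor')  # GeoManager on a non-geo model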
|
JingZhou0404/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/QueueStatusServer/config/logging.py
|
122
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Specified in seconds; 60 * 60 keeps one hour of queue log.
queue_log_duration = 60 * 60
|
GoogleCloudPlatform/mlops-on-gcp
|
refs/heads/master
|
workshops/guided-projects/solution_2/models/keras/model_test.py
|
1
|
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from models.keras import model
class ModelTest(tf.test.TestCase):
def testBuildKerasModel(self):
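    # Placeholder left by the guided project: a real test would build the
    # Keras model via the `model` module imported above and assert on its
    # structure (e.g. that it compiles and exposes the expected inputs).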
pass
if __name__ == '__main__':
tf.test.main()
|
saurabh6790/omnit-app
|
refs/heads/master
|
patches/may_2013/repost_stock_for_no_posting_time.py
|
30
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
def execute():
import webnotes
from stock.stock_ledger import update_entries_after
res = webnotes.conn.sql("""select distinct item_code, warehouse from `tabStock Ledger Entry`
where posting_time = '00:00'""")
i=0
for d in res:
try:
update_entries_after({ "item_code": d[0], "warehouse": d[1] })
except:
pass
i += 1
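        # Assumed intent of the batching below: commit every 20
        # item/warehouse pairs so the repost does not run in one huge,
        # long-running transaction.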
if i%20 == 0:
webnotes.conn.sql("commit")
webnotes.conn.sql("start transaction")
|