repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
philcleveland/grpc | refs/heads/master | tools/buildgen/build-cleaner.py | 44 | #!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# produces cleaner build.yaml files
import collections
import os
import sys
import yaml
# When TEST=true in the environment, verify that files are already clean
# (assert) instead of rewriting them in place.
TEST = (os.environ.get('TEST', 'false') == 'true')
# Canonical ordering for the top-level keys of a build.yaml file.
_TOP_LEVEL_KEYS = ['settings', 'proto_deps', 'filegroups', 'libs', 'targets', 'vspackages']
# Canonical ordering for the keys within each element (filegroup/lib/target).
_ELEM_KEYS = [
    'name',
    'gtest',
    'cpu_cost',
    'flaky',
    'build',
    'run',
    'language',
    'public_headers',
    'headers',
    'src',
    'deps']
def repr_ordered_dict(dumper, odict):
    """Represent an OrderedDict as a plain YAML map, preserving key order."""
    return dumper.represent_mapping(u'tag:yaml.org,2002:map', odict.items())
# Teach PyYAML how to dump OrderedDict so our canonical key order survives
# serialization.
yaml.add_representer(collections.OrderedDict, repr_ordered_dict)
def rebuild_as_ordered_dict(indict, special_keys):
    """Return an OrderedDict copy of *indict* in canonical key order.

    Keys containing '#' (comment entries) come first, sorted; then the
    *special_keys* that are present, in the given order; then every
    remaining key, sorted alphabetically.
    """
    ordered = collections.OrderedDict()
    sorted_keys = sorted(indict)
    # Comment entries (keys containing '#') lead, in sorted order.
    for k in sorted_keys:
        if '#' in k:
            ordered[k] = indict[k]
    # Well-known keys follow, in their canonical order.
    ordered.update((k, indict[k]) for k in special_keys if k in indict)
    # Everything else, sorted, skipping what was already emitted.
    for k in sorted_keys:
        if k not in special_keys and '#' not in k:
            ordered[k] = indict[k]
    return ordered
def clean_elem(indict):
    """Normalize one build.yaml element in place and return it reordered.

    Within each file-list field, .proto files are kept first (in their
    original order) and the remaining entries are de-duplicated and
    sorted; then the element's keys are put into canonical order.
    """
    for field in ('public_headers', 'headers', 'src'):
        if field in indict:
            files = indict[field]
            protos = [f for f in files if os.path.splitext(f)[1] == '.proto']
            non_protos = sorted(set(files) - set(protos))
            indict[field] = protos + non_protos
    return rebuild_as_ordered_dict(indict, _ELEM_KEYS)
# Process each build.yaml file named on the command line: load it, put its
# keys and element lists into canonical order, and re-dump it.
for filename in sys.argv[1:]:
    with open(filename) as f:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; these files are repo-local, so it is tolerated here.
        js = yaml.load(f)
    js = rebuild_as_ordered_dict(js, _TOP_LEVEL_KEYS)
    for grp in ['filegroups', 'libs', 'targets']:
        if grp not in js: continue
        # Sort elements by (language, name); '_' sorts language-less entries first.
        js[grp] = sorted([clean_elem(x) for x in js[grp]],
                         key=lambda x: (x.get('language', '_'), x['name']))
    output = yaml.dump(js, indent=2, width=80, default_flow_style=False)
    # massage out trailing whitespace
    lines = []
    for line in output.splitlines():
        lines.append(line.rstrip() + '\n')
    output = ''.join(lines)
    if TEST:
        # TEST mode: the file must already be in canonical form.
        with open(filename) as f:
            assert f.read() == output
    else:
        with open(filename, 'w') as f:
            f.write(output)
|
voutilad/courtlistener | refs/heads/master | cl/search/appellate_review/assign_appellate_review.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 7 16:35:29 2016
@author: elliott
"""
import pandas as pd
from cl.search.models import AppellateReview
# Load the reviewer-prepared spreadsheet (first sheet) and create one
# AppellateReview record per row, mapping empty date cells to None.
df = pd.read_excel('filename', 0)
for idx, record in df.iterrows():
    date_start = record.date_start if not pd.isnull(record.date_start) else None
    date_end = record.date_end if not pd.isnull(record.date_end) else None
    review = AppellateReview(
        upper_court=record.upper_court,
        lower_court=record.lower_court,
        date_start=date_start,
        date_end=date_end)
    review.save()
|
quoclieu/codebrew17-starving | refs/heads/master | env/lib/python3.5/site-packages/setuptools/dist.py | 79 | __all__ = ['Distribution']
import re
import os
import sys
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.extern import six
from setuptools.extern.six.moves import map
from pkg_resources.extern import packaging
from setuptools.depends import Require
from setuptools import windows_support
import pkg_resources
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
# Replace our reference with the genuine (unpatched) distutils Distribution.
_Distribution = _get_unpatched(_Distribution)

def _patch_distribution_metadata_write_pkg_info():
    """
    Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
    encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
    correct this undesirable behavior.
    """
    environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
    if not environment_local:
        return

    # from Python 3.4
    def write_pkg_info(self, base_dir):
        """Write the PKG-INFO file into the release tree.
        """
        with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
                  encoding='UTF-8') as pkg_info:
            self.write_pkg_file(pkg_info)

    distutils.dist.DistributionMetadata.write_pkg_info = write_pkg_info

# Apply the monkey-patch (a no-op on unaffected interpreters).
_patch_distribution_metadata_write_pkg_info()
# Tuple of types accepted wherever a list-like setup() argument is expected.
sequence = tuple, list
def check_importable(dist, attr, value):
    """Verify that *value* parses as an importable 'module:attrs' string."""
    ok = False
    try:
        parsed = pkg_resources.EntryPoint.parse('x=' + value)
        # An importable reference must not carry extras.
        ok = not parsed.extras
    except (TypeError, ValueError, AttributeError):
        ok = False
    if not ok:
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    ok = True
    try:
        # A genuine list/tuple of strings joins into a *different* object;
        # a plain string joins back into an equal string, which is invalid.
        joined = ''.join(value)
        ok = joined != value
    except (TypeError, ValueError, AttributeError):
        ok = False
    if not ok:
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    # Must be a list of strings first.
    assert_string_list(dist,attr,value)
    for nsp in value:
        # Each declared namespace package must actually have contents here.
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        if '.' in nsp:
            # A nested namespace package needs its parent declared too;
            # warn (don't fail) when it isn't.
            parent = '.'.join(nsp.split('.')[:-1])
            if parent not in value:
                distutils.log.warn(
                    "WARNING: %r is declared as a package namespace, but %r"
                    " is not: please correct this in setup.py", nsp, parent
                )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for k,v in value.items():
            # An extra name may carry an environment marker after ':'.
            if ':' in k:
                k,m = k.split(':',1)
                if pkg_resources.invalid_marker(m):
                    raise DistutilsSetupError("Invalid environment marker: "+m)
            # Force the (lazy) requirement parser to validate every entry.
            list(pkg_resources.parse_requirements(v))
    except (TypeError,ValueError,AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # bool(value) == value holds only for genuine booleans and the ints 0/1.
    is_boolish = bool(value) == value
    if not is_boolish:
        message = "{attr!r} must be a boolean value (got {value!r})".format(
            attr=attr, value=value)
        raise DistutilsSetupError(message)
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # parse_requirements is a generator; exhaust it so every entry is
        # actually validated.
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError) as error:
        tmpl = (
            "{attr!r} must be a string or list of strings "
            "containing valid project/version requirement specifiers; {error}"
        )
        raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as e:
        # Re-raise under the distutils error type that setup() reports.
        raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
    """Verify that the test_suite option is a string."""
    if isinstance(value, six.string_types):
        return
    raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    valid = isinstance(value, dict)
    if valid:
        for pkg, patterns in value.items():
            # Keys must be package-name strings...
            if not isinstance(pkg, str):
                valid = False
                break
            # ...and values must at least be iterable (lists of globs).
            try:
                iter(patterns)
            except TypeError:
                valid = False
                break
    if not valid:
        raise DistutilsSetupError(
            attr + " must be a dictionary mapping package names to lists of "
            "wildcard patterns"
        )
def check_packages(dist, attr, value):
    """Warn about any package name that is not a dotted identifier."""
    for pkgname in value:
        looks_valid = re.match(r'\w+(\.\w+)*', pkgname) is not None
        if not looks_valid:
            distutils.log.warn(
                "WARNING: %r not a valid package name; please use only "
                ".-separated package names in setup.py", pkgname
            )
class Distribution(_Distribution):
    """Distribution with support for features, tests, and package data

    This is an enhanced version of 'distutils.dist.Distribution' that
    effectively adds the following new optional keyword arguments to 'setup()':

     'install_requires' -- a string or sequence of strings specifying project
        versions that the distribution requires when installed, in the format
        used by 'pkg_resources.require()'.  They will be installed
        automatically when the package is installed.  If you wish to use
        packages that are not available in PyPI, or want to give your users an
        alternate download location, you can add a 'find_links' option to the
        '[easy_install]' section of your project's 'setup.cfg' file, and then
        setuptools will scan the listed web pages for links that satisfy the
        requirements.

     'extras_require' -- a dictionary mapping names of optional "extras" to the
        additional requirement(s) that using those extras incurs. For example,
        this::

            extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])

        indicates that the distribution can optionally provide an extra
        capability called "reST", but it can only be used if docutils and
        reSTedit are installed.  If the user installs your package using
        EasyInstall and requests one of your extras, the corresponding
        additional requirements will be installed if needed.

     'features' **deprecated** -- a dictionary mapping option names to
        'setuptools.Feature' objects.  Features are a portion of the
        distribution that can be included or excluded based on user options,
        inter-feature dependencies, and availability on the current system.
        Excluded features are omitted from all setup commands, including
        source and binary distributions, so you can create multiple
        distributions from the same source tree.  Feature names should be
        valid Python identifiers, except that they may contain the '-'
        (minus) sign.  Features can be included or excluded via the command
        line options '--with-X' and '--without-X', where 'X' is the name of
        the feature.  Whether a feature is included by default, and whether
        you are allowed to control this from the command line, is determined
        by the Feature object.  See the 'Feature' class for more information.

     'test_suite' -- the name of a test suite to run for the 'test' command.
        If the user runs 'python setup.py test', the package will be installed,
        and the named test suite will be run.  The format is the same as
        would be used on a 'unittest.py' command line.  That is, it is the
        dotted name of an object to import and call to generate a test suite.

     'package_data' -- a dictionary mapping package names to lists of filenames
        or globs to use to find data files contained in the named packages.
        If the dictionary has filenames or globs listed under '""' (the empty
        string), those names will be searched for in every package, in addition
        to any names for the specific package.  Data files found using these
        names/globs will be installed along with the package, in the same
        location as the package.  Note that globs are allowed to reference
        the contents of non-package subdirectories, as long as you use '/' as
        a path separator.  (Globs are automatically converted to
        platform-specific paths at runtime.)

    In addition to these new keywords, this class also has several new methods
    for manipulating the distribution's contents.  For example, the 'include()'
    and 'exclude()' methods can be thought of as in-place add and subtract
    commands that add or remove packages, modules, extensions, and so on from
    the distribution.  They are used by the feature subsystem to configure the
    distribution for the included and excluded features.
    """

    # Set by patch_missing_pkg_info() when an installed dist's version
    # metadata has been patched to match this in-progress build.
    _patched_dist = None

    def patch_missing_pkg_info(self, attrs):
        # Fake up a replacement for the data that would normally come from
        # PKG-INFO, but which might not yet be built if this is a fresh
        # checkout.
        #
        if not attrs or 'name' not in attrs or 'version' not in attrs:
            return
        key = pkg_resources.safe_name(str(attrs['name'])).lower()
        dist = pkg_resources.working_set.by_key.get(key)
        if dist is not None and not dist.has_metadata('PKG-INFO'):
            dist._version = pkg_resources.safe_version(str(attrs['version']))
            self._patched_dist = dist

    def __init__(self, attrs=None):
        """Pop setuptools-specific keywords out of *attrs*, then delegate the
        rest to the stock distutils Distribution initializer."""
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        _attrs_dict = attrs or {}
        if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
            Feature.warn_deprecated()
        self.require_features = []
        self.features = {}
        self.dist_files = []
        self.src_root = attrs and attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        # Make sure we have any eggs needed to interpret 'attrs'
        if attrs is not None:
            self.dependency_links = attrs.pop('dependency_links', [])
            assert_string_list(self,'dependency_links',self.dependency_links)
        if attrs and 'setup_requires' in attrs:
            self.fetch_build_eggs(attrs['setup_requires'])
        # Pre-seed attributes for every registered setup() keyword so that
        # distutils doesn't complain about unknown options.
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            vars(self).setdefault(ep.name, None)
        _Distribution.__init__(self,attrs)
        if isinstance(self.metadata.version, numbers.Number):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)
        if self.metadata.version is not None:
            try:
                ver = packaging.version.Version(self.metadata.version)
                normalized_version = str(ver)
                if self.metadata.version != normalized_version:
                    warnings.warn(
                        "Normalizing '%s' to '%s'" % (
                            self.metadata.version,
                            normalized_version,
                        )
                    )
                    self.metadata.version = normalized_version
            except (packaging.version.InvalidVersion, TypeError):
                warnings.warn(
                    "The version specified (%r) is an invalid version, this "
                    "may not work as expected with newer versions of "
                    "setuptools, pip, and PyPI. Please see PEP 440 for more "
                    "details." % self.metadata.version
                )

    def parse_command_line(self):
        """Process features after parsing command line options"""
        result = _Distribution.parse_command_line(self)
        if self.features:
            self._finalize_features()
        return result

    def _feature_attrname(self,name):
        """Convert feature name to corresponding option attribute name"""
        return 'with_'+name.replace('-','_')

    def fetch_build_eggs(self, requires):
        """Resolve pre-setup requirements"""
        resolved_dists = pkg_resources.working_set.resolve(
            pkg_resources.parse_requirements(requires),
            installer=self.fetch_build_egg,
            replace_conflicting=True,
        )
        for dist in resolved_dists:
            pkg_resources.working_set.add(dist, replace=True)

    def finalize_options(self):
        """Run distutils finalization, then apply feature options and run
        every registered 'distutils.setup_keywords' validator."""
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self,ep.name,None)
            if value is not None:
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
        else:
            self.convert_2to3_doctests = []

    def get_egg_cache_dir(self):
        """Return the '.eggs' cache directory, creating it (with an
        explanatory README) on first use."""
        egg_cache_dir = os.path.join(os.curdir, '.eggs')
        if not os.path.exists(egg_cache_dir):
            os.mkdir(egg_cache_dir)
            windows_support.hide_file(egg_cache_dir)
            readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
            with open(readme_txt_filename, 'w') as f:
                f.write('This directory contains eggs that were downloaded '
                        'by setuptools to build, test, and run plug-ins.\n\n')
                f.write('This directory caches those eggs to prevent '
                        'repeated downloads.\n\n')
                f.write('However, it is safe to delete this directory.\n\n')

        return egg_cache_dir

    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        try:
            # Reuse the lazily-created easy_install command if we have one.
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            from setuptools.command.easy_install import easy_install
            dist = self.__class__({'script_args':['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts):
                if key not in keep:
                    del opts[key]  # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            install_dir = self.get_egg_cache_dir()
            cmd = easy_install(
                dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)

    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []
        no = self.negative_opt.copy()

        for name,feature in self.features.items():
            self._set_feature(name,None)
            feature.validate(self)

            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef=''
                if not feature.include_by_default():
                    excdef, incdef = incdef, excdef

                go.append(('with-'+name, None, 'include '+descr+incdef))
                go.append(('without-'+name, None, 'exclude '+descr+excdef))
                no['without-'+name] = 'with-'+name

        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no

    def _finalize_features(self):
        """Add/remove features and resolve dependencies between them"""

        # First, flag all the enabled items (and thus their dependencies)
        for name,feature in self.features.items():
            enabled = self.feature_is_included(name)
            if enabled or (enabled is None and feature.include_by_default()):
                feature.include_in(self)
                self._set_feature(name,1)

        # Then disable the rest, so that off-by-default features don't
        # get flagged as errors when they're required by an enabled feature
        for name,feature in self.features.items():
            if not self.feature_is_included(name):
                feature.exclude_from(self)
                self._set_feature(name,0)

    def get_command_class(self, command):
        """Pluggable version of get_command_class()"""
        if command in self.cmdclass:
            return self.cmdclass[command]

        # Look for a command plugin registered via entry points; fall back
        # to the stock distutils lookup when none is found.
        for ep in pkg_resources.iter_entry_points('distutils.commands',command):
            ep.require(installer=self.fetch_build_egg)
            self.cmdclass[command] = cmdclass = ep.load()
            return cmdclass
        else:
            return _Distribution.get_command_class(self, command)

    def print_commands(self):
        """Register all entry-point commands before printing the list."""
        for ep in pkg_resources.iter_entry_points('distutils.commands'):
            if ep.name not in self.cmdclass:
                # don't require extras as the commands won't be invoked
                cmdclass = ep.resolve()
                self.cmdclass[ep.name] = cmdclass
        return _Distribution.print_commands(self)

    def get_command_list(self):
        """Register all entry-point commands before listing commands."""
        for ep in pkg_resources.iter_entry_points('distutils.commands'):
            if ep.name not in self.cmdclass:
                # don't require extras as the commands won't be invoked
                cmdclass = ep.resolve()
                self.cmdclass[ep.name] = cmdclass
        return _Distribution.get_command_list(self)

    def _set_feature(self,name,status):
        """Set feature's inclusion status"""
        setattr(self,self._feature_attrname(name),status)

    def feature_is_included(self,name):
        """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
        return getattr(self,self._feature_attrname(name))

    def include_feature(self,name):
        """Request inclusion of feature named 'name'"""
        if self.feature_is_included(name)==0:
            descr = self.features[name].description
            raise DistutilsOptionError(
                descr + " is required, but was excluded or is not available"
            )
        self.features[name].include_in(self)
        self._set_feature(name,1)

    def include(self,**attrs):
        """Add items to distribution that are named in keyword arguments

        For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
        the distribution's 'py_modules' attribute, if it was not already
        there.

        Currently, this method only supports inclusion for attributes that are
        lists or tuples.  If you need to add support for adding to other
        attributes in this or a subclass, you can add an '_include_X' method,
        where 'X' is the name of the attribute.  The method will be called with
        the value passed to 'include()'.  So, 'dist.include(foo={"bar":"baz"})'
        will try to call 'dist._include_foo({"bar":"baz"})', which can then
        handle whatever special inclusion logic is needed.
        """
        for k,v in attrs.items():
            include = getattr(self, '_include_'+k, None)
            if include:
                include(v)
            else:
                self._include_misc(k,v)

    def exclude_package(self,package):
        """Remove packages, modules, and extensions in named package"""
        pfx = package+'.'
        if self.packages:
            self.packages = [
                p for p in self.packages
                if p != package and not p.startswith(pfx)
            ]

        if self.py_modules:
            self.py_modules = [
                p for p in self.py_modules
                if p != package and not p.startswith(pfx)
            ]

        if self.ext_modules:
            self.ext_modules = [
                p for p in self.ext_modules
                if p.name != package and not p.name.startswith(pfx)
            ]

    def has_contents_for(self,package):
        """Return true if 'exclude_package(package)' would do something"""
        pfx = package+'.'
        for p in self.iter_distribution_names():
            if p==package or p.startswith(pfx):
                return True

    def _exclude_misc(self,name,value):
        """Handle 'exclude()' for list/tuple attrs without a special handler"""
        if not isinstance(value,sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list or tuple (%r)" % (name, value)
            )
        try:
            old = getattr(self,name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is not None and not isinstance(old,sequence):
            raise DistutilsSetupError(
                name+": this setting cannot be changed via include/exclude"
            )
        elif old:
            setattr(self,name,[item for item in old if item not in value])

    def _include_misc(self,name,value):
        """Handle 'include()' for list/tuple attrs without a special handler"""

        if not isinstance(value,sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list (%r)" % (name, value)
            )
        try:
            old = getattr(self,name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is None:
            setattr(self,name,value)
        elif not isinstance(old,sequence):
            raise DistutilsSetupError(
                name+": this setting cannot be changed via include/exclude"
            )
        else:
            # Append only the items not already present.
            setattr(self,name,old+[item for item in value if item not in old])

    def exclude(self,**attrs):
        """Remove items from distribution that are named in keyword arguments

        For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
        the distribution's 'py_modules' attribute.  Excluding packages uses
        the 'exclude_package()' method, so all of the package's contained
        packages, modules, and extensions are also excluded.

        Currently, this method only supports exclusion from attributes that are
        lists or tuples.  If you need to add support for excluding from other
        attributes in this or a subclass, you can add an '_exclude_X' method,
        where 'X' is the name of the attribute.  The method will be called with
        the value passed to 'exclude()'.  So, 'dist.exclude(foo={"bar":"baz"})'
        will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
        handle whatever special exclusion logic is needed.
        """
        for k,v in attrs.items():
            exclude = getattr(self, '_exclude_'+k, None)
            if exclude:
                exclude(v)
            else:
                self._exclude_misc(k,v)

    def _exclude_packages(self,packages):
        # Validate, then delegate to exclude_package() for each name.
        if not isinstance(packages,sequence):
            raise DistutilsSetupError(
                "packages: setting must be a list or tuple (%r)" % (packages,)
            )
        list(map(self.exclude_package, packages))

    def _parse_command_opts(self, parser, args):
        # Remove --with-X/--without-X options when processing command args
        self.global_options = self.__class__.global_options
        self.negative_opt = self.__class__.negative_opt

        # First, expand any aliases
        command = args[0]
        aliases = self.get_option_dict('aliases')
        while command in aliases:
            src,alias = aliases[command]
            del aliases[command]  # ensure each alias can expand only once!
            import shlex
            args[:1] = shlex.split(alias,True)
            command = args[0]

        nargs = _Distribution._parse_command_opts(self, parser, args)

        # Handle commands that want to consume all remaining arguments
        cmd_class = self.get_command_class(command)
        if getattr(cmd_class,'command_consumes_arguments',None):
            self.get_option_dict(command)['args'] = ("command line", nargs)
            if nargs is not None:
                return []

        return nargs

    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options

        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores.  If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.

        Note that options provided by config files are intentionally excluded.
        """
        d = {}

        for cmd,opts in self.command_options.items():

            for opt,(src,val) in opts.items():

                if src != "command line":
                    continue

                opt = opt.replace('_','-')

                if val==0:
                    # A false value means a negative option was used; report
                    # it under the corresponding positive option's name.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj,'negative_opt',{}))
                    for neg,pos in neg_opt.items():
                        if pos==opt:
                            opt=neg
                            val=None
                            break
                    else:
                        raise AssertionError("Shouldn't be able to get here")

                elif val==1:
                    val = None

                d.setdefault(cmd,{})[opt] = val

        return d

    def iter_distribution_names(self):
        """Yield all packages, modules, and extension names in distribution"""

        for pkg in self.packages or ():
            yield pkg

        for module in self.py_modules or ():
            yield module

        for ext in self.ext_modules or ():
            if isinstance(ext,tuple):
                name, buildinfo = ext
            else:
                name = ext.name
            if name.endswith('module'):
                name = name[:-6]
            yield name

    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        import sys

        if six.PY2 or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)

        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)

        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)

        # Print metadata in UTF-8 no matter the platform
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering

        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Always restore the original stdout configuration.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils, so that any code asking distutils
# for its Distribution class gets the enhanced one instead.
for module in distutils.dist, distutils.core, distutils.cmd:
    module.Distribution = Distribution
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
    a future version.

    A subset of the distribution that can be excluded if unneeded/wanted

    Features are created using these keyword arguments:

      'description' -- a short, human readable description of the feature, to
         be used in error messages, and option help messages.

      'standard' -- if true, the feature is included by default if it is
         available on the current system.  Otherwise, the feature is only
         included if requested via a command line '--with-X' option, or if
         another included feature requires it.  The default setting is 'False'.

      'available' -- if true, the feature is available for installation on the
         current system.  The default setting is 'True'.

      'optional' -- if true, the feature's inclusion can be controlled from the
         command line, using the '--with-X' or '--without-X' options.  If
         false, the feature's inclusion status is determined automatically,
         based on 'availabile', 'standard', and whether any other feature
         requires it.  The default setting is 'True'.

      'require_features' -- a string or sequence of strings naming features
         that should also be included if this feature is included.  Defaults to
         empty list.  May also contain 'Require' objects that should be
         added/removed from the distribution.

      'remove' -- a string or list of strings naming packages to be removed
         from the distribution if this feature is *not* included.  If the
         feature *is* included, this argument is ignored.  This argument exists
         to support removing features that "crosscut" a distribution, such as
         defining a 'tests' feature that removes all the 'tests' subpackages
         provided by other features.  The default for this argument is an empty
         list.  (Note: the named package(s) or modules must exist in the base
         distribution when the 'setup()' function is initially called.)

      other keywords -- any other keyword arguments are saved, and passed to
         the distribution's 'include()' and 'exclude()' methods when the
         feature is included or excluded, respectively.  So, for example, you
         could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
         added or removed from the distribution as appropriate.

    A feature must include at least one 'requires', 'remove', or other
    keyword argument.  Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded.  See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """

    @staticmethod
    def warn_deprecated():
        warnings.warn(
            "Features are deprecated and will be removed in a future "
            "version. See https://github.com/pypa/setuptools/issues/65.",
            DeprecationWarning,
            stacklevel=3,
        )

    def __init__(self, description, standard=False, available=True,
                 optional=True, require_features=(), remove=(), **extras):
        """Record the feature's options; see the class docstring for their
        meanings."""
        self.warn_deprecated()

        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # Accept a single string/Require as shorthand for a one-element tuple.
        if isinstance(require_features,(str,Require)):
            require_features = require_features,

        self.require_features = [
            r for r in require_features if isinstance(r,str)
        ]
        er = [r for r in require_features if not isinstance(r,str)]
        if er: extras['require_features'] = er

        if isinstance(remove,str):
            remove = remove,
        self.remove = remove
        self.extras = extras

        if not remove and not require_features and not extras:
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or at least one"
                " of 'packages', 'py_modules', etc."
            )

    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard

    def include_in(self,dist):
        """Ensure feature and its requirements are included in distribution

        You may override this in a subclass to perform additional operations on
        the distribution.  Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            raise DistutilsPlatformError(
                self.description+" is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)

        for f in self.require_features:
            dist.include_feature(f)

    def exclude_from(self,dist):
        """Ensure feature is excluded from distribution

        You may override this in a subclass to perform additional operations on
        the distribution.  This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)

        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)

    def validate(self,dist):
        """Verify that feature makes sense in context of distribution

        This method is called by the distribution just before it parses its
        command line.  It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called.  You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
|
franktakes/teexgraph | refs/heads/master | examples/python_example.py | 1 | from copy import deepcopy
from pyteexgraph import Graph, Scope

# Load a directed edge list and report a few basic statistics.
graph = Graph()
graph.loadDirected("/z/edge_list")
print("Loaded", graph.isLoaded())
print("Average Degree", graph.averageDegree(Scope.FULL))
# NOTE(review): the message below is missing its closing parenthesis in the
# original; kept byte-identical here because it is runtime output.
print("Diameter BD (fails because graph is directed", graph.diameterBD())

# Deep-copy before mutating so the original directed graph stays intact.
undirected_graph = deepcopy(graph)
undirected_graph.makeUndirected()
undirected_graph.computeWCC()
print("Diameter BD", undirected_graph.diameterBD())
Lujeni/ansible | refs/heads/devel | lib/ansible/modules/network/ftd/ftd_install.py | 27 | #!/usr/bin/python
# Copyright (c) 2019 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ftd_install
short_description: Installs FTD pkg image on the firewall
description:
- Provisioning module for FTD devices that installs ROMMON image (if needed) and
FTD pkg image on the firewall.
- Can be used with `httpapi` and `local` connection types. The `httpapi` is preferred,
the `local` connection should be used only when the device cannot be accessed via
REST API.
version_added: "2.8"
requirements: [ "python >= 3.5", "firepower-kickstart" ]
notes:
- Requires `firepower-kickstart` library that should be installed separately and requires Python >= 3.5.
- On localhost, Ansible can be still run with Python >= 2.7, but the interpreter for this particular module must be
Python >= 3.5.
    - Python interpreter for the module can be overwritten in `ansible_python_interpreter` variable.
author: "Cisco Systems, Inc. (@annikulin)"
options:
device_hostname:
description:
- Hostname of the device as appears in the prompt (e.g., 'firepower-5516').
required: true
type: str
device_username:
description:
- Username to login on the device.
- Defaulted to 'admin' if not specified.
required: false
type: str
default: admin
device_password:
description:
- Password to login on the device.
required: true
type: str
device_sudo_password:
description:
- Root password for the device. If not specified, `device_password` is used.
required: false
type: str
device_new_password:
description:
- New device password to set after image installation.
- If not specified, current password from `device_password` property is reused.
- Not applicable for ASA5500-X series devices.
required: false
type: str
device_ip:
description:
- Device IP address of management interface.
- If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_gateway:
description:
- Device gateway of management interface.
- If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_netmask:
description:
- Device netmask of management interface.
- If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_model:
description:
- Platform model of the device (e.g., 'Cisco ASA5506-X Threat Defense').
- If not specified and connection is 'httpapi`, the module tries to fetch the device model via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
choices:
- Cisco ASA5506-X Threat Defense
- Cisco ASA5508-X Threat Defense
- Cisco ASA5516-X Threat Defense
- Cisco Firepower 2110 Threat Defense
- Cisco Firepower 2120 Threat Defense
- Cisco Firepower 2130 Threat Defense
- Cisco Firepower 2140 Threat Defense
dns_server:
description:
- DNS IP address of management interface.
- If not specified and connection is 'httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
console_ip:
description:
- IP address of a terminal server.
- Used to set up an SSH connection with device's console port through the terminal server.
required: true
type: str
console_port:
description:
- Device's port on a terminal server.
required: true
type: str
console_username:
description:
- Username to login on a terminal server.
required: true
type: str
console_password:
description:
- Password to login on a terminal server.
required: true
type: str
rommon_file_location:
description:
- Path to the boot (ROMMON) image on TFTP server.
- Only TFTP is supported.
required: true
type: str
image_file_location:
description:
- Path to the FTD pkg image on the server to be downloaded.
- FTP, SCP, SFTP, TFTP, or HTTP protocols are usually supported, but may depend on the device model.
required: true
type: str
image_version:
description:
- Version of FTD image to be installed.
- Helps to compare target and current FTD versions to prevent unnecessary reinstalls.
required: true
type: str
force_install:
description:
- Forces the FTD image to be installed even when the same version is already installed on the firewall.
- By default, the module stops execution when the target version is installed in the device.
required: false
type: bool
default: false
search_domains:
description:
- Search domains delimited by comma.
- Defaulted to 'cisco.com' if not specified.
required: false
type: str
default: cisco.com
"""
EXAMPLES = """
- name: Install image v6.3.0 on FTD 5516
ftd_install:
device_hostname: firepower
device_password: pass
device_ip: 192.168.0.1
device_netmask: 255.255.255.0
device_gateway: 192.168.0.254
dns_server: 8.8.8.8
console_ip: 10.89.0.0
console_port: 2004
console_username: console_user
console_password: console_pass
rommon_file_location: 'tftp://10.89.0.11/installers/ftd-boot-9.10.1.3.lfbff'
image_file_location: 'https://10.89.0.11/installers/ftd-6.3.0-83.pkg'
image_version: 6.3.0-83
"""
RETURN = """
msg:
description: The message saying whether the image was installed or explaining why the installation failed.
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.ftd.configuration import BaseConfigurationResource, ParamName
from ansible.module_utils.network.ftd.device import assert_kick_is_installed, FtdPlatformFactory, FtdModel
from ansible.module_utils.network.ftd.operation import FtdOperations, get_system_info
REQUIRED_PARAMS_FOR_LOCAL_CONNECTION = ['device_ip', 'device_netmask', 'device_gateway', 'device_model', 'dns_server']
def main():
    """Module entry point: parse arguments, resolve device parameters
    (locally or via the device's REST API) and run the FTD installation."""
    fields = dict(
        device_hostname=dict(type='str', required=True),
        device_username=dict(type='str', required=False, default='admin'),
        device_password=dict(type='str', required=True, no_log=True),
        device_sudo_password=dict(type='str', required=False, no_log=True),
        device_new_password=dict(type='str', required=False, no_log=True),
        device_ip=dict(type='str', required=False),
        device_netmask=dict(type='str', required=False),
        device_gateway=dict(type='str', required=False),
        device_model=dict(type='str', required=False, choices=FtdModel.supported_models()),
        dns_server=dict(type='str', required=False),
        search_domains=dict(type='str', required=False, default='cisco.com'),
        console_ip=dict(type='str', required=True),
        console_port=dict(type='str', required=True),
        console_username=dict(type='str', required=True),
        console_password=dict(type='str', required=True, no_log=True),
        rommon_file_location=dict(type='str', required=True),
        image_file_location=dict(type='str', required=True),
        image_version=dict(type='str', required=True),
        force_install=dict(type='bool', required=False, default=False)
    )
    module = AnsibleModule(argument_spec=fields)
    # The installation relies on the firepower-kickstart library; fail fast
    # with a clear message if it is missing.
    assert_kick_is_installed(module)

    # No socket path means the 'local' connection plugin is in use, so the
    # device cannot be queried over REST API and all network parameters must
    # have been supplied explicitly.
    use_local_connection = module._socket_path is None
    if use_local_connection:
        check_required_params_for_local_connection(module, module.params)
        platform_model = module.params['device_model']
        check_that_model_is_supported(module, platform_model)
    else:
        connection = Connection(module._socket_path)
        resource = BaseConfigurationResource(connection, module.check_mode)
        system_info = get_system_info(resource)

        # Fall back to the model reported by the device when not given explicitly.
        platform_model = module.params['device_model'] or system_info['platformModel']
        check_that_model_is_supported(module, platform_model)
        # May exit early (changed=False) when the target version is already installed.
        check_that_update_is_needed(module, system_info)
        # Backfill missing management/DNS parameters from the running device.
        check_management_and_dns_params(resource, module.params)

    ftd_platform = FtdPlatformFactory.create(platform_model, module.params)
    ftd_platform.install_ftd_image(module.params)

    module.exit_json(changed=True,
                     msg='Successfully installed FTD image %s on the firewall device.' % module.params["image_version"])
def check_required_params_for_local_connection(module, params):
    """Fail when 'local' connection is used without the parameters that
    cannot be auto-discovered over REST API."""
    missing_params = sorted(
        name for name, value in iteritems(params)
        if name in REQUIRED_PARAMS_FOR_LOCAL_CONNECTION and value is None
    )
    if missing_params:
        message = ("The following parameters are mandatory when the module is "
                   "used with 'local' connection: %s." % ', '.join(missing_params))
        module.fail_json(msg=message)
def check_that_model_is_supported(module, platform_model):
    """Abort the module run for hardware this module cannot provision."""
    supported_models = FtdModel.supported_models()
    if platform_model not in supported_models:
        module.fail_json(msg="Platform model '%s' is not supported by this module." % platform_model)
def check_that_update_is_needed(module, system_info):
    """Exit early (changed=False) when the firewall already runs the target
    version, unless force_install overrides the check."""
    target_ftd_version = module.params["image_version"]
    already_installed = target_ftd_version == system_info['softwareVersion']
    if already_installed and not module.params["force_install"]:
        module.exit_json(
            changed=False,
            msg="FTD already has %s version of software installed." % target_ftd_version)
def check_management_and_dns_params(resource, params):
    """Backfill missing management-interface and DNS parameters from the
    running device via REST API; values already in `params` win."""
    network_keys = ('device_ip', 'device_netmask', 'device_gateway')
    if not all(params[key] for key in network_keys):
        # Query the currently configured management address once.
        management_ip = resource.execute_operation(FtdOperations.GET_MANAGEMENT_IP_LIST, {})['items'][0]
        discovered = {
            'device_ip': management_ip['ipv4Address'],
            'device_netmask': management_ip['ipv4NetMask'],
            'device_gateway': management_ip['ipv4Gateway'],
        }
        for key in network_keys:
            params[key] = params[key] or discovered[key]
    if not params['dns_server']:
        # DNS servers hang off the device's DNS server group object.
        dns_setting = resource.execute_operation(FtdOperations.GET_DNS_SETTING_LIST, {})['items'][0]
        group_id = dns_setting['dnsServerGroup']['id']
        dns_server_group = resource.execute_operation(FtdOperations.GET_DNS_SERVER_GROUP,
                                                      {ParamName.PATH_PARAMS: {'objId': group_id}})
        params['dns_server'] = dns_server_group['dnsServers'][0]['ipAddress']
if __name__ == '__main__':
main()
|
Pablo126/SSBW | refs/heads/master | Entrega1/lib/python3.5/site-packages/django/contrib/sessions/backends/base.py | 37 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
    """
    Used internally as a consistent exception type to catch from save (see the
    docstring for SessionBase.save() for details).

    Raised by backend save() implementations when must_create=True but a
    session with the same key already exists.
    """
    pass
class UpdateError(Exception):
    """
    Occurs if Django tries to update a session that was deleted.

    Raised by backend save() implementations when must_create=False but no
    existing session object can be updated.
    """
    pass
class SessionBase(object):
    """
    Base class for all Session classes.

    Provides dict-like access to lazily loaded session data plus expiry
    handling. Storage backends subclass this and implement exists(),
    create(), save(), delete(), load() and (optionally) clear_expired().
    """
    TEST_COOKIE_NAME = 'testcookie'
    TEST_COOKIE_VALUE = 'worked'
    # Sentinel distinguishing "no default supplied" from an explicit None
    # in pop().
    __not_given = object()

    def __init__(self, session_key=None):
        self._session_key = session_key
        self.accessed = False   # True once session data has been read
        self.modified = False   # True once session data has been changed
        self.serializer = import_string(settings.SESSION_SERIALIZER)

    # Dict-style access delegates to the lazily loaded session dict
    # (the _session property); writes mark the session as modified.
    def __contains__(self, key):
        return key in self._session

    def __getitem__(self, key):
        return self._session[key]

    def __setitem__(self, key, value):
        self._session[key] = value
        self.modified = True

    def __delitem__(self, key):
        del self._session[key]
        self.modified = True

    def get(self, key, default=None):
        return self._session.get(key, default)

    def pop(self, key, default=__not_given):
        # Only mark modified when the key actually existed (or was already
        # modified); a failed pop with a default changes nothing.
        self.modified = self.modified or key in self._session
        args = () if default is self.__not_given else (default,)
        return self._session.pop(key, *args)

    def setdefault(self, key, value):
        if key in self._session:
            return self._session[key]
        else:
            self.modified = True
            self._session[key] = value
            return value

    def set_test_cookie(self):
        self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE

    def test_cookie_worked(self):
        return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE

    def delete_test_cookie(self):
        del self[self.TEST_COOKIE_NAME]

    def _hash(self, value):
        # HMAC of the serialized payload, salted with the concrete backend
        # class name so hashes are not interchangeable between backends.
        key_salt = "django.contrib.sessions" + self.__class__.__name__
        return salted_hmac(key_salt, value).hexdigest()

    def encode(self, session_dict):
        "Returns the given session dictionary serialized and encoded as a string."
        # Layout: base64("<hex hmac>:<serialized payload>")
        serialized = self.serializer().dumps(session_dict)
        hash = self._hash(serialized)
        return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')

    def decode(self, session_data):
        # Inverse of encode(): verify the embedded HMAC, then deserialize.
        # Any failure yields an empty session rather than an exception.
        encoded_data = base64.b64decode(force_bytes(session_data))
        try:
            # could produce ValueError if there is no ':'
            hash, serialized = encoded_data.split(b':', 1)
            expected_hash = self._hash(serialized)
            if not constant_time_compare(hash.decode(), expected_hash):
                raise SuspiciousSession("Session data corrupted")
            else:
                return self.serializer().loads(serialized)
        except Exception as e:
            # ValueError, SuspiciousOperation, unpickling exceptions. If any of
            # these happen, just return an empty dictionary (an empty session).
            if isinstance(e, SuspiciousOperation):
                logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
                logger.warning(force_text(e))
            return {}

    def update(self, dict_):
        self._session.update(dict_)
        self.modified = True

    def has_key(self, key):
        return key in self._session

    def keys(self):
        return self._session.keys()

    def values(self):
        return self._session.values()

    def items(self):
        return self._session.items()

    # Python 2 style iterators, delegated to the underlying session dict.
    def iterkeys(self):
        return self._session.iterkeys()

    def itervalues(self):
        return self._session.itervalues()

    def iteritems(self):
        return self._session.iteritems()

    def clear(self):
        # To avoid unnecessary persistent storage accesses, we set up the
        # internals directly (loading data wastes time, since we are going to
        # set it to an empty dict anyway).
        self._session_cache = {}
        self.accessed = True
        self.modified = True

    def is_empty(self):
        "Returns True when there is no session_key and the session is empty"
        try:
            # AttributeError means the session was never loaded -> empty.
            return not bool(self._session_key) and not self._session_cache
        except AttributeError:
            return True

    def _get_new_session_key(self):
        "Returns session key that isn't being used."
        while True:
            session_key = get_random_string(32, VALID_KEY_CHARS)
            if not self.exists(session_key):
                break
        return session_key

    def _get_or_create_session_key(self):
        if self._session_key is None:
            self._session_key = self._get_new_session_key()
        return self._session_key

    def _validate_session_key(self, key):
        """
        Key must be truthy and at least 8 characters long. 8 characters is an
        arbitrary lower bound for some minimal key security.
        """
        return key and len(key) >= 8

    def _get_session_key(self):
        return self.__session_key

    def _set_session_key(self, value):
        """
        Validate session key on assignment. Invalid values will set to None.
        """
        if self._validate_session_key(value):
            self.__session_key = value
        else:
            self.__session_key = None

    # session_key is read-only for callers; the _session_key alias also
    # validates on assignment (see _set_session_key above).
    session_key = property(_get_session_key)
    _session_key = property(_get_session_key, _set_session_key)

    def _get_session(self, no_load=False):
        """
        Lazily loads session from storage (unless "no_load" is True, when only
        an empty dict is stored) and stores it in the current instance.
        """
        self.accessed = True
        try:
            return self._session_cache
        except AttributeError:
            if self.session_key is None or no_load:
                self._session_cache = {}
            else:
                self._session_cache = self.load()
        return self._session_cache
    _session = property(_get_session)

    def get_expiry_age(self, **kwargs):
        """Get the number of seconds until the session expires.
        Optionally, this function accepts `modification` and `expiry` keyword
        arguments specifying the modification and expiry of the session.
        """
        try:
            modification = kwargs['modification']
        except KeyError:
            modification = timezone.now()
        # Make the difference between "expiry=None passed in kwargs" and
        # "expiry not passed in kwargs", in order to guarantee not to trigger
        # self.load() when expiry is provided.
        try:
            expiry = kwargs['expiry']
        except KeyError:
            expiry = self.get('_session_expiry')
        if not expiry:   # Checks both None and 0 cases
            return settings.SESSION_COOKIE_AGE
        if not isinstance(expiry, datetime):
            # Already a number of seconds.
            return expiry
        delta = expiry - modification
        return delta.days * 86400 + delta.seconds

    def get_expiry_date(self, **kwargs):
        """Get session the expiry date (as a datetime object).
        Optionally, this function accepts `modification` and `expiry` keyword
        arguments specifying the modification and expiry of the session.
        """
        try:
            modification = kwargs['modification']
        except KeyError:
            modification = timezone.now()
        # Same comment as in get_expiry_age
        try:
            expiry = kwargs['expiry']
        except KeyError:
            expiry = self.get('_session_expiry')
        if isinstance(expiry, datetime):
            return expiry
        if not expiry:   # Checks both None and 0 cases
            expiry = settings.SESSION_COOKIE_AGE
        return modification + timedelta(seconds=expiry)

    def set_expiry(self, value):
        """
        Sets a custom expiration for the session. ``value`` can be an integer,
        a Python ``datetime`` or ``timedelta`` object or ``None``.
        If ``value`` is an integer, the session will expire after that many
        seconds of inactivity. If set to ``0`` then the session will expire on
        browser close.
        If ``value`` is a ``datetime`` or ``timedelta`` object, the session
        will expire at that specific future time.
        If ``value`` is ``None``, the session uses the global session expiry
        policy.
        """
        if value is None:
            # Remove any custom expiration for this session.
            try:
                del self['_session_expiry']
            except KeyError:
                pass
            return
        if isinstance(value, timedelta):
            value = timezone.now() + value
        self['_session_expiry'] = value

    def get_expire_at_browser_close(self):
        """
        Returns ``True`` if the session is set to expire when the browser
        closes, and ``False`` if there's an expiry date. Use
        ``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
        date/age, if there is one.
        """
        if self.get('_session_expiry') is None:
            return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
        return self.get('_session_expiry') == 0

    def flush(self):
        """
        Removes the current session data from the database and regenerates the
        key.
        """
        self.clear()
        self.delete()
        self._session_key = None

    def cycle_key(self):
        """
        Creates a new session key, while retaining the current session data.
        """
        data = self._session
        key = self.session_key
        self.create()
        self._session_cache = data
        if key:
            self.delete(key)

    # Methods that child classes must implement.

    def exists(self, session_key):
        """
        Returns True if the given session_key already exists.
        """
        raise NotImplementedError('subclasses of SessionBase must provide an exists() method')

    def create(self):
        """
        Creates a new session instance. Guaranteed to create a new object with
        a unique key and will have saved the result once (with empty data)
        before the method returns.
        """
        raise NotImplementedError('subclasses of SessionBase must provide a create() method')

    def save(self, must_create=False):
        """
        Saves the session data. If 'must_create' is True, a new session object
        is created (otherwise a CreateError exception is raised). Otherwise,
        save() only updates an existing object and does not create one
        (an UpdateError is raised).
        """
        raise NotImplementedError('subclasses of SessionBase must provide a save() method')

    def delete(self, session_key=None):
        """
        Deletes the session data under this key. If the key is None, the
        current session key value is used.
        """
        raise NotImplementedError('subclasses of SessionBase must provide a delete() method')

    def load(self):
        """
        Loads the session data and returns a dictionary.
        """
        raise NotImplementedError('subclasses of SessionBase must provide a load() method')

    @classmethod
    def clear_expired(cls):
        """
        Remove expired sessions from the session store.
        If this operation isn't possible on a given backend, it should raise
        NotImplementedError. If it isn't necessary, because the backend has
        a built-in expiration mechanism, it should be a no-op.
        """
        raise NotImplementedError('This backend does not support clear_expired().')
|
XeCycle/indico | refs/heads/master | indico/MaKaC/webinterface/rh/services.py | 2 | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import MaKaC.webinterface.rh.admins as admins
import MaKaC.webinterface.urlHandlers as urlHandlers
from MaKaC.common import utils
from MaKaC.common import info
from MaKaC.webinterface.pages import admins as adminPages
from MaKaC.errors import MaKaCError
class RHServicesBase(admins.RHAdminBase):
    """Common base for the admin 'services' request handlers below;
    inherits admin request handling from RHAdminBase."""
    pass
class RHIPBasedACL( RHServicesBase ):
    """ IP Based ACL Configuration Interface """

    _uh = urlHandlers.UHIPBasedACL

    def _process( self ):
        # Render the IP-based ACL admin page.
        p = adminPages.WPIPBasedACL(self)
        return p.display()
class RHIPBasedACLFullAccessGrant(RHServicesBase):
    """Grants an IP address a full-access entry in the IP-based ACL."""

    _uh = urlHandlers.UHIPBasedACLFullAccessGrant

    def _checkParams(self, params):
        RHServicesBase._checkParams(self, params)
        self._params = params

    def _process(self):
        ip_address = self._params.get('ipAddress', None)
        if ip_address:
            # Reject malformed addresses before touching the ACL manager.
            if not utils.validIP(ip_address):
                raise MaKaCError("IP Address %s is not valid!" % ip_address)
            acl_manager = info.HelperMaKaCInfo.getMaKaCInfoInstance().getIPBasedACLMgr()
            acl_manager.grant_full_access(ip_address)
        self._redirect(urlHandlers.UHIPBasedACL.getURL())
class RHIPBasedACLFullAccessRevoke(RHServicesBase):
    """Removes an IP address' full-access entry from the IP-based ACL."""

    _uh = urlHandlers.UHIPBasedACLFullAccessRevoke

    def _checkParams(self, params):
        RHServicesBase._checkParams(self, params)
        self._params = params

    def _process(self):
        ip_address = self._params.get('ipAddress', None)
        if ip_address:
            # No validity check here (matches grant-side behavior only for
            # well-formed addresses; revoking an unknown entry is handled by
            # the ACL manager).
            acl_manager = info.HelperMaKaCInfo.getMaKaCInfoInstance().getIPBasedACLMgr()
            acl_manager.revoke_full_access(ip_address)
        self._redirect(urlHandlers.UHIPBasedACL.getURL())
|
DanielSBrown/osf.io | refs/heads/develop | website/addons/box/tests/test_client.py | 36 | # -*- coding: utf-8 -*-
from nose.tools import * # noqa (PEP8 asserts)
from tests.base import OsfTestCase
from tests.factories import UserFactory
from website.addons.box.model import BoxUserSettings
class TestCore(OsfTestCase):
    """Sanity checks for enabling the Box addon on a user account."""

    def setUp(self):
        super(TestCore, self).setUp()

        # Create a user with the Box addon enabled.
        self.user = UserFactory()
        self.user.add_addon('box')
        self.user.save()

        self.settings = self.user.get_addon('box')
        self.settings.save()

    def test_get_addon_returns_box_user_settings(self):
        result = self.user.get_addon('box')
        assert_true(isinstance(result, BoxUserSettings))
|
AmeBel/opencog | refs/heads/master | scripts/get_python_lib.py | 5 | import sys
import sysconfig
import site
if __name__ == '__main__':
    # This is a hack due to the distutils in debian/ubuntu's python3 being misconfigured
    # see discussion https://github.com/opencog/atomspace/issues/1782
    #
    # If the bug is fixed, this script could be replaced by:
    #
    #   from distutils.sysconfig import get_python_lib; print(get_python_lib(plat_specific=True, prefix=prefix))
    #
    # However, using this would not respect a python virtual environments, so in a way this is better!
    if len(sys.argv) < 2:
        # Fail with a usage message instead of an IndexError on sys.argv[1].
        sys.stderr.write("usage: get_python_lib.py <install-prefix>\n")
        sys.exit(1)
    prefix = sys.argv[1]

    # use sites if the prefix is recognized and the sites module is available
    # (virtualenv is missing getsitepackages())
    if hasattr(site, 'getsitepackages'):
        paths = [p for p in site.getsitepackages() if p.startswith(prefix)]
        if len(paths) == 1:
            print(paths[0])
            # FIX: use sys.exit() instead of the site-provided exit() builtin,
            # which is not guaranteed to exist (e.g. under `python -S`).
            sys.exit(0)

    # use sysconfig platlib as the fall back
    print(sysconfig.get_paths()['platlib'])
|
bbbenja/SickRage | refs/heads/master | lib/simplejson/scanner.py | 928 | """JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']

# Matches a JSON number: integer part (no leading zeros), optional
# fraction, optional exponent; groups are captured separately so the
# scanner can decide between int and float parsing.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build a pure-Python ``scan_once(string, idx)`` callable for *context*.

    The returned closure dispatches on the first character of the value at
    ``idx`` and returns a ``(value, end_index)`` pair, recursing into itself
    for nested objects and arrays.
    """
    # Bind context attributes and the regex matcher to locals once so that
    # lookups inside the hot closure are fast local-variable loads.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook

    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            # Past end of input: StopIteration signals "nothing to scan".
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                # Any fractional or exponent part makes it a float.
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            # Not a recognized JSON token at this position.
            raise StopIteration
    return _scan_once
# Prefer the C-accelerated scanner when the _speedups extension imported
# successfully at module load; otherwise fall back to the pure-Python one.
make_scanner = c_make_scanner or py_make_scanner
|
Rewardcoin/p2ppool-SGcoin | refs/heads/master | p2pool/util/jsonrpc.py | 261 | from __future__ import division
import json
import weakref
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import failure, log
from twisted.web import client, error
from p2pool.util import deferral, deferred_resource, memoize
class Error(Exception):
    """Abstract JSON-RPC error base; instantiate concrete subclasses
    produced by Error_for_code(code) instead."""

    def __init__(self, code, message, data=None):
        if type(self) is Error:
            raise TypeError("can't directly instantiate Error class; use Error_for_code")
        if not isinstance(code, int):
            raise TypeError('code must be an int')
        #if not isinstance(message, unicode):
        #    raise TypeError('message must be a unicode')
        self.code, self.message, self.data = code, message, data

    def __str__(self):
        text = '%i %s' % (self.code, self.message)
        if self.data is not None:
            text += ' %r' % (self.data,)
        return text

    def _to_obj(self):
        # Shape of the "error" member in a JSON-RPC 2.0 response.
        return dict(code=self.code, message=self.message, data=self.data)
@memoize.memoize_with_backing(weakref.WeakValueDictionary())
def Error_for_code(code):
    """Return an Error subclass bound to *code*; memoized weakly so the same
    code yields the same class while it stays referenced."""
    class NarrowError(Error):
        def __init__(self, *args, **kwargs):
            Error.__init__(self, code, *args, **kwargs)
    return NarrowError
class Proxy(object):
    """Attribute-based JSON-RPC call builder.

    ``proxy.rpc_foo(a, b)`` invokes ``func('foo', (a, b))``;
    ``proxy.svc_bar`` returns a nested proxy whose methods dispatch under
    the dotted prefix ``bar.``.
    """

    def __init__(self, func, services=[]):
        self._func = func
        self._services = services

    def __getattr__(self, attr):
        if attr.startswith('rpc_'):
            method_name = '.'.join(self._services + [attr[len('rpc_'):]])
            def invoke(*params):
                return self._func(method_name, params)
            return invoke
        if attr.startswith('svc_'):
            return Proxy(self._func, self._services + [attr[len('svc_'):]])
        raise AttributeError('%r object has no attribute %r' % (self.__class__.__name__, attr))
@defer.inlineCallbacks
def _handle(data, provider, preargs=(), response_handler=None):
    # Dispatch a single JSON-RPC 2.0 message.
    # Requests are routed to provider.svc_*/rpc_* attributes (with *preargs*
    # prepended to the params) and the serialized response is returned;
    # responses are delivered to *response_handler* and None is returned.
    # NOTE: Python 2-only syntax below ("except Error, e", basestring).
    id_ = None
    try:
        try:
            try:
                req = json.loads(data)
            except Exception:
                raise Error_for_code(-32700)(u'Parse error')
            if 'result' in req or 'error' in req:
                # The message is a response to one of our own requests;
                # hand it to the matcher instead of dispatching a method.
                response_handler(req['id'], req['result'] if 'error' not in req or req['error'] is None else
                    failure.Failure(Error_for_code(req['error']['code'])(req['error']['message'], req['error'].get('data', None))))
                defer.returnValue(None)
            id_ = req.get('id', None)
            method = req.get('method', None)
            if not isinstance(method, basestring):
                raise Error_for_code(-32600)(u'Invalid Request')
            params = req.get('params', [])
            if not isinstance(params, list):
                raise Error_for_code(-32600)(u'Invalid Request')
            # Walk dotted service path, e.g. "a.b.m" -> provider.svc_a.svc_b.rpc_m
            for service_name in method.split('.')[:-1]:
                provider = getattr(provider, 'svc_' + service_name, None)
                if provider is None:
                    raise Error_for_code(-32601)(u'Service not found')
            method_meth = getattr(provider, 'rpc_' + method.split('.')[-1], None)
            if method_meth is None:
                raise Error_for_code(-32601)(u'Method not found')
            result = yield method_meth(*list(preargs) + list(params))
            error = None
        except Error:
            raise
        except Exception:
            # Unexpected failures are logged locally and reported to the
            # peer only as a generic error code.
            log.err(None, 'Squelched JSON error:')
            raise Error_for_code(-32099)(u'Unknown error')
    except Error, e:
        result = None
        error = e._to_obj()
    defer.returnValue(json.dumps(dict(
        jsonrpc='2.0',
        id=id_,
        result=result,
        error=error,
    )))
# HTTP

@defer.inlineCallbacks
def _http_do(url, headers, timeout, method, params):
    # POST one JSON-RPC 2.0 request over HTTP and return the 'result'
    # member, raising an Error subclass when the peer reports an error.
    # NOTE: Python 2-only syntax ("except error.Error, e").
    id_ = 0
    try:
        data = yield client.getPage(
            url=url,
            method='POST',
            headers=dict(headers, **{'Content-Type': 'application/json'}),
            postdata=json.dumps({
                'jsonrpc': '2.0',
                'method': method,
                'params': params,
                'id': id_,
            }),
            timeout=timeout,
        )
    except error.Error, e:
        # Non-2xx responses may still carry a JSON-RPC error body; if the
        # body is not JSON, re-raise the original HTTP error.
        try:
            resp = json.loads(e.response)
        except:
            raise e
    else:
        resp = json.loads(data)
    if resp['id'] != id_:
        raise ValueError('invalid id')
    if 'error' in resp and resp['error'] is not None:
        raise Error_for_code(resp['error']['code'])(resp['error']['message'], resp['error'].get('data', None))
    defer.returnValue(resp['result'])
# Convenience factory: a Proxy whose calls are sent as HTTP POSTs via _http_do.
HTTPProxy = lambda url, headers={}, timeout=5: Proxy(lambda method, params: _http_do(url, headers, timeout, method, params))
class HTTPServer(deferred_resource.DeferredResource):
    """Twisted web resource serving JSON-RPC requests received over HTTP POST."""

    def __init__(self, provider):
        deferred_resource.DeferredResource.__init__(self)
        # object whose rpc_*/svc_* methods are exposed to callers
        self._provider = provider

    @defer.inlineCallbacks
    def render_POST(self, request):
        # _handle serializes the JSON-RPC response; HTTP requests always
        # carry an id here, so a response body is always produced.
        data = yield _handle(request.content.read(), self._provider, preargs=[request])
        assert data is not None
        request.setHeader('Content-Type', 'application/json')
        request.setHeader('Content-Length', len(data))
        request.write(data)
class LineBasedPeer(basic.LineOnlyReceiver):
    """Bidirectional JSON-RPC peer: each newline-terminated line is one
    JSON-RPC message; ``self.other`` proxies outgoing calls."""
    delimiter = '\n'

    def __init__(self):
        #basic.LineOnlyReceiver.__init__(self)
        # GenericDeferrer matches outgoing request ids to their responses.
        self._matcher = deferral.GenericDeferrer(max_id=2**30, func=lambda id, method, params: self.sendLine(json.dumps({
            'jsonrpc': '2.0',
            'method': method,
            'params': params,
            'id': id,
        })))
        self.other = Proxy(self._matcher)

    def lineReceived(self, line):
        # _handle both dispatches incoming requests (this object is the
        # provider) and routes responses back to the matcher; only requests
        # with an id produce a reply line to send back.
        _handle(line, self, response_handler=self._matcher.got_response).addCallback(lambda line2: self.sendLine(line2) if line2 is not None else None)
|
crosswalk-project/chromium-crosswalk-efl | refs/heads/efl/crosswalk-10/39.0.2171.19 | native_client_sdk/src/tools/decode_dump.py | 102 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to decode a crash dump generated by untrusted_crash_dump.[ch]
Currently this produces a simple stack trace.
"""
import json
import optparse
import os
import posixpath
import subprocess
import sys
class CoreDecoder(object):
    """Class to process NaCl core dumps into symbolized stack traces."""

    def __init__(self, main_nexe, nmf_filename,
                 addr2line, library_paths, platform):
        """Construct an object to process core dumps.

        Args:
            main_nexe: nexe to resolve NaClMain references from.
            nmf_filename: nmf to resolve references from ('-' for none).
            addr2line: path to appropriate addr2line.
            library_paths: list of paths to search for libraries.
            platform: platform string to use in nmf files.
        """
        self.main_nexe = main_nexe
        self.nmf_filename = nmf_filename
        if nmf_filename == '-':
            self.nmf_data = {}
        else:
            self.nmf_data = json.load(open(nmf_filename))
        self.addr2line = addr2line
        self.library_paths = library_paths
        self.platform = platform

    def _SelectModulePath(self, filename):
        """Select which path to get a module from.

        Args:
            filename: filename of a module (as appears in phdrs).
        Returns:
            Full local path to the file.
            Derived by consulting the manifest.
        """
        # For some names try the main nexe.
        # NaClMain is the argv[0] setup in sel_main.c
        # (null) shows up in chrome.
        if self.main_nexe is not None and filename in ['NaClMain', '(null)']:
            return self.main_nexe
        filepart = posixpath.basename(filename)
        nmf_entry = self.nmf_data.get('files', {}).get(filepart, {})
        nmf_url = nmf_entry.get(self.platform, {}).get('url')
        # Try filename directly if not in manifest.
        if nmf_url is None:
            return filename
        # Look for the module relative to the manifest (if any),
        # then in other search paths.
        paths = []
        if self.nmf_filename != '-':
            paths.append(os.path.dirname(self.nmf_filename))
        paths.extend(self.library_paths)
        for path in paths:
            pfilename = os.path.join(path, nmf_url)
            if os.path.exists(pfilename):
                return pfilename
        # If nothing else, try the path directly.
        return filename

    def _DecodeAddressSegment(self, segments, address):
        """Convert an address to a segment relative one, plus filename.

        Args:
            segments: a list of phdr segments.
            address: a process wide code address.
        Returns:
            A tuple of filename and segment relative address.
        """
        for segment in segments:
            for phdr in segment['dlpi_phdr']:
                start = segment['dlpi_addr'] + phdr['p_vaddr']
                end = start + phdr['p_memsz']
                if address >= start and address < end:
                    return (segment['dlpi_name'], address - segment['dlpi_addr'])
        return ('(null)', address)

    def _Addr2Line(self, segments, address):
        """Use addr2line to decode a code address.

        Args:
            segments: A list of phdr segments.
            address: a code address.
        Returns:
            A list of dicts containing: function, filename, lineno.
        """
        filename, address = self._DecodeAddressSegment(segments, address)
        filename = self._SelectModulePath(filename)
        if not os.path.exists(filename):
            return [{
                'function': 'Unknown_function',
                'filename': 'unknown_file',
                'lineno': -1,
            }]
        # Use address - 1 to get the call site instead of the line after.
        address -= 1
        cmd = [
            self.addr2line, '-f', '--inlines', '-e', filename, '0x%08x' % address,
        ]
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        process_stdout, _ = process.communicate()
        assert process.returncode == 0
        # --inlines emits pairs of lines: function, then "file:lineno".
        lines = process_stdout.splitlines()
        assert len(lines) % 2 == 0
        results = []
        for index in xrange(len(lines) / 2):
            func = lines[index * 2]
            afilename, lineno = lines[index * 2 + 1].split(':', 1)
            results.append({
                'function': func,
                'filename': afilename,
                'lineno': int(lineno),
            })
        return results

    def Decode(self, text):
        """Embellish a core dump (JSON text) with decoded code addresses.

        Args:
            text: JSON string containing a dump.
        Returns:
            An embellished core dump dict (decoded code addresses).
        """
        core = json.loads(text)
        for frame in core['frames']:
            frame['scopes'] = self._Addr2Line(core['segments'],
                                              frame['prog_ctr'])
        return core

    def LoadAndDecode(self, core_path):
        """Given a core.json file, load and embellish with decoded addresses.

        Args:
            core_path: source file containing a dump.
        Returns:
            An embellished core dump dict (decoded code addresses).
        """
        # Delegate to Decode() so the embellishing loop lives in one place;
        # also close the file deterministically (the original leaked it).
        with open(core_path) as core_file:
            return self.Decode(core_file.read())

    def StackTrace(self, info):
        """Convert a decoded core.json dump to a simple stack trace.

        Args:
            info: core.json info with decoded code addresses.
        Returns:
            A list of dicts with filename, lineno, function (deepest first).
        """
        trace = []
        for frame in info['frames']:
            for scope in frame['scopes']:
                trace.append(scope)
        return trace

    def PrintTrace(self, trace, out):
        """Print a trace to a file like object.

        Args:
            trace: A list of [filename, lineno, function] (deepest first).
            out: file like object to output the trace to.
        """
        for scope in trace:
            out.write('%s at %s:%d\n' % (
                scope['function'],
                scope['filename'],
                scope['lineno']))
def Main(args):
    """Command-line entry point: decode a core.json dump to a stack trace.

    Args:
        args: command-line arguments (excluding argv[0]).
    """
    parser = optparse.OptionParser(
        usage='USAGE: %prog [options] <core.json>')
    parser.add_option('-m', '--main-nexe', dest='main_nexe',
                      help='nexe to resolve NaClMain references from')
    parser.add_option('-n', '--nmf', dest='nmf_filename', default='-',
                      help='nmf to resolve references from')
    parser.add_option('-a', '--addr2line', dest='addr2line',
                      help='path to appropriate addr2line')
    parser.add_option('-L', '--library-path', dest='library_paths',
                      action='append', default=[],
                      help='path to search for shared libraries')
    parser.add_option('-p', '--platform', dest='platform',
                      help='platform in a style match nmf files')
    options, args = parser.parse_args(args)
    if len(args) != 1:
        parser.print_help()
        sys.exit(1)
    decoder = CoreDecoder(
        main_nexe=options.main_nexe,
        nmf_filename=options.nmf_filename,
        # BUG FIX: was options.add2line (missing 'r'); optparse only defines
        # options.addr2line (dest above), so this always raised AttributeError.
        addr2line=options.addr2line,
        library_paths=options.library_paths,
        platform=options.platform)
    info = decoder.LoadAndDecode(args[0])
    trace = decoder.StackTrace(info)
    decoder.PrintTrace(trace, sys.stdout)


if __name__ == '__main__':
    Main(sys.argv[1:])
|
ktbyers/pynet-ons-jan17 | refs/heads/master | day1/numbers_ex1.py | 4 | #!/usr/bin/env python
# Python 2 exercise script: read two integers and print basic arithmetic.
num1 = int(raw_input("Enter first number: "))
num2 = int(raw_input("Enter second number: "))
print "\n\nSum: {}".format(num1 + num2)
print "Difference: {}".format(num1 - num2)
print "Product: {}".format(num1 * num2)
# float() forces true division under Python 2's integer '/'
print "Division: {:.2f}".format(num1/float(num2))
print
|
vipmike007/virt-test | refs/heads/master | shared/deps/run_autotest/kernel_install/kernelinstall.py | 26 | import os
import logging
import sys
from autotest.client import test
from autotest.client import utils
from autotest.client.shared import git, error, software_manager
class kernelinstall(test.test):
    """Autotest client test: install a kernel from an rpm, a koji/brew
    build, a git tree or a source tarball, optionally rebooting into it."""
    version = 1

    # NOTE(review): created once at class-definition (import) time and
    # shared by every instance of this test.
    sm = software_manager.SoftwareManager()

    def _kernel_install_rpm(self, rpm_file, kernel_deps_rpms=None,
                            need_reboot=True):
        """
        Install kernel rpm package.
        The rpm packages should be a url or put in this test's
        directory (client/test/kernelinstall)
        """
        if kernel_deps_rpms:
            logging.info("Installing kernel dependencies.")
            # accept either a list of rpm specs or a single space-joined string
            if isinstance(kernel_deps_rpms, list):
                kernel_deps_rpms = " ".join(kernel_deps_rpms)
            self.sm.install(kernel_deps_rpms)
        dst = os.path.join("/tmp", os.path.basename(rpm_file))
        knl = utils.get_file(rpm_file, dst)
        kernel = self.job.kernel(knl)
        logging.info("Installing kernel %s", rpm_file)
        kernel.install(install_vmlinux=False)
        if need_reboot:
            kernel.boot()
        else:
            kernel.add_to_bootloader()

    def _kernel_install_koji(self, kernel_koji_spec, kernel_deps_koji_spec,
                             need_reboot=True):
        """Fetch kernel + dependency rpms from koji/brew, then install them."""
        # Using hardcoded package names (the names are not expected to change)
        # we avoid lookup errors due to SSL problems, so let's go with that.
        for koji_package in ['koji', 'brewkoji']:
            if not self.sm.check_installed(koji_package):
                logging.debug("%s missing - trying to install", koji_package)
                self.sm.install(koji_package)
        sys.path.append(self.bindir)
        # prefer the staging copy of utils_koji when present
        try:
            from staging import utils_koji
        except ImportError:
            from autotest.client.shared import utils_koji
        # First, download packages via koji/brew
        c = utils_koji.KojiClient()
        deps_rpms = []
        k_dep = utils_koji.KojiPkgSpec(text=kernel_deps_koji_spec)
        logging.info('Fetching kernel dependencies: %s', kernel_deps_koji_spec)
        c.get_pkgs(k_dep, self.bindir)
        rpm_file_name_list = c.get_pkg_rpm_file_names(k_dep)
        if len(rpm_file_name_list) == 0:
            raise error.TestError("No packages on brew/koji match spec %s" %
                                  kernel_deps_koji_spec)
        # NOTE(review): only the first matching dependency rpm is used
        dep_rpm_basename = rpm_file_name_list[0]
        deps_rpms.append(os.path.join(self.bindir, dep_rpm_basename))
        k = utils_koji.KojiPkgSpec(text=kernel_koji_spec)
        logging.info('Fetching kernel: %s', kernel_koji_spec)
        c.get_pkgs(k, self.bindir)
        rpm_file_name_list = c.get_pkg_rpm_file_names(k)
        if len(rpm_file_name_list) == 0:
            raise error.TestError("No packages on brew/koji match spec %s" %
                                  kernel_koji_spec)
        kernel_rpm_basename = rpm_file_name_list[0]
        kernel_rpm_path = os.path.join(self.bindir, kernel_rpm_basename)
        # Then install kernel rpm packages.
        self._kernel_install_rpm(kernel_rpm_path, deps_rpms, need_reboot)

    def _kernel_install_src(self, base_tree, config=None, config_list=None,
                            patch_list=None, need_reboot=True):
        """Build and install a kernel from a source tree/tarball."""
        if not utils.is_url(base_tree):
            base_tree = os.path.join(self.bindir, base_tree)
        # NOTE(review): if config is None this joins with None and raises
        # inside os.path.join — verify callers always pass a config here.
        if not utils.is_url(config):
            config = os.path.join(self.bindir, config)
        kernel = self.job.kernel(base_tree, self.outputdir)
        if patch_list:
            patches = []
            for p in patch_list.split():
                # Make sure all the patches are in local.
                # NOTE(review): non-URL entries are skipped entirely, so
                # pre-existing local patch files are ignored — verify intent.
                if not utils.is_url(p):
                    continue
                dst = os.path.join(self.bindir, os.path.basename(p))
                local_patch = utils.get_file(p, dst)
                patches.append(local_patch)
            kernel.patch(*patches)
        if not os.path.isfile(config):
            config = None
        if not config and not config_list:
            kernel.config()
        else:
            kernel.config(config, config_list)
        kernel.build()
        kernel.install()
        if need_reboot:
            kernel.boot()
        else:
            kernel.add_to_bootloader()

    def _kernel_install_git(self, repo, config, repo_base=None,
                            branch="master", commit=None, config_list=None,
                            patch_list=None, need_reboot=True):
        """Clone a kernel git tree, then delegate to _kernel_install_src."""
        repodir = os.path.join("/tmp", 'kernel_src')
        repodir = git.get_repo(uri=repo, branch=branch,
                               destination_dir=repodir,
                               commit=commit, base_uri=repo_base)
        self._kernel_install_src(repodir, config, config_list, patch_list,
                                 need_reboot)

    def execute(self, install_type="koji", params=None):
        """Dispatch to the requested install method based on install_type."""
        need_reboot = params.get("need_reboot") == "yes"
        logging.info("Chose to install kernel through '%s', proceeding",
                     install_type)
        if install_type == "rpm":
            rpm_url = params.get("kernel_rpm_path")
            kernel_deps_rpms = params.get("kernel_deps_rpms", None)
            self._kernel_install_rpm(rpm_url, kernel_deps_rpms, need_reboot)
        elif install_type in ["koji", "brew"]:
            kernel_koji_spec = params.get("kernel_koji_spec")
            kernel_deps_koji_spec = params.get("kernel_deps_koji_spec")
            self._kernel_install_koji(kernel_koji_spec, kernel_deps_koji_spec,
                                      need_reboot)
        elif install_type == "git":
            repo = params.get('kernel_git_repo')
            repo_base = params.get('kernel_git_repo_base', None)
            branch = params.get('kernel_git_branch', "master")
            commit = params.get('kernel_git_commit', None)
            patch_list = params.get("kernel_patch_list", None)
            config = params.get('kernel_config')
            config_list = params.get("kernel_config_list", None)
            self._kernel_install_git(repo, config, repo_base, branch, commit,
                                     config_list, patch_list, need_reboot)
        elif install_type == "tar":
            src_pkg = params.get("kernel_src_pkg")
            config = params.get('kernel_config')
            patch_list = params.get("kernel_patch_list", None)
            self._kernel_install_src(src_pkg, config, None, patch_list,
                                     need_reboot)
        else:
            # unknown install_type: log and leave the running kernel alone
            logging.error("Could not find '%s' method, "
                          "keep the current kernel.", install_type)
|
ryfeus/lambda-packs | refs/heads/master | Opencv_pil/source36/numpy/f2py/tests/test_semicolon_split.py | 13 | from __future__ import division, absolute_import, print_function
import platform
import pytest
from . import util
from numpy.testing import assert_equal
@pytest.mark.skipif(
    platform.system() == 'Darwin',
    reason="Prone to error when run with numpy/f2py/tests on mac os, "
    "but not when run in isolation")
class TestMultiline(util.F2PyTest):
    """Check that a ';' inside a usercode C string literal does not break
    f2py's multiline-block parsing of a .pyf file."""
    suffix = ".pyf"
    module_name = "multiline"
    code = """
python module {module}
usercode '''
void foo(int* x) {{
char dummy = ';';
*x = 42;
}}
'''
interface
subroutine foo(x)
intent(c) foo
integer intent(out) :: x
end subroutine foo
end interface
end python module {module}
""".format(module=module_name)

    def test_multiline(self):
        # the C usercode sets *x = 42
        assert_equal(self.module.foo(), 42)
@pytest.mark.skipif(
    platform.system() == 'Darwin',
    reason="Prone to error when run with numpy/f2py/tests on mac os, "
    "but not when run in isolation")
class TestCallstatement(util.F2PyTest):
    """Check that a ';' and line continuations inside a callstatement
    block are parsed correctly by f2py."""
    suffix = ".pyf"
    module_name = "callstatement"
    code = """
python module {module}
usercode '''
void foo(int* x) {{
}}
'''
interface
subroutine foo(x)
intent(c) foo
integer intent(out) :: x
callprotoargument int*
callstatement {{ &
; &
x = 42; &
}}
end subroutine foo
end interface
end python module {module}
""".format(module=module_name)

    def test_callstatement(self):
        # the callstatement itself assigns x = 42
        assert_equal(self.module.foo(), 42)
|
selste/micropython | refs/heads/master | tests/basics/bytes_partition.py | 41 | try:
str.partition
except AttributeError:
print("SKIP")
raise SystemExit
# Exercise bytes.partition with separators of every interesting shape:
# absent, single char, multi-char, full match, and longer than the subject.
for sep in (b'g', b'a', b's', b'f', b'd', b'asd', b'sdf', b'as', b'df',
            b'asdf', b'asdfa', b'fasdf', b'fasdfa'):
    print(b"asdf".partition(sep))

# repeated characters: only the first occurrence splits
for sep in (b'a', b'b'):
    print(b"abba".partition(sep))

# a non-bytes separator must raise TypeError
try:
    print(b"asdf".partition(1))
except TypeError:
    print("Raised TypeError")
else:
    print("Did not raise TypeError")

# an empty separator must raise ValueError
try:
    print(b"asdf".partition(b''))
except ValueError:
    print("Raised ValueError")
else:
    print("Did not raise ValueError")
|
csdms/wmt-metadata | refs/heads/master | wmtmetadata/cmd/__init__.py | 12133432 | |
konstruktoid/ansible-upstream | refs/heads/devel | test/units/modules/network/iosxr/__init__.py | 12133432 | |
LinDA-tools/LindaWorkbench | refs/heads/master | linda/endpoint_monitor/management/commands/__init__.py | 12133432 | |
DeMille/emailhooks | refs/heads/master | django_nonrel/djangotoolbox/sites/__init__.py | 12133432 | |
dippatel1994/oppia | refs/heads/develop | core/storage/__init__.py | 12133432 | |
olduvaihand/ProjectEuler | refs/heads/master | src/python/problem335.py | 1 | # -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem335.py
#
# Gathering the beans
# ===================
# Published on Saturday, 23rd April 2011, 04:00 pm
#
# Whenever Peter feels bored, he places some bowls, containing one bean each,
# in a circle. After this, he takes all the beans out of a certain bowl and
# drops them one by one in the bowls going clockwise. He repeats this, starting
# from the bowl he dropped the last bean in, until the initial situation
# appears again. For example with 5 bowls he acts as follows: So with 5 bowls
# it takes Peter 15 moves to return to the initial situation. Let M(x)
# represent the number of moves required to return to the initial situation,
# starting with x bowls. Thus, M(5) = 15. It can also be verified that M(100) =
# 10920. Find M(2k+1). Give your answer modulo 79.
import projecteuler as pe
def main():
    """Solve Project Euler problem 335, 'Gathering the beans'.

    Stub: no solution has been implemented yet.
    """
    pass


if __name__ == "__main__":
    main()
|
pavels/pootle | refs/heads/master | tests/views/admin.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from django.core.urlresolvers import reverse_lazy, reverse
from django.utils.safestring import mark_safe
from pytest_pootle.env import TEST_USERS
from pytest_pootle.factories import LanguageDBFactory
from pootle.core.paginator import paginate
from pootle.core.url_helpers import split_pootle_path
from pootle_app.models import PermissionSet
from pootle_app.views.admin.util import form_set_as_table
from pootle_project.models import Project
from pootle_translationproject.models import TranslationProject
ADMIN_URL = reverse_lazy('pootle-admin')
def _test_admin_view(response, project):
    """Shared assertions for the project admin-languages view response:
    pagination, formset class, context urls and the rendered formset table."""
    request = response.wsgi_request
    qs = TranslationProject.objects.filter(
        project=project).order_by('pootle_path')
    page = paginate(request, qs)
    url_kwargs = {
        'project_code': project.code,
        'dir_path': '',
        'filename': ''}
    # pagination state must match an independently-built paginator
    assert page.number == response.context["objects"].number
    assert page.start_index() == response.context["objects"].start_index()
    assert page.end_index() == response.context["objects"].end_index()
    assert (
        list(response.context["objects"].object_list.values_list("pk", flat=True))
        == list(qs.values_list("pk", flat=True)))
    assert (
        response.context["formset"].__class__.__name__
        == "TranslationProjectFormFormSet")
    assert response.context["page"] == "admin-languages"
    assert response.context["browse_url"] == reverse(
        'pootle-project-browse',
        kwargs=url_kwargs)
    assert response.context["translate_url"] == reverse(
        'pootle-project-translate',
        kwargs=url_kwargs)
    assert (
        response.context['project']
        == {'code': project.code,
            'name': project.fullname})
    # the rendered table links each row's language to its permissions admin
    assert (
        response.context["formset_text"]
        == mark_safe(
            form_set_as_table(
                response.context["formset"],
                lambda tp: (
                    u'<a href="%s">%s</a>'
                    % (reverse('pootle-tp-admin-permissions',
                               args=split_pootle_path(tp.pootle_path)[:2]),
                       tp.language)),
                "language")))
def _admin_view_get(client, project):
    """GET the admin-languages page for *project*."""
    url = reverse(
        "pootle-project-admin-languages",
        kwargs=dict(project_code=project.code))
    return client.get(url)
def _admin_view_post(client, project, **kwargs):
    """POST *kwargs* as form data to the admin-languages page for *project*."""
    url = reverse(
        "pootle-project-admin-languages",
        kwargs=dict(project_code=project.code))
    return client.post(url, kwargs)
@pytest.mark.django_db
def test_admin_not_logged_in(client):
    """Checks logged-out users cannot access the admin site."""
    assert client.get(ADMIN_URL).status_code == 403
@pytest.mark.django_db
def test_admin_regular_user(client, default):
    """Checks regular users cannot access the admin site."""
    client.login(username=default.username, password='')
    assert client.get(ADMIN_URL).status_code == 403
@pytest.mark.django_db
def test_admin_access(client):
    """Tests that admin users can access the admin site."""
    client.login(username="admin", password="admin")
    assert client.get(ADMIN_URL).status_code == 200
@pytest.mark.django_db
def test_admin_view_projects(client, request_users):
    """Only superusers may GET the project admin-languages view."""
    user = request_users["user"]
    project = Project.objects.get(code="project0")
    client.login(
        username=user.username,
        password=request_users["password"])
    response = _admin_view_get(client, project)
    if user.is_superuser:
        _test_admin_view(response, project)
    else:
        assert response.status_code == 403
@pytest.mark.django_db
def test_admin_view_projects_manager(client, member, administrate):
    """A non-superuser granted 'administrate' on the project directory can
    both GET and POST the admin-languages view."""
    project = Project.objects.get(code="project0")
    criteria = {
        'user': member,
        'directory': project.directory}
    ps = PermissionSet.objects.create(**criteria)
    # NOTE(review): direct m2m assignment — relies on pre-Django-2.0
    # descriptor behavior saving immediately; verify on upgrade.
    ps.positive_permissions = [administrate]
    client.login(
        username=member.username,
        password=TEST_USERS[member.username]["password"])
    response = _admin_view_get(client, project)
    assert response.status_code == 200
    _test_admin_view(response, project)
    response = _admin_view_post(client, project)
    assert response.status_code == 200
    _test_admin_view(response, project)
@pytest.mark.django_db
def test_admin_view_projects_post(client, request_users):
    """A POST by a non-superuser to the admin-languages view is rejected."""
    project = Project.objects.get(code="project0")
    user = request_users["user"]
    client.login(
        username=user.username,
        password=request_users["password"])
    if not user.is_superuser:
        response = _admin_view_post(client, project)
        assert response.status_code == 403
@pytest.mark.django_db
def test_admin_view_projects_add_tp(english, client, admin):
    """Posting the formset with a new language row creates that TP."""
    user = admin
    project = Project.objects.get(code="project0")
    new_language = LanguageDBFactory()
    TranslationProject.objects.create(language=english, project=project)
    client.login(
        username=user.username,
        password=TEST_USERS["admin"]["password"])
    get_response = _admin_view_get(client, project)
    post_data = {}
    formset = get_response.context["formset"]
    forms = formset.forms + formset.extra_forms + [formset.management_form]
    # re-submit the rendered formset unchanged ...
    for form in forms:
        for field in form.fields:
            post_data["%s-%s" % (form.prefix, field)] = (
                form.fields[field].initial
                or form.initial.get(field, ""))
    # ... except the first extra row, which selects the new language
    post_data["%s-language" % formset.extra_forms[0].prefix] = new_language.id
    post_data["%s-project" % formset.extra_forms[0].prefix] = project.id
    response = _admin_view_post(client, project, **post_data)
    new_tp = TranslationProject.objects.get(language=new_language, project=project)
    assert new_tp in response.context["objects"].object_list
    _test_admin_view(response, project)
@pytest.mark.django_db
def test_admin_view_projects_delete_tp(english, client, admin):
    """Posting the formset with a row's DELETE flag removes that TP."""
    user = admin
    project = Project.objects.get(code="project0")
    TranslationProject.objects.create(language=english, project=project)
    client.login(
        username=user.username,
        password=TEST_USERS["admin"]["password"])
    get_response = _admin_view_get(client, project)
    post_data = {}
    formset = get_response.context["formset"]
    forms = formset.forms + formset.extra_forms + [formset.management_form]
    # re-submit the rendered formset unchanged ...
    for form in forms:
        for field in form.fields:
            post_data["%s-%s" % (form.prefix, field)] = (
                form.fields[field].initial
                or form.initial.get(field, ""))
    # ... but mark the first row for deletion
    tp_pk = post_data["form-0-id"]
    post_data["form-0-DELETE"] = "true"
    response = _admin_view_post(client, project, **post_data)
    assert (
        tp_pk
        not in project.translationproject_set.values_list("pk", flat=True))
    _test_admin_view(response, project)
|
abalkin/tz | refs/heads/master | tzdata-pkg/zic/zic.py | 1 | import textwrap
import sys
from datetime import datetime
HEADER = """\
from zic.classes import *
from datetime import *
"""
RAW_FILES = [
'africa', 'antarctica', 'asia', 'australasia',
'europe', 'northamerica', 'southamerica'
]
def lines(input):
    """Yield non-blank, comment-stripped lines from *input*."""
    for raw_line in input:
        stripped = raw_line.strip()
        if stripped and not stripped.startswith('#'):
            yield strip_comments(stripped)
def strip_comments(line):
    """Drop an unquoted trailing '#' comment from *line*.

    A '#' inside double quotes does not start a comment.
    """
    in_quotes = False
    for pos, ch in enumerate(line):
        if ch == '"':
            in_quotes = not in_quotes
        elif ch == "#" and not in_quotes:
            return line[:pos].strip()
    return line
# Template for one generated Observance(...) entry; gmtoff and until are
# pre-formatted by format_delta()/format_until().
OBSERVANCE_TEMPLATE = """\
Observance(gmtoff={},
rules={},
format='{}',
until={}),
"""
def compile(infile, outfile=None):
    """Compile one zic-format file to Python, writing to *outfile*
    or to stdout when no output path is given.

    NOTE: intentionally shadows the builtin ``compile`` at module scope.
    """
    with open(infile) as source:
        if outfile is not None:
            with open(outfile, 'w') as sink:
                compile_stream(source, sink)
        else:
            compile_stream(source, sys.stdout)
def compile_stream(input, output, header=HEADER):
    """Parse zic-format lines from *input* and write generated Python
    Rules/Zone classes to *output*."""
    output.write(header)
    observances = state = None
    zones = {}
    rules = {}
    for line in lines(input):
        fields = line.split()
        if fields[0] == 'Zone':
            # 'Zone A/B ...': nested zone names become nested dicts
            names = fields[1].split('/')
            z = zones
            for name in names:
                z = z.setdefault(name, {})
            observances = z.setdefault('observances', [])
            state = 'Zone'
            del fields[:2]
        elif fields[0] == 'Rule':
            # collect raw rule fields under the rule-set name
            rules.setdefault(fields[1], []).append(fields[2:])
        if state == 'Zone':
            # a Zone line and its continuations: GMTOFF RULES FORMAT [UNTIL]
            gmtoff, zone_rules, format = fields[:3]
            until = format_until(fields[3:])
            if until is None:
                # no UNTIL field: this is the zone's final observance
                state = None
            observances.append(
                format_observance(gmtoff, zone_rules, format, until))
    print_rules(rules, file=output)
    print_zones(zones, file=output)
# Template for one generated Rule(...) constructor call.
RULE_TEMPLATE = ('Rule({}, {}, {}, {}, {},\n'
                 ' at={},\n'
                 ' save={}, letters={!r})')


def format_rule(begin, end, type, in_month, on, at, save, letters):
    """Render one Rule(...) call from the raw zic rule fields."""
    begin = int(begin)
    # 'only' means a single year; 'max' is open-ended (represented as 10000)
    if end == 'only':
        end = begin + 1
    elif end == 'max':
        end = 10000
    else:
        end = int(end) + 1
    # '-' means "no value" in zic columns
    if type == '-':
        type = None
    if letters == '-':
        letters = ''
    at = format_at(at)
    save = format_time(save)
    return RULE_TEMPLATE.format(begin, end, type, in_month,
                                on, at, save, letters)
# strptime patterns for 1-, 2- and 3-component times (H, H:M, H:M:S).
TIME_FORMATS = ['%H', '%H:%M', "%H:%M:%S"]

# Suffix letters on zic AT fields mapped to the kind of clock they refer to.
TIME_TYPES = {
    'w': 'wall',
    'u': 'utc',
    'g': 'utc',
    'z': 'utc',
    's': 'std',
}


def format_time(t):
    """Render a zic time string as a timedelta(...) expression.

    '-' means no offset and a leading '24' means a full day; otherwise the
    H[:M[:S]] value is parsed and only the components actually present are
    emitted as keyword arguments.
    """
    if t == '-':
        return 'timedelta(0)'
    if t.startswith('24'):
        return 'timedelta(1)'
    n = t.count(':')
    fmt = TIME_FORMATS[n]
    t = datetime.strptime(t, fmt).time()
    # BUG FIX: the seconds field used ')' instead of '}' ('seconds={0.second)'),
    # which made str.format raise ValueError for every H:M:S time.
    args = ['hours={0.hour}', 'minutes={0.minute}', 'seconds={0.second}']
    template = 'timedelta(%s)' % ', '.join(args[:n+1])
    return template.format(t)
def format_at(at):
    """Render a zic AT field as '(timedelta-expr, clock-type)'.

    A trailing suffix letter selects wall/std/utc; no suffix means wall.
    """
    suffix = at[-1:]
    if suffix in TIME_TYPES:
        time_type = TIME_TYPES[suffix]
        at = at[:-1]
    else:
        time_type = 'wall'
    return '(%s, %r)' % (format_time(at), time_type)
def print_rules(rules, file):
    """Write one generated Rules subclass per named rule set to *file*."""
    prefix = ' ' * 8
    for name, lines in rules.items():
        file.write('class %s(Rules):\n'
                   ' name ="%s"\n'
                   ' rules = [\n' % (rules_name(name), name))
        for args in lines:
            rule = format_rule(*args)
            # indent the rendered Rule(...) to sit inside the rules list
            file.write(textwrap.indent(rule, prefix) + ',\n')
        file.write(' ]\n\n')
# timedelta keyword names in the order the colon-separated fields appear.
TIME_UNITS = 'hours', 'minutes', 'seconds'


def format_until(until):
    """Render a Zone UNTIL field: None if absent, an int for a bare year,
    otherwise a tuple-repr string of the raw fields."""
    if not until:
        return None
    if len(until) == 1:
        return int(until[0])
    return '(%s)' % ', '.join(repr(token) for token in until)


def format_delta(delta):
    """Render a [-]H[:M[:S]] offset as a timedelta(...) expression."""
    negative = delta.startswith('-')
    if negative:
        delta = delta[1:]
    parts = ['%s=%s' % (unit, int(value))
             for unit, value in zip(TIME_UNITS, delta.split(':'))]
    prefix = '-' if negative else ''
    return '%stimedelta(%s)' % (prefix, ', '.join(parts))
def format_observance(gmtoff, rules, format, until):
    """Render one Observance(...) entry from a Zone (continuation) line."""
    if rules == '-':
        rendered_rules = None
    elif ':' in rules:
        # a literal time offset such as '1:00' stays a quoted string
        rendered_rules = "'%s'" % rules
    else:
        rendered_rules = rules_name(rules)
    return OBSERVANCE_TEMPLATE.format(format_delta(gmtoff),
                                      rendered_rules, format, until)
def print_zones(zones, file, indent=0):
    """Recursively write generated Zone classes to *file*.

    Dicts without an 'observances' key are grouping namespaces (e.g. the
    'America' part of 'America/New_York') and become plain nested classes.
    """
    for name, info in sorted(zones.items()):
        try:
            observances = info['observances']
        except KeyError:
            # namespace node: emit a container class and recurse
            file.write(indent * ' ' + 'class %s:\n' % name)
            print_zones(info, file, indent + 4)
        else:
            prefix = indent * ' '
            file.write(prefix + 'class %s(Zone):\n' % zone_name(name))
            file.write(prefix + '    name = %r\n' % name)
            file.write(prefix + '    observances = [\n')
            for observance in observances:
                file.write(textwrap.indent(observance, prefix + 8 * ' '))
            file.write(prefix + '%s]\n' % (4 * ' '))
def rules_name(name):
    """Turn a zic rule/zone name into a valid Python identifier."""
    return '_'.join(name.split('-'))


# zone names use the same sanitization as rule-set names
zone_name = rules_name
if __name__ == '__main__':
    # CLI: 'zic infile [outfile]', or 'zic --all' to rebuild every raw file.
    if len(sys.argv) < 2:
        print("Usage: zic infile [outfile]")
        sys.exit(1)
    if sys.argv[1] == '--all':
        for f in RAW_FILES:
            compile('raw/' + f, f + '.py')
    else:
        compile(*sys.argv[1:])
|
ByteInternet/django-oidc-provider | refs/heads/master | oidc_provider/migrations/0011_client_client_type.py | 2 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-04 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Client.client_type (confidential/public, default confidential)."""

    dependencies = [
        ('oidc_provider', '0010_code_is_authentication'),
    ]

    operations = [
        migrations.AddField(
            model_name='client',
            name='client_type',
            field=models.CharField(
                choices=[(b'confidential', b'Confidential'), (b'public', b'Public')],
                default=b'confidential',
                help_text='<b>Confidential</b> clients are capable of maintaining the confidentiality of their '
                'credentials. <b>Public</b> clients are incapable.',
                max_length=30),
        ),
    ]
|
FirstAidKitten/Roguelike-Sandbox | refs/heads/master | fov.py | 1 | from tdl.map import quickFOV
import session
from settings import MAP_WIDTH, MAP_HEIGHT, FOV_ALGO, FOV_LIGHT_WALLS, TORCH_RADIUS
def is_visible_tile(x, y):
    """Return True when (x, y) lies on the map and does not block FOV."""
    inside_map = 0 <= x < MAP_WIDTH and 0 <= y < MAP_HEIGHT
    if not inside_map:
        return False
    return not session.player.current_level.fov_blocked(x, y)
def generate(x, y):
    """Compute the set of map cells visible from (x, y) via tdl's quickFOV."""
    # TODO: separate FOV generation for monsters
    # TODO: program FOV generation algorithm (so TDL is no longer needed)
    return quickFOV(x, y, is_visible_tile, fov=FOV_ALGO, radius=TORCH_RADIUS, lightWalls=FOV_LIGHT_WALLS)
|
txomon/pytest | refs/heads/master | src/_pytest/resultlog.py | 2 | """ log machine-parseable test session result information in a plain
text file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import py
def pytest_addoption(parser):
    """Register the deprecated --resultlog/--result-log command-line option."""
    group = parser.getgroup("terminal reporting", "resultlog plugin options")
    group.addoption(
        "--resultlog",
        "--result-log",
        action="store",
        metavar="path",
        default=None,
        help="DEPRECATED path for machine-readable result log.",
    )
def pytest_configure(config):
    """If --resultlog was given (and we are not on an xdist slave), open the
    log file, register the ResultLog plugin and emit a deprecation warning."""
    resultlog = config.option.resultlog
    # prevent opening resultlog on slave nodes (xdist)
    if resultlog and not hasattr(config, "slaveinput"):
        dirname = os.path.dirname(os.path.abspath(resultlog))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        logfile = open(resultlog, "w", 1)  # line buffered
        config._resultlog = ResultLog(config, logfile)
        config.pluginmanager.register(config._resultlog)

        # deferred imports keep module load light and avoid cycles
        from _pytest.deprecated import RESULT_LOG
        from _pytest.warnings import _issue_config_warning

        _issue_config_warning(RESULT_LOG, config)
def pytest_unconfigure(config):
    """Close and unregister the resultlog plugin, if one was configured."""
    resultlog = getattr(config, "_resultlog", None)
    if resultlog is None:
        return
    resultlog.logfile.close()
    del config._resultlog
    config.pluginmanager.unregister(resultlog)
def generic_path(item):
    """Build a path-like id for a collected *item*: '/' joins nodes in new
    files, ':' separates the file from its first in-file node, and '.'
    separates further in-file nodes."""
    chain = item.listchain()
    gpath = [chain[0].name]
    fspath = chain[0].fspath
    fspart = False
    for node in chain[1:]:
        newfspath = node.fspath
        if newfspath == fspath:
            if fspart:
                # first node inside the file: switch from path to item syntax
                gpath.append(":")
                fspart = False
            else:
                gpath.append(".")
        else:
            gpath.append("/")
            fspart = True
        name = node.name
        if name[0] in "([":
            # parametrization/subscript names glue onto the previous name
            gpath.pop()
        gpath.append(name)
        fspath = newfspath
    return "".join(gpath)
class ResultLog(object):
    """Plugin writing one machine-parseable line (letter code + test path,
    followed by an indented longrepr) per test outcome to *logfile*."""

    def __init__(self, config, logfile):
        self.config = config
        self.logfile = logfile  # preferably line buffered

    def write_log_entry(self, testpath, lettercode, longrepr):
        """Write '<code> <path>' plus the indented long representation."""
        print("%s %s" % (lettercode, testpath), file=self.logfile)
        for line in longrepr.splitlines():
            print(" %s" % line, file=self.logfile)

    def log_outcome(self, report, lettercode, longrepr):
        # prefer the nodeid; collection reports may only have an fspath
        testpath = getattr(report, "nodeid", None)
        if testpath is None:
            testpath = report.fspath
        self.write_log_entry(testpath, lettercode, longrepr)

    def pytest_runtest_logreport(self, report):
        # log only the 'call' phase, plus setup/teardown failures
        if report.when != "call" and report.passed:
            return
        res = self.config.hook.pytest_report_teststatus(report=report)
        code = res[1]  # single-letter status code (., F, s, x, X, ...)
        if code == "x":
            longrepr = str(report.longrepr)
        elif code == "X":
            longrepr = ""
        elif report.passed:
            longrepr = ""
        elif report.failed:
            longrepr = str(report.longrepr)
        elif report.skipped:
            longrepr = str(report.longrepr[2])
        self.log_outcome(report, code, longrepr)

    def pytest_collectreport(self, report):
        if not report.passed:
            if report.failed:
                code = "F"
                longrepr = str(report.longrepr)
            else:
                assert report.skipped
                code = "S"
                # skip longrepr is a (path, lineno, reason) tuple
                longrepr = "%s:%d: %s" % report.longrepr
            self.log_outcome(report, code, longrepr)

    def pytest_internalerror(self, excrepr):
        reprcrash = getattr(excrepr, "reprcrash", None)
        path = getattr(reprcrash, "path", None)
        if path is None:
            path = "cwd:%s" % py.path.local()
        self.write_log_entry(path, "!", str(excrepr))
|
meduz/scikit-learn | refs/heads/master | sklearn/datasets/tests/test_lfw.py | 42 | """This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
# Temporary scratch directories: one pre-populated with fake LFW data by
# setup_module(), one left empty to exercise download_if_missing=False errors.
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
# Directory layout mirrors what the fetch_lfw_* loaders expect under data_home.
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
# Synthetic person names used to generate fake face image folders.
FAKE_NAMES = [
    'Abdelatif_Smith',
    'Abhati_Kepler',
    'Camara_Alvaro',
    'Chen_Dupont',
    'John_Lee',
    'Lin_Bauman',
    'Onur_Lopez',
]
def setup_module():
    """Test fixture run once and common to all tests of this module"""
    if imsave is None:
        raise SkipTest("PIL not installed.")
    if not os.path.exists(LFW_HOME):
        os.makedirs(LFW_HOME)
    # Seeded RNGs so the generated dataset (and the expected values asserted
    # in the tests below) is reproducible.  NOTE: the exact sequence of RNG
    # calls matters; reordering them would change the generated data.
    random_state = random.Random(42)
    np_rng = np.random.RandomState(42)
    # generate some random jpeg files for each person
    counts = {}
    for name in FAKE_NAMES:
        folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)
        # Each person gets 1-4 random 250x250 RGB "face" images.
        n_faces = np_rng.randint(1, 5)
        counts[name] = n_faces
        for i in range(n_faces):
            file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
            uniface = np_rng.randint(0, 255, size=(250, 250, 3))
            try:
                imsave(file_path, uniface)
            except ImportError:
                raise SkipTest("PIL not installed")
    # add some random file pollution to test robustness
    with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
        f.write(six.b('Text file to be ignored by the dataset loader.'))
    # generate some pairing metadata files using the same format as LFW
    with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
        f.write(six.b("10\n"))
        # 5 "same person" pairs: two distinct pictures of a single person.
        more_than_two = [name for name, count in six.iteritems(counts)
                         if count >= 2]
        for i in range(5):
            name = random_state.choice(more_than_two)
            first, second = random_state.sample(range(counts[name]), 2)
            f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
        # 5 "different persons" pairs: one picture each of two people.
        for i in range(5):
            first_name, second_name = random_state.sample(FAKE_NAMES, 2)
            first_index = random_state.choice(np.arange(counts[first_name]))
            second_index = random_state.choice(np.arange(counts[second_name]))
            f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
                                                second_name, second_index)))
    # Dev-test and full pairs files are never parsed by these tests.
    with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
    with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    # Remove both scratch directories created at import time, if present.
    for scratch_dir in (SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA):
        if os.path.isdir(scratch_dir):
            shutil.rmtree(scratch_dir)
@raises(IOError)
def test_load_empty_lfw_people():
    # With no data on disk and download_if_missing=False, the loader must
    # raise IOError instead of silently downloading.
    fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA,
                     download_if_missing=False)
def test_load_fake_lfw_people():
    """Load the fake dataset with default processing and check shapes/labels."""
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                  min_faces_per_person=3,
                                  download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_people.images.shape, (10, 62, 47))
    assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is array of person integer ids
    assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
    # names of the persons can be found using the target_names array
    expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
    assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or
    # color conversion and no limit on the number of pictures per person
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, resize=None,
                                  slice_=None, color=True,
                                  download_if_missing=False)
    assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_people.target,
                       [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
    assert_array_equal(lfw_people.target_names,
                       ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
                        'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
    # No fake person has 100 faces, so this filter must raise ValueError.
    fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100,
                     download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
    # Same as the people loader: missing data + download_if_missing=False
    # must raise IOError.
    fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA,
                    download_if_missing=False)
def test_load_fake_lfw_pairs():
    """Load the fake pairs dataset and check shapes and same/different labels."""
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
                                      download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
    # the target is whether the person is the same or not
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    # names of the persons can be found using the target_names array
    expected_classes = ['Different persons', 'Same person']
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or
    # color conversion
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, resize=None,
                                      slice_=None, color=True,
                                      download_if_missing=False)
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
codesparkle/youtube-dl | refs/heads/master | youtube_dl/extractor/huffpost.py | 23 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
parse_duration,
unified_strdate,
)
class HuffPostIE(InfoExtractor):
    """Extractor for Huffington Post Live segments.

    Metadata and stream URLs come from the embed API JSON endpoint; formats
    are assembled from the 'live'/'live_again' source maps (HLS, HDS or
    direct URLs), with a 5min.com fallback when no direct source exists.
    """
    IE_DESC = 'Huffington Post'
    _VALID_URL = r'''(?x)
        https?://(embed\.)?live\.huffingtonpost\.com/
        (?:
            r/segment/[^/]+/|
            HPLEmbedPlayer/\?segmentId=
        )
        (?P<id>[0-9a-f]+)'''
    _TEST = {
        'url': 'http://live.huffingtonpost.com/r/segment/legalese-it/52dd3e4b02a7602131000677',
        'md5': '55f5e8981c1c80a64706a44b74833de8',
        'info_dict': {
            'id': '52dd3e4b02a7602131000677',
            'ext': 'mp4',
            'title': 'Legalese It! with @MikeSacksHP',
            'description': 'This week on Legalese It, Mike talks to David Bosco about his new book on the ICC, "Rough Justice," he also discusses the Virginia AG\'s historic stance on gay marriage, the execution of Edgar Tamayo, the ICC\'s delay of Kenya\'s President and more. ',
            'duration': 1549,
            'upload_date': '20140124',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'expected_warnings': ['HTTP Error 404: Not Found'],
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
        data = self._download_json(api_url, video_id)['data']
        video_title = data['title']
        duration = parse_duration(data.get('running_time'))
        upload_date = unified_strdate(
            data.get('schedule', {}).get('starts_at') or data.get('segment_start_date_time'))
        description = data.get('description')
        thumbnails = []
        # Thumbnail resolution is encoded in the filename (e.g. "...-640x360.jpg").
        # FIX: raw-string pattern (the old '\.'-in-plain-string form triggers an
        # invalid-escape DeprecationWarning) and no shadowing of the `url` param.
        for thumbnail_url in filter(None, data['images'].values()):
            m = re.match(r'.*-([0-9]+x[0-9]+)\.', thumbnail_url)
            if not m:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'resolution': m.group(1),
            })
        formats = []
        sources = data.get('sources', {})
        live_sources = list(sources.get('live', {}).items()) + list(sources.get('live_again', {}).items())
        for key, source_url in live_sources:
            ext = determine_ext(source_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, ext='mp4', m3u8_id='hls', fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    source_url + '?hdcore=2.9.5', video_id, f4m_id='hds', fatal=False))
            else:
                formats.append({
                    'format': key,
                    'format_id': key.replace('/', '.'),
                    'ext': 'mp4',
                    'url': source_url,
                    # audio-only streams carry no video codec
                    'vcodec': 'none' if key.startswith('audio/') else None,
                })
        # Fall back to the 5min.com embed when no direct sources exist.
        if not formats and data.get('fivemin_id'):
            return self.url_result('5min:%s' % data['fivemin_id'])
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': video_title,
            'description': description,
            'formats': formats,
            'duration': duration,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
        }
|
copiesofcopies/youtube-transcription | refs/heads/master | upload-videos.py | 1 | # Copyright 2013 Aaron Williamson <aaron@copiesofcopies.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import yaml
import json
import urllib
import logging
import optparse
import gdata.youtube
import gdata.youtube.service
# Module-level YouTube API client, configured from config.yaml at import time.
yt_service = gdata.youtube.service.YouTubeService()
# Turn on HTTPS/SSL access.
# Note: SSL is not available at this time for uploads.
yt_service.ssl = True
# Parse the yaml config file
# NOTE(review): yaml.load without an explicit Loader can construct arbitrary
# Python objects; yaml.safe_load would be safer if config.yaml is ever
# untrusted.
with open('config.yaml', 'r') as config_file:
    config = yaml.load(config_file.read())
# A complete client login request
yt_service.email = config['user_email']
yt_service.password = config['user_password']
yt_service.source = config['source']
yt_service.developer_key = config['developer_key']
yt_service.client_id = config['client_id']
yt_service.ProgrammaticLogin()
# Regex to extract video ID from uploaded video object. (Why there's
# no useful "ID" field is beyond me.)
video_id_regex = re.compile('http://gdata.youtube.com/feeds/api/videos/(\w+)</ns0:id>')
# Initialize options global
# (populated by optparse in the __main__ block; read by get_video_from_url)
options = None
# Upload a video to YouTube and get back a YouTubeVideoEntry object
def upload_video(filename, metadata):
    """Upload the local video file at `filename` to YouTube.

    `metadata` is a dict with the keys produced by parse_metadata()
    (title, description, keywords, category_term, category_label).
    Returns the gdata YouTubeVideoEntry for the uploaded video.
    """
    # Create a container object for video metadata
    media_group = gdata.media.Group(
        title=gdata.media.Title(text=metadata['title']),
        description=gdata.media.Description(description_type='plain',
                                            text=metadata['description']),
        keywords=gdata.media.Keywords(text=metadata['keywords']),
        category=gdata.media.Category(
            text=metadata['category_term'],
            scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
            label=metadata['category_label']),
        player=None,
        private=gdata.media.Private()
    )
    # Create a YouTubeVideoEntry with the metadata associated
    video_entry = gdata.youtube.YouTubeVideoEntry(media=media_group)
    # Upload the video at `filename` and associate it with the new
    # YouTubeVideoEntry
    new_entry = yt_service.InsertVideoEntry(video_entry, filename)
    return new_entry
# Download the video at a given URL
# TODO: handle missing or unavailable videos
def get_video_from_url(url):
    """Download the video at `url` to a temporary file; return its local path.

    Progress is printed to stdout via download_progress() unless the global
    `options.quiet` flag is set (so the module-level `options` must have been
    populated by the command-line parser before this is called).
    """
    if options.quiet:
        reporthook = None
    else:
        reporthook = download_progress
    # Python 2 urllib; urlretrieve picks a temp filename when none is given.
    (filename, headers) = urllib.urlretrieve(url, reporthook=reporthook)
    if not options.quiet: sys.stdout.write("\n")
    return filename
# Extract a video's YT ID from its full URL
def get_entry_id(entry_url):
    """Extract a video's YouTube ID from its feed URL; False when no match."""
    match = video_id_regex.search(str(entry_url))
    if not match:
        return False
    return match.group(1)
# Map provided metadata to dict to be passed with new video (ensures
# empty strings are passed instead of None values)
def parse_metadata(metadata):
    """Normalize a video-metadata dict for upload.

    Returns a dict containing exactly the expected keys, filling any that are
    missing from `metadata` with '' so empty strings (never None) are passed
    along with a new video.  Unknown keys in `metadata` are dropped.
    """
    expected_keys = ('local_id', 'title', 'description', 'keywords',
                     'category_term', 'category_label')
    return dict((key, metadata.get(key, '')) for key in expected_keys)
# Print file download progress to stdout
def download_progress(count, blockSize, totalSize):
    """urlretrieve reporthook: overwrite one stdout line with percent done."""
    bytes_done = count * blockSize
    sys.stdout.write("\rProgress: %d%%" % int(bytes_done * 100 / totalSize))
    sys.stdout.flush()
if __name__ == "__main__":
    # Set up the command line argument parser
    # TODO: check for required -i and -o parameters, exit if missing
    parser = optparse.OptionParser()
    parser.add_option('-i', '--input-file', action="store", dest="input_file",
                      help="""Input manifest file (JSON)""", default="")
    parser.add_option('-o', '--output-file', action="store", dest="output_file",
                      help="""Output manifest file (optional; omit to save to
                              input file)""",
                      default="")
    parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
                      help="""Suppress informational messages""",
                      default=False)
    options, args = parser.parse_args()
    if options.input_file == '':
        parser.print_help()
        exit(-1)
    # Set logging level
    if options.quiet:
        log_level = logging.ERROR
    else:
        log_level = logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    # Parse the json videos file
    # (manifest maps video URL -> metadata dict; see parse_metadata())
    with open(options.input_file, 'r') as f:
        videos = json.load(f)
    # NOTE(review): uploaded_ids is never appended to or read below.
    uploaded_ids = []
    for video in videos:
        # Download the video indicated by the URL
        logging.info("Downloading video from %s..." % video)
        fn = get_video_from_url(video)
        # Fill in the video metadata values
        metadata = parse_metadata(videos[video])
        logging.info("Uploading video from %s to YouTube..." % video)
        # Upload the video
        uploaded_vid = upload_video(fn, metadata)
        # Grab the YouTube ID and store it with the metadata
        video_id = get_entry_id(uploaded_vid.id)
        videos[video]['id'] = video_id
        # Remove the local file
        os.remove(fn)
        logging.info("Finished uploading; YouTube ID is %s" % video_id)
    # Write a json file identical to the input, except with the YT id
    # added for each entry
    if options.output_file:
        output_fn = options.output_file
    else:
        output_fn = options.input_file
    with open(output_fn, "wt") as output_file:
        output_file.write(json.dumps(videos, indent=4, sort_keys=True))
|
TangXT/edx-platform | refs/heads/master | common/test/acceptance/pages/lms/course_page.py | 55 | """
Base class for pages in courseware.
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
class CoursePage(PageObject):
    """
    Abstract base class for page objects within a course.
    """
    # Relative path of the page within the course; subclasses override this.
    # Paths should not include the leading forward slash.
    url_path = ""
    def __init__(self, browser, course_id):
        """
        Course ID is currently of the form "edx/999/2013_Spring"
        but this format could change.
        """
        super(CoursePage, self).__init__(browser)
        self.course_id = course_id
    @property
    def url(self):
        """
        Construct a URL to the page within the course.
        """
        path_pieces = ["/courses/", self.course_id, "/", self.url_path]
        return BASE_URL + "".join(path_pieces)
|
hsnr-gamera/gamera | refs/heads/master | gamera/io.py | 1 | # Copyright (c) 1999-2007 Gary Strangman; All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
Defines a number of functions for pseudo-command-line OS functionality.
cd(directory)
pwd <-- can be used WITHOUT parens
ls(d='.')
rename(from,to)
get(namepatterns,verbose=1)
getstrings(namepatterns,verbose=1)
put(outlist,filename,writetype='w')
aget(namepatterns,verbose=1)
aput(outarray,filename,writetype='w')
bget(filename,numslices=1,xsize=64,ysize=64)
braw(filename,btype)
bput(outarray,filename,writeheader=0,packstring='h',writetype='wb')
mrget(filename)
find_dirs(sourcedir)
"""
## CHANGES:
## =======
## 08-07-23 ... added getcsv() to more easily handle tabbed files
## 07-11-26 ... more numpy conversion work
## 06-08-07 ... converted to numpy, changed version to 0.6
## 06-02-03 ... added add2afnihistory() to load modify afni HISTORY_NOTEs,
## and added that option to array2afni output
## 04-06-14 ... added getafniparam() to load in afni values from HEAD files
## 03-04-09 ... fixed brikget to load based on datatype, changed array2afni
## so that the default sliceorder is altplus, not seqplus
## 02-11-20 ... added binget(), binput(), array2afni(), version 0.5
## 02-10-20 ... added find_dirs() function, changed version to 0.4
## 01-11-15 ... changed aput() and put() to accept a delimiter
## 01-04-19 ... added oneperline option to put() function
## 99-11-07 ... added DAs quick flat-text-file loaders, load() and fload()
## 99-11-01 ... added version number (0.1) for distribution
## 99-08-30 ... Put quickload in here
## 99-06-27 ... Changed bget thing back ... confused ...
## 99-06-24 ... exchanged xsize and ysize in bget for non-square images (NT??)
## modified bget to raise an IOError when file not found
## 99-06-12 ... added load() and save() aliases for aget() and aput() (resp.)
## 99-04-13 ... changed aget() to ignore (!!!!) lines beginning with # or %
## 99-01-17 ... changed get() so ints come in as ints (not floats)
##
try:
import mmapfile
except Exception:
pass
import pstat
import glob, re, string, types, os, struct, copy, time, tempfile, sys
import csv
from types import *
import numpy as np
__version__ = 0.7
def wrap(f):
    """
    Wraps a function so that if it's entered *by itself*
    in the interpreter without ()'s, it gets called anyway
    """
    class W:
        def __init__(self, func):
            self.f = func
        def __repr__(self):
            # Calling repr() on the wrapper invokes the wrapped function;
            # falsy results render as an empty string.
            result = self.f()
            if result:
                return repr(result)
            return ''
    return W(f)
def cd(directory):
    """
    Changes the working python directory for the interpreter.

    Usage:   cd(directory)      where 'directory' is a string
    """
    os.chdir(directory)
def pwd():
    """
    Returns the current working python directory for the interpreter.

    Usage:   pwd     (no parens needed)
    """
    return os.getcwd()
# Wrapped so that typing `pwd` alone at the interpreter prompt (no parens)
# still prints the current directory, via W.__repr__ calling the function.
pwd = wrap(pwd)
def ls(d='.'):
    """
    Produces a directory listing.  Default is the current directory.

    Usage:   ls(d='.')
    """
    # NOTE(review): d is interpolated into a shell command unescaped, so
    # shell metacharacters in d will be interpreted by the shell.
    os.system('ls '+d)
    return None
def rename(source, dest):
"""
Renames files specified by UNIX inpattern to those specified by UNIX
outpattern. Can only handle a single '*' in the two patterns!!!
Usage: rename (source, dest) e.g., rename('*.txt', '*.c')
"""
infiles = glob.glob(source)
outfiles = []
incutindex = string.index(source,'*')
outcutindex = string.index(source,'*')
findpattern1 = source[0:incutindex]
findpattern2 = source[incutindex+1:]
replpattern1 = dest[0:incutindex]
replpattern2 = dest[incutindex+1:]
for fname in infiles:
if incutindex > 0:
newname = re.sub(findpattern1,replpattern1,fname,1)
if outcutindex < len(dest)-1:
if incutindex > 0:
lastone = string.rfind(newname,replpattern2)
newname = newname[0:lastone] + re.sub(findpattern2,replpattern2,fname[lastone:],1)
else:
lastone = string.rfind(fname,findpattern2)
if lastone <> -1:
newname = fname[0:lastone]
newname = newname + re.sub(findpattern2,replpattern2,fname[lastone:],1)
print fname, newname
os.rename(fname,newname)
return
def get (namepatterns,verbose=1):
    """
    Loads a list of lists from text files (specified by a UNIX-style
    wildcard filename pattern) and converts all numeric values to floats.
    Uses the glob module for filename pattern conversion.  Loaded filename
    is printed if verbose=1.

    Usage:   get (namepatterns,verbose=1)
    Returns: a 1D or 2D list of lists from whitespace delimited text files
             specified by namepatterns; numbers that can be converted to
             ints or floats are so converted
    """
    # namepatterns may be a single pattern or a list/tuple of patterns.
    fnames = []
    if type(namepatterns) in [ListType,TupleType]:
        for item in namepatterns:
            fnames = fnames + glob.glob(item)
    else:
        fnames = glob.glob(namepatterns)
    if len(fnames) == 0:
        if verbose:
            print 'NO FILENAMES MATCH ('+namepatterns+') !!'
        return None
    if verbose:
        print fnames # so user knows what has been loaded
    elements = []
    for i in range(len(fnames)):
        file = open(fnames[i])
        newelements = map(string.split,file.readlines())
        # NOTE(review): the inner loop reuses (shadows) the outer index `i`;
        # harmless here since the outer `i` is not used again afterwards.
        for i in range(len(newelements)):
            for j in range(len(newelements[i])):
                # Try int first, then float; leave as string on failure.
                try:
                    newelements[i][j] = string.atoi(newelements[i][j])
                except ValueError:
                    try:
                        newelements[i][j] = string.atof(newelements[i][j])
                    except Exception:
                        pass
        elements = elements + newelements
    # A single-row result is flattened to a 1D list.
    if len(elements)==1: elements = elements[0]
    return elements
def getstrings (namepattern,verbose=1):
    """
    Loads a (set of) text file(s), with all elements left as string type.
    Uses UNIX-style wildcards (i.e., function uses glob).  Loaded filename
    is printed if verbose=1.

    Usage:   getstrings (namepattern, verbose=1)
    Returns: a list of lists of whitespace-split strings, one inner list per
             line in each text file specified by namepattern
    """
    fnames = glob.glob(namepattern)
    if len(fnames) == 0:
        if verbose:
            print 'NO FILENAMES MATCH ('+namepattern+') !!'
        return None
    if verbose:
        print fnames
    elements = []
    for filename in fnames:
        file = open(filename)
        # Each line becomes a list of its whitespace-separated tokens.
        newelements = map(string.split,file.readlines())
        elements = elements + newelements
    return elements
def getcsv (namepatterns, delimiter='\t', verbose=1):
    """
    Loads a list of lists from delimited text files (specified by a
    UNIX-style wildcard filename pattern) and converts all numeric values
    to ints or floats.  Uses the glob module for filename pattern
    conversion.  Loaded filename is printed if verbose=1.

    Usage:   getcsv (namepatterns, delimiter='\t', verbose=1)
    Returns: a 1D or 2D list of lists from delimiter-separated text files
             specified by namepatterns; numbers that can be converted to
             ints or floats are so converted
    """
    fnames = []
    if type(namepatterns) in [ListType,TupleType]:
        for item in namepatterns:
            fnames = fnames + glob.glob(item)
    else:
        fnames = glob.glob(namepatterns)
    if len(fnames) == 0:
        if verbose:
            print 'NO FILENAMES MATCH ('+namepatterns+') !!'
        return None
    if verbose:
        print fnames # so user knows what has been loaded
    elements = []
    for i in range(len(fnames)):
        file = csv.reader(open(fnames[i]),delimiter=delimiter)
        newelements = [row for row in file]
        # NOTE(review): the inner loop reuses (shadows) the outer index `i`;
        # harmless here since the outer `i` is not used again afterwards.
        for i in range(len(newelements)):
            for j in range(len(newelements[i])):
                # Try int first, then float; leave as string on failure.
                try:
                    newelements[i][j] = string.atoi(newelements[i][j])
                except Exception:
                    try:
                        newelements[i][j] = string.atof(newelements[i][j])
                    except Exception:
                        pass
        elements = elements + newelements
    # A single-row result is flattened to a 1D list.
    if len(elements)==1: elements = elements[0]
    return elements
def getrec(namepattern,verbose=0):
"""
Loads a numpy.recarray from a text file with the first row as the col names.
Usage: getrec(namepatterns,verbose=0)
Returns: a 2D recarray
"""
fname = glob.glob(namepattern)
if len(fname) == 0:
if verbose:
print 'NO FILENAMES MATCH ('+namepatterns+') !!'
return None
if verbose:
print fnames # so user knows what has been loaded
d = get(fname[0])
h = d[0] # ASSUME FIRST ROW HOLDS COLUMN NAMES
d = d[1:]
return np.rec.fromrecords(d,names=h)
def put (outlist,fname,writetype='w',oneperline=0,delimiter=' '):
    """
    Writes a passed mixed-type list (str and/or numbers) to an output
    file, and then closes the file.  Default is overwrite the destination
    file.  Numpy arrays are delegated to aput().

    Usage:   put (outlist,fname,writetype='w',oneperline=0,delimiter=' ')
    Returns: None
    """
    # Arrays go through the array writer instead.
    if type(outlist) in [np.ndarray]:
        aput(outlist,fname,writetype)
        return
    if type(outlist[0]) not in [ListType,TupleType]: # 1D list
        outfile = open(fname,writetype)
        if not oneperline:
            # Whole list on one line, elements joined by delimiter.
            outlist = pstat.list2string(outlist,delimiter)
            outfile.write(outlist)
            outfile.write('\n')
        else: # they want one element from the list on each file line
            for item in outlist:
                outfile.write(str(item)+'\n')
        outfile.close()
    else: # 2D list (list-of-lists)
        outfile = open(fname,writetype)
        for row in outlist:
            outfile.write(pstat.list2string(row,delimiter))
            outfile.write('\n')
        outfile.close()
    return None
def isstring(x):
    """Return 1 when x is a string, 0 otherwise."""
    return int(type(x) == StringType)
def aget (namepattern,verbose=1):
    """
    Loads an array from 2D text files (specified by a UNIX-style wildcard
    filename pattern).  ONLY 'GET' FILES WITH EQUAL NUMBERS OF COLUMNS
    ON EVERY ROW (otherwise returned array will be zero-dimensional).
    Lines starting with '%' or '#', blank lines, and lines starting with
    '\r' are ignored.

    Usage:   aget (namepattern)
    Returns: an array of integers, floats or objects (type='O'), depending on the
             contents of the files specified by namepattern
    """
    fnames = glob.glob(namepattern)
    if len(fnames) == 0:
        if verbose:
            print 'NO FILENAMES MATCH ('+namepattern+') !!'
        return None
    if verbose:
        print fnames
    elements = []
    for filename in fnames:
        file = open(filename)
        newelements = file.readlines()
        # Collect comment/blank line indices, then pop them back-to-front so
        # earlier indices stay valid.
        del_list = []
        for row in range(len(newelements)):
            if (newelements[row][0]=='%' or newelements[row][0]=='#'
                or len(newelements[row])==1 or newelements[row][0]=='\r'):
                del_list.append(row)
        del_list.reverse()
        for i in del_list:
            newelements.pop(i)
        newelements = map(string.split,newelements)
        # Convert tokens to floats where possible; leave others as strings.
        for i in range(len(newelements)):
            for j in range(len(newelements[i])):
                try:
                    newelements[i][j] = string.atof(newelements[i][j])
                except Exception:
                    pass
        elements = elements + newelements
    # An all-string row means this is not numeric data; return the raw list.
    for row in range(len(elements)):
        if np.add.reduce(np.array(map(isstring,elements[row])))==len(elements[row]):
            print "A row of strings was found. Returning a LIST."
            return elements
    try:
        elements = np.array(elements)
    except TypeError:
        # Mixed types fall back to an object array.
        elements = np.array(elements,dtype='O')
    return elements
def aput (outarray,fname,writetype='w',delimit=' '):
    """
    Sends passed 1D or 2D array to an output file and closes the file.
    1D arrays are written as a single row; >2D arrays raise TypeError.

    Usage:   aput (outarray,fname,writetype='w',delimit=' ')
    Returns: None
    """
    outfile = open(fname,writetype)
    if len(outarray.shape) == 1:
        # Promote a 1D array to a single-row 2D array.
        outarray = outarray[np.newaxis,:]
    if len(outarray.shape) > 2:
        raise TypeError, "put() and aput() require 1D or 2D arrays. Otherwise use some kind of pickling."
    else: # must be a 2D array
        for row in outarray:
            outfile.write(string.join(map(str,row),delimit))
            outfile.write('\n')
    outfile.close()
    return None
def bget(imfile,shp=None,unpackstr=np.int16,bytesperpixel=2.0,sliceinit=0):
    """
    Reads in a binary file, typically with a .bshort or .bfloat extension.
    If so, the last 3 parameters are set appropriately.  If not, the last 3
    parameters default to reading .bshort files (2-byte integers in big-endian
    binary format).  Dispatches to the format-specific loader based on the
    filename prefix/extension (COR, MR, BRIK, MNC, img, bshort/bfloat).

    Usage:   bget(imfile,shp=None,unpackstr=np.int16,bytesperpixel=2.0,sliceinit=0)
    """
    if imfile[:3] == 'COR':
        return CORget(imfile)
    if imfile[-2:] == 'MR':
        return mrget(imfile,unpackstr)
    if imfile[-4:] == 'BRIK':
        return brikget(imfile,unpackstr,shp)
    if imfile[-3:] in ['mnc','MNC','inc','INC']:
        return mincget(imfile,unpackstr,shp)
    if imfile[-3:] == 'img':
        return mghbget(imfile,unpackstr,shp)
    if imfile[-6:] == 'bshort' or imfile[-6:] == 'bfloat':
        if shp == None:
            return mghbget(imfile,unpackstr=unpackstr,bytesperpixel=bytesperpixel,sliceinit=sliceinit)
        else:
            return mghbget(imfile,shp[0],shp[1],shp[2],unpackstr,bytesperpixel,sliceinit)
def CORget(infile):
    """
    Reads a binary COR-nnn file (flattening file).

    Usage:   CORget(imfile)
    Returns: 2D array of ints in the range 0..255
    """
    raw = braw(infile, np.int8)
    raw.shape = (256, 256)
    # Reinterpret the signed bytes as unsigned 0..255 values.
    return np.where(raw >= 0, raw, 256 + raw)
def mincget(imfile,unpackstr=np.int16,shp=None):
    """
    Loads in a .MNC file by shelling out to mincextract and reading the
    intermediate minctemp.bshort file.

    Usage:   mincget(imfile,unpackstr=np.int16,shp=None)  default shp = -1,20,64,64
    """
    if shp == None:
        shp = (-1,20,64,64)
    # NOTE(review): imfile is interpolated into a shell command unescaped;
    # requires the MINC tools on PATH and write access to the cwd.
    os.system('mincextract -short -range 0 4095 -image_range 0 4095 ' +
              imfile+' > minctemp.bshort')
    try:
        d = braw('minctemp.bshort',unpackstr)
    except Exception:
        print "Couldn't find file: "+imfile
        raise IOError, "Couldn't find file in mincget()"
    print shp, d.shape
    d.shape = shp
    os.system('rm minctemp.bshort')
    return d
def brikget(imfile,unpackstr=np.int16,shp=None):
    """
    Gets an AFNI BRIK file, reading dimensions, float scaling factors and
    the data type from the companion .HEAD file when it exists.

    Usage:   brikget(imfile,unpackstr=np.int16,shp=None)  default shp: (-1,48,61,51)
    """
    if shp == None:
        shp = (-1,48,61,51)
    # Opened only to verify the file exists; re-opened for reading below.
    try:
        file = open(imfile, "rb")
    except Exception:
        print "Couldn't find file: "+imfile
        raise IOError, "Couldn't find file in brikget()"
    try:
        header = imfile[0:-4]+'HEAD'
        lines = open(header).readlines()
        for i in range(len(lines)):
            if string.find(lines[i],'DATASET_DIMENSIONS') <> -1:
                dims = string.split(lines[i+2][0:string.find(lines[i+2],' 0')])
                dims = map(string.atoi,dims)
            if string.find(lines[i],'BRICK_FLOAT_FACS') <> -1:
                # Scale factors are written five per line.
                count = string.atoi(string.split(lines[i+1])[2])
                mults = []
                for j in range(int(np.ceil(count/5.))):
                    mults += map(string.atof,string.split(lines[i+2+j]))
                mults = np.array(mults)
            if string.find(lines[i],'BRICK_TYPES') <> -1:
                # AFNI type code of the first sub-brick selects the dtype.
                first5 = lines[i+2]
                first5 = map(string.atoi,string.split(first5))
                if first5[0] == 0:
                    unpackstr = np.uint8
                elif first5[0] == 1:
                    unpackstr = np.int16
                elif first5[0] == 3:
                    unpackstr = np.float32
                elif first5[0] == 5:
                    unpackstr = np.complex32
        dims.reverse()
        shp = [-1]+dims
    except IOError:
        print "No header file. Continuing ..."
        lines = None
    print shp
    print 'Using unpackstr:',unpackstr #,', bytesperpixel=',bytesperpixel
    file = open(imfile, "rb")
    bdata = file.read()
    # the > forces big-endian (for or from Sun/SGI)
    bdata = np.fromstring(bdata,unpackstr)
    # littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
    # Heuristic endianness fix: absurdly large values suggest wrong byte order.
    if (max(bdata)>1e30):
        bdata = bdata.byteswap()
    try:
        bdata.shape = shp
    except Exception:
        print 'Incorrect shape ...',shp,len(bdata)
        raise ValueError, 'Incorrect shape for file size'
    if len(bdata) == 1:
        bdata = bdata[0]
    # NOTE(review): `mults` is only bound when the header defines
    # BRICK_FLOAT_FACS; a headerless BRIK reaches this line and raises
    # NameError.
    if np.sum(mults) == 0:
        return bdata
    try:
        # Broadcast the per-brick scale factors along the matching axis.
        multshape = [1]*len(bdata.shape)
        for i in range(len(bdata.shape)):
            if len(mults) == bdata.shape[i]:
                multshape[i] = len(mults)
                break
        mults.shape = multshape
        return bdata*mults
    except Exception:
        return bdata
def mghbget(imfile,numslices=-1,xsize=64,ysize=64,
            unpackstr=np.int16,bytesperpixel=2.0,sliceinit=0):
    """
    Reads in a binary file, typically with a .bshort or .bfloat extension.
    If so, the last 3 parameters are set appropriately.  If not, the last 3
    parameters default to reading .bshort files (2-byte integers in big-endian
    binary format).  Dimensions are taken from a companion .hdr file when
    one exists.

    Usage:   mghbget(imfile, numslices=-1, xsize=64, ysize=64,
                     unpackstr=np.int16, bytesperpixel=2.0, sliceinit=0)
    """
    # Opened only to verify the file exists; re-opened for reading below.
    try:
        file = open(imfile, "rb")
    except Exception:
        print "Couldn't find file: "+imfile
        raise IOError, "Couldn't find file in bget()"
    try:
        header = imfile[0:-6]+'hdr'
        vals = get(header,0) # '0' means no missing-file warning msg
        if type(vals[0]) == ListType: # it's an extended header
            xsize = int(vals[0][0])
            ysize = int(vals[0][1])
            numslices = int(vals[0][2])
        else:
            xsize = int(vals[0])
            ysize = int(vals[1])
            numslices = int(vals[2])
    except Exception:
        print "No header file. Continuing ..."
    # Pick the unpack parameters from the filename suffix.
    suffix = imfile[-6:]
    if suffix == 'bshort':
        pass
    elif suffix[-3:] == 'img':
        pass
    elif suffix == 'bfloat':
        unpackstr = np.float32
        bytesperpixel = 4.0
        sliceinit = 0.0
    else:
        print 'Not a bshort, bfloat or img file.'
        print 'Using unpackstr:',unpackstr,', bytesperpixel=',bytesperpixel
    imsize = xsize*ysize
    file = open(imfile, "rb")
    bdata = file.read()
    # Float division; the %1 check below catches non-integral pixel counts.
    numpixels = len(bdata) / bytesperpixel
    if numpixels%1 != 0:
        raise ValueError, "Incorrect file size in fmri.bget()"
    else: # the > forces big-endian (for or from Sun/SGI)
        bdata = np.fromstring(bdata,unpackstr)
        # littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
        # if littleEndian:
        # bdata = bdata.byteswap()
        # Heuristic endianness fix: absurd values suggest wrong byte order.
        if (max(bdata)>1e30):
            bdata = bdata.byteswap()
    if suffix[-3:] == 'img':
        if numslices == -1:
            numslices = len(bdata)/8200 # 8200=(64*64*2)+8 bytes per image
            xsize = 64
            ysize = 128
        slices = np.zeros((numslices,xsize,ysize),np.int32)
        for i in range(numslices):
            # Skip the 8-element per-image header preceding each slice.
            istart = i*8 + i*xsize*ysize
            iend = i*8 + (i+1)*xsize*ysize
            print i, istart,iend
            slices[i] = np.reshape(np.array(bdata[istart:iend]),(xsize,ysize))
    else:
        if numslices == 1:
            slices = np.reshape(np.array(bdata),[xsize,ysize])
        else:
            slices = np.reshape(np.array(bdata),[numslices,xsize,ysize])
    if len(slices) == 1:
        slices = slices[0]
    return slices
def braw(fname,btype,shp=None):
    """
    Opens a binary file, unpacks it, and returns a flat array of the
    type specified.  Use Numeric types ... np.float32, np.int64, etc.
    If shp is given and fits the data, the array is reshaped to it;
    otherwise a flat array is returned.

    Usage:   braw(fname,btype,shp=None)
    Returns: flat array of floats, or ints (if btype=np.int16)
    """
    file = open(fname,'rb')
    bdata = file.read()
    # NOTE(review): np.fromstring is deprecated in modern numpy in favor of
    # np.frombuffer.
    bdata = np.fromstring(bdata,btype)
    # littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
    # if littleEndian:
    # bdata = bdata.byteswap() # didn't used to need this with '>' above
    # Heuristic endianness fix: absurdly large values suggest wrong byte order.
    if (max(bdata)>1e30):
        bdata = bdata.byteswap()
    if shp:
        try:
            bdata.shape = shp
            return bdata
        except Exception:
            pass
    return np.array(bdata)
def glget(fname,btype):
"""
Load in a file containing pixels from glReadPixels dump.
Usage: glget(fname,btype)
Returns: array of 'btype elements with shape 'shape', suitable for im.ashow()
"""
d = braw(fname,btype)
d = d[8:]
f = open(fname,'rb')
shp = f.read(8)
f.close()
shp = np.fromstring(shp,np.int32)
shp[0],shp[1] = shp[1],shp[0]
try:
carray = np.reshape(d,shp)
return
except Exception:
pass
try:
r = d[0::3]+0
g = d[1::3]+0
b = d[2::3]+0
r.shape = shp
g.shape = shp
b.shape = shp
carray = np.array([r,g,b])
except Exception:
outstr = "glget: shape not correct for data of length "+str(len(d))
raise ValueError, outstr
return carray
def mget(fname,btype):
    """
    Load in a file that was saved from matlab

    Usage:   mget(fname,btype)

    Reads the raw data with braw(), then consults a sibling '.hdr' file
    (same basename) to recover the volume dimensions.

    NOTE(review): if the header read fails, xsize/ysize/numslices are never
    assigned and the reshape below raises NameError -- verify intended.
    """
    d = braw(fname,btype)
    try:
        # Header shares the basename: strip the 6-char suffix, append 'hdr'.
        header = fname[0:-6]+'hdr'
        vals = get(header,0)  # '0' means no missing-file warning msg
        if type(vals[0]) == ListType:  # it's an extended header
            xsize = int(vals[0][0])
            ysize = int(vals[0][1])
            numslices = int(vals[0][2])
        else:
            xsize = int(vals[0])
            ysize = int(vals[1])
            numslices = int(vals[2])
        print xsize,ysize,numslices, d.shape
    except Exception:
        print "No header file. Continuing ..."
    if numslices == 1:
        # Matlab stores column-major, hence the transpose; '*1' forces a copy.
        d.shape = [ysize,xsize]
        return np.transpose(d)*1
    else:
        d.shape = [numslices,ysize,xsize]
        return np.transpose(d)*1
def mput(outarray,fname,writeheader=0,btype=np.int16):
    """
    Save a file for use in matlab.

    Transposes outarray (matlab is column-major), writes the raw bytes to
    fname, and (if writeheader==1) writes a companion '.hdr' file holding
    the shape as text.

    NOTE(review): string.rfind returns -1 (it never raises ValueError) when
    no '.' is present, so hdrname would silently become fname[0:-1] -- verify.
    """
    outarray = np.transpose(outarray)
    outdata = np.ravel(outarray).astype(btype)
    outdata = outdata.tostring()
    outfile = open(fname,'wb')
    outfile.write(outdata)
    outfile.close()
    if writeheader == 1:
        try:
            suffixindex = string.rfind(fname,'.')
            hdrname = fname[0:suffixindex]
        except ValueError:
            hdrname = fname
        # Header layout: [xsize, ysize, numslices, 0]; 2D data gets numslices=1.
        if len(outarray.shape) == 2:
            hdr = [outarray.shape[1],outarray.shape[0], 1, 0]
        else:
            hdr = [outarray.shape[2],outarray.shape[1],outarray.shape[0], 0,'\n']
        print hdrname+'.hdr'
        outfile = open(hdrname+'.hdr','w')
        outfile.write(pstat.list2string(hdr))
        outfile.close()
    return None
def bput(outarray,fname,writeheader=0,packtype=np.int16,writetype='wb'):
    """
    Writes the passed array to a binary output file, and then closes
    the file. Default is overwrite the destination file.

    Usage:   bput (outarray,filename,writeheader=0,packtype=np.int16,writetype='wb')
    """
    # Infer the pack type from the filename suffix when recognized;
    # otherwise fall back to the caller-supplied packtype.
    suffix = fname[-6:]
    if suffix == 'bshort':
        packtype = np.int16
    elif suffix == 'bfloat':
        packtype = np.float32
    else:
        print 'Not a bshort or bfloat file. Using packtype=',packtype
    outdata = np.ravel(outarray).astype(packtype)
    # littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
    # if littleEndian:
    #     outdata = outdata.byteswap()
    outdata = outdata.tostring()
    outfile = open(fname,writetype)
    outfile.write(outdata)
    outfile.close()
    if writeheader == 1:
        try:
            suffixindex = string.rfind(fname,'.')
            hdrname = fname[0:suffixindex]
        except ValueError:
            # NOTE(review): string.rfind returns -1 rather than raising, so
            # this branch is unreachable -- verify intended behavior.
            hdrname = fname
        # NOTE(review): the axis order written here differs from mput();
        # presumably deliberate (no transpose here) -- TODO confirm.
        if len(outarray.shape) == 2:
            hdr = [outarray.shape[0],outarray.shape[1], 1, 0]
        else:
            hdr = [outarray.shape[1],outarray.shape[2],outarray.shape[0], 0,'\n']
        print hdrname+'.hdr'
        outfile = open(hdrname+'.hdr','w')
        outfile.write(pstat.list2string(hdr))
        outfile.close()
    return None
def mrget(fname,datatype=np.int16):
    """
    Opens a binary .MR file and clips off the tail data portion of it, returning
    the result as an array.

    Usage:   mrget(fname,datatype=np.int16)

    The image is assumed to be the largest square plane (side 512, 320,
    256, 128 or 64) that fits strictly inside the file; 32x32 is the
    fallback for anything smaller.
    """
    raw = braw(fname,datatype)
    # Walk the candidate sizes from largest to smallest and keep the first
    # square that the data is strictly longer than.
    for side in (512, 320, 256, 128, 64):
        pixels = side * side
        if len(raw) > pixels:
            return np.reshape(raw[-pixels:], (side, side))
    return np.reshape(raw[-32*32:], (32, 32))
def quickload(fname,linestocut=4):
    """
    Quickly loads in a long text file, chopping off first n 'linestocut'.

    Usage:   quickload(fname,linestocut=4)
    Returns: array filled with data in fname
    """
    f = open(fname,'r')
    d = f.readlines()
    f.close()
    print fname,'read inp.'
    # Drop the leading header lines, then split each remaining line into
    # whitespace-separated tokens.
    d = d[linestocut:]
    d = map(string.split,d)
    print 'Done with string.split on lines.'
    # Convert every token to int, row by row, in place.
    for i in range(len(d)):
        d[i] = map(string.atoi,d[i])
    print 'Conversion to ints done.'
    return np.array(d)
def writedelimited (listoflists, delimiter, file, writetype='w'):
    """
    Writes a list of lists in columns, separated by character(s) delimiter
    to specified file. File-overwrite is the default.

    Usage:   writedelimited (listoflists,delimiter,filename,writetype='w')
    Returns: None

    Rows equal to '\\n' are emitted as blank lines; rows equal to 'dashes'
    are emitted as a line of '------' separators.
    """
    # Promote a single flat row to a list-of-lists.
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]
    outfile = open(file,writetype)
    # Build a deep copy with the special '\n'/'dashes' rows removed
    # (deleting from the end so earlier indices stay valid).
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()
    for row in rowstokill:
        del list2print[row]
    # NOTE(review): maxsize is only used to size the 'dashes' row here;
    # writecc has the same skeleton and uses it for column widths.
    maxsize = [0]*len(list2print[0])
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '------'
            outfile.write(pstat.linedelimited(dashes,delimiter))
        else:
            outfile.write(pstat.linedelimited(row,delimiter))
        outfile.write('\n')
    outfile.close()
    return None
def writecc (listoflists,file,writetype='w',extra=2):
    """
    Writes a list of lists to a file in columns, customized by the max
    size of items within the columns (max size of items in col, +2 characters)
    to specified file. File-overwrite is the default.

    Usage:   writecc (listoflists,file,writetype='w',extra=2)
    Returns: None
    """
    # Promote a single flat row to a list-of-lists.
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]
    outfile = open(file,writetype)
    # Deep-copy and remove the special '\n'/'dashes' rows before measuring
    # column widths (they would otherwise skew the maxima).
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()
    for row in rowstokill:
        del list2print[row]
    # Column width = widest item in that column, plus 'extra' padding chars.
    maxsize = [0]*len(list2print[0])
    for col in range(len(list2print[0])):
        items = pstat.colex(list2print,col)
        items = map(pstat.makestr,items)
        maxsize[col] = max(map(len,items)) + extra
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            # 'dashes' rows become a ruler sized to each column width.
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '-'*(maxsize[j]-2)
            outfile.write(pstat.lineincustcols(dashes,maxsize))
        else:
            outfile.write(pstat.lineincustcols(row,maxsize))
        outfile.write('\n')
    outfile.close()
    return None
def writefc (listoflists,colsize,file,writetype='w'):
    """
    Writes a list of lists to a file in columns of fixed size. File-overwrite
    is the default.

    Usage:   writefc (listoflists,colsize,file,writetype='w')
    Returns: None
    """
    # Accept arrays too; convert to nested lists first.
    if type(listoflists) == np.ndarray:
        listoflists = listoflists.tolist()
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]
    outfile = open(file,writetype)
    # Deep-copy and remove the special '\n'/'dashes' rows.
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()
    for row in rowstokill:
        del list2print[row]
    n = [0]*len(list2print[0])  # one slot per column
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            # NOTE(review): dashes is sized by colsize (chars per column)
            # rather than by the column count len(n) -- looks suspicious
            # when colsize < number of columns; verify.
            dashes = [0]*colsize
            for j in range(len(n)):
                dashes[j] = '-'*(colsize)
            outfile.write(pstat.lineincols(dashes,colsize))
        else:
            outfile.write(pstat.lineincols(row,colsize))
        outfile.write('\n')
    outfile.close()
    return None
def load(fname,lines_to_ignore=4,type='i'):
    """
    Load in huge, flat, 2D text files. Can handle differing line-lengths AND
    can strip #/% on UNIX (or with a better NT grep). Requires wc, grep, and
    mmapfile.lib/.pyd. Type can be 'i', 'f' or 'd', for ints, floats or doubles,
    respectively. Lines_to_ignore determines how many lines at the start of the
    file to ignore (required for non-working grep).

    Usage:   load(fname,lines_to_ignore=4,type='i')
    Returns: numpy array of specified type
    """
    start = time.time()  ## START TIMER
    if type == 'i':
        intype = int
    elif type in ['f','d']:
        intype = float
    else:
        raise ValueError, "type can be 'i', 'f' or 'd' in load()"

    ## STRIP OUT % AND # LINES
    # Shell out to grep to drop comment lines into a temp copy of the file.
    tmpname = tempfile.NamedTemporaryFile(delete=False).name
    if sys.platform == 'win32':
        # NT VERSION OF GREP DOESN'T DO THE STRIPPING ... SIGH
        cmd = "grep.exe -v \'%\' "+fname+" > "+tmpname
        print cmd
        os.system(cmd)
    else:
        # UNIX SIDE SHOULD WORK
        cmd = "cat "+fname+" | grep -v \'%\' |grep -v \'#\' > "+tmpname
        print cmd
        os.system(cmd)

    ## GET NUMBER OF ROWS, COLUMNS AND LINE-LENGTH, USING WC
    wc = string.split(os.popen("wc "+tmpname).read())
    numlines = int(wc[0]) - lines_to_ignore
    tfp = open(tmpname)
    if lines_to_ignore <> 0:
        for i in range(lines_to_ignore):
            junk = tfp.readline()
    # Column count is taken from the first real data line.
    numcols = len(string.split(tfp.readline()))  #int(float(wc[1])/numlines)
    tfp.close()

    ## PREPARE INPUT SPACE
    a = np.zeros((numlines*numcols), type)
    block = 65536  # chunk to read, in bytes
    data = mmapfile.mmapfile(tmpname, '', 0)
    if lines_to_ignore <> 0 and sys.platform == 'win32':
        for i in range(lines_to_ignore):
            junk = data.readline()
    # Read the mmap'd file in 64K blocks, carrying any trailing partial
    # line over into the next block before tokenizing/converting.
    i = 0
    d = ' '
    carryover = ''
    while len(d) <> 0:
        d = carryover + data.read(block)
        cutindex = string.rfind(d,'\n')
        carryover = d[cutindex+1:]
        d = d[:cutindex+1]
        d = map(intype,string.split(d))
        a[i:i+len(d)] = d
        i = i + len(d)
    end = time.time()
    print "%d sec" % round(end-start,2)
    data.close()
    os.remove(tmpname)
    return np.reshape(a,[numlines,numcols])
def find_dirs(sourcedir):
    """Finds and returns all directories in sourcedir

    Usage:   find_dirs(sourcedir)
    Returns: list of directory names (potentially empty)
    """
    # Filter the directory listing down to subdirectory entries.
    return [entry for entry in os.listdir(sourcedir)
            if os.path.isdir(os.path.join(sourcedir, entry))]
# ALIASES ...
# 'save' is a convenience alias for aput (defined earlier in this module).
save = aput
def binget(fname,btype=None):
    """
    Loads a binary file from disk. Assumes associated hdr file is in same
    location. You can force an unpacking type, or else it tries to figure
    it out from the filename (4th-to-last character). Hence, readable file
    formats are ...

    1bin=int8, sbin=int16, ibin=int32, fbin=float32, dbin=float64, etc.

    Usage:   binget(fname,btype=None)
    Returns: data in file fname of type btype
    """
    file = open(fname,'rb')
    bdata = file.read()
    file.close()

    # if none given, assume character preceeding 'bin' is the unpacktype
    if not btype:
        btype = fname[-4]
    try:
        bdata = np.fromstring(bdata,btype)
    except Exception:
        raise ValueError, "Bad unpacking type."

    # force the data on disk to be LittleEndian (for more efficient PC/Linux use)
    if not np.little_endian:
        bdata = bdata.byteswap()

    try:
        # Recover the original shape from the companion '.hdr' file.
        header = fname[:-3]+'hdr'
        vals = get(header,0)  # '0' means no missing-file warning msg
        print vals
        if type(vals[0]) == ListType:  # it's an extended header
            # NOTE(review): this branch reads xsize/ysize/numslices but never
            # applies them to bdata, which stays flat -- verify intended.
            xsize = int(vals[0][0])
            ysize = int(vals[0][1])
            numslices = int(vals[0][2])
        else:
            bdata.shape = vals
    except Exception:
        print "No (or bad) header file. Returning unshaped array."

    return np.array(bdata)
def binput(outarray,fname,packtype=None,writetype='wb'):
    """
    Unravels outarray and writes the data to a file, always in LittleEndian
    format, along with a header file containing the original data shape. Default
    is overwrite the destination file. Tries to figure out packtype from
    4th-to-last character in filename. Thus, the routine understands these
    file formats ...

    1bin=int8, sbin=int16, ibin=int32, fbin=float32, dbin=float64, etc.

    Usage:  binput(outarray,filename,packtype=None,writetype='wb')
    """
    if not packtype:
        packtype = fname[-4]

    # a speck of error checking
    if packtype == np.int16 and outarray.dtype.char == 'f':
        # check to see if there's data loss
        if max(np.ravel(outarray)) > 32767 or min(np.ravel(outarray))<-32768:
            print "*** WARNING: CONVERTING FLOAT DATA TO OUT-OF RANGE INT16 DATA"
    outdata = np.ravel(outarray).astype(packtype)

    # force the data on disk to be little_endian (for more efficient PC/Linux use)
    if not np.little_endian:
        outdata = outdata.byteswap()
    outdata = outdata.tostring()
    outfile = open(fname,writetype)
    outfile.write(outdata)
    outfile.close()

    # Now, write the header file
    try:
        suffixindex = string.rfind(fname,'.')
        hdrname = fname[0:suffixindex+2]+'hdr'  # include .s or .f or .1 or whatever
    except ValueError:
        # NOTE(review): string.rfind returns -1 instead of raising, so this
        # branch is unreachable -- verify.
        hdrname = fname
    hdr = outarray.shape
    print hdrname
    outfile = open(hdrname,'w')
    outfile.write(pstat.list2string(hdr))
    outfile.close()
    return None
def getafniparam(headfilename,paramname):
    """
    Loads in an AFNI header file, and returns the values of 'paramname'.

    Usage:   getafniparam(headfile,paramname)
    Returns: appropriate "type" for params, or None if fails
    """
    if headfilename[-4:] == 'BRIK':  # if asked for BRIK, change it to HEAD
        headfilename = headfilename[:-4]+'HEAD'
    d = get(headfilename)
    lines = open(headfilename,'r').readlines()
    for i in range(len(lines)):
        if string.find(lines[i],paramname) <> -1:
            # AFNI attribute layout: the line after the name line carries
            # the value count; values start two lines below the name.
            count = d[i+1][-1]
            gotten = 0
            result = []
            for j in range(i+2,len(lines)):
                for k in range(len(d[j])):
                    if type(d[j][k]) == StringType:
                        # String attributes are single-quoted; strip the
                        # leading quote and clip to 'count' characters.
                        result = d[j][k][1:count]
                        return result
                    else:
                        # Numeric attributes accumulate until 'count' values
                        # have been collected.
                        result.append(d[j][k])
                        gotten += 1
                if gotten == count:
                    break
            return result
    return None
def add2afnihistory(headfilename,newtext):
    """
    Adds 'newtext' to HISTORY_NOTE in afni file specified in headfilename.

    Usage:   add2afnihistory(headfile,newtext)
    Returns: None
    """
    if headfilename[-4:] == 'BRIK':  # if asked for BRIK, change it to HEAD
        headfilename = headfilename[:-4]+'HEAD'
    d = get(headfilename)
    lines = open(headfilename,'r').readlines()
    for i in range(len(lines)):
        if string.find(lines[i],'HISTORY_NOTE') <> -1:
            # NOTE(review): bytecount is read but never used -- verify.
            bytecount = d[i+1][-1]
            # Append a timestamped entry; AFNI string values end with '~',
            # and the attribute's 'count' line must match the new length.
            oldstr = lines[i+2][:-2]
            date = '[python:*** %s] ' %time.asctime()
            lines[i+2] = oldstr +'\\n' +date +newtext +'~\n'
            lines[i+1] = ' count = %s\n' %str(len(lines[i+2]))
    f = open(headfilename,'w')
    f.writelines(lines)
    f.close()
    return
def array2afni(d,brikprefix,voltype=None,TR=2000,sliceorder='seqplus',geomparent=None,view=None,corrlength=1,briklabels=None,historytext=None):
    """
    Converts an array 'd' to an AFNI BRIK/HEAD combo via putbin and to3d. Tries to
    guess the AFNI volume type

    voltype = {'-anat','-epan','-fim'}
    geomparent = filename of the afni BRIK file with the same geometry
    view = {'tlrc', 'acpc' or 'orig'}
    corrlength = # of images used in the (single-timeseries) correlation (for fico)
    briklabels = list of names (strings) to use for brick labels
    historytext = string to be appended to the history file, if any

    Usage: array2afni(d,brikprefix,voltype=None,TR=2000,
                      sliceorder='seqplus',geomparent=None,view=None,
                      corrlength=1,briklabels=None,historytext=None)
    Returns: None
    """
    # converts numpy typecode()s into appropriate strings for to3d command line
    typecodemapping = {'c':'b',  # character
                       'B':'b',  # UnsignedInt8
                       'f':'f',  # float0, float8, float16, float32
                       'd':'f',  # float64
                       'b':'b',  # int0, int8
                       'h':'',   # int16
                       'i':'i',  # int32
                       'l':'i'}  # int

    # Verify that the data is proper size (3- or 4-D)
    if len(d.shape) not in [3,4]:
        raise ValueError, "A 3D or 4D array is required for array2afni() ... %s" %d.shape

    # Save out the array to a binary file, homebrew style
    # NOTE(review): dtype.char is a one-letter string, so comparing it to
    # np.float64 (a type object) is presumably always False and outcode
    # always comes from the else branch -- verify intended.
    if d.dtype.char == np.float64:
        outcode = 'f'
    else:
        outcode = d.dtype.char
    tmpoutname = 'afnitmp.%sbin' % outcode
    binput(d.astype(outcode),tmpoutname)
    if not voltype:
        if len(d.shape) == 3:  # either anatomy or functional
            if d.dtype.char in ['s','i','l']:  # if floats, assume functional
                voltype = '-anat'
            else:
                voltype = '-fim'
        else:  # 4D dataset, must be anatomical timeseries (epan)
            voltype = '-anat'
    if voltype[0] != '-':
        voltype = '-'+voltype

    # Work out slice/timepoint counts and the to3d time/stat options.
    if len(d.shape) == 3:  # either anatomy or functional
        timepts = 1
        slices = d.shape[0]
        timestr = ''
    elif len(d.shape) == 4:
        if voltype=='-fico':
            # Correlation volumes: collapse the first two axes into slices.
            timepts = 1
            d = np.reshape(d,[d.shape[0]*d.shape[1],d.shape[2],d.shape[3]])
            slices = d.shape[0]
            timestr = '-statpar %s 1 1 ' % corrlength
        else:
            timepts = d.shape[0]
            slices = d.shape[1]
            timestr = '-time:zt %d %d %0.3f %s ' % (slices,timepts,TR,sliceorder)

    # Build and run the to3d command line against the temp binary file.
    cmd = 'to3d %s -prefix %s -session . ' % (voltype, brikprefix)
    if not view:
        view = 'orig'
    cmd += '-view %s ' % view
    if geomparent:
        cmd += '-geomparent %s ' % geomparent
    cmd += timestr
    cmd += '3D%s:0:0:%d:%d:%d:%s' % (typecodemapping[d.dtype.char],d.shape[-1],d.shape[-2],slices*timepts,tmpoutname)
    print cmd
    os.system(cmd)
    os.remove(tmpoutname)
    os.remove(tmpoutname[:-3]+'hdr')

    if len(d.shape)==4 and briklabels:
        # Append a BRICK_LABS attribute to the generated HEAD file.
        names = ''
        for label in briklabels:
            names += str(label)+'~'
        count = len(names)
        appendstr = """\n\ntype = string-attribute
name = BRICK_LABS
count = %s
'%s""" % (count, names)
        f = open('%s+%s.HEAD' %(brikprefix,view), 'a')
        f.write(appendstr)
        f.close()

    if historytext:
        add2afnihistory('%s+%s.HEAD'%(brikprefix,view),historytext)
def makeDType(exemplar):
    """Return a dtype object based on the given list or dict.

    This is a convenience function -- if you want to do anything sophisticated
    it's best to compose the dtype "by hand".

    For a dict, field names are the sorted keys and each field's format is
    inferred from np.array() of the corresponding value; for a sequence,
    the formats are joined into a comma-separated dtype string.
    """
    if isinstance(exemplar, dict):
        # BUGFIX: the original did exemplar.keys(); names.sort(), which
        # fails on Python 3 where keys() returns a view; sorted() works on
        # both and yields the same ordering.
        names = sorted(exemplar)
        formats = [np.array(exemplar[key]).dtype for key in names]
        return np.dtype({'names': names, 'formats': formats})
    else:
        formats = ','.join([np.array(val).dtype.str for val in exemplar])
        return np.dtype(formats)
|
EraYaN/CouchPotatoServer | refs/heads/master | libs/subliminal/cache.py | 107 | # -*- coding: utf-8 -*-
# Copyright 2012 Nicolas Wack <wackou@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
from functools import wraps
import logging
import os.path
import threading
try:
import cPickle as pickle
except ImportError:
import pickle
__all__ = ['Cache', 'cachedmethod']
logger = logging.getLogger(__name__)
class Cache(object):
    """A Cache object contains cached values for methods. It can have
    separate internal caches, one for each service
    """

    def __init__(self, cache_dir):
        # Directory where the per-service pickle files live
        self.cache_dir = cache_dir
        # service_name -> {func_key -> {args -> result}}
        self.cache = defaultdict(dict)
        # RLock so save() can be called from paths that already hold the lock
        self.lock = threading.RLock()

    def __del__(self):
        # Persist every loaded service cache when the object is collected
        for service_name in self.cache:
            self.save(service_name)

    def cache_location(self, service_name):
        """Return the pickle file path used for *service_name*."""
        return os.path.join(self.cache_dir, 'subliminal_%s.cache' % service_name)

    def load(self, service_name):
        """Load the on-disk cache for *service_name* into memory (once)."""
        with self.lock:
            if service_name in self.cache:
                # already loaded
                return
            self.cache[service_name] = defaultdict(dict)
            filename = self.cache_location(service_name)
            logger.debug(u'Cache: loading cache from %s' % filename)
            try:
                # BUGFIX: use a context manager so the file handle is closed
                # (the original leaked the handle returned by open())
                with open(filename, 'rb') as f:
                    self.cache[service_name] = pickle.load(f)
            except IOError:
                logger.info('Cache: Cache file "%s" doesn\'t exist, creating it' % filename)
            except EOFError:
                logger.error('Cache: cache file "%s" is corrupted... Removing it.' % filename)
                os.remove(filename)

    def save(self, service_name):
        """Pickle the in-memory cache for *service_name* to disk."""
        filename = self.cache_location(service_name)
        logger.debug(u'Cache: saving cache to %s' % filename)
        with self.lock:
            # BUGFIX: context manager instead of a leaked open() handle
            with open(filename, 'wb') as f:
                pickle.dump(self.cache[service_name], f)

    def clear(self, service_name):
        """Delete the on-disk cache for *service_name* and reset it in memory."""
        try:
            os.remove(self.cache_location(service_name))
        except OSError:
            # a missing cache file just means there is nothing to clear
            pass
        self.cache[service_name] = defaultdict(dict)

    def cached_func_key(self, func, cls=None):
        """Return the key identifying *func*: ('module.Class', func_name)."""
        try:
            # Python 2 methods carry their defining class here
            cls = func.im_class
        except AttributeError:
            # BUGFIX: narrowed from a bare except; only the missing
            # attribute is an expected condition here
            pass
        return ('%s.%s' % (cls.__module__, cls.__name__), func.__name__)

    def function_cache(self, service_name, func):
        """Return the {args -> result} dict for *func* under *service_name*."""
        func_key = self.cached_func_key(func)
        return self.cache[service_name][func_key]

    def cache_for(self, service_name, func, args, result):
        """Store *result* for the given call. No lock: dict ops are atomic."""
        self.function_cache(service_name, func)[args] = result

    def cached_value(self, service_name, func, args):
        """Raises KeyError if not found"""
        # no need to lock here, dict ops are atomic
        return self.function_cache(service_name, func)[args]
def cachedmethod(function):
    """Decorator to make a method use the cache.

    .. note::

        This can NOT be used with static functions, it has to be used on
        methods of some class

    """
    @wraps(function)
    def cached(*args):
        instance = args[0]
        store = instance.config.cache
        service_name = instance.__class__.__name__
        func_key = store.cached_func_key(function, cls=instance.__class__)
        func_cache = store.cache[service_name][func_key]
        # the instance pointer (args[0]) is stripped from the key so the
        # cache is shared among all instances of the same class
        key = args[1:]
        if key in func_cache:
            result = func_cache[key]
            logger.debug(u'Using cached value for %s(%s), returns: %s' % (func_key, key, result))
            return result
        result = function(*args)
        # another thread could have cached a value in the meantime, but
        # that's fine: we prefer keeping the latest value in the cache
        func_cache[key] = result
        return result
    return cached
|
vsilent/smarty-bot | refs/heads/master | core/brain/turn/head/left/__init__.py | 12133432 | |
shawnwanderson/cmput404-project | refs/heads/master | venv/lib/python2.7/site-packages/django/conf/locale/de_CH/__init__.py | 12133432 | |
MatthewShao/mitmproxy | refs/heads/master | test/mitmproxy/proxy/protocol/test_tls.py | 12133432 | |
JasonGross/mozjs | refs/heads/master | python/mach/mach/commands/__init__.py | 12133432 | |
grodrigues3/test-infra | refs/heads/master | gubernator/github/main_test.py | 14 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-self-use
"""
To run these tests:
$ pip install webtest nosegae
$ nosetests --with-gae --gae-lib-root ~/google_appengine/
"""
import json
import unittest
import webtest
import handlers
import main
import models
app = webtest.TestApp(main.app)
class TestBase(unittest.TestCase):
    """Shared base class for App Engine webhook tests.

    NOTE(review): self.testbed is expected to be injected by the nosegae
    plugin (see module docstring) -- it is not created here.
    """

    def init_stubs(self):
        # Activate the App Engine service stubs the tests rely on.
        self.testbed.init_memcache_stub()
        self.testbed.init_app_identity_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_blobstore_stub()
        self.testbed.init_datastore_v3_stub()
class AppTest(TestBase):
    """End-to-end tests of the /webhook endpoint via the webtest wrapper."""

    def setUp(self):
        self.init_stubs()

    def get_response(self, event, body):
        """POST *body* to /webhook as GitHub event *event* with a valid signature."""
        if isinstance(body, dict):
            body = json.dumps(body)
        signature = handlers.make_signature(body)
        return app.post('/webhook', body,
            {'X-Github-Event': event,
             'X-Hub-Signature': signature})

    def test_webhook(self):
        # A signed event should be stored as exactly one raw webhook record.
        self.get_response('test', {'action': 'blah'})
        hooks = list(models.GithubWebhookRaw.query())
        self.assertEqual(len(hooks), 1)
        self.assertIsNotNone(hooks[0].timestamp)

    def test_webhook_bad_sig(self):
        # A signature computed over different content must be rejected (400).
        body = json.dumps({'action': 'blah'})
        signature = handlers.make_signature(body + 'foo')
        app.post('/webhook', body,
            {'X-Github-Event': 'test',
             'X-Hub-Signature': signature}, status=400)

    def test_webhook_missing_sig(self):
        # Requests without any signature header are rejected (400).
        app.post('/webhook', '{}',
            {'X-Github-Event': 'test'}, status=400)

    def test_webhook_unicode(self):
        # Non-ASCII payloads must round-trip through signing unchanged.
        self.get_response('test', {'action': u'blah\u03BA'})

    def test_webhook_status(self):
        args = {
            'name': 'owner/repo',
            'sha': '1234',
            'context': 'ci',
            'state': 'success',
            'target_url': 'http://example.com',
            'description': 'passed the tests!',
            'created_at': '2016-07-07T01:58:09Z',
            'updated_at': '2016-07-07T02:03:12Z',
        }
        self.get_response('status', args)
        statuses = list(models.GHStatus.query_for_sha('owner/repo', '1234'))
        self.assertEqual(len(statuses), 1)
        status = statuses[0]
        # The model stores 'name' as 'repo'; mirror that before comparing.
        args['repo'] = args.pop('name')
        # Datetimes are normalized back to the ISO-8601 strings the
        # webhook carried; other fields compare directly.
        for key, value in args.iteritems():
            status_val = getattr(status, key)
            try:
                status_val = status_val.strftime('%Y-%m-%dT%H:%M:%SZ')
            except AttributeError:
                pass
            assert status_val == value, '%r != %r' % (getattr(status, key), value)

    # Canonical pull_request event payload shared by the PR tests below.
    PR_EVENT_BODY = {
        'repository': {'full_name': 'test/test'},
        'pull_request': {
            'number': 123,
            'head': {'sha': 'cafe'},
            'updated_at': '2016-07-07T02:03:12Z',
            'state': 'open',
            'user': {'login': 'rmmh'},
            'assignees': [{'login': 'spxtr'}],
            'title': 'test pr',
        },
        'action': 'opened',
    }

    def test_webhook_pr_open(self):
        body = json.dumps(self.PR_EVENT_BODY)
        self.get_response('pull_request', body)
        digest = models.GHIssueDigest.get('test/test', 123)
        self.assertTrue(digest.is_pr)
        self.assertTrue(digest.is_open)
        self.assertEqual(digest.involved, ['rmmh', 'spxtr'])
        self.assertEqual(digest.payload['title'], 'test pr')
        self.assertEqual(digest.payload['needs_rebase'], False)

    def test_webhook_pr_open_and_status(self):
        # A status event on the PR's head sha should show up in the digest.
        self.get_response('pull_request', self.PR_EVENT_BODY)
        self.get_response('status', {
            'repository': self.PR_EVENT_BODY['repository'],
            'name': self.PR_EVENT_BODY['repository']['full_name'],
            'sha': self.PR_EVENT_BODY['pull_request']['head']['sha'],
            'context': 'test-ci',
            'state': 'success',
            'target_url': 'example.com',
            'description': 'woop!',
            'created_at': '2016-07-07T01:58:09Z',
            'updated_at': '2016-07-07T02:03:15Z',
        })
        digest = models.GHIssueDigest.get('test/test', 123)
        self.assertEqual(digest.payload['status'],
            {'test-ci': ['success', 'example.com', 'woop!']})
|
domenicosolazzo/PythonKlout | refs/heads/master | pythonklout.py | 1 | __author__ = "Domenico Solazzo"
__version__ = "0.1"
# HTTP status codes returned by the Klout REST API, mapped to the
# human-readable explanations used when reporting errors to callers.
RESPONSE_CODES = {
    200: "OK: Success",
    202: "Accepted: The request was accepted and the user was queued for processing",
    401: "Not Authorized: either you need to provide authentication credentials, or the credentials provided aren't valid.",
    403: "Bad Request: Your request is invalid and we'll return and error message that tells you why. This is the status code if you have exceeded the rate limit.",
    404: "Not Found: either you are requesting an invalid URI or the resource in question doesn't exist.",
    500: "Internal Server Error: we did something wrong.",
    502: "Bad Gateway: returned if Klout is down or being upgraded.",
    503: "Service Unavailable: the Klout servers are up, but are overloaded with requests. Try again later."
}
class KloutError( Exception ):
    """Raised when a Klout API call fails.

    Carries the HTTP status ``code`` and a human-readable ``msg``;
    both str() and repr() render as "<code>: <msg>".
    """

    def __init__(self, code=0, msg=''):
        super(KloutError, self).__init__()
        self.code = code
        self.msg = msg

    def __repr__(self):
        return "%i: %s" % (self.code, self.msg)

    # str() and repr() are intentionally identical
    __str__ = __repr__
class Klout( object ):
    """Thin client for the Klout REST API.

    serviceType selects the backend: "test" uses the canned
    TestKloutService stub, anything else talks to the live API through
    KloutService using the supplied key.
    """

    def __init__(self, key, serviceType="service"):
        self._apiKey = key
        self.__service = self.__getProxyFactory(serviceType)

    def __getProxyFactory(self, serviceType):
        """Build (and memoize on self) the backend for *serviceType*."""
        service = None
        if serviceType == "test":
            # NOTE(review): serviceType (not the API key) is passed as the
            # stub's key -- harmless since the stub ignores it, but verify.
            service = TestKloutService(serviceType)
        else:
            service = KloutService(self._apiKey)
        self.__service = service
        return self.__service

    def score(self, users):
        """
        Retrieve a Klout score

        @param: users - List of usernames
        @return: A list of tuples in the form (username, klout_score)
            NOTE(review): makeCall returns the decoded JSON response
            (a dict), so the description above looks stale -- verify.
        """
        if not users:
            raise KloutError(0, "No Users")
        if not isinstance(users, (list, tuple)):
            raise KloutError(0, "Wrong input.")
        # The API takes a single comma-separated 'users' parameter.
        users = ",".join(users)
        query = {"users": users}
        result = self.__service.makeCall("score", query)
        return result

    def show(self, users):
        """
        Retrieve a user object

        @param: users - List of usernames
        @return: A dictionary with the returned data
        """
        if not users:
            raise KloutError(0, "No Users.")
        if not isinstance(users, (list, tuple)):
            raise KloutError(0, "Wrong input.")
        users = ",".join(users)
        query = {"users":users}
        result = self.__service.makeCall("user", query)
        return result

    def topics(self, users):
        """
        Returns the top 3 topics objects

        @param: users - A list of usernames
        @return: A list of dicts in the form [{username:['topic1, topic2, topic3]..}
        """
        if not users:
            raise KloutError(0, "No Users")
        if not isinstance(users, (list, tuple)):
            raise KloutError(0, "Wrong Input.")
        users = ",".join(users)
        query = {"users":users}
        result = self.__service.makeCall("topics", query)
        return result

    def influencerOf(self, users):
        """
        Returns up to 5 user score pairs for user that are influencer for the given user

        @param: users - A list of usernames
        @return: A list of dicts in the form [{username:[(username, score),..}
        """
        if not users:
            raise KloutError(0, "No Users")
        if not isinstance(users, (list, tuple)):
            raise KloutError(0, "Wrong Input.")
        users = ",".join(users)
        query = {"users":users}
        result = self.__service.makeCall("influencerOf", query)
        return result

    def influencedBy(self, users):
        """
        Returns up to 5 user score pairs for user that are influenced by the given user

        @param: users - A list of usernames
        @return: A list of dicts in the form [{username:[(username, score),..}
        """
        if not users:
            raise KloutError(0, "No Users")
        if not isinstance(users, (list, tuple)):
            raise KloutError(0, "Wrong Input.")
        users = ",".join(users)
        query = {"users":users}
        result = self.__service.makeCall("influencedBy", query)
        return result
class KloutService(object):
    """Live backend: performs HTTP GET requests against api.klout.com."""

    def __init__(self, apiKey):
        self.apiKey = apiKey
        self.VERSION_API = "/1/"
        self.API_URL = "api.klout.com"

    def getCallUrl(self, callName):
        """Map a logical call name to its API path (version prefix included)."""
        servicePath = ""
        if callName == "score":
            servicePath = "klout.json"
        elif callName == "user":
            servicePath = "users/show.json"
        elif callName == "topics":
            servicePath = "users/topics.json"
        elif callName == "influencedBy":
            servicePath = "soi/influenced_by.json"
        elif callName == "influencerOf":
            servicePath = "soi/influencer_of.json"
        else:
            raise Exception("Url not available")
        return self.VERSION_API + servicePath

    def _remove_empty_params(self, query):
        """Return a copy of *query* without None-valued entries."""
        if not isinstance(query, type({})):
            raise Exception("Wrong query in input")
        returnedQuery = {}
        for key in query:
            if not query[key] == None:
                returnedQuery[key] = query[key]
        return returnedQuery

    def makeCall(self, callName, query):
        """GET the given call with *query* params; return the decoded JSON.

        NOTE(review): httplib.HTTPException instances have no .read()/.code
        attributes, so the error branch below would itself raise
        AttributeError; urllib2.HTTPError looks like what was meant -- verify.
        """
        import urllib, httplib, json
        url = self.getCallUrl(callName)
        query = self._remove_empty_params(query)
        # Every request authenticates with the instance API key.
        if 'key' not in query:
            query["key"] = self.apiKey
        queryStr = urllib.urlencode(query)
        if len(query) > 0:
            if url.find("?") == -1:
                url = url + "?" + queryStr
            else:
                url = url + "&" + queryStr
        try:
            conn = httplib.HTTPConnection(self.API_URL)
            conn.request('GET', url)
            response = conn.getresponse()
            data = response.read()
            data = json.loads(data)
        except httplib.HTTPException as err:
            msg = err.read() or RESPONSE_CODES.get(err.code, err.message)
            raise KloutError(err.code, msg)
        except ValueError:
            # json.loads failed: the body was not valid JSON.
            msg = "Invalid data: %s" % data
            raise KloutError(0, msg)
        return data
class TestKloutService(KloutService):
    """Stub backend returning canned payloads instead of doing HTTP.

    Unknown call names yield None, matching the implicit fall-through of
    the live service's dispatch.
    """

    def makeCall(self, callName, query):
        """Return the canned payload for *callName* (query is ignored)."""
        canned = {
            "score": {"users":[{"twitter_screen_name":"user1","kscore":23.02}]},
            "user": {"users":[{
                "twitter_id": "111111",
                "twitter_screen_name":"name",
                "score":{
                    "kscore":10,
                    "slope":1,
                    "description":"description",
                    "kclass_id":1,
                    "kclass":"Socializer",
                    "kclass_description":"kclass description",
                    "network_score":22,
                    "amplification_score":18,
                    "true_reach": 10,
                    "delta_1day": 0.2,
                    "delta_5day": 0.4
                }
            }]},
            "topics": {"users":[{"twitter_screen_name":"user1", "topics":["python"]}]},
            "influencedBy": {"users":[
                {
                    "twitter_screen_name":"user1",
                    "influencers":[{"twitter_screen_name":"user2",
                                    "kscore":10.00}]
                }
            ]},
            "influencerOf": {"users":[
                {
                    "twitter_screen_name":"user1",
                    "influencers":[{"twitter_screen_name":"user2",
                                    "kscore":10.00}]
                }
            ]},
            "history": {'dates':[], 'klout_score':[], 'amplification':[],
                        'retweets':[], 'mentions':[], 'network':[],
                        'followers_following':[], 'followers_count':[],
                        'mentioners':[], 'retweeters':[], 'true_reach':[],
                        'in_out':[]},
        }
        return canned.get(callName)
|
goodwinnk/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/conf/locale/en_GB/formats.py | 234 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# NOTE(review): the verbose formats below use US-style values (e.g.
# 'Oct. 25, 2006') identical to Django's default 'en' locale rather than
# typically-British forms; only the SHORT_* formats are day-first.  Confirm
# against upstream Django before "fixing" -- this mirrors the historical file.
DATE_FORMAT = 'N j, Y'                  # 'Oct. 25, 2006'
TIME_FORMAT = 'P'                       # '2:30 pm'
DATETIME_FORMAT = 'N j, Y, P'           # 'Oct. 25, 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y'               # 'October 2006'
MONTH_DAY_FORMAT = 'F j'                # 'October 25'
SHORT_DATE_FORMAT = 'd/m/Y'             # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P'       # '25/10/2006 2:30 pm'
FIRST_DAY_OF_WEEK = 0                   # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',             # '25/10/2006', '25/10/06'
    '%Y-%m-%d',                         # '2006-10-25'
    # '%b %d %Y', '%b %d, %Y',          # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',          # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',          # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',          # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',                         # '14:30:59'
    '%H:%M',                            # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',                # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',                   # '2006-10-25 14:30'
    '%Y-%m-%d',                         # '2006-10-25'
    '%d/%m/%Y %H:%M:%S',                # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M',                   # '25/10/2006 14:30'
    '%d/%m/%Y',                         # '25/10/2006'
    '%d/%m/%y %H:%M:%S',                # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M',                   # '25/10/06 14:30'
    '%d/%m/%y',                         # '25/10/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3                     # digits per thousands group
|
anushreejangid/csmpe-main | refs/heads/master | csmpe/core_plugins/csm_install_operations/utils.py | 2 | import sys
import importlib
class ServerType:
    """Symbolic names for the file-server protocols supported by CSM."""
    TFTP_SERVER = 'TFTP'
    FTP_SERVER = 'FTP'
    SFTP_SERVER = 'SFTP'
    LOCAL_SERVER = 'LOCAL'  # files served from the local filesystem
def import_module(module, path=None):
    """Import *module* by name and return it, or None when it cannot be loaded.

    :param module: dotted module name to import.
    :param path: optional directory appended to sys.path before importing.
    :return: the imported module object, or None on failure.
    """
    if path is not None:
        sys.path.append(path)
    try:
        return importlib.import_module(module)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any other failure (missing module, error
        # in its top-level code) still degrades to None, as callers expect.
        return None
def concatenate_dirs(dir1, dir2):
    """Join two directory fragments with '/'.

    Either argument may be None or empty; whatever remains is returned
    unchanged, and '' is returned when both parts are missing.
    """
    first = dir1 if dir1 else ''
    second = dir2 if dir2 else ''
    if not second:
        return first
    if not first:
        return second
    return first + '/' + second
def is_empty(obj):
    """Return True when *obj* counts as empty.

    Empty means: [], None, '', whitespace-only, or the literal string 'None'.
    For strings, every 'None' substring is removed and surrounding whitespace
    stripped before the truth test.
    """
    if isinstance(obj, str):
        obj = obj.replace('None', '').strip()
    return not obj
def update_device_info_udi(ctx):
    """Persist the connection's device_info and udi into CSM's saved data.

    The explicit refresh calls were dropped because the private helpers no
    longer exist in condoor-ng; only the already-populated attributes are
    saved now.
    """
    # _update_device_info() and _update_udi() are removed in condoor-ng
    # ctx._connection._update_device_info()
    # ctx._connection._update_udi()
    ctx._csm.save_data("device_info", ctx._connection.device_info)
    ctx._csm.save_data("udi", ctx._connection.udi)
|
pamfilos/invenio | refs/heads/master-sql-fixes | modules/oairepository/lib/oai_repository_regression_tests.py | 1 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""OAI Repository Regression Test Suite."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
import time
from datetime import datetime, timedelta
import re
from cStringIO import StringIO
from invenio.config import CFG_SITE_URL, \
CFG_OAI_SLEEP, \
CFG_OAI_LOAD, \
CFG_OAI_ID_FIELD
from invenio.dbquery import run_sql
from invenio.intbitset import intbitset
from invenio import oai_repository_server, search_engine
from invenio.testutils import make_test_suite, run_test_suite, \
test_web_page_content, merge_error_messages
class OAIRepositoryTouchSetTest(InvenioTestCase):
    """Check OAI-PMH consistency when touching a set."""

    def setUp(self):
        """Backup the current configuration"""
        # Remember each set's last_updated so tearDown can restore it.
        self.timestamps = run_sql("SELECT id, last_updated FROM oaiREPOSITORY")

    def tearDown(self):
        """Restore timestamps"""
        for id, last_updated in self.timestamps:
            run_sql("UPDATE oaiREPOSITORY SET last_updated=%s WHERE id=%s", (last_updated, id))

    def test_touching_set(self):
        """oairepository - touch a set"""
        # Harvest all identifiers of the set and find the newest datestamp.
        req = StringIO()
        oai_repository_server.oai_list_records_or_identifiers(req, {'verb': 'ListIdentifiers', 'metadataPrefix': 'marcxml', 'set':'cern:experiment'})
        response = req.getvalue()
        current_timestamps = re.findall("<datestamp>(.*?)</datestamp>", response)
        current_timestamps = [datetime(*time.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')[:-3]) for timestamp in current_timestamps]
        last_timestamp = max(current_timestamps)
        future_timestamp = last_timestamp + timedelta(0, 5) ## 5 seconds in the future to the last record
        future_timestamp = future_timestamp.strftime('%Y-%m-%dT%H:%M:%SZ')
        # A from-date later than every record must yield no datestamps...
        req = StringIO()
        oai_repository_server.oai_list_records_or_identifiers(req, {'verb': 'ListIdentifiers', 'metadataPrefix': 'marcxml', 'set':'cern:experiment', 'from': future_timestamp})
        response = req.getvalue()
        self.failIf(re.findall("<datestamp>(.*?)</datestamp>", response))
        # ...until the set is "touched", which bumps every record's datestamp.
        from invenio.oai_repository_admin import touch_oai_set
        touch_oai_set('cern:experiment')
        req = StringIO()
        oai_repository_server.oai_list_records_or_identifiers(req, {'verb': 'ListIdentifiers', 'metadataPrefix': 'marcxml', 'set':'cern:experiment', 'from': future_timestamp})
        response = req.getvalue()
        new_timestamps = re.findall("<datestamp>(.*?)</datestamp>", response)
        new_timestamps = [datetime(*time.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')[:-3]) for timestamp in new_timestamps]
        # Same number of records as before, but with strictly newer datestamps.
        self.assertEqual(len(new_timestamps), len(current_timestamps), "new %s, old %s, from: %s" % (new_timestamps, current_timestamps, future_timestamp))
        self.failUnless(new_timestamps > current_timestamps)
class OAIRepositoryWebPagesAvailabilityTest(InvenioTestCase):
    """Check OAI Repository web pages whether they are up or not."""

    def test_oai_server_pages_availability(self):
        """oairepository - availability of OAI server pages"""
        base = CFG_SITE_URL + '/oai2d'
        # fast commands first, sleepy commands afterwards
        verbs = ['?verb=Identify',
                 '?verb=ListMetadataFormats',
                 '?verb=ListSets',
                 '?verb=ListRecords',
                 '?verb=GetRecord']
        problems = []
        for suffix in verbs:
            url = base + suffix
            if not (url.endswith('Identify') or
                    url.endswith('ListMetadataFormats')):
                # some sleep required for verbs other than Identify and
                # ListMetadataFormats, since oai2d refuses too frequent access
                time.sleep(CFG_OAI_SLEEP)
            problems.extend(test_web_page_content(url,
                                                  expected_text='</OAI-PMH>'))
        if problems:
            self.fail(merge_error_messages(problems))
        return
class TestSelectiveHarvesting(InvenioTestCase):
    """Test set, from and until parameters used to do selective harvesting."""

    def test_set(self):
        """oairepository - testing selective harvesting with 'set' parameter"""
        # Demo records 10 and 17 are the ones tagged into cern:experiment.
        self.assertEqual(intbitset([10, 17]), oai_repository_server.oai_get_recid_list(set_spec="cern:experiment"))
        self.assert_("Multifractal analysis of minimum bias events" in \
                     ''.join([oai_repository_server.print_record(recID) for recID in \
                              oai_repository_server.oai_get_recid_list(set_spec="cern:experiment")]))
        self.assert_("Multifractal analysis of minimum bias events" not in \
                     ''.join([oai_repository_server.print_record(recID) for recID in \
                              oai_repository_server.oai_get_recid_list(set_spec="cern:theory")]))
        # An unknown set must harvest nothing.
        self.failIf(oai_repository_server.oai_get_recid_list(set_spec="nonExistingSet"))

    def test_from_and_until(self):
        """oairepository - testing selective harvesting with 'from' and 'until' parameters"""
        req = StringIO()
        # List available records, get datestamps and play with them
        oai_repository_server.oai_list_records_or_identifiers(req, {'verb': 'ListIdentifiers', 'metadataPrefix': 'marcxml'})
        identifiers = req.getvalue()
        datestamps = re.findall('<identifier>(?P<id>.*?)</identifier>\s*<datestamp>(?P<date>.*?)</datestamp>', identifiers, re.M)
        sample_datestamp = datestamps[0][1] # Take one datestamp
        sample_oai_id = datestamps[0][0] # Take corresponding oai id
        sample_id = search_engine.perform_request_search(p=sample_oai_id,
                                                         f=CFG_OAI_ID_FIELD)[0] # Find corresponding system number id
        # There must be some datestamps
        self.assertNotEqual([], datestamps)
        # We must be able to retrieve an id with the date we have just found
        self.assert_(sample_id in oai_repository_server.oai_get_recid_list(fromdate=sample_datestamp), "%s not in %s (fromdate=%s)" % (sample_id, oai_repository_server.oai_get_recid_list(fromdate=sample_datestamp), sample_datestamp))
        self.assert_(sample_id in oai_repository_server.oai_get_recid_list(untildate=sample_datestamp), "%s not in %s" % (sample_id, oai_repository_server.oai_get_recid_list(untildate=sample_datestamp)))
        self.assert_(sample_id in oai_repository_server.oai_get_recid_list(untildate=sample_datestamp, \
                                                                           fromdate=sample_datestamp))
        # Same, with short format date. Eg 2007-12-13
        self.assert_(sample_id in oai_repository_server.oai_get_recid_list(fromdate=sample_datestamp.split('T')[0]))
        self.assert_(sample_id in oai_repository_server.oai_get_recid_list(untildate=sample_datestamp.split('T')[0]))
        self.assert_(sample_id in oai_repository_server.oai_get_recid_list(fromdate=sample_datestamp.split('T')[0], \
                                                                           untildate=sample_datestamp.split('T')[0]))
        # At later date (year after) we should not find our id again
        sample_datestamp_year = int(sample_datestamp[0:4])
        sample_datestamp_rest = sample_datestamp[4:]
        later_datestamp = str(sample_datestamp_year + 1) + sample_datestamp_rest
        self.assert_(sample_id not in oai_repository_server.oai_get_recid_list(fromdate=later_datestamp))
        # At earlier date (year before) we should not find our id again
        earlier_datestamp = str(sample_datestamp_year - 1) + sample_datestamp_rest
        self.assert_(sample_id not in oai_repository_server.oai_get_recid_list(untildate=earlier_datestamp))
        # From earliest date to latest date must include all oai records
        dates = [(time.mktime(time.strptime(date[1], "%Y-%m-%dT%H:%M:%SZ")), date[1]) for date in datestamps]
        dates = dict(dates)
        sorted_times = dates.keys()
        sorted_times.sort()
        earliest_datestamp = dates[sorted_times[0]]
        latest_datestamp = dates[sorted_times[-1]]
        self.assertEqual(oai_repository_server.oai_get_recid_list(), \
                         oai_repository_server.oai_get_recid_list(fromdate=earliest_datestamp, \
                                                                  untildate=latest_datestamp))

    def test_resumption_token(self):
        """oairepository - testing harvesting with bad resumption token"""
        # Non existing resumptionToken
        req = StringIO()
        oai_repository_server.oai_list_records_or_identifiers(req, {'resumptionToken': 'foobar', 'verb': 'ListRecords'})
        self.assert_('badResumptionToken' in req.getvalue())
class TestPerformance(InvenioTestCase):
    """Test performance of the repository """

    def setUp(self):
        """Determine how many records will be served (capped by CFG_OAI_LOAD)."""
        self.number_of_records = len(oai_repository_server.oai_get_recid_list("", "", ""))
        if CFG_OAI_LOAD < self.number_of_records:
            self.number_of_records = CFG_OAI_LOAD

    def _check_list_records_speed(self, prefix, allowed_seconds_per_record):
        """Time a ListRecords request for *prefix*; fail when it exceeds
        number_of_records * allowed_seconds_per_record seconds.

        Extracted helper: the two public tests previously duplicated this
        timing logic (with slightly different failure messages).
        """
        t0 = time.time()
        oai_repository_server.oai_list_records_or_identifiers(
            StringIO(), argd={'metadataPrefix': prefix, 'verb': 'ListRecords'})
        t = time.time() - t0
        limit = self.number_of_records * allowed_seconds_per_record
        if t > limit:
            self.fail("""Response for ListRecords with metadataPrefix=%s took too much time:
%s seconds.
Limit: %s seconds""" % (prefix, t, limit))

    def test_response_speed_oai(self):
        """oairepository - speed of response for oai_dc output"""
        self._check_list_records_speed('oai_dc', 0.15)

    def test_response_speed_marcxml(self):
        """oairepository - speed of response for marcxml output"""
        self._check_list_records_speed('marcxml', 0.15)
# Aggregate every test case of this module into a single suite.
TEST_SUITE = make_test_suite(OAIRepositoryTouchSetTest,
                             OAIRepositoryWebPagesAvailabilityTest,
                             TestSelectiveHarvesting,
                             TestPerformance)

if __name__ == "__main__":
    # warn_user=True: these regression tests hit the live demo database.
    run_test_suite(TEST_SUITE, warn_user=True)
|
leansoft/edx-platform | refs/heads/master | common/djangoapps/util/admin.py | 163 | """Admin interface for the util app. """
from ratelimitbackend import admin
from util.models import RateLimitConfiguration
# Expose RateLimitConfiguration in the Django admin so it can be toggled at runtime.
admin.site.register(RateLimitConfiguration)
|
tgianos/zerotodocker | refs/heads/master | security_monkey/0.3.4/security_monkey-api/config-deploy.py | 6 | # Insert any config items here.
# This will be fed into Flask/SQLAlchemy inside security_monkey/__init__.py
# Logging
LOG_LEVEL = "DEBUG"
LOG_FILE = "/var/log/security_monkey/security_monkey-deploy.log"

# Database -- the 'postgres' host refers to the linked postgres container.
SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:securitymonkeypassword@postgres:5432/secmonkey'
SQLALCHEMY_POOL_SIZE = 50
SQLALCHEMY_MAX_OVERFLOW = 15

# Deployment topology
ENVIRONMENT = 'ec2'
USE_ROUTE53 = False
FQDN = '<PUBLIC_IP_ADDRESS>'            # replace with the instance's public IP/hostname
API_PORT = '5000'
WEB_PORT = '443'
FRONTED_BY_NGINX = True
NGINX_PORT = '443'
WEB_PATH = '/static/ui.html'
BASE_URL = 'https://{}/'.format(FQDN)

SECRET_KEY = '<INSERT_RANDOM_STRING_HERE>'      # Flask session signing key
MAIL_DEFAULT_SENDER = 'securitymonkey@<YOURDOMAIN>.com'

# Flask-Security feature flags and post-action redirect targets.
SECURITY_REGISTERABLE = True
SECURITY_CONFIRMABLE = False
SECURITY_RECOVERABLE = False
SECURITY_PASSWORD_HASH = 'bcrypt'
SECURITY_PASSWORD_SALT = '<INSERT_RANDOM_STRING_HERE>'
SECURITY_POST_LOGIN_VIEW = WEB_PATH
SECURITY_POST_REGISTER_VIEW = WEB_PATH
SECURITY_POST_CONFIRM_VIEW = WEB_PATH
SECURITY_POST_RESET_VIEW = WEB_PATH
SECURITY_POST_CHANGE_VIEW = WEB_PATH

# This address gets all change notifications
SECURITY_TEAM_EMAIL = []
SECURITY_SEND_REGISTER_EMAIL = False

# These are only required if using SMTP instead of SES
EMAILS_USE_SMTP = False     # Otherwise, Use SES
MAIL_SERVER = 'smtp.<YOUREMAILPROVIDER>.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = 'securitymonkey'
MAIL_PASSWORD = '<YOURPASSWORD>'
|
leiguo3/Vulkan_AS | refs/heads/master | code/data/shaders/compileshaders.py | 6 | import sys
import os
import glob
import subprocess
# Compile every GLSL shader in the given directory to SPIR-V with
# glslangvalidator and report which files failed.
if len(sys.argv) < 2:
    sys.exit("Please provide a target directory")
if not os.path.exists(sys.argv[1]):
    sys.exit("%s is not a valid directory" % sys.argv[1])
path = sys.argv[1]

# Collect every recognised shader-stage file in the target directory.
shaderfiles = []
for exts in ('*.vert', '*.frag', '*.comp', '*.geom', '*.tesc', '*.tese'):
    shaderfiles.extend(glob.glob(os.path.join(path, exts)))

failedshaders = []
for shaderfile in shaderfiles:
    print("\n-------- %s --------\n" % shaderfile)
    command = "glslangvalidator -V %s -o %s.spv" % (shaderfile, shaderfile)
    if subprocess.call(command, shell=True) != 0:
        failedshaders.append(shaderfile)

print("\n-------- Compilation result --------\n")
if not failedshaders:
    print("SUCCESS: All shaders compiled to SPIR-V")
else:
    print("ERROR: %d shader(s) could not be compiled:\n" % len(failedshaders))
    for failedshader in failedshaders:
        print("\t" + failedshader)
|
danimo/qt-creator | refs/heads/master | tests/system/suite_debugger/tst_simple_debug/test.py | 2 | #############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
def main():
    """Drive Qt Creator through a debug session of a generated QtQuick app,
    with one breakpoint in QML and one in C++, for every Debug build config."""
    startApplication("qtcreator" + SettingsPath)
    if not startedWithoutPluginError():
        return
    # Requires Qt 4.8
    targets = Targets.desktopTargetClasses() & ~Targets.DESKTOP_474_GCC
    # using a temporary directory won't mess up a potentially existing
    workingDir = tempDir()
    checkedTargets, projectName = createNewQtQuickApplication(workingDir, targets=targets)
    # Insert a Timer whose console.log() line serves as the QML breakpoint
    # location below.
    editor = waitForObject(":Qt Creator_QmlJSEditor::QmlJSTextEditorWidget")
    if placeCursorToLine(editor, "MouseArea.*", True):
        type(editor, '<Up>')
        type(editor, '<Return>')
        typeLines(editor, ['Timer {',
                           'interval: 1000',
                           'running: true',
                           'onTriggered: console.log("Break here")'])
    invokeMenuItem("File", "Save All")
    # One breakpoint in the QML resource, one in main.cpp.
    filesAndLines = [
                     { "%s.Resources.qml\.qrc./.main\\.qml" % projectName : 'onTriggered.*' },
                     { "%s.Sources.main\\.cpp" % projectName : "viewer.setOrientation\\(.+\\);" }
                    ]
    test.log("Setting breakpoints")
    result = setBreakpointsForCurrentProject(filesAndLines)
    if result:
        expectedBreakpointsOrder = [{os.path.join(workingDir, projectName, "main.cpp"):10},
                                    {os.path.join(workingDir, projectName, "main.qml"):10}]
        # Only use 4.7.4 to work around QTBUG-25187
        availableConfigs = iterateBuildConfigs(len(checkedTargets), "Debug")
        progressBarWait()
        if not availableConfigs:
            test.fatal("Haven't found a suitable Qt version (need Qt 4.7.4) - leaving without debugging.")
        for kit, config in availableConfigs:
            test.log("Selecting '%s' as build config" % config)
            verifyBuildConfig(len(checkedTargets), kit, config, True, enableQmlDebug=True)
            # explicitly build before start debugging for adding the executable as allowed program to WinFW
            invokeMenuItem("Build", "Rebuild All")
            waitForCompile(300000)
            if not checkCompile():
                test.fatal("Compile had errors... Skipping current build config")
                continue
            allowAppThroughWinFW(workingDir, projectName, False)
            if not doSimpleDebugging(len(checkedTargets), kit, config,
                                     len(expectedBreakpointsOrder), expectedBreakpointsOrder):
                # debugging failed -- try to stop a possibly still-running debuggee
                try:
                    stopB = findObject(':Qt Creator.Stop_QToolButton')
                    if stopB.enabled:
                        clickButton(stopB)
                except:
                    pass
            deleteAppFromWinFW(workingDir, projectName, False)
            # close application output window of current run to avoid mixing older output on the next run
            ensureChecked(":Qt Creator_AppOutput_Core::Internal::OutputPaneToggleButton")
            clickButton(waitForObject("{type='CloseButton' unnamed='1' visible='1' "
                                      "window=':Qt Creator_Core::Internal::MainWindow'}"))
            try:
                clickButton(waitForObject(":Close Debugging Session.Yes_QPushButton", 2000))
            except:
                pass
    else:
        test.fatal("Setting breakpoints failed - leaving without testing.")
    invokeMenuItem("File", "Exit")
def init():
    # Start from a clean state: stale QML debug artifacts would skew the test.
    removeQmlDebugFolderIfExists()

def cleanup():
    # Remove QML debug artifacts created during the run.
    removeQmlDebugFolderIfExists()
|
tschneidereit/servo | refs/heads/master | etc/servo_gdb.py | 233 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
A set of simple pretty printers for gdb to make debugging Servo a bit easier.
To load these, you need to add something like the following to your .gdbinit file:
python
import sys
sys.path.insert(0, '/home/<path to git checkout>/servo/src/etc')
import servo_gdb
servo_gdb.register_printers(None)
end
"""
import gdb
# Print Au in both raw value and CSS pixels
class AuPrinter:
    """Pretty printer rendering an Au value as CSS pixels."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        # An Au stores 60ths of a CSS pixel in an i32.
        raw = self.val.cast(gdb.lookup_type("i32"))
        return "{0}px".format(raw / 60.0)
# Print a U8 bitfield as binary
class BitFieldU8Printer:
    """Pretty printer rendering a u8 bitfield in binary, e.g. [0b00000101]."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        as_u8 = self.val.cast(gdb.lookup_type("u8"))
        return "[{0:#010b}]".format(int(as_u8))
# Print a struct with fields as children
class ChildPrinter:
    """Pretty printer exposing a struct's fields as named children."""

    def __init__(self, val):
        self.val = val

    def children(self):
        return [(field.name, self.val[field.name])
                for field in self.val.type.fields()]

    def to_string(self):
        # No summary line -- the children carry all the information.
        return None
# Allow a trusted node to be dereferenced in the debugger
# Allow a trusted node to be dereferenced in the debugger
class TrustedNodeAddressPrinter:
    def __init__(self, val):
        self.val = val

    def children(self):
        # Reinterpret the trusted address as a Node* so gdb can expand it.
        node_type = gdb.lookup_type("struct script::dom::node::Node").pointer()
        value = self.val.cast(node_type)
        return [('Node', value)]

    def to_string(self):
        # Show the raw address as the summary line.
        return self.val.address
# Extract a node type ID from enum
class NodeTypeIdPrinter:
    """Pretty printer showing which NodeTypeId enum variant is active."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        u8_ptr_type = gdb.lookup_type("u8").pointer()
        # The first byte of the enum is its discriminant; it indexes into the
        # enum's field list to select the active variant's type.
        enum_0 = self.val.address.cast(u8_ptr_type).dereference()
        enum_type = self.val.type.fields()[int(enum_0)].type
        # Bug fix: lstrip('struct ') strips a *character set*, not a prefix,
        # so variant names starting with any of 's t r u c' would be mangled.
        # Strip the exact "struct " prefix instead.
        name = str(enum_type)
        if name.startswith('struct '):
            name = name[len('struct '):]
        return name
# Printer for std::Option<>
class OptionPrinter:
    """Pretty printer for std::Option<T>."""

    def __init__(self, val):
        self.val = val

    def is_some(self):
        """Return True when the discriminator byte says the value is Some."""
        # Get size of discriminator
        d_size = self.val.type.fields()[0].type.sizeof
        if d_size > 0 and d_size <= 8:
            # Read first byte to check if None or Some
            ptr = self.val.address.cast(gdb.lookup_type("unsigned char").pointer())
            discriminator = int(ptr.dereference())
            return discriminator != 0
        # Bug fix: raising a plain string is a TypeError in modern Python;
        # raise a real exception object instead.
        raise ValueError("unhandled discriminator size")

    def children(self):
        if self.is_some():
            option_type = self.val.type
            # The payload sits at the end of the Option: compute its offset
            # from the total size and the value type's size.
            ptr = self.val.address.cast(gdb.lookup_type("unsigned char").pointer())
            t_size = option_type.sizeof
            value_type = option_type.fields()[1].type.fields()[1].type
            v_size = value_type.sizeof
            data_ptr = (ptr + t_size - v_size).cast(value_type.pointer()).dereference()
            return [('Some', data_ptr)]
        return [('None', None)]

    def to_string(self):
        return None
# Useful for debugging when type is unknown
class TestPrinter:
    """Fallback printer that just reports the value's type.

    Useful for debugging when type is unknown.
    """

    def __init__(self, val):
        self.val = val

    def to_string(self):
        return "[UNKNOWN - type = " + str(self.val.type) + "]"
# (type-name suffix, printer class) pairs consulted by lookup_servo_type.
type_map = [
    ('struct Au', AuPrinter),
    ('FlowFlags', BitFieldU8Printer),
    ('IntrinsicWidths', ChildPrinter),
    ('PlacementInfo', ChildPrinter),
    ('TrustedNodeAddress', TrustedNodeAddressPrinter),
    ('NodeTypeId', NodeTypeIdPrinter),
    ('Option', OptionPrinter),
]
def lookup_servo_type(val):
    """Return a pretty-printer instance for *val*, or None when no entry in
    type_map matches its (possibly namespace-qualified) type name."""
    type_str = str(val.type)
    for candidate, make_printer in type_map:
        if type_str == candidate or type_str.endswith("::" + candidate):
            return make_printer(val)
    return None
    # return TestPrinter(val)  # handy when diagnosing unmatched types
def register_printers(obj):
    """Install the Servo lookup function into gdb's pretty-printer chain.

    *obj* is accepted for API compatibility with gdb's register helpers but
    is not used; printers are registered globally.
    """
    gdb.pretty_printers.append(lookup_servo_type)
|
signalfx/maestro-ng | refs/heads/main | maestro/version.py | 1 | # Copyright (C) 2013-2014 SignalFuse, Inc.
# Copyright (C) 2015-2018 SignalFx, Inc.
# Package identity consumed by setup.py / packaging metadata.
name = 'maestro-ng'
version = '0.8.1'
|
Cazomino05/Test1 | refs/heads/master | vendor/google-breakpad/src/testing/scripts/generator/cpp/gmock_class_test.py | 78 | #!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
    """Helper class that adds assert methods."""

    def StripLeadingWhitespace(self, lines):
        """Strip leading whitespace in each line in 'lines'."""
        stripped = [line.lstrip() for line in lines.split('\n')]
        return '\n'.join(stripped)

    def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
        """Specialized assert that ignores the indent level."""
        self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
    # Tests for gmock_class._GenerateMethods: C++ class source in,
    # MOCK_METHOD*/MOCK_CONST_METHOD* declarations out.

    def GenerateMethodSource(self, cpp_source):
        """Convert C++ source to Google Mock output source lines."""
        method_source_lines = []
        # <test> is a pseudo-filename, it is not read or written.
        builder = ast.BuilderFromSource(cpp_source, '<test>')
        ast_list = list(builder.Generate())
        gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
        return '\n'.join(method_source_lines)

    def testSimpleMethod(self):
        source = """
class Foo {
public:
virtual int Bar();
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint());',
            self.GenerateMethodSource(source))

    def testSimpleConstMethod(self):
        source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
            self.GenerateMethodSource(source))

    def testExplicitVoid(self):
        source = """
class Foo {
public:
virtual int Bar(void);
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD0(Bar,\nint(void));',
            self.GenerateMethodSource(source))

    def testStrangeNewlineInParameter(self):
        source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD1(Bar,\nvoid(int a));',
            self.GenerateMethodSource(source))

    def testDefaultParameters(self):
        # Parameter names are dropped when defaults are present.
        source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\nvoid(int, char));',
            self.GenerateMethodSource(source))

    def testMultipleDefaultParameters(self):
        source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\nvoid(int, char));',
            self.GenerateMethodSource(source))

    def testRemovesCommentsWhenDefaultsArePresent(self):
        source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\nvoid(int, char));',
            self.GenerateMethodSource(source))

    def testDoubleSlashCommentsInParameterListAreRemoved(self):
        source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
            self.GenerateMethodSource(source))

    def testCStyleCommentsInParameterListAreNotRemoved(self):
        # NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
        # comments. Also note that C style comments after the last parameter
        # are still elided.
        source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
            self.GenerateMethodSource(source))

    def testArgsOfTemplateTypes(self):
        source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD2(Bar,\n'
            'int(const vector<int>& v, map<int, string>* output));',
            self.GenerateMethodSource(source))

    def testReturnTypeWithOneTemplateArg(self):
        source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
        self.assertEqualIgnoreLeadingWhitespace(
            'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
            self.GenerateMethodSource(source))

    def testReturnTypeWithManyTemplateArgs(self):
        source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
        # Comparing the comment text is brittle - we'll think of something
        # better in case this gets annoying, but for now let's keep it simple.
        self.assertEqualIgnoreLeadingWhitespace(
            '// The following line won\'t really compile, as the return\n'
            '// type has multiple template arguments. To fix it, use a\n'
            '// typedef for the return type.\n'
            'MOCK_METHOD0(Bar,\nmap<int, string>());',
            self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
    # Tests for gmock_class._GenerateMocks: whole-class mock generation,
    # including namespace nesting and storage-specifier macros.

    def GenerateMocks(self, cpp_source):
        """Convert C++ source to complete Google Mock output source."""
        # <test> is a pseudo-filename, it is not read or written.
        filename = '<test>'
        builder = ast.BuilderFromSource(cpp_source, filename)
        ast_list = list(builder.Generate())
        lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
        return '\n'.join(lines)

    def testNamespaces(self):
        source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {
class Test {
public:
virtual void Foo();
};
} // namespace Baz
} // namespace Foo
"""
        expected = """\
namespace Foo {
namespace Baz {
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
} // namespace Baz
} // namespace Foo
"""
        self.assertEqualIgnoreLeadingWhitespace(
            expected, self.GenerateMocks(source))

    def testClassWithStorageSpecifierMacro(self):
        source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
        expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
        self.assertEqualIgnoreLeadingWhitespace(
            expected, self.GenerateMocks(source))
if __name__ == '__main__':
    # Run this module's test cases directly.
    unittest.main()
|
alanjw/GreenOpenERP-Win-X86 | refs/heads/7.0 | openerp/addons/mrp/product.py | 56 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_product(osv.osv):
_inherit = "product.product"
_columns = {
"bom_ids": fields.one2many('mrp.bom', 'product_id','Bill of Materials', domain=[('bom_id','=',False)]),
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'bom_ids': []
})
return super(product_product, self).copy(cr, uid, id, default, context=context)
product_product()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jtattermusch/grpc | refs/heads/master | examples/python/no_codegen/greeter_server.py | 9 | # Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter server."""
from concurrent import futures
import logging
import grpc
protos, services = grpc.protos_and_services("helloworld.proto")
class Greeter(services.GreeterServicer):
def SayHello(self, request, context):
return protos.HelloReply(message='Hello, %s!' % request.name)
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
services.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port('[::]:50051')
server.start()
server.wait_for_termination()
if __name__ == '__main__':
logging.basicConfig()
serve()
|
F-AOSP/platform_external_skia | refs/heads/aosp-5.1 | tools/pyutils/gs_utils.py | 66 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Utilities for accessing Google Cloud Storage.
TODO(epoger): move this into tools/utils for broader use?
"""
# System-level imports
import os
import posixpath
import sys
try:
from apiclient.discovery import build as build_service
except ImportError:
print ('Missing google-api-python-client. Please install it; directions '
'can be found at https://developers.google.com/api-client-library/'
'python/start/installation')
raise
# Local imports
import url_utils
def download_file(source_bucket, source_path, dest_path,
create_subdirs_if_needed=False):
""" Downloads a single file from Google Cloud Storage to local disk.
Args:
source_bucket: GCS bucket to download the file from
source_path: full path (Posix-style) within that bucket
dest_path: full path (local-OS-style) on local disk to copy the file to
create_subdirs_if_needed: boolean; whether to create subdirectories as
needed to create dest_path
"""
source_http_url = posixpath.join(
'http://storage.googleapis.com', source_bucket, source_path)
url_utils.copy_contents(source_url=source_http_url, dest_path=dest_path,
create_subdirs_if_needed=create_subdirs_if_needed)
def list_bucket_contents(bucket, subdir=None):
""" Returns files in the Google Cloud Storage bucket as a (dirs, files) tuple.
Uses the API documented at
https://developers.google.com/storage/docs/json_api/v1/objects/list
Args:
bucket: name of the Google Storage bucket
subdir: directory within the bucket to list, or None for root directory
"""
# The GCS command relies on the subdir name (if any) ending with a slash.
if subdir and not subdir.endswith('/'):
subdir += '/'
subdir_length = len(subdir) if subdir else 0
storage = build_service('storage', 'v1')
command = storage.objects().list(
bucket=bucket, delimiter='/', fields='items(name),prefixes',
prefix=subdir)
results = command.execute()
# The GCS command returned two subdicts:
# prefixes: the full path of every directory within subdir, with trailing '/'
# items: property dict for each file object within subdir
# (including 'name', which is full path of the object)
dirs = []
for dir_fullpath in results.get('prefixes', []):
dir_basename = dir_fullpath[subdir_length:]
dirs.append(dir_basename[:-1]) # strip trailing slash
files = []
for file_properties in results.get('items', []):
file_fullpath = file_properties['name']
file_basename = file_fullpath[subdir_length:]
files.append(file_basename)
return (dirs, files)
|
mimischi/django-clock | refs/heads/develop | clock/shifts/tests/test_views.py | 1 | """Test shift app views.
All messages are tested for the default English strings.
"""
from django.contrib.messages import get_messages
from django.utils import timezone, translation
from freezegun import freeze_time
from test_plus.test import TestCase
from clock.contracts.models import Contract
from clock.shifts.models import Shift
class ManualShiftViewTest(TestCase):
"""
Test that the manual buttons for the shift views are working as intended.
"""
def setUp(self):
self.user = self.make_user()
self.contract1 = Contract.objects.create(
employee=self.user, department="Test department", hours="50"
)
def test_manual_shift_start(self):
"""Assert that we can start a shift when logged in and without having a
current shift.
"""
with self.login(username=self.user.username, password="password"):
response = self.post(
"shift:quick_action",
data={"_start": True},
follow=True,
extra={"HTTP_ACCEPT_LANGUAGE": "en"},
)
with translation.override("en"):
messages = [msg for msg in get_messages(response.wsgi_request)]
shift = Shift.objects.all()[0]
self.assertFalse(shift.is_finished)
self.assertIsNone(shift.finished)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].__str__(), "Your shift has started!")
def test_cannot_start_another_shift(self):
"""Assert that we cannot have two shifts running at the same time."""
with self.login(username=self.user.username, password="password"):
self.post(
"shift:quick_action",
data={"_start": True},
follow=True,
extra={"HTTP_ACCEPT_LANGUAGE": "en"},
)
response2 = self.post(
"shift:quick_action",
data={"_start": True},
follow=True,
extra={"HTTP_ACCEPT_LANGUAGE": "en"},
)
messages = [msg for msg in get_messages(response2.wsgi_request)]
shift = Shift.objects.all()[0]
self.assertFalse(shift.is_finished)
self.assertIsNone(shift.finished)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].__str__(), "You already have an active shift!")
@freeze_time("2015-01-01 12:00")
def test_start_stop_shift(self):
"""
Assert that we can start and stop a shift.
"""
with self.login(username=self.user.username, password="password"):
self.post(
"shift:quick_action",
data={"_start": True},
follow=True,
extra={"HTTP_ACCEPT_LANGUAGE": "en"},
)
# Check that we cannot finish the shift yet.
response = self.post(
"shift:quick_action",
data={"_stop": True},
follow=True,
extra={"HTTP_ACCEPT_LANGUAGE": "en"},
)
messages = [msg for msg in get_messages(response.wsgi_request)]
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].message,
'<ul class="errorlist nonfield">'
"<li>A shift cannot be shorter than 5 minutes. "
"We deleted it for you :)</li></ul>",
)
# Start shift again.
self.post(
"shift:quick_action",
data={"_start": True},
follow=True,
extra={"HTTP_ACCEPT_LANGUAGE": "en"},
)
# Try and finish it later.
with freeze_time("2015-01-01 12:40"):
response2 = self.post(
"shift:quick_action",
data={"_stop": True},
follow=True,
extra={"HTTP_ACCEPT_LANGUAGE": "en"},
)
messages = [msg for msg in get_messages(response2.wsgi_request)]
shift = Shift.objects.all().first()
self.assertTrue(shift.is_finished)
self.assertIsNotNone(shift.finished)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].__str__(), "Your shift has finished!")
class ShiftsViewTest(TestCase):
"""
Test views of Shift app.
"""
def setUp(self):
self.user1 = self.make_user()
self.contract1 = Contract.objects.create(
employee=self.user1, department="Test department", hours="40"
)
def test_login_required_for_shift_views(self):
"""
Make sure the views of the app are only accessible while logged in.
"""
# Test basic creation / deletion views
self.assertLoginRequired("shift:list")
self.assertLoginRequired("shift:new")
self.assertLoginRequired("shift:edit", pk=1)
self.assertLoginRequired("shift:delete", pk=1)
# Test the other list views. Contract IDs used reflect non-existing
# contracts!
# self.assertLoginRequired(
# 'shift:archive_day', year=2016, month=6, day=10)
# self.assertLoginRequired('shift:archive_week', year=2016, week=10)
self.assertLoginRequired("shift:archive_month_numeric", year=2016, month=5)
self.assertLoginRequired(
"shift:archive_month_contract_numeric", year=2016, month=5, contract=0
)
self.assertLoginRequired("shift:article_year_archive", year=2016)
def test_logged_in_shift_views(self):
"""
Test whether a logged in user can access all pages of the shifts app.
Edit/delete pages should NOT work if the object is not found!
"""
user1 = self.make_user("user1")
self.assertLoginRequired("shift:list")
with self.login(username=user1.username, password="password"):
# Test basic creation / deletion views
self.get_check_200("shift:list")
self.get_check_200("shift:new")
edit = self.get("shift:edit", pk=1)
self.response_404(edit)
delete = self.get("shift:delete", pk=1)
self.response_404(delete)
# Test other list views. The template here is not working.
# Therefore a 404 error is expected!
# day = self.get('shift:archive_day', year=2016, month=5, day=1)
# self.response_404(day)
# self.get_check_200('shift:archive_week', year=2016, week=10)
self.get_check_200("shift:archive_month_numeric", year=2016, month=5)
self.get_check_200(
"shift:archive_month_contract_numeric", year=2016, month=5, contract=0
)
self.get_check_200("shift:article_year_archive", year=2016)
def test_surf_shift_list_wo_date(self):
"""Assert that we can surf through the shift list without specifying the
current month.
"""
user1 = self.make_user("user1")
now = timezone.now()
shift = Shift.objects.create(
employee=user1, started=now, finished=now + timezone.timedelta(0, 3600)
)
shift.save()
with self.login(username=user1.username, password="password"):
# Go into the shift list, but do not define any month and let the
# backend figure it out by itself
self.get_check_200("shift:list")
# Edit the just created shift
self.get_check_200("shift:edit", pk=shift.pk)
# Try to delete it.
self.get_check_200("shift:delete", pk=shift.pk)
def test_surf_shift_list_w_date(self):
"""Assert that we can surf through the shift list while specifying the
current month.
"""
user1 = self.make_user("user1")
now = timezone.now()
shift = Shift.objects.create(
employee=user1, started=now, finished=now + timezone.timedelta(0, 3600)
)
shift.save()
with self.login(username=user1.username, password="password"):
self.get_check_200(
"shift:archive_month_numeric", year=now.year, month=now.month
)
self.get_check_200("shift:edit", pk=shift.pk)
self.get_check_200("shift:delete", pk=shift.pk)
def test_surf_shift_list_w_date_and_contract(self):
"""Assert that we can surf through the shift list while specifying the
current month and a contract.
"""
user1 = self.make_user("user1")
contract = self.contract1
now = timezone.now()
shift = Shift.objects.create(
employee=user1,
started=now,
finished=now + timezone.timedelta(0, 3600),
contract=self.contract1,
)
shift.save()
with self.login(username=user1.username, password="password"):
self.get_check_200(
"shift:archive_month_contract_numeric",
year=now.year,
month=now.month,
contract=contract.pk,
)
self.get_check_200("shift:edit", pk=shift.pk)
self.get_check_200("shift:delete", pk=shift.pk)
|
andreasBihlmaier/arni | refs/heads/master | arni_countermeasure/src/arni_countermeasure/constraint_not.py | 2 | from constraint_item import *
class ConstraintNot(ConstraintItem):
"""An constraints consisting of another constraint negated."""
def __init__(self, constraint):
super(ConstraintNot, self).__init__()
#: the constraint to be negated
#: :type: ConstraintItem
self.__constraint = constraint
def evaluate_constraint(self, storage):
"""Evaluate if the constraint inside this constraint is false or not.
:param storage: The storage where the incoming statistics are saved.
:type storage: RatedStatisticStorage
:return: True iff the constraint inside is False.
"""
return not self.__constraint.evaluate_constraint(storage)
|
willthames/ansible | refs/heads/devel | lib/ansible/plugins/shell/__init__.py | 26 | # (c) 2016 RedHat
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import ansible.constants as C
import time
import random
from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import shlex_quote
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
class ShellBase(object):
def __init__(self):
self.env = dict()
if C.DEFAULT_MODULE_SET_LOCALE:
module_locale = C.DEFAULT_MODULE_LANG or os.getenv('LANG', 'en_US.UTF-8')
self.env.update(
dict(
LANG=module_locale,
LC_ALL=module_locale,
LC_MESSAGES=module_locale,
)
)
def env_prefix(self, **kwargs):
env = self.env.copy()
env.update(kwargs)
return ' '.join(['%s=%s' % (k, shlex_quote(text_type(v))) for k, v in env.items()])
def join_path(self, *args):
return os.path.join(*args)
# some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say
def get_remote_filename(self, pathname):
base_name = os.path.basename(pathname.strip())
return base_name.strip()
def path_has_trailing_slash(self, path):
return path.endswith('/')
def chmod(self, paths, mode):
cmd = ['chmod', mode]
cmd.extend(paths)
cmd = [shlex_quote(c) for c in cmd]
return ' '.join(cmd)
def chown(self, paths, user):
cmd = ['chown', user]
cmd.extend(paths)
cmd = [shlex_quote(c) for c in cmd]
return ' '.join(cmd)
def set_user_facl(self, paths, user, mode):
"""Only sets acls for users as that's really all we need"""
cmd = ['setfacl', '-m', 'u:%s:%s' % (user, mode)]
cmd.extend(paths)
cmd = [shlex_quote(c) for c in cmd]
return ' '.join(cmd)
def remove(self, path, recurse=False):
path = shlex_quote(path)
cmd = 'rm -f '
if recurse:
cmd += '-r '
return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
def exists(self, path):
cmd = ['test', '-e', shlex_quote(path)]
return ' '.join(cmd)
def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
if not basefile:
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
# When system is specified we have to create this in a directory where
# other users can read and access the temp directory. This is because
# we use system to create tmp dirs for unprivileged users who are
# sudo'ing to a second unprivileged user. The only dirctories where
# that is standard are the tmp dirs, /tmp and /var/tmp. So we only
# allow one of those two locations if system=True. However, users
# might want to have some say over which of /tmp or /var/tmp is used
# (because /tmp may be a tmpfs and want to conserve RAM or persist the
# tmp files beyond a reboot. So we check if the user set REMOTE_TMP
# to somewhere in or below /var/tmp and if so use /var/tmp. If
# anything else we use /tmp (because /tmp is specified by POSIX nad
# /var/tmp is not).
if system:
# FIXME: create 'system tmp dirs' config/var and check tmpdir is in those values to allow for /opt/tmp, etc
if tmpdir.startswith('/var/tmp'):
basetmpdir = '/var/tmp'
else:
basetmpdir = '/tmp'
else:
if tmpdir is None:
basetmpdir = C.DEFAULT_REMOTE_TMP
else:
basetmpdir = tmpdir
basetmp = self.join_path(basetmpdir, basefile)
cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
cmd += ' %s echo %s=%s echo %s %s' % (self._SHELL_AND, basefile, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
# change the umask in a subshell to achieve the desired mode
# also for directories created with `mkdir -p`
if mode:
tmp_umask = 0o777 & ~mode
cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)
return cmd
def expand_user(self, user_home_path):
''' Return a command to expand tildes in a path
It can be either "~" or "~username". We use the POSIX definition of
a username:
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
'''
# Check that the user_path to expand is safe
if user_home_path != '~':
if not _USER_HOME_PATH_RE.match(user_home_path):
# shlex_quote will make the shell return the string verbatim
user_home_path = shlex_quote(user_home_path)
return 'echo %s' % user_home_path
def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
# don't quote the cmd if it's an empty string, because this will break pipelining mode
if cmd.strip() != '':
cmd = shlex_quote(cmd)
cmd_parts = []
if shebang:
shebang = shebang.replace("#!", "").strip()
else:
shebang = ""
cmd_parts.extend([env_string.strip(), shebang, cmd])
if arg_path is not None:
cmd_parts.append(arg_path)
new_cmd = " ".join(cmd_parts)
if rm_tmp:
new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
return new_cmd
def append_command(self, cmd, cmd_to_append):
"""Append an additional command if supported by the shell"""
if self._SHELL_AND:
cmd += ' %s %s' % (self._SHELL_AND, cmd_to_append)
return cmd
def wrap_for_exec(self, cmd):
"""wrap script execution with any necessary decoration (eg '&' for quoted powershell script paths)"""
return cmd
|
naousse/odoo | refs/heads/8.0 | addons/subscription/__init__.py | 441 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import subscription
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sadanandb/pmt | refs/heads/master | src/tactic/ui/startup/share_wdg.py | 6 | ###########################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
__all__ = ['ShareWdg', 'LocalItemWdg', 'ShareItemWdg', 'ShareItemCbk']
from pyasm.common import Environment, Common, Config
from pyasm.search import Search
from pyasm.biz import Project
from pyasm.command import Command
from pyasm.web import DivWdg, HtmlElement, SpanWdg, Table, Widget
from pyasm.widget import IconWdg, TextWdg, TextAreaWdg
import os, shutil
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.widget import ActionButtonWdg, IconButtonWdg, SingleButtonWdg
from tactic.ui.input import TextInputWdg
from tactic.ui.app import SearchWdg
from tactic.ui.panel import ViewPanelWdg
from tactic.ui.container import SmartMenu, Menu, MenuItem
class ShareWdg(BaseRefreshWdg):
'''This is the welcome widget widget will appear on creation of a new
project
'''
def get_display(my):
top = my.top
top.add_class("spt_share_top")
my.set_as_panel(top)
top.add_color("background", "background")
title = DivWdg()
top.add(title)
title.add_style("font-size: 18px")
title.add_style("font-weight: bold")
title.add_style("text-align: center")
title.add_style("padding: 10px")
#title.add_style("margin: -10px -10px 10px -10px")
title.add_gradient("background", "background3", 5, -10)
title.add("Share Project")
# add the main layout
#table = ResizableTableWdg()
table = Table()
table.add_color("color", "color")
top.add(table)
table.add_row()
left = table.add_cell()
left.add_border()
left.add_style("vertical-align: top")
left.add_style("min-width: 250px")
left.add_style("height: 400px")
left.add_color("background", "background3")
left.add(my.get_share_wdg() )
right = table.add_cell()
right.add_border()
right.add_style("vertical-align: top")
right.add_style("min-width: 400px")
right.add_style("width: 100%")
right.add_style("height: 400px")
right.add_style("padding: 5px")
right.add_class("spt_share_content")
share_item_wdg = ShareItemWdg()
right.add(share_item_wdg)
return top
def get_share_wdg(my):
div = DivWdg()
div.add_style("padding: 20px")
msg = '''<p>Before starting to work on a project that you are sharing, you should import the starting point.</p>'''
div.add(msg)
button = ActionButtonWdg(title="Import")
div.add(button)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var class_name = 'tactic.ui.sync.SyncImportWdg';
var top = bvr.src_el.getParent(".spt_share_top");
var content = top.getElement(".spt_share_content");
spt.panel.load(content, class_name);
//spt.panel.load_popup("Sync Import", class_name);
'''
} )
msg = '''<p>This allows you to create a share for this project. This will allow you to share this project with others.</p>'''
div.add(msg)
button = ActionButtonWdg(title="Share")
div.add(button)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var class_name = 'tactic.ui.sync.SyncServerAddWdg';
spt.panel.load_popup("Sync Share", class_name);
'''
} )
title_wdg = DivWdg()
div.add( title_wdg )
title_wdg.add( "Local" )
title_wdg.add_style("padding: 5px")
title_wdg.add_color("background", "background", -10)
title_wdg.add_border()
title_wdg.add_style("margin: 5px -22px 10px -22px")
local_code = Config.get_value("install", "server") or ""
local_div = DivWdg()
div.add(local_div)
local_div.add_class("spt_share_item")
local_div.add_attr("spt_server_code", local_code)
local_div.add_class("hand")
local_div.add(local_code)
local_div.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var class_name = 'tactic.ui.startup.LocalItemWdg';
var top = bvr.src_el.getParent(".spt_share_top");
var content = top.getElement(".spt_share_content");
spt.panel.load(content, class_name);
//spt.panel.load_popup("Sync Import", class_name);
'''
} )
div.add("<br/>")
search = Search("sthpw/sync_server")
shares = search.get_sobjects()
title_wdg = DivWdg()
div.add( title_wdg )
title_wdg.add( "Share List" )
title_wdg.add_style("padding: 5px")
title_wdg.add_color("background", "background", -10)
title_wdg.add_border()
title_wdg.add_style("margin: 5px -22px 10px -22px")
shares_div = DivWdg()
div.add(shares_div)
shares_div.add_relay_behavior( {
'type': 'mouseup',
'bvr_match_class': 'spt_share_item',
'cbjs_action': '''
var server_code = bvr.src_el.getAttribute("spt_server_code");
var class_name = 'tactic.ui.startup.ShareItemWdg';
var kwargs = {
server_code: server_code
}
var top = bvr.src_el.getParent(".spt_share_top");
var content = top.getElement(".spt_share_content");
spt.panel.load(content, class_name, kwargs);
'''
} )
bgcolor = shares_div.get_color("background", -5)
shares_div.add_relay_behavior( {
'type': 'mouseover',
'bvr_match_class': 'spt_share_item',
'bgcolor': bgcolor,
'cbjs_action': '''
bvr.src_el.setStyle("background", bvr.bgcolor);
'''
} )
shares_div.add_relay_behavior( {
'type': 'mouseout',
'bvr_match_class': 'spt_share_item',
'cbjs_action': '''
bvr.src_el.setStyle("background", "");
'''
} )
from pyasm.security import AccessManager
access_manager = AccessManager()
project = Project.get()
project_code = project.get_code()
# add in a context menu
menu = my.get_context_menu()
menus = [menu.get_data()]
menus_in = {
'SHARE_ITEM_CTX': menus,
}
SmartMenu.attach_smart_context_menu( shares_div, menus_in, False )
count = 0
for share in shares:
# hide the shares that are not in this project
rules = share.get_value("access_rules");
access_manager.add_xml_rules(rules)
key1 = { 'code': project_code }
key2 = { 'code': '*' }
keys = [key1, key2]
if not access_manager.check_access("project", keys, "allow", default="deny"):
continue
share_div = DivWdg()
shares_div.add(share_div)
share_div.add_class("spt_share_item")
share_div.add_attr("spt_server_code", share.get_code())
share_div.add_class("hand")
share_div.add(share.get_code())
share_div.add_attr("title", share.get_value("description") )
share_div.add_style("padding: 5px")
base_dir = share.get_value("base_dir")
if base_dir:
base_div = SpanWdg()
share_div.add(base_div)
base_div.add_style("font-size: 0.9em")
base_div.add_style("font-style: italic")
base_div.add_style("opacity: 0.5")
base_div.add(" (%s)" % base_dir)
share_div.add_attr("spt_share_code", share.get_code() )
SmartMenu.assign_as_local_activator( share_div, 'SHARE_ITEM_CTX' )
count += 1
if not count:
share_div = DivWdg()
shares_div.add(share_div)
share_div.add("<i>No shares</i>")
share_div.add_style("padding: 5px")
return div
def get_context_menu(my):
menu = Menu(width=180)
menu.set_allow_icons(False)
menu_item = MenuItem(type='title', label='Actions')
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Remove Share')
menu.add(menu_item)
menu_item.add_behavior({
'type': 'click_up',
'cbjs_action': '''
if (!confirm("Delete share?") ) {
return;
}
var activator = spt.smenu.get_activator(bvr);
var code = activator.getAttribute("spt_share_code");
var class_name = 'tactic.ui.startup.ShareItemCbk';
var kwargs = {
'action': 'delete',
'code': code
};
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs);
var top = activator.getParent(".spt_share_top");
spt.panel.refresh(top);
'''
})
return menu
class LocalItemWdg(BaseRefreshWdg):
def get_display(my):
top = my.top
my.set_as_panel(top)
title_wdg = DivWdg()
top.add( title_wdg )
title_wdg.add( "Local" )
title_wdg.add_style("padding: 10px")
title_wdg.add_color("background", "background", -10)
title_wdg.add_border()
title_wdg.add_style("margin: -6px -6px 10px -6px")
title_wdg.add_style("font-weight: bold")
from tactic.ui.container import TabWdg
tab = TabWdg(selected="Info", show_add=False)
top.add(tab)
tab.add(my.get_info_wdg())
return top
def get_info_wdg(my):
div = DivWdg()
div.set_name("Info")
div.add_style("padding: 20px")
table = Table()
div.add(table)
table.add_color("color", "color")
#table.add_style("height: 280px")
table.set_unique_id()
table.add_smart_style("spt_table_header", "width", "200px")
table.add_smart_style("spt_table_header", "text-align", "right")
table.add_smart_style("spt_table_header", "padding-right", "20px")
table.add_smart_style("spt_table_header", "margin-bottom", "10px")
table.add_smart_style("spt_table_element", "vertical-align", "top")
table.add_row()
#if my.mode == 'insert':
# read_only = False
#else:
# read_only = True
read_only = False
code = Config.get_value("install", "server") or ""
td = table.add_cell()
td.add_class("spt_table_header")
td.add("Code: ")
td.add_style("vertical-align: top")
text = TextInputWdg(name="code", read_only=read_only)
td = table.add_cell()
td.add_class("spt_table_element")
td.add(text)
text.set_value(code)
return div
class ShareItemWdg(BaseRefreshWdg):
def get_display(my):
top = my.top
my.set_as_panel(top)
top.add_class("spt_share_item")
my.server_code = my.kwargs.get("server_code")
if not my.server_code:
top.add("No server selected")
return top
my.server = Search.get_by_code("sthpw/sync_server", my.server_code)
my.base_dir = my.server.get_value("base_dir")
title_wdg = DivWdg()
top.add( title_wdg )
title_wdg.add( my.server_code )
title_wdg.add_style("padding: 10px")
title_wdg.add_color("background", "background", -10)
title_wdg.add_border()
title_wdg.add_style("margin: -6px -6px 10px -6px")
title_wdg.add_style("font-weight: bold")
description = my.server.get_value("description")
title_wdg.add(" <i style='font-size: 9px; opacity: 0.5'>( %s )</i>" % description )
from tactic.ui.container import TabWdg
tab = TabWdg(selected="Info", show_add=False)
top.add(tab)
tab.add(my.get_info_wdg())
tab.add(my.get_files_wdg())
tab.add( my.get_tools_wdg() )
return top
def get_info_wdg(my):
div = DivWdg()
div.set_name("Info")
div.add_style("padding: 20px")
table = Table()
div.add(table)
table.add_color("color", "color")
#table.add_style("height: 280px")
table.set_unique_id()
table.add_smart_style("spt_table_header", "width", "200px")
table.add_smart_style("spt_table_header", "text-align", "right")
table.add_smart_style("spt_table_header", "padding-right", "20px")
table.add_smart_style("spt_table_header", "margin-bottom", "10px")
table.add_smart_style("spt_table_element", "vertical-align", "top")
#if my.mode == 'insert':
# read_only = False
#else:
# read_only = True
read_only = False
code = my.server.get_code() or ""
description = my.server.get_value("description") or ""
table.add_row()
td = table.add_cell()
td.add_class("spt_table_header")
td.add("Code: ")
td.add_style("vertical-align: top")
text = TextInputWdg(name="code", read_only=read_only)
td = table.add_cell()
td.add_class("spt_table_element")
td.add(text)
text.set_value(code)
table.add_row()
td = table.add_cell()
td.add_class("spt_table_header")
td.add("Description: ")
td.add_style("vertical-align: top")
text = TextAreaWdg(name="description", read_only=read_only)
td = table.add_cell()
td.add_class("spt_table_element")
td.add(text)
text.set_value(description)
table.add_row()
td = table.add_cell()
td.add_class("spt_table_header")
td.add("Base Directory: ")
td.add_style("vertical-align: top")
text = TextInputWdg(name="base_dir", read_only=read_only)
td = table.add_cell()
td.add_class("spt_table_element")
td.add(text)
text.set_value(my.base_dir)
return div
def get_files_wdg(my):
div = DivWdg()
div.set_name("Files")
div.add_style("padding: 10px")
shelf_wdg = DivWdg()
div.add(shelf_wdg)
shelf_wdg.add_style("height: 25px")
shelf_wdg.add_style("padding: 5px")
shelf_wdg.add_border()
shelf_wdg.add_color("background", "background3")
shelf_wdg.add_style("margin: 0px -11px 10px -11px")
project_code = Project.get_project_code()
share_code = my.server.get_code()
# NOT supported yet
base_dir = my.server.get_value("base_dir")
imports_dir = "%s/imports" % base_dir
#import datetime
#now = datetime.datetime.now()
#version = now.strftime("%Y%m%d_%H%M%S")
button = ActionButtonWdg(title="Export")
shelf_wdg.add(button)
button.add_behavior( {
'type': 'click_up',
'project_code': project_code,
'share_code': share_code,
'imports_dir': imports_dir,
'cbjs_action': '''
var class_name = 'tactic.ui.sync.SyncCreateTemplateCmd'
var kwargs = {
server: bvr.share_code,
project_code: bvr.project_code,
base_dir: bvr.imports_dir,
}
spt.app_busy.show("Exporting project ...");
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs);
var top = bvr.src_el.getParent(".spt_share_item");
spt.panel.refresh(top);
spt.app_busy.hide();
'''
})
from tactic.ui.app import PluginDirListWdg
dir_list = PluginDirListWdg(base_dir=my.base_dir, location="server")
#from tactic.ui.widget import DirListWdg
#dir_list = DirListWdg(base_dir=my.base_dir, location="server")
div.add(dir_list)
return div
def get_tools_wdg(my):
    """Build the "Tools" tab with a single Export action for the share.
    Returns the assembled DivWdg."""
    div = DivWdg()
    div.set_name("Tools")
    div.add_style("padding: 10px")

    div.add("This tool will export out a version of the project")

    button = ActionButtonWdg(title="Export")
    div.add(button)
    # Client-side: run SyncCreateTemplateCmd for this share, then refresh
    # the clicked element's panel.
    button.add_behavior( {
        'type': 'click_up',
        'server': my.server_code,
        'cbjs_action': '''
        var class_name = 'tactic.ui.sync.SyncCreateTemplateCmd';
        var kwargs = {
            server: bvr.server
        }
        spt.app_busy.show("Exporting project ...");
        var server = TacticServerStub.get();
        server.execute_cmd(class_name, kwargs);
        spt.app_busy.hide();
        spt.panel.refresh(bvr.src_el);
        '''
    } )

    return div
class ShareItemCbk(Command):
    """Callback command for administering a share item.

    Dispatches on the "action" kwarg; currently only "delete" is
    supported, which removes both the sync server database entry and
    its on-disk base directory.
    """

    def execute(my):
        # Dispatch on the requested action; unknown actions are ignored.
        action = my.kwargs.get("action")
        if action == "delete":
            my.delete()

    def delete(my):
        """Delete the sync server entry identified by the "code" kwarg.

        Removes the database entry first, then the base directory on
        disk (if it exists).  Raises if no code was supplied.
        """
        code = my.kwargs.get("code")
        assert code

        server = Search.get_by_code("sthpw/sync_server", code)
        # Removed leftover debug output (``print server.get_data()``).
        base_dir = server.get_value("base_dir")
        server.delete()

        # Remove files only after the database entry is gone, so a failed
        # delete does not leave a dangling entry pointing at nothing.
        if os.path.exists(base_dir):
            shutil.rmtree(base_dir)
|
khchine5/lino-welfare | refs/heads/master | lino_welfare/modlib/isip/choicelists.py | 1 | # -*- coding: UTF-8 -*-
# Copyright 2013-2017 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
"""Choicelists for `lino_welfare.modlib.isip`.
"""
from lino.api import dd, _
class ContractEvents(dd.ChoiceList):
    """Choicelist of events that can be observed on a contract."""
    verbose_name = _("Observed event")
    verbose_name_plural = _("Observed events")

add = ContractEvents.add_item
# Populate the choicelist; tuple order defines the display order.
for _value, _text, _name in (
        ('10', _("Active"), 'active'),
        ('20', _("Started"), 'started'),
        ('30', _("Ended"), 'ended'),
        ('40', _("Decided"), 'decided'),
        ('50', _("Issued"), 'issued')):
    add(_value, _text, _name)
class OverlapGroups(dd.ChoiceList):
    """All known overlap groups, selectable as the :attr:`overlap_group
    <lino_welfare.modlib.isip.mixins.ContractTypeBase.overlap_group>`
    of a contract type.
    """
    verbose_name = _("Overlap group")
    verbose_name_plural = _("Overlap groups")

add = OverlapGroups.add_item
# Populate the choicelist; tuple order defines the display order.
for _value, _text, _name in (
        ('10', _("Conventions"), 'contracts'),
        ('20', _("Trainings"), 'trainings')):
    add(_value, _text, _name)
|
cod3monk/kerncraft | refs/heads/master | tests/test_example_files.py | 2 | #!/usr/bin/env python3
"""
Tests for validity of example files.
"""
import os
from glob import glob
import unittest
from kerncraft import machinemodel, kernel
class TestExampleFiles(unittest.TestCase):
    """Checks that every shipped example kernel and machine file parses."""

    @staticmethod
    def _find_file(name):
        # Resolve *name* relative to the tests/test_files directory that
        # lives next to this module.
        testdir = os.path.dirname(__file__)
        name = os.path.join(testdir, 'test_files', name)
        assert os.path.exists(name)
        return name

    def setUp(self):
        # Shared machine model used when compiling the example kernels.
        self.default_machine = machinemodel.MachineModel(self._find_file('SandyBridgeEP_E5-2680.yml'))

    def test_kernelfiles(self):
        # Every example kernel source must be accepted by KernelCode.
        kernel_files_glob = os.path.join(os.path.dirname(__file__), '../examples/kernels/', '*.c')
        for kernel_path in glob(kernel_files_glob):
            with self.subTest(kernel_path):
                with open(kernel_path) as kernel_code:
                    kernel.KernelCode(kernel_code.read(), self.default_machine,
                                      filename=kernel_path)

    def test_machinefiles(self):
        # Every example machine description must load as a MachineModel.
        machine_files_glob = os.path.join(os.path.dirname(__file__), '../examples/machine-files/', '*.y*ml')
        for yml_path in glob(machine_files_glob):
            with self.subTest(yml_path=yml_path):
                machinemodel.MachineModel(yml_path)
|
jcchin/Hyperloop_v2 | refs/heads/master | src/hyperloop/Python/pod/pod_mach.py | 4 | """
Estimates tube diameter, inlet diameter, and compressor power
Will optimize some sort of cost function based on pod mach number
Many parameters are currently taken from hyperloop alpha, pod sizing analysis
"""
from __future__ import print_function
import numpy as np
from openmdao.api import IndepVarComp, Component, Group, Problem, ExecComp
class PodMach(Component):
    """
    Notes
    ------
    Uses isentropic mach-area relationships to determine the cross sectional area of the tube to prevent choking and super sonic flow.
    Takes pod mach number and tunnel pressure from user, then takes pod area and bloackage factor from geometry.s

    Params
    ------
    gam : float
        Ratio of specific heats. Default value is 1.4
    R : float
        Ideal gas constant. Default valut is 287 J/(m*K).
    A_pod : float
        cross sectional area of the pod. Default value is 1.4 m**2. Value will be taken from pod geometry module
    comp_inlet_area : float
        Inlet area of compressor. (m**2)
    L : float
        Pod length. Default value is 22 m. Value will be taken from pod geometry module
    prc : float
        Pressure ratio across compressor inlet and outlet. Default value is 12.5. Value will be taken from NPSS
    p_tube : float
        Pressure of air in tube. Default value is 850 Pa. Value will come from vacuum component
    T_ambient : float
        Tunnel ambient temperature. Default value is 298 K.
    mu : float
        Fluid dynamic viscosity. Default value is 1.846e-5 kg/(m*s)
    M_duct : float
        Maximum Mach number allowed in the duct. Default value is .95
    M_diff : float
        Maximum Mach number allowed at compressor inlet. Default valu is .6
    cp : float
        Specific heat of fluid. Default value is 1009 J/(kg*K)
    M_pod : float
        pod Mach number. Default value is .8

    Returns
    -------
    A_tube : float
        will return optimal tunnel area based on pod Mach number
    pwr_comp : float
        will return the power that needs to be delivered to the flow by the compressor. Does not account for compressor efficiency
    A_bypass : float
        will return area of that the flow must go through to bypass pod
    A_inlet : float
        returns area of the inlet necessary to slow the flow down to M_diffuser
    A_duct_eff : float
        returns effective duct area which accounts for displacement boundary layer thickness approximation
    A_diff : float
        returns area of diffuser outlet
    Re : float
        returns free stream Reynolds number
    """

    def __init__(self):
        # Declare all OpenMDAO params/unknowns up front; the numbers below
        # are the defaults documented in the class docstring.
        super(PodMach, self).__init__()

        self.add_param('gam', val=1.4, desc='ratio of specific heats')
        self.add_param('R',
                       val=287.0,
                       units='J/(kg*K)',
                       desc='Ideal gas constant')
        self.add_param('comp_inlet_area', 2.3884, desc = 'compressor inlet area', units = 'm**2')
        self.add_param('A_pod', val=3.0536, units='m**2', desc='pod area')
        self.add_param('L', val=20.5, units='m', desc='pod length')
        # NOTE(review): 'prc' is a pressure *ratio* (dimensionless); the
        # units='m**2' declaration below looks like a copy-paste error --
        # confirm before relying on OpenMDAO unit conversion for this param.
        self.add_param('prc',
                       val=12.5,
                       units='m**2',
                       desc='pressure ratio of a compressor')
        self.add_param('p_tube',
                       val=850.0,
                       units='Pa',
                       desc='ambient pressure')
        self.add_param('T_ambient',
                       val=298.0,
                       units='K',
                       desc='ambient temperature')
        self.add_param('mu',
                       val=1.846e-5,
                       units='kg/(m*s)',
                       desc='dynamic viscosity')
        self.add_param('M_duct', val=.95, desc='maximum pod mach number')
        self.add_param(
            'M_diff',
            val=.6,
            desc='maximum pod mach number befor entering the compressor')
        self.add_param('cp',
                       val=1009.0,
                       units='J/(kg*K)',
                       desc='specific heat')
        # self.add_param('delta_star',
        #                val=..14,
        #                units='m',
        #                desc='Boundary layer displacement thickness')
        self.add_param('M_pod', val=.8, desc='pod mach number')

        self.add_output('pwr_comp',
                        val=0.0,
                        units='W',
                        desc='Compressor Power')
        self.add_output('A_inlet',
                        val=0.0,
                        units='m**2',
                        desc='Pod inlet area')
        self.add_output('A_tube', val=0.0, units='m**2', desc='tube area')
        self.add_output('A_bypass', val=0.0, units='m**2', desc='bypass area')
        self.add_output('A_duct_eff',
                        val=0.0,
                        units='m**2',
                        desc='effective duct area')
        self.add_output('A_diff',
                        val=0.0,
                        units='m**2',
                        desc='Area after diffuser')
        self.add_output('Re', val=0.0, desc='Reynolds Number')

    def solve_nonlinear(self, params, unknowns, resids):
        """Size the tube and compressor from isentropic Mach-area relations."""
        gam = params['gam']
        comp_inlet_area = params['comp_inlet_area']
        A_pod = params['A_pod']
        L = params['L']
        prc = params['prc']
        p_tube = params['p_tube']
        R = params['R']
        T_ambient = params['T_ambient']
        mu = params['mu']
        M_duct = params['M_duct']
        M_diff = params['M_diff']
        cp = params['cp']
        #delta_star = params['delta_star']
        M_pod = params['M_pod']

        def mach_to_area(M1, M2, gam):
            '''Isentropic area ratio between two Mach numbers:
            (A2/A1) = f(M2)/f(M1) where f(M) = (1/M)*((2/(gam+1))*(1+((gam-1)/2)*M**2))**((gam+1)/(2*(gam-1)))'''
            A_ratio = (M1 / M2) * (((1.0 + ((gam - 1.0) / 2.0) * (M2**2.0)) /
                                    (1.0 + ((gam - 1.0) / 2.0) * (M1**2.0)))**(
                                        (gam + 1.0) / (2.0 * (gam - 1.0))))
            return A_ratio

        #Define intermediate variables
        rho_inf = p_tube / (R *
                            T_ambient)  #Calculate density of free stream flow
        U_inf = M_pod * (np.sqrt((gam * R * T_ambient)))  #Calculate velocity of free stream flow
        r_pod = np.sqrt((A_pod / np.pi))  #Calculate pod radius
        Re = (rho_inf * U_inf *
              L) / mu  #Calculate length based Reynolds Number
        delta_star = (.04775*L)/(Re**.2)  #Calculate displacement boundary layer thickness
        BF = comp_inlet_area/A_pod  #Calculate diffuser based blockage factor
        A_diff = BF * A_pod  #Calculate diffuser output area based on blockage factor input

        #Calculate inlet area. Inlet is necessary if free stream Mach number is greater than max compressore mach number M_diff
        if M_pod > M_diff:
            A_inlet = A_diff * mach_to_area(M_diff, M_pod, gam)
        else:
            A_inlet = A_diff

        # eps is the isentropic area ratio from pod Mach to duct-limit Mach;
        # the denominator below is algebraically (1 - eps).
        eps = mach_to_area(M_pod, M_duct, gam)
        A_tube = (A_pod + np.pi * (((r_pod + delta_star)**2.0) - (r_pod**2.0)) -
                  (eps * A_inlet)) / ((1.0 + (np.sqrt(eps))) * (1.0 - (np.sqrt(eps))))
        # Ideal compressor power (no efficiency losses accounted for).
        pwr_comp = (rho_inf * U_inf * A_inlet) * cp * T_ambient * (1.0 + (
            (gam - 1) / 2.0) * (M_pod**2)) * ((prc**((gam - 1) / gam)) - 1)
        A_bypass = A_tube - A_inlet
        # Effective duct area subtracts the pod plus its displacement
        # boundary-layer annulus from the tube.
        A_duct_eff = A_tube - A_pod - np.pi * ((
            (r_pod + delta_star)**2) - (r_pod**2))

        unknowns['pwr_comp'] = pwr_comp
        unknowns['A_inlet'] = A_inlet
        unknowns['A_tube'] = A_tube
        unknowns['A_bypass'] = A_bypass
        unknowns['A_duct_eff'] = A_duct_eff
        unknowns['A_diff'] = A_diff
        unknowns['Re'] = Re
if __name__ == '__main__':
    # Standalone smoke test: wire PodMach into a Problem and print outputs.
    top = Problem()
    root = top.root = Group()

    params = (('M_pod', .8), ('gam', 1.4))

    root.add('input_vars', IndepVarComp(params))
    root.add('p', PodMach())

    root.connect('input_vars.M_pod', 'p.M_pod')
    root.connect('input_vars.gam', 'p.gam')

    # Central finite-difference derivatives for the whole model.
    root.deriv_options['type'] = 'fd'
    root.deriv_options['form'] = 'central'
    root.deriv_options['step_size'] = 1.0e-8

    top.setup()
    top.run()

    print('\n')
    print('Pod Mach number is %f' % top['p.M_pod'])
    print('Area of the tube is %f m^2' % top['p.A_tube'])
    print('Compressor power is %f W' % top['p.pwr_comp'])
    print('Area of the inlet is %f m^2' % top['p.A_inlet'])
    print('Area after diffuser is %f m^2' % top['p.A_diff'])
    print('Bypass area is %f m^2' % top['p.A_bypass'])
    print('Effective duct area is %f m^2' % top['p.A_duct_eff'])
    print('Reynolds number is %f' % top['p.Re'])
|
ettm2012/MissionPlanner | refs/heads/master | Lib/encodings/cp1252.py | 93 | """ Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1252 codec backed by the charmap tables below."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # charmap encoding is stateless, so each chunk can be encoded
    # independently; charmap_encode returns (bytes, length) -- keep bytes.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # charmap decoding is stateless; charmap_decode returns
    # (text, length) -- keep only the decoded text.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream behavior comes entirely from the two mixin base classes.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream behavior comes entirely from the two mixin base classes.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry the encodings package registers."""
    # The codec is stateless, so one instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1252',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
# Inverse mapping ({unicode: byte}) derived from the decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
|
libratbag/piper | refs/heads/master | piper/ledspage.py | 1 | # SPDX-License-Identifier: GPL-2.0-or-later
from gettext import gettext as _
from .leddialog import LedDialog
from .mousemap import MouseMap
from .optionbutton import OptionButton
from .ratbagd import RatbagdLed
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk # noqa
class LedsPage(Gtk.Box):
    """The third stack page, exposing the LED configuration."""

    __gtype_name__ = "LedsPage"

    def __init__(self, ratbagd_device, *args, **kwargs):
        """Instantiates a new LedsPage.

        @param ratbag_device The ratbag device to configure, as
                             ratbagd.RatbagdDevice
        """
        Gtk.Box.__init__(self, *args, **kwargs)
        self._device = ratbagd_device
        # Rebuild the page whenever the active profile switches.
        self._device.connect("active-profile-changed",
                             self._on_active_profile_changed)
        self._profile = None
        self._mousemap = MouseMap("#Leds", self._device, spacing=20, border_width=20)
        self.pack_start(self._mousemap, True, True, 0)
        # Keep all per-LED buttons the same width.
        self._sizegroup = Gtk.SizeGroup(mode=Gtk.SizeGroupMode.HORIZONTAL)
        self._set_profile(self._device.active_profile)
        self.show_all()

    def _set_profile(self, profile):
        # Create one OptionButton per LED of *profile* and place it on the
        # mouse map; the button label tracks the LED's current mode.
        self._profile = profile
        for led in profile.leds:
            mode = _(RatbagdLed.LED_DESCRIPTION[led.mode])
            button = OptionButton(mode)
            button.connect("clicked", self._on_button_clicked, led)
            led.connect("notify::mode", self._on_led_mode_changed, button)
            self._mousemap.add(button, "#led{}".format(led.index))
            self._sizegroup.add_widget(button)

    def _on_active_profile_changed(self, device, profile):
        # Disconnect the notify::mode signal on the old profile's LEDs.
        for led in self._profile.leds:
            led.disconnect_by_func(self._on_led_mode_changed)

        # Clear the MouseMap of any children.
        self._mousemap.foreach(Gtk.Widget.destroy)

        # Repopulate the MouseMap.
        self._set_profile(profile)

    def _on_led_mode_changed(self, led, pspec, button):
        # Keep the button label in sync with the LED's mode property.
        mode = _(RatbagdLed.LED_DESCRIPTION[led.mode])
        button.set_label(mode)

    def _on_button_clicked(self, button, led):
        # Presents the LedDialog to configure the LED corresponding to the
        # clicked button.
        dialog = LedDialog(led, transient_for=self.get_toplevel())
        dialog.connect("response", self._on_dialog_response, led)
        dialog.present()

    def _on_dialog_response(self, dialog, response, led):
        # The user either pressed cancel or apply. If it's apply, apply the
        # changes before closing the dialog, otherwise just close the dialog.
        if response == Gtk.ResponseType.APPLY:
            led.mode = dialog.mode
            led.color = dialog.color
            led.brightness = dialog.brightness
            led.effect_duration = dialog.effect_duration
        dialog.destroy()
|
rodrigob/fuel | refs/heads/master | tests/test_streams.py | 21 | import numpy
from numpy.testing import assert_equal, assert_raises
from fuel.datasets import IterableDataset, IndexableDataset
from fuel.schemes import SequentialExampleScheme, SequentialScheme
from fuel.streams import AbstractDataStream, DataStream
class DummyDataStream(AbstractDataStream):
    """Minimal concrete AbstractDataStream: every hook is a no-op, just
    enough to let the abstract base class be instantiated in tests."""
    def reset(self):
        pass

    def close(self):
        pass

    def next_epoch(self):
        pass

    def get_epoch_iterator(self, as_dict=False):
        pass

    def get_data(self, request=None):
        pass
class TestAbstractDataStream(object):
    """Tests for the produces_examples contract of AbstractDataStream."""

    def test_raises_value_error_on_no_scheme_no_produces_examples(self):
        # Without an iteration scheme, produces_examples is undefined.
        stream = DummyDataStream()
        assert_raises(ValueError, getattr, stream, 'produces_examples')

    def test_raises_value_error_when_setting_produces_examples_if_scheme(self):
        # With a scheme attached, produces_examples is derived and read-only.
        stream = DummyDataStream(SequentialExampleScheme(2))
        assert_raises(ValueError, setattr, stream, 'produces_examples', True)
class TestDataStream(object):
    """Tests for DataStream's sources and axis-label handling."""

    def setUp(self):
        self.dataset = IterableDataset(numpy.eye(2))

    def test_sources_setter(self):
        stream = DataStream(self.dataset)
        stream.sources = ('features',)
        assert_equal(stream.sources, ('features',))

    def test_no_axis_labels(self):
        # A dataset without axis_labels yields a stream without them too.
        stream = DataStream(self.dataset)
        assert stream.axis_labels is None

    def test_axis_labels_on_produces_examples(self):
        # Example-wise streams drop the leading 'batch' axis label.
        axis_labels = {'data': ('batch', 'features')}
        self.dataset.axis_labels = axis_labels
        stream = DataStream(self.dataset)
        assert_equal(stream.axis_labels, {'data': ('features',)})

    def test_axis_labels_on_produces_batches(self):
        # Batch-wise streams keep the dataset's axis labels unchanged.
        dataset = IndexableDataset(numpy.eye(2))
        axis_labels = {'data': ('batch', 'features')}
        dataset.axis_labels = axis_labels
        stream = DataStream(dataset, iteration_scheme=SequentialScheme(2, 2))
        assert_equal(stream.axis_labels, axis_labels)
|
ChromiumWebApps/chromium | refs/heads/master | chrome/test/mini_installer/test_installer.py | 27 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script tests the installer with test cases specified in the config file.
For each test case, it checks that the machine states after the execution of
each command match the expected machine states. For more details, take a look at
the design documentation at http://goo.gl/Q0rGM6
"""
import json
import optparse
import os
import subprocess
import sys
import unittest
from variable_expander import VariableExpander
import verifier_runner
class Config:
    """Holds the machine states, actions, and test cases from a config file.

    Attributes:
      states: Maps each state name to the property dictionary describing it.
      actions: Maps each action name to the command that action runs.
      tests: A list of test cases.
    """
    def __init__(self):
        # Start empty; ParseConfigFile populates these from the file.
        self.states, self.actions, self.tests = {}, {}, []
class InstallerTest(unittest.TestCase):
    """Tests a test case in the config file."""

    def __init__(self, test, config, variable_expander):
        """Constructor.

        Args:
          test: An array of alternating state names and action names, starting and
              ending with state names.
          config: The Config object.
          variable_expander: A VariableExpander object.
        """
        super(InstallerTest, self).__init__()
        self._test = test
        self._config = config
        self._variable_expander = variable_expander
        self._verifier_runner = verifier_runner.VerifierRunner()
        # Assume failure until runTest finishes; tearDown keys off this.
        self._clean_on_teardown = True

    def __str__(self):
        """Returns a string representing the test case.

        Returns:
          A string created by joining state names and action names together with
          ' -> ', for example, 'Test: clean -> install chrome -> chrome_installed'.
        """
        return 'Test: %s\n' % (' -> '.join(self._test))

    def runTest(self):
        """Run the test case."""
        # |test| is an array of alternating state names and action names, starting
        # and ending with state names. Therefore, its length must be odd.
        self.assertEqual(1, len(self._test) % 2,
                         'The length of test array must be odd')

        state = self._test[0]
        self._VerifyState(state)

        # Starting at index 1, we loop through pairs of (action, state).
        for i in range(1, len(self._test), 2):
            action = self._test[i]
            # RunCommand raises on a non-zero exit status, failing the test.
            RunCommand(self._config.actions[action], self._variable_expander)

            state = self._test[i + 1]
            self._VerifyState(state)

        # If the test makes it here, it means it was successful, because RunCommand
        # and _VerifyState throw an exception on failure.
        self._clean_on_teardown = False

    def tearDown(self):
        """Cleans up the machine if the test case fails."""
        if self._clean_on_teardown:
            RunCleanCommand(True, self._variable_expander)

    def shortDescription(self):
        """Overridden from unittest.TestCase.

        We return None as the short description to suppress its printing.
        The default implementation of this method returns the docstring of the
        runTest method, which is not useful since it's the same for every test case.
        The description from the __str__ method is informative enough.
        """
        return None

    def _VerifyState(self, state):
        """Verifies that the current machine state matches a given state.

        Args:
          state: A state name.
        """
        try:
            self._verifier_runner.VerifyAll(self._config.states[state],
                                            self._variable_expander)
        except AssertionError as e:
            # If an AssertionError occurs, we intercept it and add the state name
            # to the error message so that we know where the test fails.
            raise AssertionError("In state '%s', %s" % (state, e))
def RunCommand(command, variable_expander):
    """Runs the given command from the current file's directory.

    This function throws an Exception if the command returns with non-zero exit
    status.

    Args:
      command: A command to run. It is expanded using Expand.
      variable_expander: A VariableExpander object.
    """
    resolved = variable_expander.Expand(command)
    script_dir = os.path.dirname(os.path.abspath(__file__))
    status = subprocess.call(resolved, shell=True, cwd=script_dir)
    if status:
        raise Exception('Command %s returned non-zero exit status %s' % (
            resolved, status))
def RunCleanCommand(force_clean, variable_expander):
    """Puts the machine in the clean state (i.e. Chrome not installed).

    Args:
      force_clean: A boolean indicating whether to force cleaning existing
          installations.
      variable_expander: A VariableExpander object.
    """
    # TODO(sukolsak): Read the clean state from the config file and clean
    # the machine according to it.
    # TODO(sukolsak): Handle Chrome SxS installs.
    interactive_option = '' if force_clean else '--interactive'
    # Uninstall at both user level and system level, chained with '&&'.
    commands = ['python uninstall_chrome.py '
                '--chrome-long-name="$CHROME_LONG_NAME" '
                '--no-error-if-absent %s %s' %
                (level_option, interactive_option)
                for level_option in ('', '--system-level')]
    RunCommand(' && '.join(commands), variable_expander)
def MergePropertyDictionaries(current_property, new_property):
    """Merges the new property dictionary into the current property dictionary.

    This is different from general dictionary merging in that, in case there are
    keys with the same name, we merge values together in the first level, and we
    override earlier values in the second level. For more details, take a look at
    http://goo.gl/uE0RoR

    Args:
      current_property: The property dictionary to be modified.
      new_property: The new property dictionary.
    """
    # .items() (instead of the Python-2-only .iteritems()) keeps this
    # working on both Python 2 and 3.
    for key, value in new_property.items():
        if key not in current_property:
            current_property[key] = value
        else:
            assert(isinstance(current_property[key], dict) and
                   isinstance(value, dict))
            # Merge the two second-level dictionaries; values from
            # |new_property| override earlier ones.  (The previous
            # dict(a.items() + b.items()) idiom raises TypeError on
            # Python 3, where items() views don't support '+'.)
            merged = dict(current_property[key])
            merged.update(value)
            current_property[key] = merged
def ParsePropertyFiles(directory, filenames):
  """Parses an array of .prop files.

  Args:
    directory: The directory where the Config file and all Property files
        reside in.
    filenames: An array of Property filenames.

  Returns:
    A property dictionary created by merging all property dictionaries
    specified in the array.
  """
  current_property = {}
  for filename in filenames:
    path = os.path.join(directory, filename)
    # Use a context manager so the file handle is closed deterministically
    # instead of relying on garbage collection.
    with open(path) as property_file:
      new_property = json.load(property_file)
    MergePropertyDictionaries(current_property, new_property)
  return current_property
def ParseConfigFile(filename):
  """Parses a .config file.

  Args:
    filename: A Config filename.

  Returns:
    A Config object.
  """
  # Close the file deterministically instead of relying on GC.
  with open(filename, 'r') as config_file:
    config_data = json.load(config_file)
  directory = os.path.dirname(os.path.abspath(filename))

  config = Config()
  config.tests = config_data['tests']
  # 'states' and 'actions' are arrays of [name, value] pairs.
  for state_name, state_property_filenames in config_data['states']:
    config.states[state_name] = ParsePropertyFiles(directory,
                                                   state_property_filenames)
  for action_name, action_command in config_data['actions']:
    config.actions[action_name] = action_command
  return config
def RunTests(mini_installer_path, config, force_clean):
  """Tests the installer using the given Config object.

  Args:
    mini_installer_path: The path to mini_installer.exe.
    config: A Config object.
    force_clean: A boolean indicating whether to force cleaning existing
        installations.

  Returns:
    True if all the tests passed, or False otherwise.
  """
  variable_expander = VariableExpander(mini_installer_path)
  # Start from a clean machine before running any test.
  RunCleanCommand(force_clean, variable_expander)
  suite = unittest.TestSuite()
  for test in config.tests:
    suite.addTest(InstallerTest(test, config, variable_expander))
  runner = unittest.TextTestRunner(verbosity=2)
  return runner.run(suite).wasSuccessful()
def main():
  """Parses command-line options and runs the installer test suite.

  Returns:
    0 if all tests pass, 1 otherwise.
  """
  usage = 'usage: %prog [options] config_filename'
  parser = optparse.OptionParser(usage, description='Test the installer.')
  parser.add_option('--build-dir', default='out',
                    help='Path to main build directory (the parent of the '
                         'Release or Debug directory)')
  parser.add_option('--target', default='Release',
                    help='Build target (Release or Debug)')
  parser.add_option('--force-clean', action='store_true', dest='force_clean',
                    default=False, help='Force cleaning existing installations')
  options, args = parser.parse_args()
  if len(args) != 1:
    parser.error('Incorrect number of arguments.')
  config_filename = args[0]

  mini_installer_path = os.path.join(options.build_dir, options.target,
                                     'mini_installer.exe')
  # Report a missing installer explicitly rather than via assert, which is
  # silently stripped when Python runs with -O.
  if not os.path.exists(mini_installer_path):
    parser.error('Could not find file %s' % mini_installer_path)

  config = ParseConfigFile(config_filename)
  if not RunTests(mini_installer_path, config, options.force_clean):
    return 1
  return 0
# Script entry point: the process exit code is 0 when all tests pass.
if __name__ == '__main__':
  sys.exit(main())
|
harkishan81001/py-instrumenting | refs/heads/master | pytracing/trace.py | 1 | import json
from django.conf import settings
from py_zipkin.zipkin import zipkin_span
from py_zipkin.zipkin import create_attrs_for_span
from py_zipkin.zipkin import ZipkinAttrs
from py_zipkin.util import generate_random_64bit_string
from py_zipkin.util import generate_random_128bit_string
from .transporter import transport_handler
from . import constants
class Trace(object):
    """Manages the lifecycle of a Zipkin span for a single Django request.

    Typical flow: start(request) opens the span context, optional
    set_operation_details() records the resolved view, and finish(response)
    enters/exits the context and flushes annotations to the transport.
    """

    # Class-level fallbacks kept for backward compatibility; both are
    # shadowed by instance attributes (zipkin_context in start(), extras in
    # __init__). zipkin_context = None lets finish() no-op safely when
    # start() was never called.
    zipkin_context = None
    extras = {}

    def __init__(self):
        self.request = None
        self.operation_name = None
        self.extras = {}

    def __get_request_headers(self):
        # Django exposes forwarded request headers in META with an HTTP
        # prefix. .items() keeps this Python 2/3 compatible.
        headers = {}
        for key, value in self.request.META.items():
            if key.startswith('HTTP'):
                headers[key] = value
        return headers

    def __get_host(self):
        return self.request.META[constants.REMOTE_ADDR]

    def __get_port(self):
        return int(self.request.META[constants.SERVER_PORT])

    def __get_query_params(self):
        return self.request.META.get(constants.QUERY_STRING)

    def __get_uri(self):
        return self.request.META[constants.PATH_INFO]

    def __get_parent_span_id(self):
        # Propagated B3 header; None when this is a root span.
        return self.request.META.get(
            constants.HTTP_X_B3_PARENTSPANID, None)

    def __get_trace_id(self):
        return self.request.META.get(
            constants.HTTP_X_B3_TRACEID, None)

    def __get_flags(self):
        return self.request.META.get(
            constants.HTTP_X_B3_FLAGS, '0')

    def __is_sampled(self):
        # BUG FIX: the original statement ended with a trailing comma, so it
        # returned a 1-tuple (always truthy) instead of a boolean.
        return self.request.META.get(
            constants.IS_SAMPLED, 'false') == 'true'

    def __get_span_attrs(self, use_128bit_trace_id=False):
        """Builds ZipkinAttrs from incoming B3 headers.

        Args:
            use_128bit_trace_id: when no trace id was propagated, generate a
                128-bit trace id instead of a 64-bit one.
        """
        parent_span_id = self.__get_parent_span_id()
        trace_id = self.__get_trace_id()
        if trace_id is None:
            if use_128bit_trace_id:
                trace_id = generate_random_128bit_string()
            else:
                trace_id = generate_random_64bit_string()
        is_sampled = self.__is_sampled()
        span_id = generate_random_64bit_string()
        return ZipkinAttrs(
            trace_id=trace_id,
            span_id=span_id,
            parent_span_id=parent_span_id,
            flags=self.__get_flags(),
            is_sampled=is_sampled,
        )

    def __operation_name(self):
        # Default span name is the request path.
        return self.__get_uri()

    def start(self, request, operation_name=None, *args, **kwargs):
        """Creates (but does not enter) the zipkin_span context for request.

        finish() enters and exits the context so that annotations collected
        during the request are attached before the span is reported.
        """
        self.request = request
        if not operation_name:
            operation_name = self.__operation_name()
        # BUG FIX: the request object was previously passed as the
        # use_128bit_trace_id flag, which (being truthy) forced 128-bit
        # trace ids for every request without a propagated trace id.
        attrs = self.__get_span_attrs()
        context = zipkin_span(
            service_name=settings.ZIPKIN_SERVICE_NAME,
            span_name=operation_name,
            zipkin_attrs=attrs,
            transport_handler=transport_handler,
            host=self.__get_host(),
            port=self.__get_port(),
            sample_rate=settings.ZIPKIN_SAMPLE_RATE
        )
        self.zipkin_context = context

    def set_operation_details(self, view_func, view_args, view_kwargs):
        """Records the resolved Django view as annotations for finish().

        Note: func_name/im_class/im_func are Python 2 function attributes;
        the hasattr guards make this a no-op where they are absent.
        """
        if hasattr(view_func, 'func_name'):
            self.extras[constants.ANNOTATION_DJANGO_VIEW_FUNC_NAME] = (
                view_func.func_name)
        if hasattr(view_func, 'im_class'):
            self.extras[constants.ANNOTATION_DJANGO_VIEW_CLASS] = (
                view_func.im_class.__name__)
        if hasattr(view_func, 'im_func'):
            self.extras[constants.ANNOTATION_DJANGO_VIEW_FUNC_NAME] = (
                view_func.im_func.func_name)
        self.extras[constants.ANNOTATION_DJANGO_VIEW_ARGS] = (
            json.dumps(view_args))
        self.extras[constants.ANNOTATION_DJANGO_VIEW_KWARGS] = (
            json.dumps(view_kwargs))

    def finish(self, response):
        """Enters and exits the span, attaching HTTP annotations and extras.

        No-op when start() was never called.
        """
        if not self.zipkin_context:
            return
        with self.zipkin_context:
            self.zipkin_context.update_binary_annotations({
                constants.ANNOTATION_HTTP_URI: self.__get_uri(),
                constants.ANNOTATION_HTTP_HOST: self.__get_host(),
                constants.ANNOTATION_HTTP_HEADERS: self.__get_request_headers(),
                constants.ANNOTATION_HTTP_QUERY: self.__get_query_params(),
                constants.ANNOTATION_HTTP_STATUSCODE: response.status_code,
            })
            self.zipkin_context.update_binary_annotations(self.extras)
        return
|
SerCeMan/intellij-community | refs/heads/master | python/testData/completion/mro.after.py | 83 | class C(object):
pass
C.__mro__ |
deepaklukose/grpc | refs/heads/master | src/python/grpcio_tests/tests/testing/_application_testing_common.py | 39 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc_testing
from tests.testing.proto import requests_pb2
from tests.testing.proto import services_pb2
# TODO(https://github.com/grpc/grpc/issues/11657): Eliminate this entirely.
# TODO(https://github.com/google/protobuf/issues/3452): Eliminate this if/else.
if services_pb2.DESCRIPTOR.services_by_name.get('FirstService') is None:
    # The generated descriptor lacks the service (protobuf issue 3452);
    # export sentinel strings so importing this module still succeeds.
    FIRST_SERVICE = 'Fix protobuf issue 3452!'
    FIRST_SERVICE_UNUN = 'Fix protobuf issue 3452!'
    FIRST_SERVICE_UNSTRE = 'Fix protobuf issue 3452!'
    FIRST_SERVICE_STREUN = 'Fix protobuf issue 3452!'
    FIRST_SERVICE_STRESTRE = 'Fix protobuf issue 3452!'
else:
    # Normal path: export the service descriptor and its four method
    # descriptors (unary/streaming request x response combinations).
    FIRST_SERVICE = services_pb2.DESCRIPTOR.services_by_name['FirstService']
    FIRST_SERVICE_UNUN = FIRST_SERVICE.methods_by_name['UnUn']
    FIRST_SERVICE_UNSTRE = FIRST_SERVICE.methods_by_name['UnStre']
    FIRST_SERVICE_STREUN = FIRST_SERVICE.methods_by_name['StreUn']
    FIRST_SERVICE_STRESTRE = FIRST_SERVICE.methods_by_name['StreStre']
|
bjwbell/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/mux.py | 636 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for multiplexing extension.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
"""
import collections
import copy
import email
import email.parser
import logging
import math
import struct
import threading
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import parse_frame
from mod_pywebsocket.handshake import hybi
# Reserved channel ids: 0 carries mux control blocks, 1 is the channel
# implicitly opened by the physical handshake.
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1

# Opcodes of multiplexing control blocks (top 3 bits of the first byte).
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4

# Channel ids are encoded in at most 29 significant bits (see
# _encode_channel_id / read_channel_id).
_MAX_CHANNEL_ID = 2 ** 29 - 1

# Defaults advertised to clients for slot/quota management.
_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024

# Handshake encodings for AddChannelRequest/Response.
_HANDSHAKE_ENCODING_IDENTITY = 0
_HANDSHAKE_ENCODING_DELTA = 1

# We need only these status code for now.
_HTTP_BAD_RESPONSE_MESSAGES = {
    common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
}

# DropChannel reason code
# TODO(bashi): Define all reason code defined in -05 draft.
_DROP_CODE_NORMAL_CLOSURE = 1000
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
    """Raised on an unexpected internal condition while handling the
    multiplexing extension."""
# Temporary
class MuxNotImplementedException(Exception):
    """Raised when execution reaches a not-yet-implemented code path."""
class LogicalConnectionClosedException(Exception):
    """Raised when a logical connection is closed gracefully."""
class PhysicalConnectionError(Exception):
    """Raised when there is a physical connection error.

    Attributes:
        drop_code: DropChannel reason code to report.
        message: human readable detail.
    """

    def __init__(self, drop_code, message=''):
        detail = 'code=%d, message=%r' % (drop_code, message)
        super(PhysicalConnectionError, self).__init__(detail)
        self.drop_code = drop_code
        self.message = message
class LogicalChannelError(Exception):
    """Raised when there is a logical channel error.

    Attributes:
        channel_id: id of the offending logical channel.
        drop_code: DropChannel reason code to report.
        message: human readable detail.
    """

    def __init__(self, channel_id, drop_code, message=''):
        detail = 'channel_id=%d, code=%d, message=%r' % (
            channel_id, drop_code, message)
        super(LogicalChannelError, self).__init__(detail)
        self.channel_id = channel_id
        self.drop_code = drop_code
        self.message = message
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
def _encode_number(number):
    # Numbers reuse the WebSocket extended-payload-length encoding
    # (1, 3 or 9 bytes); mask=False because mux control data is unmasked.
    return create_length_header(number, False)
def _create_add_channel_response(channel_id, encoded_handshake,
                                 encoding=0, rejected=False):
    """Serializes an AddChannelResponse control block.

    Raises:
        ValueError: if encoding is not 0 (identity) or 1 (delta).
    """
    if encoding != 0 and encoding != 1:
        raise ValueError('Invalid encoding %d' % encoding)

    # Layout of the first byte: 3-bit opcode, R (rejected) flag, then the
    # 2-bit encoding.
    first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
                  (rejected << 4) | encoding)
    body = _encode_channel_id(channel_id)
    body += _encode_number(len(encoded_handshake))
    body += encoded_handshake
    return chr(first_byte) + body
def _create_drop_channel(channel_id, code=None, message=''):
    """Serializes a DropChannel control block.

    Raises:
        ValueError: if a message is given without a code.
    """
    if len(message) > 0 and code is None:
        raise ValueError('Code must be specified if message is specified')

    block = chr(_MUX_OPCODE_DROP_CHANNEL << 5) + _encode_channel_id(channel_id)
    if code is None:
        # No reason: emit just a zero-length size field.
        block += _encode_number(0)
    else:
        reason = struct.pack('!H', code) + message
        block += _encode_number(len(reason)) + reason
    return block
def _create_flow_control(channel_id, replenished_quota):
    """Serializes a FlowControl control block replenishing send quota."""
    parts = [chr(_MUX_OPCODE_FLOW_CONTROL << 5),
             _encode_channel_id(channel_id),
             _encode_number(replenished_quota)]
    return ''.join(parts)
def _create_new_channel_slot(slots, send_quota):
    """Serializes a NewChannelSlot control block.

    Raises:
        ValueError: if slots or send_quota is negative.
    """
    if slots < 0 or send_quota < 0:
        raise ValueError('slots and send_quota must be non-negative.')
    return (chr(_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) +
            _encode_number(slots) +
            _encode_number(send_quota))
def _create_fallback_new_channel_slot():
    """Serializes a NewChannelSlot block with the F (fallback) flag set and
    zero slots/quota."""
    first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1  # Set the F flag
    return chr(first_byte) + _encode_number(0) + _encode_number(0)
def _parse_request_text(request_text):
request_line, header_lines = request_text.split('\r\n', 1)
words = request_line.split(' ')
if len(words) != 3:
raise ValueError('Bad Request-Line syntax %r' % request_line)
[command, path, version] = words
if version != 'HTTP/1.1':
raise ValueError('Bad request version %r' % version)
# email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
# RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers
# RFC 822.
headers = email.parser.Parser().parsestr(header_lines)
return command, path, version, headers
class _ControlBlock(object):
    """A structure that holds parsing result of multiplexing control block.

    Control block specific attributes will be added by _MuxFramePayloadParser.
    (e.g. encoded_handshake will be added for AddChannelRequest and
    AddChannelResponse)
    """

    def __init__(self, opcode):
        # One of the _MUX_OPCODE_* constants.
        self.opcode = opcode
class _MuxFramePayloadParser(object):
    """A class that parses multiplexed frame payload.

    Maintains a cursor (_read_position) into the payload; every read_*
    method consumes bytes by advancing it.
    """

    def __init__(self, payload):
        self._data = payload
        self._read_position = 0
        self._logger = util.get_class_logger(self)

    def read_channel_id(self):
        """Reads channel id.

        The number of leading one bits of the first byte selects the total
        encoded length (1, 2, 3 or 4 bytes); the prefix bits are masked off.

        Raises:
            ValueError: when the payload doesn't contain
                valid channel id.
        """
        remaining_length = len(self._data) - self._read_position
        pos = self._read_position
        if remaining_length == 0:
            raise ValueError('Invalid channel id format')

        channel_id = ord(self._data[pos])
        channel_id_length = 1
        if channel_id & 0xe0 == 0xe0:
            # '1110' prefix: 4-byte encoding, 29 significant bits.
            if remaining_length < 4:
                raise ValueError('Invalid channel id format')
            channel_id = struct.unpack('!L',
                                       self._data[pos:pos+4])[0] & 0x1fffffff
            channel_id_length = 4
        elif channel_id & 0xc0 == 0xc0:
            # '110' prefix: 3-byte encoding, 21 significant bits.
            if remaining_length < 3:
                raise ValueError('Invalid channel id format')
            channel_id = (((channel_id & 0x1f) << 16) +
                          struct.unpack('!H', self._data[pos+1:pos+3])[0])
            channel_id_length = 3
        elif channel_id & 0x80 == 0x80:
            # '10' prefix: 2-byte encoding, 14 significant bits.
            if remaining_length < 2:
                raise ValueError('Invalid channel id format')
            channel_id = struct.unpack('!H',
                                       self._data[pos:pos+2])[0] & 0x3fff
            channel_id_length = 2
        self._read_position += channel_id_length

        return channel_id

    def read_inner_frame(self):
        """Reads an inner frame.

        The inner frame has a single header byte (fin/rsv bits + opcode);
        its payload extends to the end of the encapsulating message.

        Raises:
            PhysicalConnectionError: when the inner frame is invalid.
        """
        if len(self._data) == self._read_position:
            raise PhysicalConnectionError(
                _DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)

        bits = ord(self._data[self._read_position])
        self._read_position += 1
        fin = (bits & 0x80) == 0x80
        rsv1 = (bits & 0x40) == 0x40
        rsv2 = (bits & 0x20) == 0x20
        rsv3 = (bits & 0x10) == 0x10
        opcode = bits & 0xf
        payload = self.remaining_data()
        # Consume rest of the message which is payload data of the original
        # frame.
        self._read_position = len(self._data)
        return fin, rsv1, rsv2, rsv3, opcode, payload

    def _read_number(self):
        # Numbers use the WebSocket extended-payload-length encoding with
        # the MSB of the first byte (the mask bit position) required to be
        # zero. Over-long encodings are rejected.
        if self._read_position + 1 > len(self._data):
            raise ValueError(
                'Cannot read the first byte of number field')

        number = ord(self._data[self._read_position])
        if number & 0x80 == 0x80:
            raise ValueError(
                'The most significant bit of the first byte of number should '
                'be unset')
        self._read_position += 1
        pos = self._read_position
        if number == 127:
            # 9-byte encoding: 8-byte big-endian value follows.
            if pos + 8 > len(self._data):
                raise ValueError('Invalid number field')
            self._read_position += 8
            number = struct.unpack('!Q', self._data[pos:pos+8])[0]
            if number > 0x7FFFFFFFFFFFFFFF:
                raise ValueError('Encoded number(%d) >= 2^63' % number)
            if number <= 0xFFFF:
                raise ValueError(
                    '%d should not be encoded by 9 bytes encoding' % number)
            return number
        if number == 126:
            # 3-byte encoding: 2-byte big-endian value follows.
            if pos + 2 > len(self._data):
                raise ValueError('Invalid number field')
            self._read_position += 2
            number = struct.unpack('!H', self._data[pos:pos+2])[0]
            if number <= 125:
                raise ValueError(
                    '%d should not be encoded by 3 bytes encoding' % number)
        return number

    def _read_size_and_contents(self):
        """Reads data that consists of followings:
            - the size of the contents encoded the same way as payload length
              of the WebSocket Protocol with 1 bit padding at the head.
            - the contents.
        """
        try:
            size = self._read_number()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                                          str(e))
        pos = self._read_position
        if pos + size > len(self._data):
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Cannot read %d bytes data' % size)

        self._read_position += size
        return self._data[pos:pos+size]

    def _read_add_channel_request(self, first_byte, control_block):
        # First byte layout: 3-bit opcode, 3 reserved bits, 2-bit encoding.
        reserved = (first_byte >> 2) & 0x7
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        # Invalid encoding will be handled by MuxHandler.
        encoding = first_byte & 0x3
        try:
            control_block.channel_id = self.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
        control_block.encoding = encoding
        encoded_handshake = self._read_size_and_contents()
        control_block.encoded_handshake = encoded_handshake
        return control_block

    def _read_add_channel_response(self, first_byte, control_block):
        # First byte layout: 3-bit opcode, A (accepted) flag, 2 reserved
        # bits, 2-bit encoding.
        reserved = (first_byte >> 2) & 0x3
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        control_block.accepted = (first_byte >> 4) & 1
        control_block.encoding = first_byte & 0x3
        try:
            control_block.channel_id = self.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
        control_block.encoded_handshake = self._read_size_and_contents()
        return control_block

    def _read_flow_control(self, first_byte, control_block):
        # All 5 low bits of the first byte are reserved.
        reserved = first_byte & 0x1f
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        try:
            control_block.channel_id = self.read_channel_id()
            control_block.send_quota = self._read_number()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                                          str(e))
        return control_block

    def _read_drop_channel(self, first_byte, control_block):
        # All 5 low bits of the first byte are reserved.
        reserved = first_byte & 0x1f
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        try:
            control_block.channel_id = self.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
        reason = self._read_size_and_contents()
        if len(reason) == 0:
            control_block.drop_code = None
            control_block.drop_message = ''
        elif len(reason) >= 2:
            # The first two bytes are the drop code; the rest is the message.
            control_block.drop_code = struct.unpack('!H', reason[:2])[0]
            control_block.drop_message = reason[2:]
        else:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Received DropChannel that conains only 1-byte reason')
        return control_block

    def _read_new_channel_slot(self, first_byte, control_block):
        # First byte layout: 3-bit opcode, 4 reserved bits, F (fallback)
        # flag in the lowest bit.
        reserved = first_byte & 0x1e
        if reserved != 0:
            raise PhysicalConnectionError(
                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Reserved bits must be unset')

        control_block.fallback = first_byte & 1
        try:
            control_block.slots = self._read_number()
            control_block.send_quota = self._read_number()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                                          str(e))
        return control_block

    def read_control_blocks(self):
        """Reads control block(s).

        Generator that yields one _ControlBlock per block until the payload
        is exhausted.

        Raises:
            PhysicalConnectionError: when the payload contains invalid control
                block(s).
            StopIteration: when no control blocks left.
        """
        while self._read_position < len(self._data):
            first_byte = ord(self._data[self._read_position])
            self._read_position += 1
            opcode = (first_byte >> 5) & 0x7
            control_block = _ControlBlock(opcode=opcode)
            if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
                yield self._read_add_channel_request(first_byte, control_block)
            elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
                yield self._read_add_channel_response(
                    first_byte, control_block)
            elif opcode == _MUX_OPCODE_FLOW_CONTROL:
                yield self._read_flow_control(first_byte, control_block)
            elif opcode == _MUX_OPCODE_DROP_CHANNEL:
                yield self._read_drop_channel(first_byte, control_block)
            elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
                yield self._read_new_channel_slot(first_byte, control_block)
            else:
                raise PhysicalConnectionError(
                    _DROP_CODE_UNKNOWN_MUX_OPCODE,
                    'Invalid opcode %d' % opcode)

        assert self._read_position == len(self._data)
        raise StopIteration

    def remaining_data(self):
        """Returns remaining data."""
        return self._data[self._read_position:]
class _LogicalRequest(object):
"""Mimics mod_python request."""
def __init__(self, channel_id, command, path, protocol, headers,
connection):
"""Constructs an instance.
Args:
channel_id: the channel id of the logical channel.
command: HTTP request command.
path: HTTP request path.
headers: HTTP headers.
connection: _LogicalConnection instance.
"""
self.channel_id = channel_id
self.method = command
self.uri = path
self.protocol = protocol
self.headers_in = headers
self.connection = connection
self.server_terminated = False
self.client_terminated = False
def is_https(self):
"""Mimics request.is_https(). Returns False because this method is
used only by old protocols (hixie and hybi00).
"""
return False
class _LogicalConnection(object):
    """Mimics mod_python mp_conn.

    Thread model: the application handler thread calls write()/read(),
    while the mux handler's reader/writer threads call append_frame_data(),
    on_write_data_done(), on_writer_done() and set_read_state(). Two
    condition variables coordinate the two sides.
    """

    # For details, see the comment of set_read_state().
    STATE_ACTIVE = 1
    STATE_GRACEFULLY_CLOSED = 2
    STATE_TERMINATED = 3

    def __init__(self, mux_handler, channel_id):
        """Constructs an instance.

        Args:
            mux_handler: _MuxHandler instance.
            channel_id: channel id of this connection.
        """
        self._mux_handler = mux_handler
        self._channel_id = channel_id
        self._incoming_data = ''

        # - Protects _waiting_write_completion
        # - Signals the thread waiting for completion of write by mux handler
        self._write_condition = threading.Condition()
        self._waiting_write_completion = False

        # Protects _incoming_data and _read_state; signals readers when new
        # data arrives or the state changes.
        self._read_condition = threading.Condition()
        self._read_state = self.STATE_ACTIVE

    def get_local_addr(self):
        """Getter to mimic mp_conn.local_addr."""
        return self._mux_handler.physical_connection.get_local_addr()
    local_addr = property(get_local_addr)

    def get_remote_addr(self):
        """Getter to mimic mp_conn.remote_addr."""
        return self._mux_handler.physical_connection.get_remote_addr()
    remote_addr = property(get_remote_addr)

    def get_memorized_lines(self):
        """Gets memorized lines. Not supported."""
        raise MuxUnexpectedException('_LogicalConnection does not support '
                                     'get_memorized_lines')

    def write(self, data):
        """Writes data. mux_handler sends data asynchronously. The caller will
        be suspended until write done.

        Args:
            data: data to be written.

        Raises:
            MuxUnexpectedException: when called before finishing the previous
                write.
        """
        try:
            self._write_condition.acquire()
            if self._waiting_write_completion:
                raise MuxUnexpectedException(
                    'Logical connection %d is already waiting the completion '
                    'of write' % self._channel_id)

            self._waiting_write_completion = True
            self._mux_handler.send_data(self._channel_id, data)
            # Block until on_write_data_done() (or on_writer_done()) notifies.
            self._write_condition.wait()
            # TODO(tyoshino): Raise an exception if woke up by on_writer_done.
        finally:
            self._write_condition.release()

    def write_control_data(self, data):
        """Writes data via the control channel. Don't wait finishing write
        because this method can be called by mux dispatcher.

        Args:
            data: data to be written.
        """
        self._mux_handler.send_control_data(data)

    def on_write_data_done(self):
        """Called when sending data is completed."""
        try:
            self._write_condition.acquire()
            if not self._waiting_write_completion:
                raise MuxUnexpectedException(
                    'Invalid call of on_write_data_done for logical '
                    'connection %d' % self._channel_id)
            self._waiting_write_completion = False
            self._write_condition.notify()
        finally:
            self._write_condition.release()

    def on_writer_done(self):
        """Called by the mux handler when the writer thread has finished."""
        try:
            self._write_condition.acquire()
            # Unconditionally release any blocked writer so it can unwind.
            self._waiting_write_completion = False
            self._write_condition.notify()
        finally:
            self._write_condition.release()

    def append_frame_data(self, frame_data):
        """Appends incoming frame data. Called when mux_handler dispatches
        frame data to the corresponding application.

        Args:
            frame_data: incoming frame data.
        """
        self._read_condition.acquire()
        self._incoming_data += frame_data
        self._read_condition.notify()
        self._read_condition.release()

    def read(self, length):
        """Reads data. Blocks until enough data has arrived via physical
        connection.

        Args:
            length: length of data to be read.
        Raises:
            LogicalConnectionClosedException: when closing handshake for this
                logical channel has been received.
            ConnectionTerminatedException: when the physical connection has
                closed, or an error is caused on the reader thread.
        """
        self._read_condition.acquire()
        # Wait until enough data is buffered or the channel leaves the
        # active state.
        while (self._read_state == self.STATE_ACTIVE and
               len(self._incoming_data) < length):
            self._read_condition.wait()

        try:
            if self._read_state == self.STATE_GRACEFULLY_CLOSED:
                raise LogicalConnectionClosedException(
                    'Logical channel %d has closed.' % self._channel_id)
            elif self._read_state == self.STATE_TERMINATED:
                raise ConnectionTerminatedException(
                    'Receiving %d byte failed. Logical channel (%d) closed' %
                    (length, self._channel_id))

            value = self._incoming_data[:length]
            self._incoming_data = self._incoming_data[length:]
        finally:
            self._read_condition.release()

        return value

    def set_read_state(self, new_state):
        """Sets the state of this connection. Called when an event for this
        connection has occurred.

        Args:
            new_state: state to be set. new_state must be one of followings:
            - STATE_GRACEFULLY_CLOSED: when closing handshake for this
                connection has been received.
            - STATE_TERMINATED: when the physical connection has closed or
                DropChannel of this connection has received.
        """
        self._read_condition.acquire()
        self._read_state = new_state
        # Wake any reader blocked in read() so it can observe the new state.
        self._read_condition.notify()
        self._read_condition.release()
class _InnerMessage(object):
"""Holds the result of _InnerMessageBuilder.build().
"""
def __init__(self, opcode, payload):
self.opcode = opcode
self.payload = payload
class _InnerMessageBuilder(object):
    """A class that holds the context of inner message fragmentation and
    builds a message from fragmented inner frame(s).

    Implemented as a small state machine: _frame_handler points at the
    method that processes the next frame, and changes as fragmented data
    and (possibly interleaved) control messages are observed.
    """

    def __init__(self):
        # In-flight fragmented control message state.
        self._control_opcode = None
        self._pending_control_fragments = []
        # In-flight fragmented data message state.
        self._message_opcode = None
        self._pending_message_fragments = []
        # Current state: one of _handle_first, _handle_fragmented_control,
        # _handle_fragmented_message.
        self._frame_handler = self._handle_first

    def _handle_first(self, frame):
        if frame.opcode == common.OPCODE_CONTINUATION:
            raise InvalidFrameException('Sending invalid continuation opcode')

        if common.is_control_opcode(frame.opcode):
            return self._process_first_fragmented_control(frame)
        else:
            return self._process_first_fragmented_message(frame)

    def _process_first_fragmented_control(self, frame):
        self._control_opcode = frame.opcode
        self._pending_control_fragments.append(frame.payload)
        if not frame.fin:
            self._frame_handler = self._handle_fragmented_control
            return None
        return self._reassemble_fragmented_control()

    def _process_first_fragmented_message(self, frame):
        self._message_opcode = frame.opcode
        self._pending_message_fragments.append(frame.payload)
        if not frame.fin:
            self._frame_handler = self._handle_fragmented_message
            return None
        return self._reassemble_fragmented_message()

    def _handle_fragmented_control(self, frame):
        if frame.opcode != common.OPCODE_CONTINUATION:
            raise InvalidFrameException(
                'Sending invalid opcode %d while sending fragmented control '
                'message' % frame.opcode)
        self._pending_control_fragments.append(frame.payload)
        if not frame.fin:
            return None
        return self._reassemble_fragmented_control()

    def _reassemble_fragmented_control(self):
        opcode = self._control_opcode
        payload = ''.join(self._pending_control_fragments)
        self._control_opcode = None
        self._pending_control_fragments = []
        if self._message_opcode is not None:
            # The control message was interleaved inside a fragmented data
            # message; resume collecting the data message.
            self._frame_handler = self._handle_fragmented_message
        else:
            self._frame_handler = self._handle_first
        return _InnerMessage(opcode, payload)

    def _handle_fragmented_message(self, frame):
        # Sender can interleave a control message while sending fragmented
        # messages.
        if common.is_control_opcode(frame.opcode):
            if self._control_opcode is not None:
                raise MuxUnexpectedException(
                    'Should not reach here(Bug in builder)')
            return self._process_first_fragmented_control(frame)

        if frame.opcode != common.OPCODE_CONTINUATION:
            raise InvalidFrameException(
                'Sending invalid opcode %d while sending fragmented message' %
                frame.opcode)
        self._pending_message_fragments.append(frame.payload)
        if not frame.fin:
            return None
        return self._reassemble_fragmented_message()

    def _reassemble_fragmented_message(self):
        opcode = self._message_opcode
        payload = ''.join(self._pending_message_fragments)
        self._message_opcode = None
        self._pending_message_fragments = []
        self._frame_handler = self._handle_first
        return _InnerMessage(opcode, payload)

    def build(self, frame):
        """Build an inner message. Returns an _InnerMessage instance when
        the given frame is the last fragmented frame. Returns None otherwise.

        Args:
            frame: an inner frame.

        Raises:
            InvalidFrameException: when received invalid opcode. (e.g.
                receiving non continuation data opcode but the fin flag of
                the previous inner frame was not set.)
        """
        return self._frame_handler(frame)
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
    def __init__(self, request, stream_options, send_quota, receive_quota):
        """Constructs an instance.

        Args:
            request: _LogicalRequest instance.
            stream_options: StreamOptions instance.
            send_quota: Initial send quota (in octets).
            receive_quota: Initial receive quota (in octets).
        """

        # Physical stream is responsible for masking.
        stream_options.unmask_receive = False
        Stream.__init__(self, request, stream_options)
        # True once stop_sending() has been called; guarded by
        # _send_condition.
        self._send_closed = False
        self._send_quota = send_quota
        # - Protects _send_closed and _send_quota
        # - Signals the thread waiting for send quota replenished
        self._send_condition = threading.Condition()

        # The opcode of the first frame in messages.
        self._message_opcode = common.OPCODE_TEXT
        # True when the last message was fragmented.
        self._last_message_was_fragmented = False

        self._receive_quota = receive_quota
        # Serializes _write_inner_frame so fragments of one inner message
        # are emitted in order on this logical channel.
        self._write_inner_frame_semaphore = threading.Semaphore()

        self._inner_message_builder = _InnerMessageBuilder()
def _create_inner_frame(self, opcode, payload, end=True):
frame = Frame(fin=end, opcode=opcode, payload=payload)
for frame_filter in self._options.outgoing_frame_filters:
frame_filter.filter(frame)
if len(payload) != len(frame.payload):
raise MuxUnexpectedException(
'Mux extension must not be used after extensions which change '
' frame boundary')
first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
(frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
return chr(first_byte) + frame.payload
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
write_position = 0
try:
# An inner frame will be fragmented if there is no enough send
# quota. This semaphore ensures that fragmented inner frames are
# sent in order on the logical channel.
# Note that frames that come from other logical channels or
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
self._write_inner_frame_semaphore.acquire()
# Consume an octet quota when this is the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
self._send_quota -= 1
finally:
self._send_condition.release()
while write_position < payload_length:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
'No quota. Waiting FlowControl message for %d.' %
self._request.channel_id)
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self.request._channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
inner_frame_end = (
end and
(write_position + write_length == payload_length))
inner_frame = self._create_inner_frame(
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
self._send_condition.release()
# Writing data will block the worker so we need to release
# _send_condition before writing.
self._logger.debug('Sending inner frame: %r' % inner_frame)
self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
except ValueError, e:
raise BadOperationException(e)
finally:
self._write_inner_frame_semaphore.release()
    def replenish_send_quota(self, send_quota):
        """Replenish send quota.

        Args:
            send_quota: number of octets to add to the current quota.

        Raises:
            LogicalChannelError: when the resulting quota would exceed
                2^63 - 1 (quota overflow per the mux protocol).
        """

        try:
            self._send_condition.acquire()
            if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
                self._send_quota = 0
                raise LogicalChannelError(
                    self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
            self._send_quota += send_quota
            self._logger.debug('Replenished send quota for channel id %d: %d' %
                               (self._request.channel_id, self._send_quota))
        finally:
            # Wake up writers blocked in _write_inner_frame; runs even when
            # LogicalChannelError was raised above.
            self._send_condition.notify()
            self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
if self._receive_quota < amount:
self._logger.debug('Violate quota on channel id %d: %d < %d' %
(self._request.channel_id,
self._receive_quota, amount))
return False
self._receive_quota -= amount
return True
    def send_message(self, message, end=True, binary=False):
        """Override Stream.send_message.

        Text messages are encoded to UTF-8 here; fragmentation across
        calls is tracked so continuation frames carry the right opcode.
        """

        if self._request.server_terminated:
            raise BadOperationException(
                'Requested send_message after sending out a closing handshake')

        if binary and isinstance(message, unicode):
            raise BadOperationException(
                'Message for binary frame must be instance of str')

        if binary:
            opcode = common.OPCODE_BINARY
        else:
            opcode = common.OPCODE_TEXT
            message = message.encode('utf-8')

        for message_filter in self._options.outgoing_message_filters:
            message = message_filter.filter(message, end, binary)

        if self._last_message_was_fragmented:
            # A continuation must keep the type of the first fragment.
            if opcode != self._message_opcode:
                raise BadOperationException('Message types are different in '
                                            'frames for the same message')
            opcode = common.OPCODE_CONTINUATION
        else:
            self._message_opcode = opcode

        self._write_inner_frame(opcode, message, end)
        self._last_message_was_fragmented = not end
    def _receive_frame(self):
        """Overrides Stream._receive_frame.

        In addition to call Stream._receive_frame, this method adds the amount
        of payload to receiving quota and sends FlowControl to the client.
        We need to do it here because Stream.receive_message() handles
        control frames internally.
        """

        opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
        amount = len(payload)
        # Replenish extra one octet when receiving the first fragmented frame
        # (mirrors the extra octet consumed on the sending side for the
        # first frame of a message).
        if opcode != common.OPCODE_CONTINUATION:
            amount += 1
        self._receive_quota += amount
        frame_data = _create_flow_control(self._request.channel_id,
                                          amount)
        self._logger.debug('Sending flow control for %d, replenished=%d' %
                           (self._request.channel_id, amount))
        self._request.connection.write_control_data(frame_data)
        return opcode, payload, fin, rsv1, rsv2, rsv3
def _get_message_from_frame(self, frame):
"""Overrides Stream._get_message_from_frame.
"""
try:
inner_message = self._inner_message_builder.build(frame)
except InvalidFrameException:
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
if inner_message is None:
return None
self._original_opcode = inner_message.opcode
return inner_message.payload
    def receive_message(self):
        """Overrides Stream.receive_message.

        Returns None when the logical connection has closed gracefully.
        """

        # Just call Stream.receive_message(), but catch
        # LogicalConnectionClosedException, which is raised when the logical
        # connection has closed gracefully.
        try:
            return Stream.receive_message(self)
        except LogicalConnectionClosedException, e:
            self._logger.debug('%s', e)
            return None
def _send_closing_handshake(self, code, reason):
"""Overrides Stream._send_closing_handshake."""
body = create_closing_handshake_body(code, reason)
self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
(self._request.channel_id, code, reason))
self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
self._request.server_terminated = True
def send_ping(self, body=''):
"""Overrides Stream.send_ping"""
self._logger.debug('Sending ping on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PING, body, end=True)
self._ping_queue.append(body)
def _send_pong(self, body):
"""Overrides Stream._send_pong"""
self._logger.debug('Sending pong on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PONG, body, end=True)
    def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
        """Overrides Stream.close_connection.

        NOTE: currently only marks the server side as terminated; the
        full closing handshake is not implemented yet (see TODO).
        """

        # TODO(bashi): Implement
        self._logger.debug('Closing logical connection %d' %
                           self._request.channel_id)
        self._request.server_terminated = True
def stop_sending(self):
"""Stops accepting new send operation (_write_inner_frame)."""
self._send_condition.acquire()
self._send_closed = True
self._send_condition.notify()
self._send_condition.release()
class _OutgoingData(object):
"""A structure that holds data to be sent via physical connection and
origin of the data.
"""
def __init__(self, channel_id, data):
self.channel_id = channel_id
self.data = data
class _PhysicalConnectionWriter(threading.Thread):
"""A thread that is responsible for writing data to physical connection.
TODO(bashi): Make sure there is no thread-safety problem when the reader
thread reads data from the same socket at a time.
"""
    def __init__(self, mux_handler):
        """Constructs an instance.

        Args:
            mux_handler: _MuxHandler instance.
        """

        threading.Thread.__init__(self)
        self._logger = util.get_class_logger(self)
        self._mux_handler = mux_handler
        # Daemonize so a hung writer cannot keep the process alive.
        self.setDaemon(True)

        # When set, make this thread stop accepting new data, flush pending
        # data and exit.
        self._stop_requested = False
        # The close code of the physical connection.
        self._close_code = common.STATUS_NORMAL_CLOSURE
        # Deque for passing write data. It's protected by _deque_condition
        # until _stop_requested is set.
        self._deque = collections.deque()
        # - Protects _deque, _stop_requested and _close_code
        # - Signals threads waiting for them to be available
        self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
"""Puts outgoing data.
Args:
data: _OutgoingData instance.
Raises:
BadOperationException: when the thread has been requested to
terminate.
"""
try:
self._deque_condition.acquire()
if self._stop_requested:
raise BadOperationException('Cannot write data anymore')
self._deque.append(data)
self._deque_condition.notify()
finally:
self._deque_condition.release()
    def _write_data(self, outgoing_data):
        """Encapsulates outgoing_data (channel id + payload) into one
        physical binary message and sends it, then notifies the channel
        that its write completed.
        """

        message = (_encode_channel_id(outgoing_data.channel_id) +
                   outgoing_data.data)
        try:
            self._mux_handler.physical_stream.send_message(
                message=message, end=True, binary=True)
        except Exception, e:
            util.prepend_message_to_exception(
                'Failed to send message to %r: ' %
                (self._mux_handler.physical_connection.remote_addr,), e)
            raise

        # TODO(bashi): It would be better to block the thread that sends
        # control data as well.
        if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
            self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
try:
self._deque_condition.acquire()
while not self._stop_requested:
if len(self._deque) == 0:
self._deque_condition.wait()
continue
outgoing_data = self._deque.popleft()
self._deque_condition.release()
self._write_data(outgoing_data)
self._deque_condition.acquire()
# Flush deque.
#
# At this point, self._deque_condition is always acquired.
try:
while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
self._write_data(outgoing_data)
finally:
self._deque_condition.release()
# Close physical connection.
try:
# Don't wait the response here. The response will be read
# by the reader thread.
self._mux_handler.physical_stream.close_connection(
self._close_code, wait_response=False)
except Exception, e:
util.prepend_message_to_exception(
'Failed to close the physical connection: %r' % e)
raise
finally:
self._mux_handler.notify_writer_done()
def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
class _PhysicalConnectionReader(threading.Thread):
    """A thread that is responsible for reading data from physical connection.
    """

    def __init__(self, mux_handler):
        """Constructs an instance.

        Args:
            mux_handler: _MuxHandler instance.
        """

        threading.Thread.__init__(self)
        self._logger = util.get_class_logger(self)
        self._mux_handler = mux_handler
        # Daemonize so a blocked read cannot keep the process alive.
        self.setDaemon(True)

    def run(self):
        # Read encapsulating messages until the physical connection goes
        # away or an unrecoverable error occurs, dispatching each message
        # to the mux handler.
        while True:
            try:
                physical_stream = self._mux_handler.physical_stream
                message = physical_stream.receive_message()
                if message is None:
                    # Physical connection has been closed.
                    break
                # Below happens only when a data message is received.
                opcode = physical_stream.get_last_received_opcode()
                if opcode != common.OPCODE_BINARY:
                    # Encapsulating frames must be binary per the mux spec.
                    self._mux_handler.fail_physical_connection(
                        _DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
                        'Received a text message on physical connection')
                    break

            except ConnectionTerminatedException, e:
                self._logger.debug('%s', e)
                break

            try:
                self._mux_handler.dispatch_message(message)
            except PhysicalConnectionError, e:
                # Physical-level error: drop the whole mux connection.
                self._mux_handler.fail_physical_connection(
                    e.drop_code, e.message)
                break
            except LogicalChannelError, e:
                # Channel-level error: only that channel is failed; keep
                # reading for the other channels.
                self._mux_handler.fail_logical_channel(
                    e.channel_id, e.drop_code, e.message)
            except Exception, e:
                self._logger.debug(traceback.format_exc())
                break

        self._mux_handler.notify_reader_done()
class _Worker(threading.Thread):
    """A thread that is responsible for running the corresponding application
    handler.
    """

    def __init__(self, mux_handler, request):
        """Constructs an instance.

        Args:
            mux_handler: _MuxHandler instance.
            request: _LogicalRequest instance.
        """

        threading.Thread.__init__(self)
        self._logger = util.get_class_logger(self)
        self._mux_handler = mux_handler
        self._request = request
        # Daemonize so a hung application handler cannot keep the process
        # alive.
        self.setDaemon(True)

    def run(self):
        self._logger.debug('Logical channel worker started. (id=%d)' %
                           self._request.channel_id)
        try:
            # Non-critical exceptions will be handled by dispatcher.
            self._mux_handler.dispatcher.transfer_data(self._request)
        except LogicalChannelError, e:
            self._mux_handler.fail_logical_channel(
                e.channel_id, e.drop_code, e.message)
        finally:
            # Always tell the handler this channel's worker has finished so
            # the channel entry can be removed and DropChannel sent.
            self._mux_handler.notify_worker_done(self._request.channel_id)
class _MuxHandshaker(hybi.Handshaker):
    """Opening handshake processor for multiplexing."""

    # Logical channels don't carry a real Sec-WebSocket-Key; a fixed dummy
    # value satisfies the hybi handshake machinery.
    _DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='

    def __init__(self, request, dispatcher, send_quota, receive_quota):
        """Constructs an instance.

        Args:
            request: _LogicalRequest instance.
            dispatcher: Dispatcher instance (dispatch.Dispatcher).
            send_quota: Initial send quota.
            receive_quota: Initial receive quota.
        """

        hybi.Handshaker.__init__(self, request, dispatcher)
        self._send_quota = send_quota
        self._receive_quota = receive_quota

        # Append headers which should not be included in handshake field of
        # AddChannelRequest.
        # TODO(bashi): Make sure whether we should raise exception when
        #     these headers are included already.
        request.headers_in[common.UPGRADE_HEADER] = (
            common.WEBSOCKET_UPGRADE_TYPE)
        request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
            str(common.VERSION_HYBI_LATEST))
        request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
            self._DUMMY_WEBSOCKET_KEY)

    def _create_stream(self, stream_options):
        """Override hybi.Handshaker._create_stream.

        Returns a _LogicalStream instead of a plain Stream so inner
        frames go through the mux machinery.
        """

        self._logger.debug('Creating logical stream for %d' %
                           self._request.channel_id)
        return _LogicalStream(
            self._request, stream_options, self._send_quota,
            self._receive_quota)

    def _create_handshake_response(self, accept):
        """Override hybi._create_handshake_response.

        Builds the response text embedded in AddChannelResponse; some
        headers mandatory on a physical connection are omitted here.
        """

        response = []

        response.append('HTTP/1.1 101 Switching Protocols\r\n')

        # Upgrade and Sec-WebSocket-Accept should be excluded.
        response.append('%s: %s\r\n' % (
            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
        if self._request.ws_protocol is not None:
            response.append('%s: %s\r\n' % (
                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
                self._request.ws_protocol))
        if (self._request.ws_extensions is not None and
            len(self._request.ws_extensions) != 0):
            response.append('%s: %s\r\n' % (
                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
                common.format_extensions(self._request.ws_extensions)))
        response.append('\r\n')

        return ''.join(response)

    def _send_handshake(self, accept):
        """Override hybi.Handshaker._send_handshake.

        Sends the response as an AddChannelResponse control block rather
        than raw HTTP on the wire.
        """

        # Don't send handshake response for the default channel
        if self._request.channel_id == _DEFAULT_CHANNEL_ID:
            return

        handshake_response = self._create_handshake_response(accept)
        frame_data = _create_add_channel_response(
                         self._request.channel_id,
                         handshake_response)
        self._logger.debug('Sending handshake response for %d: %r' %
                           (self._request.channel_id, frame_data))
        self._request.connection.write_control_data(frame_data)
class _LogicalChannelData(object):
    """Per-channel bookkeeping: the logical request, its worker thread and
    the DropChannel code/message to send when the channel terminates.
    """

    def __init__(self, request, worker):
        self.request = request
        self.worker = worker
        # Defaults used when the channel closes without an explicit error.
        self.drop_code = _DROP_CODE_NORMAL_CLOSURE
        self.drop_message = ''
class _HandshakeDeltaBase(object):
"""A class that holds information for delta-encoded handshake."""
def __init__(self, headers):
self._headers = headers
def create_headers(self, delta=None):
"""Creates request headers for an AddChannelRequest that has
delta-encoded handshake.
Args:
delta: headers should be overridden.
"""
headers = copy.copy(self._headers)
if delta:
for key, value in delta.items():
# The spec requires that a header with an empty value is
# removed from the delta base.
if len(value) == 0 and headers.has_key(key):
del headers[key]
else:
headers[key] = value
return headers
class _MuxHandler(object):
"""Multiplexing handler. When a handler starts, it launches three
threads; the reader thread, the writer thread, and a worker thread.
The reader thread reads data from the physical stream, i.e., the
ws_stream object of the underlying websocket connection. The reader
thread interprets multiplexed frames and dispatches them to logical
channels. Methods of this class are mostly called by the reader thread.
The writer thread sends multiplexed frames which are created by
logical channels via the physical connection.
The worker thread launched at the starting point handles the
"Implicitly Opened Connection". If multiplexing handler receives
an AddChannelRequest and accepts it, the handler will launch a new worker
thread and dispatch the request to it.
"""
    def __init__(self, request, dispatcher):
        """Constructs an instance.

        Args:
            request: mod_python request of the physical connection.
            dispatcher: Dispatcher instance (dispatch.Dispatcher).
        """

        self.original_request = request
        self.dispatcher = dispatcher
        self.physical_connection = request.connection
        self.physical_stream = request.ws_stream
        self._logger = util.get_class_logger(self)
        # Maps channel id -> _LogicalChannelData for active channels.
        self._logical_channels = {}
        self._logical_channels_condition = threading.Condition()
        # Holds client's initial quota
        self._channel_slots = collections.deque()
        # Base headers for delta-encoded AddChannelRequests; set in start().
        self._handshake_base = None
        self._worker_done_notify_received = False
        self._reader = None
        self._writer = None
    def start(self):
        """Starts the handler.

        Launches the reader and writer threads, then creates and
        dispatches the "Implicitly Opened Connection" on the default
        channel.

        Raises:
            MuxUnexpectedException: when the handler already started, or when
                opening handshake of the default channel fails.
        """

        if self._reader or self._writer:
            raise MuxUnexpectedException('MuxHandler already started')

        self._reader = _PhysicalConnectionReader(self)
        self._writer = _PhysicalConnectionWriter(self)
        self._reader.start()
        self._writer.start()

        # Create "Implicitly Opened Connection".
        logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
        headers = copy.copy(self.original_request.headers_in)
        # Add extensions for logical channel.
        headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
            common.format_extensions(
                self.original_request.mux_processor.extensions()))
        # The default channel's headers become the delta base for later
        # AddChannelRequests.
        self._handshake_base = _HandshakeDeltaBase(headers)
        logical_request = _LogicalRequest(
            _DEFAULT_CHANNEL_ID,
            self.original_request.method,
            self.original_request.uri,
            self.original_request.protocol,
            self._handshake_base.create_headers(),
            logical_connection)
        # Client's send quota for the implicitly opened connection is zero,
        # but we will send FlowControl later so set the initial quota to
        # _INITIAL_QUOTA_FOR_CLIENT.
        self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
        send_quota = self.original_request.mux_processor.quota()
        if not self._do_handshake_for_logical_request(
            logical_request, send_quota=send_quota):
            raise MuxUnexpectedException(
                'Failed handshake on the default channel id')
        self._add_logical_channel(logical_request)

        # Send FlowControl for the implicitly opened connection.
        frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
                                          _INITIAL_QUOTA_FOR_CLIENT)
        logical_request.connection.write_control_data(frame_data)
def add_channel_slots(self, slots, send_quota):
"""Adds channel slots.
Args:
slots: number of slots to be added.
send_quota: initial send quota for slots.
"""
self._channel_slots.extend([send_quota] * slots)
# Send NewChannelSlot to client.
frame_data = _create_new_channel_slot(slots, send_quota)
self.send_control_data(frame_data)
    def wait_until_done(self, timeout=None):
        """Waits until all workers are done. Returns False when timeout has
        occurred. Returns True on success.

        Args:
            timeout: timeout in sec.
        """

        self._logical_channels_condition.acquire()
        try:
            while len(self._logical_channels) > 0:
                self._logger.debug('Waiting workers(%d)...' %
                                   len(self._logical_channels))
                self._worker_done_notify_received = False
                self._logical_channels_condition.wait(timeout)
                # Condition.wait() gives no timeout indication, so the flag
                # set by notify_worker_done() is used to distinguish a real
                # notification from a timeout.
                if not self._worker_done_notify_received:
                    self._logger.debug('Waiting worker(s) timed out')
                    return False
        finally:
            self._logical_channels_condition.release()

        # Flush pending outgoing data
        self._writer.stop()
        self._writer.join()

        return True
def notify_write_data_done(self, channel_id):
"""Called by the writer thread when a write operation has done.
Args:
channel_id: objective channel id.
"""
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
channel_data.request.connection.on_write_data_done()
else:
self._logger.debug('Seems that logical channel for %d has gone'
% channel_id)
finally:
self._logical_channels_condition.release()
def send_control_data(self, data):
"""Sends data via the control channel.
Args:
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=_CONTROL_CHANNEL_ID, data=data))
def send_data(self, channel_id, data):
"""Sends data via given logical channel. This method is called by
worker threads.
Args:
channel_id: objective channel id.
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=channel_id, data=data))
def _send_drop_channel(self, channel_id, code=None, message=''):
frame_data = _create_drop_channel(channel_id, code, message)
self._logger.debug(
'Sending drop channel for channel id %d' % channel_id)
self.send_control_data(frame_data)
def _send_error_add_channel_response(self, channel_id, status=None):
if status is None:
status = common.HTTP_STATUS_BAD_REQUEST
if status in _HTTP_BAD_RESPONSE_MESSAGES:
message = _HTTP_BAD_RESPONSE_MESSAGES[status]
else:
self._logger.debug('Response message for %d is not found' % status)
message = '???'
response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
frame_data = _create_add_channel_response(channel_id,
encoded_handshake=response,
encoding=0, rejected=True)
self.send_control_data(frame_data)
    def _create_logical_request(self, block):
        """Builds a _LogicalRequest from an AddChannelRequest control block,
        applying the delta base when the handshake is delta-encoded.

        Raises:
            MuxUnexpectedException: when the block targets the control
                channel id.
            PhysicalConnectionError: when the handshake encoding is unknown.
            ValueError: when the encoded handshake text cannot be parsed.
        """

        if block.channel_id == _CONTROL_CHANNEL_ID:
            # TODO(bashi): Raise PhysicalConnectionError with code 2006
            # instead of MuxUnexpectedException.
            raise MuxUnexpectedException(
                'Received the control channel id (0) as objective channel '
                'id for AddChannel')

        if block.encoding > _HANDSHAKE_ENCODING_DELTA:
            raise PhysicalConnectionError(
                _DROP_CODE_UNKNOWN_REQUEST_ENCODING)

        method, path, version, headers = _parse_request_text(
            block.encoded_handshake)
        if block.encoding == _HANDSHAKE_ENCODING_DELTA:
            # Merge the delta headers into the current handshake base.
            headers = self._handshake_base.create_headers(headers)

        connection = _LogicalConnection(self, block.channel_id)
        request = _LogicalRequest(block.channel_id, method, path, version,
                                  headers, connection)
        return request
    def _do_handshake_for_logical_request(self, request, send_quota=0):
        """Runs the opening handshake for a logical channel.

        Returns True on success, False when the handshake was rejected
        (an error AddChannelResponse has then already been sent).

        Raises:
            LogicalChannelError: when the client opened more channels than
                the slots it was granted.
        """

        try:
            # Each accepted channel consumes one previously advertised slot.
            receive_quota = self._channel_slots.popleft()
        except IndexError:
            raise LogicalChannelError(
                request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)

        handshaker = _MuxHandshaker(request, self.dispatcher,
                                    send_quota, receive_quota)
        try:
            handshaker.do_handshake()
        except handshake.VersionException, e:
            self._logger.info('%s', e)
            self._send_error_add_channel_response(
                request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
            return False
        except handshake.HandshakeException, e:
            # TODO(bashi): Should we _Fail the Logical Channel_ with 3001
            # instead?
            self._logger.info('%s', e)
            self._send_error_add_channel_response(request.channel_id,
                                                  status=e.status)
            return False
        except handshake.AbortedByUserException, e:
            self._logger.info('%s', e)
            self._send_error_add_channel_response(request.channel_id)
            return False

        return True
    def _add_logical_channel(self, logical_request):
        """Registers an accepted logical request and starts its worker.

        Raises:
            PhysicalConnectionError: when the channel id is already in use.
        """

        try:
            self._logical_channels_condition.acquire()
            if logical_request.channel_id in self._logical_channels:
                self._logger.debug('Channel id %d already exists' %
                                   logical_request.channel_id)
                raise PhysicalConnectionError(
                    _DROP_CODE_CHANNEL_ALREADY_EXISTS,
                    'Channel id %d already exists' %
                    logical_request.channel_id)
            worker = _Worker(self, logical_request)
            channel_data = _LogicalChannelData(logical_request, worker)
            self._logical_channels[logical_request.channel_id] = channel_data
            worker.start()
        finally:
            self._logical_channels_condition.release()
    def _process_add_channel_request(self, block):
        """Handles an AddChannelRequest control block: parses it, runs the
        opening handshake and, on success, registers the new channel.
        """

        try:
            logical_request = self._create_logical_request(block)
        except ValueError, e:
            # The request text was malformed; reject the channel.
            self._logger.debug('Failed to create logical request: %r' % e)
            self._send_error_add_channel_response(
                block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
            return
        if self._do_handshake_for_logical_request(logical_request):
            if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
                # Update handshake base.
                # TODO(bashi): Make sure this is the right place to update
                # handshake base.
                self._handshake_base = _HandshakeDeltaBase(
                    logical_request.headers_in)
            self._add_logical_channel(logical_request)
        else:
            self._send_error_add_channel_response(
                block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
def _process_flow_control(self, block):
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.request.ws_stream.replenish_send_quota(
block.send_quota)
finally:
self._logical_channels_condition.release()
    def _process_drop_channel(self, block):
        """Handles a DropChannel control block by tearing down the
        corresponding logical channel (unknown channels are ignored).
        """

        self._logger.debug(
            'DropChannel received for %d: code=%r, reason=%r' %
            (block.channel_id, block.drop_code, block.drop_message))
        try:
            self._logical_channels_condition.acquire()
            if not block.channel_id in self._logical_channels:
                return
            channel_data = self._logical_channels[block.channel_id]
            # The client initiated the drop, so acknowledge rather than
            # report an error when we send our own DropChannel back.
            channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED

            # Close the logical channel
            channel_data.request.connection.set_read_state(
                _LogicalConnection.STATE_TERMINATED)
            channel_data.request.ws_stream.stop_sending()
        finally:
            self._logical_channels_condition.release()
def _process_control_blocks(self, parser):
for control_block in parser.read_control_blocks():
opcode = control_block.opcode
self._logger.debug('control block received, opcode: %d' % opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
self._process_add_channel_request(control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received AddChannelResponse')
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
self._process_flow_control(control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
self._process_drop_channel(control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received NewChannelSlot')
else:
raise MuxUnexpectedException(
'Unexpected opcode %r' % opcode)
    def _process_logical_frame(self, channel_id, parser):
        """Decapsulates one inner frame, checks the client's receive quota
        and forwards the re-framed data to the logical connection.

        Raises:
            LogicalChannelError: when the client violated its send quota.
        """

        self._logger.debug('Received a frame. channel id=%d' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if not channel_id in self._logical_channels:
                # We must ignore the message for an inactive channel.
                return
            channel_data = self._logical_channels[channel_id]
            fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
            # The first frame of a message costs one extra octet of quota
            # (mirrors the sender-side accounting).
            consuming_byte = len(payload)
            if opcode != common.OPCODE_CONTINUATION:
                consuming_byte += 1
            if not channel_data.request.ws_stream.consume_receive_quota(
                consuming_byte):
                # The client violates quota. Close logical channel.
                raise LogicalChannelError(
                    channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
            # Re-frame the inner frame as a regular (unmasked) WebSocket
            # frame for the logical stream to consume.
            header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
                                   mask=False)
            frame_data = header + payload
            channel_data.request.connection.append_frame_data(frame_data)
        finally:
            self._logical_channels_condition.release()
    def dispatch_message(self, message):
        """Dispatches message. The reader thread calls this method.

        Args:
            message: a message that contains encapsulated frame.

        Raises:
            PhysicalConnectionError: if the message contains physical
                connection level errors.
            LogicalChannelError: if the message contains logical channel
                level errors.
        """

        parser = _MuxFramePayloadParser(message)
        try:
            channel_id = parser.read_channel_id()
        except ValueError, e:
            raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
        # Channel 0 carries control blocks; anything else is payload for a
        # logical channel.
        if channel_id == _CONTROL_CHANNEL_ID:
            self._process_control_blocks(parser)
        else:
            self._process_logical_frame(channel_id, parser)
    def notify_worker_done(self, channel_id):
        """Called when a worker has finished.

        Args:
            channel_id: channel id corresponded with the worker.

        Raises:
            MuxUnexpectedException: when the channel id is not registered
                (indicates an internal bookkeeping bug).
        """

        self._logger.debug('Worker for channel id %d terminated' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if not channel_id in self._logical_channels:
                raise MuxUnexpectedException(
                    'Channel id %d not found' % channel_id)
            channel_data = self._logical_channels.pop(channel_id)
        finally:
            # Wake up wait_until_done() (the flag tells it this was a real
            # notification, not a timeout) before releasing the lock.
            self._worker_done_notify_received = True
            self._logical_channels_condition.notify()
            self._logical_channels_condition.release()

        if not channel_data.request.server_terminated:
            self._send_drop_channel(
                channel_id, code=channel_data.drop_code,
                message=channel_data.drop_message)
def notify_reader_done(self):
"""This method is called by the reader thread when the reader has
finished.
"""
self._logger.debug(
'Termiating all logical connections waiting for incoming data '
'...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def notify_writer_done(self):
"""This method is called by the writer thread when the writer has
finished.
"""
self._logger.debug(
'Termiating all logical connections waiting for write '
'completion ...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.on_writer_done()
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def fail_physical_connection(self, code, message):
"""Fail the physical connection.
Args:
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing the physical connection...')
self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
    def fail_logical_channel(self, channel_id, code, message):
        """Fail a logical channel.

        Args:
            channel_id: channel id.
            code: drop reason code.
            message: drop message.
        """

        self._logger.debug('Failing logical channel %d...' % channel_id)
        try:
            self._logical_channels_condition.acquire()
            if channel_id in self._logical_channels:
                channel_data = self._logical_channels[channel_id]
                # Close the logical channel. notify_worker_done() will be
                # called later and it will send DropChannel.
                channel_data.drop_code = code
                channel_data.drop_message = message

                channel_data.request.connection.set_read_state(
                    _LogicalConnection.STATE_TERMINATED)
                channel_data.request.ws_stream.stop_sending()
            else:
                # No worker exists for this channel, so send DropChannel
                # directly.
                self._send_drop_channel(channel_id, code, message)
        finally:
            self._logical_channels_condition.release()
def use_mux(request):
    # A request uses mux iff the handshake attached an active mux
    # processor to it.
    if not hasattr(request, 'mux_processor'):
        return False
    return request.mux_processor.is_active()
def start(request, dispatcher):
    """Entry point for a multiplexed connection: builds the handler,
    starts its threads, advertises the initial channel slots to the
    client and blocks until all logical channels are done.
    """

    mux_handler = _MuxHandler(request, dispatcher)
    mux_handler.start()

    mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
                                  _INITIAL_QUOTA_FOR_CLIENT)

    mux_handler.wait_until_done()
# vi:sts=4 sw=4 et
|
hfeeki/transifex | refs/heads/master | transifex/txcommon/tests/testmaker/__init__.py | 3 | from projects import *
|
NcLang/vimrc | refs/heads/master | sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/python-future/src/future/backports/html/parser.py | 70 | """A parser for HTML and XHTML.
Backported for python-future from Python 3.3.
"""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
from future.backports import _markupbase
import re
import warnings
# Regular expressions used for parsing
# First "interesting" character in normal data mode: tag open or entity.
interesting_normal = re.compile('[&<]')
# A possibly-truncated entity/char reference at the end of the buffer.
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
# Note:
# 1) the strict attrfind isn't really strict, but we can't make it
# correctly strict without breaking backward compatibility;
# 2) if you change attrfind remember to update locatestarttagend too;
# 3) if you change attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
attrfind = re.compile(
    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
    r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
    r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
    r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
# First '>' after a '</' -- used to bound a candidate end tag.
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors."""
    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]
    def __str__(self):
        # Assemble "<msg>[, at line N][, column M]" from whatever parts
        # of the position are known.
        pieces = [self.msg]
        if self.lineno is not None:
            pieces.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # Offsets are zero-based internally; report one-based columns.
            pieces.append(", column %d" % (self.offset + 1))
        return "".join(pieces)
class HTMLParser(_markupbase.ParserBase):
    """Find tags and other markup and call handler functions.
    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()
    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """
    # Elements whose content is raw text: inside them only the matching
    # end tag is recognized as markup.
    CDATA_CONTENT_ELEMENTS = ("script", "style")
    def __init__(self, strict=False):
        """Initialize and reset this instance.
        If strict is set to False (the default) the parser will parse invalid
        markup, otherwise it will raise an error.  Note that the strict mode
        is deprecated.
        """
        if strict:
            warnings.warn("The strict mode is deprecated.",
                          DeprecationWarning, stacklevel=2)
        self.strict = strict
        self.reset()
    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        _markupbase.ParserBase.reset(self)
    def feed(self, data):
        r"""Feed data to the parser.
        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)
    def close(self):
        """Handle any buffered data."""
        self.goahead(1)
    def error(self, message):
        # Raise a parse error annotated with the current (line, column).
        raise HTMLParseError(message, self.getpos())
    __starttag_text = None
    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text
    def set_cdata_mode(self, elem):
        # Entering a raw-text element: only '</elem>' is interesting until
        # clear_cdata_mode() is called.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None
    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i) # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    break
                j = n
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i): # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    if self.strict:
                        k = self.parse_declaration(i)
                    else:
                        k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # Construct is incomplete; wait for more data unless we
                    # are at EOF, in which case salvage what we can.
                    if not end:
                        break
                    if self.strict:
                        self.error("EOF in middle of construct")
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]: #bail by consuming &#
                        # NOTE(review): slices rawdata[0:2], not [i:i+2];
                        # this matches the upstream Python 3.3 source this
                        # file is backported from, but looks suspicious.
                        self.handle_data(rawdata[0:2])
                        i = self.updatepos(i, 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        if self.strict:
                            self.error("EOF in middle of entity or char ref")
                        else:
                            # NOTE(review): 'k' may hold a stale value from an
                            # earlier branch here -- matches upstream.
                            if k <= i:
                                k = n
                            i = self.updatepos(i, i + 1)
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]
    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<!', ('unexpected call to '
                                        'parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            return self.parse_bogus_comment(i)
    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
                                                'parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1
    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2) # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j
    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip the matching quote pair around the value.
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()
        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Junk between the last attribute and the closing bracket:
            # recompute the reported position past the whole start tag.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos
    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        if self.strict:
            m = locatestarttagend.match(rawdata, i)
        else:
            m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                if self.strict:
                    self.updatepos(i, j + 1)
                    self.error("malformed empty start tag")
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if self.strict:
                self.updatepos(i, j)
                self.error("malformed start tag")
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")
    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1) # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i) # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                # Inside raw text, a malformed end tag is just data.
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            if self.strict:
                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after tha name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1
        elem = match.group(1).lower() # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                # End tag for a different element inside raw text: data.
                self.handle_data(rawdata[i:gtpos])
                return gtpos
        self.handle_endtag(elem.lower())
        self.clear_cdata_mode()
        return gtpos
    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)
    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass
    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass
    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass
    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass
    # Overridable -- handle data
    def handle_data(self, data):
        pass
    # Overridable -- handle comment
    def handle_comment(self, data):
        pass
    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass
    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass
    def unknown_decl(self, data):
        if self.strict:
            self.error("unknown declaration: %r" % (data,))
    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        if '&' not in s:
            return s
        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        c = int(s[1:].rstrip(';'), 16)
                    else:
                        c = int(s.rstrip(';'))
                    return chr(c)
            except ValueError:
                return '&#' + s
            else:
                from future.backports.html.entities import html5
                if s in html5:
                    return html5[s]
                elif s.endswith(';'):
                    return '&' + s
                # Longest-prefix match: html5 maps some entity names that
                # have no trailing ';'.
                for x in range(2, len(s)):
                    if s[:x] in html5:
                        return html5[s[:x]] + s[x:]
                else:
                    return '&' + s
        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
                      replaceEntities, s)
|
ramielrowe/magnum | refs/heads/master | magnum/common/pythonk8sclient/client/models/V1beta3_EndpointSubset.py | 15 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_EndpointSubset(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""
    def __init__(self):
        """Create an empty subset; deserializers fill in the attributes.
        Attributes:
            swaggerTypes (dict): The key is attribute name and the value is attribute type.
            attributeMap (dict): The key is attribute name and the value is json key in definition.
        """
        # Swagger type of each serialized attribute.
        self.swaggerTypes = {
            'addresses': 'list[V1beta3_EndpointAddress]',
            'ports': 'list[V1beta3_EndpointPort]'
        }
        # JSON key used for each attribute in the API definition.
        self.attributeMap = {
            'addresses': 'addresses',
            'ports': 'ports'
        }
        # IP addresses which offer the related ports.
        self.addresses = None  # list[V1beta3_EndpointAddress]
        # Port numbers available on the related IP addresses.
        self.ports = None  # list[V1beta3_EndpointPort]
|
jaysonsantos/servo | refs/heads/master | tests/wpt/web-platform-tests/old-tests/webdriver/navigation/forward.py | 142 | import unittest
import sys
import os
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class ForwardTest(base_test.WebDriverBaseTest):
    # Navigate two static pages deep, go back, then forward; the forward
    # page's body must match what was originally loaded.
    def test_forward(self):
        self.driver.get(self.webserver.where_is('navigation/res/forwardStart.html'))
        self.driver.get(self.webserver.where_is('navigation/res/forwardNext.html'))
        expected_body = self.driver.find_element_by_css_selector("body").text
        self.driver.back()
        body_after_back = self.driver.find_element_by_css_selector("body").text
        self.assertNotEqual(expected_body, body_after_back)
        self.driver.forward()
        body_after_forward = self.driver.find_element_by_css_selector("body").text
        self.assertEqual(expected_body, body_after_forward)
if __name__ == '__main__':
unittest.main()
|
lyceel/engine | refs/heads/master | testing/legion/jsonrpclib.py | 47 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to implement the JSON-RPC protocol.
This module uses xmlrpclib as the base and only overrides those
portions that implement the XML-RPC protocol. These portions are rewritten
to use the JSON-RPC protocol instead.
When large portions of code need to be rewritten the original code and
comments are preserved. The intention here is to keep the amount of code
change to a minimum.
This module only depends on default Python modules. No third party code is
required to use this module.
"""
import json
import urllib
import xmlrpclib as _base
__version__ = '1.0.0'
# Aliases from xmlrpclib: send_content() gzip-compresses large request
# bodies when the gzip module is available (see Transport.send_content).
gzip_encode = _base.gzip_encode
gzip = _base.gzip
class Error(Exception):
    """Base class for all errors raised by this module."""
    def __str__(self):
        # Render exactly like repr() so subclasses only customize __repr__.
        return repr(self)
class ProtocolError(Error):
    """Indicates a JSON protocol error."""
    def __init__(self, url, errcode, errmsg, headers):
        Error.__init__(self)
        self.url = url
        self.errcode = errcode
        self.errmsg = errmsg
        self.headers = headers
    def __repr__(self):
        # str() inherits this rendering via Error.__str__.
        return '<ProtocolError for %s: %s %s>' % (
            self.url, self.errcode, self.errmsg)
class ResponseError(Error):
    """Indicates a broken response package.
    Kept for API parity with xmlrpclib.ResponseError; nothing in the
    visible code raises it directly.
    """
    pass
class Fault(Error):
    """Indicates a JSON-RPC fault package.
    Attributes:
        code: Integer fault code.
        message: Human-readable fault message.
    """
    def __init__(self, code, message):
        """Create a fault.
        Args:
            code: The fault code; must be an integer.
            message: The fault message.
        Raises:
            TypeError: if code is not an integer.
        """
        Error.__init__(self)
        if not isinstance(code, int):
            # Bug fix: this used to call ProtocolError('...') with a single
            # argument, but ProtocolError.__init__ requires four, so the
            # raise itself crashed with a bare TypeError. Raise TypeError
            # explicitly (the same observable exception type) with the
            # intended message instead.
            raise TypeError('Fault code must be an integer.')
        self.code = code
        self.message = message
    def __repr__(self):
        return (
            '<Fault %s: %s>' %
            (self.code, repr(self.message))
            )
def CreateRequest(methodname, params, ident=''):
    """Create a valid JSON-RPC request.
    Args:
        methodname: The name of the remote method to invoke.
        params: The parameters to pass to the remote method.  This should be
            a list or tuple and able to be encoded by the default JSON parser.
        ident: The request identifier.
    Returns:
        A valid JSON-RPC request object (a dict).
    """
    return {
        'jsonrpc': '2.0',
        'method': methodname,
        'params': params,
        'id': ident,
    }
def CreateRequestString(methodname, params, ident=''):
    """Create a valid JSON-RPC request string.
    Args:
        methodname: The name of the remote method to invoke.
        params: The parameters to pass to the remote method.
            These parameters need to be encode-able by the default JSON parser.
        ident: The request identifier.
    Returns:
        A valid JSON-RPC request string.
    """
    request = CreateRequest(methodname, params, ident)
    return json.dumps(request)
def CreateResponse(data, ident):
    """Create a JSON-RPC response.
    Args:
        data: The data to return, or a Fault instance to signal an error.
        ident: The response identifier.
    Returns:
        A valid JSON-RPC response object (a dict).
    """
    # Build in the same key order as before: jsonrpc, error/response, id.
    payload = {'jsonrpc': '2.0'}
    if isinstance(data, Fault):
        # Faults travel as the JSON-RPC 'error' member.
        payload['error'] = {
            'code': data.code,
            'message': data.message}
    else:
        payload['response'] = data
    payload['id'] = ident
    return payload
def CreateResponseString(data, ident):
    """Create a JSON-RPC response string.
    Args:
        data: The data to return, or a Fault instance.
        ident: The response identifier.
    Returns:
        A valid JSON-RPC response string.
    """
    response = CreateResponse(data, ident)
    return json.dumps(response)
def ParseHTTPResponse(response):
    """Parse an HTTP response object and return the JSON-RPC payload.
    Args:
        response: An HTTP response object.
    Returns:
        The value of the JSON-RPC 'response' member.
    Raises:
        ResponseError: if no usable JSON-RPC payload was returned.
        ProtocolError: if the object format is not correct.
        Fault: if a Fault error is returned from the server.
    """
    # New-style http response objects expose getheader(); anything else is
    # treated as a plain file-like object.
    if hasattr(response, 'getheader'):
        if response.getheader('Content-Encoding', '') == 'gzip':
            # Transparently decompress gzip-encoded bodies.
            stream = _base.GzipDecodedResponse(response)
        else:
            stream = response
    else:
        stream = response
    # Drain the stream in chunks.
    data = ''
    while 1:
        chunk = stream.read(1024)
        if not chunk:
            break
        data += chunk
    response = json.loads(data)
    ValidateBasicJSONRPCData(response)
    if 'response' in response:
        ValidateResponse(response)
        return response['response']
    elif 'error' in response:
        ValidateError(response)
        code = response['error']['code']
        message = response['error']['message']
        raise Fault(code, message)
    else:
        # Bug fix: this used to call ProtocolError('No valid JSON returned'),
        # but ProtocolError.__init__ requires (url, errcode, errmsg, headers),
        # so the raise itself crashed with a bare TypeError.  ResponseError is
        # this module's documented "broken response package" error and takes
        # a plain message.
        raise ResponseError('No valid JSON returned')
def ValidateRequest(data):
    """Validate a JSON-RPC request object.
    Args:
        data: The JSON-RPC object (dict).
    Raises:
        ProtocolError: if the object format is not correct.
    """
    ValidateBasicJSONRPCData(data)
    for key in ('method', 'params'):
        if key not in data:
            raise ProtocolError('JSON is not a valid request')
def ValidateResponse(data):
    """Validate a JSON-RPC response object.
    Args:
        data: The JSON-RPC object (dict).
    Raises:
        ProtocolError: if the object format is not correct.
    """
    ValidateBasicJSONRPCData(data)
    if 'response' in data:
        return
    raise ProtocolError('JSON is not a valid response')
def ValidateError(data):
    """Validate a JSON-RPC error object.
    Args:
        data: The JSON-RPC object (dict).
    Raises:
        ProtocolError: if the object format is not correct.
    """
    ValidateBasicJSONRPCData(data)
    if 'error' not in data:
        raise ProtocolError('JSON is not a valid error response')
    err = data['error']
    if 'code' not in err or 'message' not in err:
        raise ProtocolError('JSON is not a valid error response')
def ValidateBasicJSONRPCData(data):
    """Validate a basic JSON-RPC object.
    Args:
        data: The JSON-RPC object (dict).
    Raises:
        ProtocolError: if the object format is not correct.
    """
    # Check the cheap structural requirements in order; first failure wins.
    if not isinstance(data, dict):
        raise ProtocolError('JSON data is not a dictionary')
    if data.get('jsonrpc') != '2.0':
        raise ProtocolError('JSON is not a valid JSON RPC 2.0 message')
    if 'id' not in data:
        raise ProtocolError('JSON data missing required id entry')
class Transport(_base.Transport):
    """RPC transport class.
    This class extends the functionality of xmlrpclib.Transport and only
    overrides the operations needed to change the protocol from XML-RPC to
    JSON-RPC.
    """
    user_agent = 'jsonrpclib.py/' + __version__
    def send_content(self, connection, request_body):
        """Send the request body, marking it as JSON instead of XML."""
        connection.putheader('Content-Type','application/json')
        # Optionally gzip-encode the request body when it exceeds the
        # threshold inherited from xmlrpclib.Transport and gzip is available.
        if (self.encode_threshold is not None and
            self.encode_threshold < len(request_body) and
            gzip):
            connection.putheader('Content-Encoding', 'gzip')
            request_body = gzip_encode(request_body)
        connection.putheader('Content-Length', str(len(request_body)))
        connection.endheaders(request_body)
    def single_request(self, host, handler, request_body, verbose=0):
        """Issue a single JSON-RPC request."""
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)
        try:
            self.send_request(h, handler, request_body)
            self.send_host(h, host)
            self.send_user_agent(h)
            self.send_content(h, request_body)
            response = h.getresponse(buffering=True)
            if response.status == 200:
                self.verbose = verbose #pylint: disable=attribute-defined-outside-init
                return self.parse_response(response)
        except Fault:
            # Remote faults are expected protocol traffic; let them through.
            raise
        except Exception:
            # All unexpected errors leave connection in
            # a strange state, so we clear it.
            self.close()
            raise
        # discard any response data and raise exception
        # Reached only when the try block completed with a non-200 status,
        # so 'response' is guaranteed to be bound here.
        if response.getheader('content-length', 0):
            response.read()
        raise ProtocolError(
            host + handler,
            response.status, response.reason,
            response.msg,
            )
    def parse_response(self, response):
        """Parse the HTTP resoponse from the server."""
        return ParseHTTPResponse(response)
class SafeTransport(_base.SafeTransport):
    """Transport class for HTTPS servers.
    This class extends the functionality of xmlrpclib.SafeTransport and only
    overrides the operations needed to change the protocol from XML-RPC to
    JSON-RPC.
    """
    def parse_response(self, response):
        # Decode the HTTP response as JSON-RPC instead of XML-RPC.
        return ParseHTTPResponse(response)
class ServerProxy(_base.ServerProxy):
    """Proxy class to the RPC server.
    This class extends the functionality of xmlrpclib.ServerProxy and only
    overrides the operations needed to change the protocol from XML-RPC to
    JSON-RPC.
    NOTE(review): the private attributes used here (self.__transport,
    self.__host, self.__handler, self.__verbose) only line up with the base
    class because this subclass reuses the class name 'ServerProxy', so
    Python's name mangling yields the same _ServerProxy__* names.  Renaming
    this class would silently break that.
    """
    def __init__(self, uri, transport=None, encoding=None, verbose=0,
                 allow_none=0, use_datetime=0):
        # Reject non-HTTP(S) URIs up front.
        urltype, _ = urllib.splittype(uri)
        if urltype not in ('http', 'https'):
            raise IOError('unsupported JSON-RPC protocol')
        _base.ServerProxy.__init__(self, uri, transport, encoding, verbose,
                                   allow_none, use_datetime)
        # NOTE(review): splittype is called a second time and 'uri' is
        # rebound after the base __init__ already consumed it -- looks
        # redundant but is kept as-is.
        transport_type, uri = urllib.splittype(uri)
        if transport is None:
            if transport_type == 'https':
                transport = SafeTransport(use_datetime=use_datetime)
            else:
                transport = Transport(use_datetime=use_datetime)
        # Replace whatever transport the base class selected with the
        # JSON-RPC-aware one.
        self.__transport = transport
    def __request(self, methodname, params):
        """Call a method on the remote server."""
        # __host and __handler are populated by _base.ServerProxy.__init__.
        request = CreateRequestString(methodname, params)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
            )
        return response
# Backwards-compatible alias mirroring xmlrpclib's 'Server' name.
Server = ServerProxy
|
tarzan0820/addons-yelizariev | refs/heads/8.0 | thecage_data/__openerp__.py | 1 | {
    'name' : 'Initialization data',
    'version' : '1.0.0',
    'author' : 'IT-Projects LLC, Veronika Kotovich',
    'license': 'GPL-3',
    'website' : 'https://twitter.com/vkotovi4',
    'category' : 'Other',
    'description': """
    """,
    # Modules that must be installed before this one.
    'depends' : ['l10n_sg',
                 'pitch_booking',
                 'sms_sg',
                 'sale_order_hide_tax',
                 'sale_contract_slots',
                 'res_partner_phone',
                 ],
    # XML data files loaded on install/update, in order.
    'data':[
        'data.xml',
        'views/view.xml',
    ],
    'installable': True,
    # Not installed automatically when dependencies are present.
    'auto_install': False,
|
iulian787/spack | refs/heads/develop | var/spack/repos/builtin/packages/r-colorspace/package.py | 5 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RColorspace(RPackage):
    """Carries out mapping between assorted color spaces including RGB, HSV,
    HLS, CIEXYZ, CIELUV, HCL (polar CIELUV), CIELAB and polar CIELAB.
    Qualitative, sequential, and diverging color palettes based on HCL colors
    are provided."""
    homepage = "https://cloud.r-project.org/package=colorspace"
    url      = "https://cloud.r-project.org/src/contrib/colorspace_1.3-2.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/colorspace"
    # Known releases, newest first; sha256 pins each upstream tarball.
    version('1.4-1', sha256='693d713a050f8bfecdb7322739f04b40d99b55aed168803686e43401d5f0d673')
    version('1.4-0', sha256='ce003c5958dd704697959e9dc8a108c8cb568f8d78ece113235732afc5dff556')
    version('1.3-2', sha256='dd9fd2342b650456901d014e7ff6d2e201f8bec0b555be63b1a878d2e1513e34')
    version('1.2-6', sha256='ba3165c5b906edadcd1c37cad0ef58f780b0af651f3fdeb49fbb2dc825251679')
    # Minimum R version required at both build and run time.
    depends_on('r@3.0.0:', type=('build', 'run'))
|
xfguo/pysnmp | refs/heads/master | pysnmp/smi/mibs/SNMP-TARGET-MIB.py | 1 | # PySNMP SMI module. Autogenerated from smidump -f python SNMP-TARGET-MIB
# by libsmi2pysnmp-0.1.3 at Tue Apr 3 16:05:39 2012,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
from pysnmp.smi import error
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( SnmpAdminString, SnmpMessageProcessingModel, SnmpSecurityLevel, SnmpSecurityModel, ) = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString", "SnmpMessageProcessingModel", "SnmpSecurityLevel", "SnmpSecurityModel")
( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup")
( Bits, Counter32, Integer32, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, snmpModules, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter32", "Integer32", "Integer32", "ModuleIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "snmpModules")
( RowStatus, StorageType, TAddress, TDomain, TextualConvention, TestAndIncr, TimeInterval, ) = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "StorageType", "TAddress", "TDomain", "TextualConvention", "TestAndIncr", "TimeInterval")
# Types
class SnmpTagList(TextualConvention, OctetString):
    """SNMP-TARGET-MIB SnmpTagList textual convention.
    A list of tag values separated by single delimiter characters.  Per
    RFC 3413, the delimiters are space (0x20), line feed (0x0A), tab (0x09)
    and carriage return (0x0D).
    """
    displayHint = "255t"
    subtypeSpec = OctetString.subtypeSpec+ValueSizeConstraint(0,255)
    # Bug fix: the tuple previously listed '\t' twice and omitted '\r';
    # RFC 3413 includes carriage return in the delimiter set.
    _delimiters = (' ', '\n', '\t', '\r')
    def prettyIn(self, value):
        """Reject leading, adjacent or trailing delimiters, then convert.
        Raises:
            error.SmiError: if delimiter placement is invalid.
        """
        inDelim = True  # Start True so a leading delimiter is rejected.
        for v in str(value):
            if v in self._delimiters:
                if inDelim:
                    raise error.SmiError('Leading or multiple delimiters not allowed in tag list %r' % value)
                inDelim = True
            else:
                inDelim = False
        if value and inDelim:
            raise error.SmiError('Dangling delimiter not allowed in tag list %r' % value)
        return OctetString.prettyIn(self, value)
class SnmpTagValue(TextualConvention, OctetString):
    """SNMP-TARGET-MIB SnmpTagValue textual convention.
    A single tag value: an octet string that must not contain any delimiter
    character (RFC 3413: space, LF, tab, CR).
    """
    displayHint = "255t"
    subtypeSpec = OctetString.subtypeSpec+ValueSizeConstraint(0,255)
    # Bug fix: '\t' was listed twice and '\r' omitted; RFC 3413 includes
    # carriage return in the delimiter set.
    _delimiters = (' ', '\n', '\t', '\r')
    def prettyIn(self, value):
        """Reject values containing any delimiter character.
        Raises:
            error.SmiError: if a delimiter is present.
        """
        for v in str(value):
            if v in self._delimiters:
                raise error.SmiError('Delimiters not allowed in tag value')
        return OctetString.prettyIn(self, value)
# Objects
snmpTargetMIB = ModuleIdentity((1, 3, 6, 1, 6, 3, 12)).setRevisions(("2002-10-14 00:00","1998-08-04 00:00","1997-07-14 00:00",))
if mibBuilder.loadTexts: snmpTargetMIB.setOrganization("IETF SNMPv3 Working Group")
if mibBuilder.loadTexts: snmpTargetMIB.setContactInfo("WG-email: snmpv3@lists.tislabs.com\nSubscribe: majordomo@lists.tislabs.com\n In message body: subscribe snmpv3\n\nCo-Chair: Russ Mundy\n Network Associates Laboratories\nPostal: 15204 Omega Drive, Suite 300\n Rockville, MD 20850-4601\n USA\nEMail: mundy@tislabs.com\nPhone: +1 301-947-7107\n\nCo-Chair: David Harrington\n Enterasys Networks\nPostal: 35 Industrial Way\n P. O. Box 5004\n Rochester, New Hampshire 03866-5005\n USA\nEMail: dbh@enterasys.com\nPhone: +1 603-337-2614\n\nCo-editor: David B. Levi\n Nortel Networks\nPostal: 3505 Kesterwood Drive\n Knoxville, Tennessee 37918\nEMail: dlevi@nortelnetworks.com\nPhone: +1 865 686 0432\n\nCo-editor: Paul Meyer\n Secure Computing Corporation\nPostal: 2675 Long Lake Road\n\n Roseville, Minnesota 55113\nEMail: paul_meyer@securecomputing.com\nPhone: +1 651 628 1592\n\nCo-editor: Bob Stewart\n Retired")
if mibBuilder.loadTexts: snmpTargetMIB.setDescription("This MIB module defines MIB objects which provide\nmechanisms to remotely configure the parameters used\nby an SNMP entity for the generation of SNMP messages.\n\nCopyright (C) The Internet Society (2002). This\nversion of this MIB module is part of RFC 3413;\nsee the RFC itself for full legal notices.")
snmpTargetObjects = MibIdentifier((1, 3, 6, 1, 6, 3, 12, 1))
snmpTargetSpinLock = MibScalar((1, 3, 6, 1, 6, 3, 12, 1, 1), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpTargetSpinLock.setDescription("This object is used to facilitate modification of table\nentries in the SNMP-TARGET-MIB module by multiple\nmanagers. In particular, it is useful when modifying\nthe value of the snmpTargetAddrTagList object.\n\nThe procedure for modifying the snmpTargetAddrTagList\nobject is as follows:\n\n 1. Retrieve the value of snmpTargetSpinLock and\n of snmpTargetAddrTagList.\n\n 2. Generate a new value for snmpTargetAddrTagList.\n\n 3. Set the value of snmpTargetSpinLock to the\n retrieved value, and the value of\n snmpTargetAddrTagList to the new value. If\n the set fails for the snmpTargetSpinLock\n object, go back to step 1.")
snmpTargetAddrTable = MibTable((1, 3, 6, 1, 6, 3, 12, 1, 2))
if mibBuilder.loadTexts: snmpTargetAddrTable.setDescription("A table of transport addresses to be used in the generation\nof SNMP messages.")
snmpTargetAddrEntry = MibTableRow((1, 3, 6, 1, 6, 3, 12, 1, 2, 1)).setIndexNames((1, "SNMP-TARGET-MIB", "snmpTargetAddrName"))
if mibBuilder.loadTexts: snmpTargetAddrEntry.setDescription("A transport address to be used in the generation\nof SNMP operations.\n\nEntries in the snmpTargetAddrTable are created and\ndeleted using the snmpTargetAddrRowStatus object.")
snmpTargetAddrName = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: snmpTargetAddrName.setDescription("The locally arbitrary, but unique identifier associated\nwith this snmpTargetAddrEntry.")
snmpTargetAddrTDomain = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 2), TDomain()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetAddrTDomain.setDescription("This object indicates the transport type of the address\ncontained in the snmpTargetAddrTAddress object.")
snmpTargetAddrTAddress = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 3), TAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetAddrTAddress.setDescription("This object contains a transport address. The format of\nthis address depends on the value of the\nsnmpTargetAddrTDomain object.")
snmpTargetAddrTimeout = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 4), TimeInterval().clone('1500')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetAddrTimeout.setDescription("This object should reflect the expected maximum round\ntrip time for communicating with the transport address\ndefined by this row. When a message is sent to this\naddress, and a response (if one is expected) is not\nreceived within this time period, an implementation\nmay assume that the response will not be delivered.\n\nNote that the time interval that an application waits\nfor a response may actually be derived from the value\nof this object. The method for deriving the actual time\ninterval is implementation dependent. One such method\nis to derive the expected round trip time based on a\nparticular retransmission algorithm and on the number\nof timeouts which have occurred. The type of message may\nalso be considered when deriving expected round trip\ntimes for retransmissions. For example, if a message is\nbeing sent with a securityLevel that indicates both\n\nauthentication and privacy, the derived value may be\nincreased to compensate for extra processing time spent\nduring authentication and encryption processing.")
snmpTargetAddrRetryCount = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)).clone(3)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetAddrRetryCount.setDescription("This object specifies a default number of retries to be\nattempted when a response is not received for a generated\nmessage. An application may provide its own retry count,\nin which case the value of this object is ignored.")
snmpTargetAddrTagList = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 6), SnmpTagList().clone('')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetAddrTagList.setDescription("This object contains a list of tag values which are\nused to select target addresses for a particular\noperation.")
snmpTargetAddrParams = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 7), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetAddrParams.setDescription("The value of this object identifies an entry in the\nsnmpTargetParamsTable. The identified entry\ncontains SNMP parameters to be used when generating\nmessages to be sent to this transport address.")
snmpTargetAddrStorageType = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 8), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetAddrStorageType.setDescription("The storage type for this conceptual row.\nConceptual rows having the value 'permanent' need not\nallow write-access to any columnar objects in the row.")
snmpTargetAddrRowStatus = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 2, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetAddrRowStatus.setDescription("The status of this conceptual row.\n\nTo create a row in this table, a manager must\nset this object to either createAndGo(4) or\ncreateAndWait(5).\n\nUntil instances of all corresponding columns are\nappropriately configured, the value of the\ncorresponding instance of the snmpTargetAddrRowStatus\ncolumn is 'notReady'.\n\nIn particular, a newly created row cannot be made\nactive until the corresponding instances of\nsnmpTargetAddrTDomain, snmpTargetAddrTAddress, and\nsnmpTargetAddrParams have all been set.\n\nThe following objects may not be modified while the\nvalue of this object is active(1):\n - snmpTargetAddrTDomain\n - snmpTargetAddrTAddress\nAn attempt to set these objects while the value of\nsnmpTargetAddrRowStatus is active(1) will result in\nan inconsistentValue error.")
snmpTargetParamsTable = MibTable((1, 3, 6, 1, 6, 3, 12, 1, 3))
if mibBuilder.loadTexts: snmpTargetParamsTable.setDescription("A table of SNMP target information to be used\nin the generation of SNMP messages.")
snmpTargetParamsEntry = MibTableRow((1, 3, 6, 1, 6, 3, 12, 1, 3, 1)).setIndexNames((1, "SNMP-TARGET-MIB", "snmpTargetParamsName"))
if mibBuilder.loadTexts: snmpTargetParamsEntry.setDescription("A set of SNMP target information.\n\nEntries in the snmpTargetParamsTable are created and\ndeleted using the snmpTargetParamsRowStatus object.")
snmpTargetParamsName = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 3, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: snmpTargetParamsName.setDescription("The locally arbitrary, but unique identifier associated\nwith this snmpTargetParamsEntry.")
snmpTargetParamsMPModel = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 3, 1, 2), SnmpMessageProcessingModel()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetParamsMPModel.setDescription("The Message Processing Model to be used when generating\nSNMP messages using this entry.")
snmpTargetParamsSecurityModel = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 3, 1, 3), SnmpSecurityModel().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetParamsSecurityModel.setDescription("The Security Model to be used when generating SNMP\nmessages using this entry. An implementation may\nchoose to return an inconsistentValue error if an\nattempt is made to set this variable to a value\nfor a security model which the implementation does\nnot support.")
snmpTargetParamsSecurityName = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 3, 1, 4), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetParamsSecurityName.setDescription("The securityName which identifies the Principal on\nwhose behalf SNMP messages will be generated using\nthis entry.")
snmpTargetParamsSecurityLevel = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 3, 1, 5), SnmpSecurityLevel()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetParamsSecurityLevel.setDescription("The Level of Security to be used when generating\nSNMP messages using this entry.")
snmpTargetParamsStorageType = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 3, 1, 6), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetParamsStorageType.setDescription("The storage type for this conceptual row.\nConceptual rows having the value 'permanent' need not\nallow write-access to any columnar objects in the row.")
snmpTargetParamsRowStatus = MibTableColumn((1, 3, 6, 1, 6, 3, 12, 1, 3, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpTargetParamsRowStatus.setDescription("The status of this conceptual row.\n\nTo create a row in this table, a manager must\nset this object to either createAndGo(4) or\ncreateAndWait(5).\n\nUntil instances of all corresponding columns are\nappropriately configured, the value of the\ncorresponding instance of the snmpTargetParamsRowStatus\ncolumn is 'notReady'.\n\nIn particular, a newly created row cannot be made\nactive until the corresponding\nsnmpTargetParamsMPModel,\nsnmpTargetParamsSecurityModel,\nsnmpTargetParamsSecurityName,\nand snmpTargetParamsSecurityLevel have all been set.\n\nThe following objects may not be modified while the\nvalue of this object is active(1):\n - snmpTargetParamsMPModel\n - snmpTargetParamsSecurityModel\n - snmpTargetParamsSecurityName\n - snmpTargetParamsSecurityLevel\nAn attempt to set these objects while the value of\nsnmpTargetParamsRowStatus is active(1) will result in\nan inconsistentValue error.")
snmpUnavailableContexts = MibScalar((1, 3, 6, 1, 6, 3, 12, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpUnavailableContexts.setDescription("The total number of packets received by the SNMP\nengine which were dropped because the context\ncontained in the message was unavailable.")
snmpUnknownContexts = MibScalar((1, 3, 6, 1, 6, 3, 12, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpUnknownContexts.setDescription("The total number of packets received by the SNMP\nengine which were dropped because the context\ncontained in the message was unknown.")
# Conformance-information subtree of the MIB (snmpTargetMIB.3), holding the
# compliance statements and object groups defined below.
snmpTargetConformance = MibIdentifier((1, 3, 6, 1, 6, 3, 12, 3))
snmpTargetCompliances = MibIdentifier((1, 3, 6, 1, 6, 3, 12, 3, 1))
snmpTargetGroups = MibIdentifier((1, 3, 6, 1, 6, 3, 12, 3, 2))
# Augmentations (none are defined by this MIB)
# Groups
snmpTargetBasicGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 12, 3, 2, 1)).setObjects(*(("SNMP-TARGET-MIB", "snmpTargetAddrTDomain"), ("SNMP-TARGET-MIB", "snmpTargetParamsMPModel"), ("SNMP-TARGET-MIB", "snmpTargetParamsSecurityName"), ("SNMP-TARGET-MIB", "snmpTargetAddrTAddress"), ("SNMP-TARGET-MIB", "snmpTargetSpinLock"), ("SNMP-TARGET-MIB", "snmpTargetParamsSecurityLevel"), ("SNMP-TARGET-MIB", "snmpTargetParamsSecurityModel"), ("SNMP-TARGET-MIB", "snmpTargetAddrRowStatus"), ("SNMP-TARGET-MIB", "snmpTargetAddrStorageType"), ("SNMP-TARGET-MIB", "snmpTargetAddrParams"), ("SNMP-TARGET-MIB", "snmpTargetParamsStorageType"), ("SNMP-TARGET-MIB", "snmpTargetAddrTagList"), ("SNMP-TARGET-MIB", "snmpTargetParamsRowStatus"), ) )
if mibBuilder.loadTexts: snmpTargetBasicGroup.setDescription("A collection of objects providing basic remote\nconfiguration of management targets.")
snmpTargetResponseGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 12, 3, 2, 2)).setObjects(*(("SNMP-TARGET-MIB", "snmpTargetAddrRetryCount"), ("SNMP-TARGET-MIB", "snmpTargetAddrTimeout"), ) )
if mibBuilder.loadTexts: snmpTargetResponseGroup.setDescription("A collection of objects providing remote configuration\nof management targets for applications which generate\nSNMP messages for which a response message would be\nexpected.")
snmpTargetCommandResponderGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 12, 3, 2, 3)).setObjects(*(("SNMP-TARGET-MIB", "snmpUnavailableContexts"), ("SNMP-TARGET-MIB", "snmpUnknownContexts"), ) )
if mibBuilder.loadTexts: snmpTargetCommandResponderGroup.setDescription("A collection of objects required for command responder\napplications, used for counting error conditions.")
# Compliances
snmpTargetCommandResponderCompliance = ModuleCompliance((1, 3, 6, 1, 6, 3, 12, 3, 1, 1)).setObjects(*(("SNMP-TARGET-MIB", "snmpTargetCommandResponderGroup"), ) )
if mibBuilder.loadTexts: snmpTargetCommandResponderCompliance.setDescription("The compliance statement for SNMP entities which include\na command responder application.")
# Exports
# Re-export the generated symbols so other MIB modules can pull them in via
# mibBuilder.importSymbols (the counterpart of the importSymbols calls above).
# Module identity
mibBuilder.exportSymbols("SNMP-TARGET-MIB", PYSNMP_MODULE_ID=snmpTargetMIB)
# Types
mibBuilder.exportSymbols("SNMP-TARGET-MIB", SnmpTagList=SnmpTagList, SnmpTagValue=SnmpTagValue)
# Objects
mibBuilder.exportSymbols("SNMP-TARGET-MIB", snmpTargetMIB=snmpTargetMIB, snmpTargetObjects=snmpTargetObjects, snmpTargetSpinLock=snmpTargetSpinLock, snmpTargetAddrTable=snmpTargetAddrTable, snmpTargetAddrEntry=snmpTargetAddrEntry, snmpTargetAddrName=snmpTargetAddrName, snmpTargetAddrTDomain=snmpTargetAddrTDomain, snmpTargetAddrTAddress=snmpTargetAddrTAddress, snmpTargetAddrTimeout=snmpTargetAddrTimeout, snmpTargetAddrRetryCount=snmpTargetAddrRetryCount, snmpTargetAddrTagList=snmpTargetAddrTagList, snmpTargetAddrParams=snmpTargetAddrParams, snmpTargetAddrStorageType=snmpTargetAddrStorageType, snmpTargetAddrRowStatus=snmpTargetAddrRowStatus, snmpTargetParamsTable=snmpTargetParamsTable, snmpTargetParamsEntry=snmpTargetParamsEntry, snmpTargetParamsName=snmpTargetParamsName, snmpTargetParamsMPModel=snmpTargetParamsMPModel, snmpTargetParamsSecurityModel=snmpTargetParamsSecurityModel, snmpTargetParamsSecurityName=snmpTargetParamsSecurityName, snmpTargetParamsSecurityLevel=snmpTargetParamsSecurityLevel, snmpTargetParamsStorageType=snmpTargetParamsStorageType, snmpTargetParamsRowStatus=snmpTargetParamsRowStatus, snmpUnavailableContexts=snmpUnavailableContexts, snmpUnknownContexts=snmpUnknownContexts, snmpTargetConformance=snmpTargetConformance, snmpTargetCompliances=snmpTargetCompliances, snmpTargetGroups=snmpTargetGroups)
# Groups
# Conformance groups and compliance statements, exported for other modules.
mibBuilder.exportSymbols("SNMP-TARGET-MIB", snmpTargetBasicGroup=snmpTargetBasicGroup, snmpTargetResponseGroup=snmpTargetResponseGroup, snmpTargetCommandResponderGroup=snmpTargetCommandResponderGroup)
# Compliances
mibBuilder.exportSymbols("SNMP-TARGET-MIB", snmpTargetCommandResponderCompliance=snmpTargetCommandResponderCompliance)
|
jairomoldes/PyTango | refs/heads/develop | doc/conf.py | 4 | # ------------------------------------------------------------------------------
# This file is part of PyTango (http://pytango.rtfd.io)
#
# Copyright 2006-2012 CELLS / ALBA Synchrotron, Bellaterra, Spain
# Copyright 2013-2014 European Synchrotron Radiation Facility, Grenoble, France
#
# Distributed under the terms of the GNU Lesser General Public License,
# either version 3 of the License, or (at your option) any later version.
# See LICENSE.txt for more info.
# ------------------------------------------------------------------------------
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../'))
sys.path.append(os.path.abspath('./'))
# Import tango
# Fall back to a mock when the compiled tango extension cannot be imported --
# presumably so the docs can still build without the binary extension (see
# mock_tango_extension alongside this file); TODO confirm.
try:
    import tango
except ImportError:
    from mock_tango_extension import tango
from tango import Release
# Echo the documented version and its location into the build log.
print("Building documentation for PyTango {0}".format(Release.version_long))
print("Using PyTango from: {0}".format(os.path.dirname(tango.__file__)))
needs_sphinx = "1.0"
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.imgmath',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.graphviz']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'PyTango'
copyright = u"""Except where otherwise noted, content on this site is
licensed under a Creative Commons Attribution 3.0 License"""
#Ideally we would like to put the following html code for copyright... but how?
'''\
<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/es/">
<img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-sa/3.0/es/88x31.png" />
</a><br />
<span xmlns:dc="http://purl.org/dc/elements/1.1/" href="http://purl.org/dc/dcmitype/Text" property="dc:title" rel="dc:type">PyTango Documentation</span>
by
<span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">ESRF</span>
is licensed under a
<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/es/">Creative Commons Attribution-Share Alike 3.0 Spain License</a>.'''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(Release.version.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = Release.version_long
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
html_context = {
'extra_css_files': [
# 'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
# 'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
# '_static/jssor.css',
'_static/css/tango_cs_theme.css',
],
}
html_style = 'tango_cs_theme.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = "PyTango documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = "PyTango"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': ['indexsidebar.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'index' : 'index.html' }
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTangodoc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'PyTango.tex', u'PyTango Documentation',
u'PyTango team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/logo.png'
latex_elements = {
'fontpkg': '\\usepackage{palatino}',
'papersize': 'a4paper',
'pointsize': '10pt',
}
latex_show_urls = 'no'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# -- Options for RST -----------------------------------------------------------
rst_epilog = """\
.. _Tango: http://www.tango-controls.org
.. _Python: http://python.org
.. _IPython: http://ipython.org
.. _numpy: http://www.numpy.org
.. _gevent: http://www.gevent.org
.. _asyncio: https://asyncio.readthedocs.io/en/latest/
.. _boost-python: http://www.boost.org/libs/python
.. _PyPi: https://pypi.python.org/pypi/pytango
.. _issues: https://github.com/tango-controls/pytango/issues
.. _PRs: https://github.com/tango-controls/pytango/pulls
"""
# -- Options for reference to other documentation ------------------------------
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://docs.scipy.org/doc/numpy': None,
'http://ipython.org/ipython-doc/stable': None,
'http://api.mongodb.org/python/current': None,
'http://couchdb-python.readthedocs.io/en/latest': None,
'http://pycassa.github.io/pycassa': None,
'http://docs.sqlalchemy.org/en/latest': None,
}
todo_include_todos = True
def copy_spaces(origin):
    """Return the leading run of spaces/tabs of *origin* (its indentation)."""
    prefix = ''
    for ch in origin:
        if ch not in (' ', '\t'):
            break
        prefix += ch
    return prefix
def type_to_link(tipus):
    """Render type name *tipus* as a reST ``:class:`` reference.

    ``sequence<X>`` wrappers are kept literal and the inner type is linked
    recursively; any other name is wrapped in ``:class:`...```.
    """
    if tipus.startswith('sequence<') and tipus.endswith('>'):
        return 'sequence<%s>' % type_to_link(tipus[9:-1])
    return ':class:`%s`' % tipus
def type_to_pytango_link(tipus):
    """Like type_to_link, but only link names that exist in the tango module.

    Plain names are wrapped in ``:class:`...``` only when present in
    ``dir(tango)``; anything else is returned verbatim.  Fix: the
    ``sequence<...>`` branch previously recursed into type_to_link, which
    links unconditionally -- recurse into this function instead so inner
    types receive the same ``dir(tango)`` filtering.
    """
    if tipus[:9] == 'sequence<' and tipus[-1:] == '>':
        return 'sequence<' + type_to_pytango_link(tipus[9:-1]) + '>'
    elif tipus in dir(tango):
        return ':class:`' + tipus + "`"
    else:
        return tipus
def possible_type_to_link(text):
    """If *text* is a parenthesized type like ``(T)``, link T; else return as-is."""
    if text.startswith('(') and text.endswith(')'):
        return '(%s)' % type_to_link(text[1:-1])
    return text
def parse_typed_line(line):
    """Link the first space-delimited token of *line* (usually a type) and
    append the remainder unchanged, always separated by a single space."""
    first, _, rest = line.strip().partition(' ')
    return possible_type_to_link(first.strip()) + ' ' + rest
def parse_parameters(line):
    """Rewrite a ``- name : type desc`` bullet as a reST ``:name: type desc``
    field, preserving the original indentation; non-bullet lines pass through."""
    indent = copy_spaces(line)
    stripped = line.strip()
    if not stripped.startswith('- '):
        return line
    pname, sep, value = stripped[2:].partition(':')
    assert sep == ':'  # a parameter bullet must carry a ':' separator
    return indent + ':' + pname.strip() + ': ' + parse_typed_line(value)
def parse_bullet_with_type(line):
    """Link the type in a ``- name: type desc`` or ``* name: type desc``
    bullet, keeping indentation; anything else is returned unchanged."""
    indent = copy_spaces(line)
    stripped = line.strip()
    if stripped[:2] not in ('- ', '* '):
        return line
    head, sep, value = stripped.partition(':')
    if not sep:
        return line
    return indent + head + ': ' + parse_typed_line(value)
def parse_throws(line):
    """Run every word of *line* through type_to_pytango_link, leaving the
    non-word separators (captured by the regex split) untouched."""
    tokens = re.split(r'(\W+)', line)
    assert ''.join(tokens) == line  # a capturing split must be lossless
    return ''.join(type_to_pytango_link(tok) for tok in tokens)
# http://codedump.tumblr.com/post/94712647/handling-python-docstring-indentation
def docstring_to_lines(docstring):
    """Split *docstring* into dedented lines (PEP 257-style trimming).

    The first line is stripped as-is; the smallest common indentation of the
    remaining lines is removed; leading and trailing blank lines are dropped.
    Returns [] for None or an empty docstring.
    """
    if not docstring:
        return []
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count):
    # sys.maxint was removed in Python 3; sys.maxsize serves as the sentinel.
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    return trimmed
def search_ONLY_signature(name, text):
    """Return the single signature line for *name* found in docstring *text*.

    A signature is a non-indented line of the form ``fname(...)`` whose fname
    equals the last dotted component of *name*.  Returns None when no such
    line exists, when more than one exists (overloaded method), or when any
    other non-indented non-blank text appears; indented and blank lines are
    ignored by the scan.
    """
    lines = docstring_to_lines(text)
    # There should be ONE signature and must be the FIRST text
    # Signature is the ONLY starting at position 0
    signatureLine = None
    for ln in range(len(lines)):
        line = lines[ln]
        if len(line.strip()) and line[0] != ' ':
            parentesis = line.split('(', 1)
            fname = parentesis[0].strip()
            if len(parentesis)==2 and fname == name.rsplit('.',1)[1]:
                if signatureLine is not None: # More than one signature!
                    return None
                signatureLine = ln
            else:
                return None # There's a text as FIRST text that's NOT the signature!
    if signatureLine is None:
        return None
    return lines[signatureLine]
def split_signature(text):
    """Split ``"fname(params) -> ret"`` into ``("(params)", "ret")``.

    Returns None for None input, for text without exactly one ``->``, or
    when the left-hand side has no opening parenthesis.
    """
    if text is None:
        return None
    # split "fname(params)", "returntype"
    parts = text.split('->')
    if len(parts) != 2:
        return None
    left = parts[0].strip()
    ret_type = parts[1].strip()
    # get rid of "fname": keep from the opening parenthesis onwards
    paren = left.find('(')
    if paren < 0:
        return None
    return left[paren:], ret_type
# Shared state between __process_signature and __reformat_lines: maps a
# fully-qualified method name -> True when its docstring holds exactly one
# signature, so __reformat_lines knows not to render a fake one.
_with_only_one_signature_methods = {}
def __reformat_lines(app, what, name, obj, options, lines):
    """autodoc-process-docstring hook: rewrite docstring *lines* in place.

    For non-methods, only type bullets are linked.  For methods, signature
    lines are bolded (unless sphinx already shows the only signature),
    "New in PyTango" notes are italicized, and "Parameters/Return/Throws/
    Example/See Also" headings become reST field lists with linked types.
    Extra blank/marker lines are collected in `toinsert` and spliced in at
    the end (in reverse, so earlier insert positions stay valid).
    """
    global _with_only_one_signature_methods
    if what != 'method':
        for ln in range(len(lines)):
            lines[ln] = parse_bullet_with_type(lines[ln])
        return
    toinsert = []
    parsingParameters = False
    parsingThrows = False
    toinsert.append((0, ""))
    for ln in range(len(lines)):
        line = lines[ln]
        # Non-indented, non-empty lines are candidate signature lines.
        if len(line) and line[0] != ' ':
            if name in _with_only_one_signature_methods:
                # This method has one and only one signature. So it will
                # be displayed by sphinx, there's no need for us to fake
                # it here...
                lines[ln] = ""
            else:
                parentesis = line.split('(', 1)
                fname = parentesis[0].strip()
                if len(parentesis)==2 and fname == name.rsplit('.',1)[1]:
                    sg = split_signature(line)
                    if sg is not None:
                        # Main lines are like small titles (**bold**):
                        lines[ln] = '**' + fname +'** *' + sg[0] + '* **->** ' + type_to_link(sg[1])
                        # Add an ENTER after the title, to make a different
                        # paragraph. So if I have 2 signatures, there's no problem
                        # with it...
                        toinsert.append((ln+1, ""))
        ## Main lines are like small titles (**bold**):
        #lines[ln]='**' + line.strip() + '**'
        ## Add an ENTER after the title, to make a different
        ## paragraph. So if I have 2 signatures, there's no problem
        ## with it...
        #toinsert.append((ln+1, ""))
        # Mark the "New in this version" lines...
        if line.strip()[:14] == "New in PyTango":
            lines[ln] = copy_spaces(lines[ln]) + "*" + line.strip() + "*"
            parsingParameters = False
            parsingThrows = False
        # Look for special control_words
        # To replace the actual syntax: "Return : something"
        # with the one understood by reStructuredText ":Return: something"
        spl = line.strip().split(':', 1)
        control_word = spl[0].strip()
        if ((len(spl) != 2)
            or (control_word not in ["Parameters", "Return", "Throws", "Example", "See Also" ]) ):
            # Not a heading: keep reformatting the body of the section we
            # are currently inside (if any) and move on.
            if parsingParameters:
                lines[ln] = parse_parameters(line)
            elif parsingThrows:
                lines[ln] = parse_throws(line)
            continue
        parsingParameters = False
        parsingThrows = False
        spaces = copy_spaces(line)
        # The Example control word is even more special. I will put
        # the contents from the following line into a code tag (::)
        if control_word == 'Example':
            lines[ln] = spaces + ":" + control_word + ": " + spl[1]
            toinsert.append((ln+1, ""))
            toinsert.append((ln+1, spaces + ' ::'))
            toinsert.append((ln+1, ""))
        elif control_word == 'Parameters':
            lines[ln] = spaces + ":Parameters:" + parse_parameters(spl[1])
            parsingParameters = True
        elif control_word == 'Return':
            lines[ln] = spaces + ":Return: " + parse_typed_line(spl[1])
        elif control_word == "Throws":
            lines[ln] = spaces + ":Throws:" + parse_throws(spl[1])
            parsingThrows = True
        else:
            lines[ln] = spaces + ":" + control_word + ": " + spl[1]
    # Insert collected lines back-to-front so recorded positions stay valid.
    for x in range(len(toinsert)-1, -1, -1):
        pos, txt = toinsert[x]
        lines.insert(pos, txt)
def __process_signature(app, what, name, obj, options, signature, return_annotation):
    """autodoc-process-signature hook.

    When a method's docstring carries exactly one recognizable signature,
    record that fact (consumed by __reformat_lines) and return the parsed
    (params, return annotation) pair; otherwise fall back to the values
    sphinx supplied.  Non-methods are left untouched.
    """
    global _with_only_one_signature_methods
    if what != 'method':
        return
    parsed = split_signature(search_ONLY_signature(name, obj.__doc__))
    if parsed is None:
        return (signature, return_annotation)
    _with_only_one_signature_methods[name] = True
    return parsed
def setup(app):
    """Register the docstring post-processing hooks with sphinx.

    Sphinx invokes these callbacks for every object it documents; we use
    them to rewrite the raw docstrings into nicer reStructuredText.
    """
    # Sphinx would call __process_signature before __reformat_lines, and
    # state is shared between the two.  However __process_signature works
    # only for plain Python methods and is never invoked for methods
    # defined by boost, so it is currently disabled:
    #app.connect('autodoc-process-signature', __process_signature)
    app.connect('autodoc-process-docstring', __reformat_lines)
|
nuncjo/odoo | refs/heads/8.0 | addons/hr_timesheet/wizard/hr_timesheet_sign_in_out.py | 340 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_so_project(osv.osv_memory):
    """Transient wizard: sign the current employee out and log the elapsed
    time as an analytic timesheet line on the chosen project/account."""
    _name = 'hr.sign.out.project'
    _description = 'Sign Out By Project'
    _columns = {
        'account_id': fields.many2one('account.analytic.account', 'Project / Analytic Account', domain=[('type','=','normal')]),
        'info': fields.char('Work Description', required=True),
        'date_start': fields.datetime('Starting Date', readonly=True),
        'date': fields.datetime('Closing Date'),
        'analytic_amount': fields.float('Minimum Analytic Amount'),
        'name': fields.char('Employee\'s Name', required=True, readonly=True),
        'state': fields.related('emp_id', 'state', string='Current Status', type='selection', selection=[('present', 'Present'), ('absent', 'Absent')], required=True, readonly=True),
        'server_date': fields.datetime('Current Date', required=True, readonly=True),
        'emp_id': fields.many2one('hr.employee', 'Employee ID')
    }

    def _get_empid(self, cr, uid, context=None):
        """Return wizard defaults for the employee linked to user `uid`.

        Returns a dict with name/state/emp_id/server_date for the first
        matching employee, or None (implicitly) when no employee is
        linked to this user.
        """
        emp_obj = self.pool.get('hr.employee')
        emp_ids = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if emp_ids:
            # Only the first employee record is used (return inside loop).
            for employee in emp_obj.browse(cr, uid, emp_ids, context=context):
                return {'name': employee.name, 'state': employee.state, 'emp_id': emp_ids[0], 'server_date':time.strftime('%Y-%m-%d %H:%M:%S')}

    def _get_empid2(self, cr, uid, context=None):
        """Extend _get_empid with the start date of the latest attendance.

        NOTE(review): if _get_empid returned None (no employee for this
        user) the res['emp_id'] access below raises TypeError — verify
        callers guarantee an employee exists (see hr_si_project.view_init).
        """
        res = self._get_empid(cr, uid, context=context)
        # Latest attendance row for this employee, newest first.
        cr.execute('select name,action from hr_attendance where employee_id=%s order by name desc limit 1', (res['emp_id'],))
        res['server_date'] = time.strftime('%Y-%m-%d %H:%M:%S')
        date_start = cr.fetchone()
        if date_start:
            res['date_start'] = date_start[0]
        return res

    def default_get(self, cr, uid, fields_list, context=None):
        """Merge employee/attendance defaults into the standard defaults."""
        res = super(hr_so_project, self).default_get(cr, uid, fields_list, context=context)
        res.update(self._get_empid2(cr, uid, context=context))
        return res

    def _write(self, cr, uid, data, emp_id, context=None):
        """Create the analytic timesheet line for the elapsed work time.

        `data` is the wizard browse record; the number of hours is the
        span between date_start and date (or "now" when date is unset),
        optionally rounded to the nearest multiple of analytic_amount.
        """
        timesheet_obj = self.pool.get('hr.analytic.timesheet')
        emp_obj = self.pool.get('hr.employee')
        if context is None:
            context = {}
        # Elapsed time in hours; falls back to the current time when no
        # closing date was entered.
        hour = (time.mktime(time.strptime(data['date'] or time.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')) -
                time.mktime(time.strptime(data['date_start'], '%Y-%m-%d %H:%M:%S'))) / 3600.0
        minimum = data['analytic_amount']
        if minimum:
            # Round to the nearest multiple of `minimum` (half-up).
            hour = round(round((hour + minimum / 2) / minimum) * minimum, 2)
        res = timesheet_obj.default_get(cr, uid, ['product_id','product_uom_id'], context=context)
        if not res['product_uom_id']:
            raise osv.except_osv(_('User Error!'), _('Please define cost unit for this employee.'))
        # Let the timesheet onchange compute the amount for this quantity.
        up = timesheet_obj.on_change_unit_amount(cr, uid, False, res['product_id'], hour,False, res['product_uom_id'])['value']
        res['name'] = data['info']
        res['account_id'] = data['account_id'].id
        res['unit_amount'] = hour
        emp_journal = emp_obj.browse(cr, uid, emp_id, context=context).journal_id
        res['journal_id'] = emp_journal and emp_journal.id or False
        res.update(up)
        # Account onchange may override e.g. the journal; applied last.
        up = timesheet_obj.on_change_account_id(cr, uid, [], res['account_id']).get('value', {})
        res.update(up)
        return timesheet_obj.create(cr, uid, res, context=context)

    def sign_out_result_end(self, cr, uid, ids, context=None):
        """Sign the employee out, log the timesheet line, close the wizard."""
        emp_obj = self.pool.get('hr.employee')
        for data in self.browse(cr, uid, ids, context=context):
            emp_id = data.emp_id.id
            emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'sign_out', 'action_date':data.date})
            self._write(cr, uid, data, emp_id, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def sign_out_result(self, cr, uid, ids, context=None):
        """Toggle attendance, log the timesheet line, close the wizard.

        NOTE(review): unlike sign_out_result_end this passes
        'action': 'action' (toggle) instead of 'sign_out' — presumably
        intentional for the "change task" flow; confirm against the view.
        """
        emp_obj = self.pool.get('hr.employee')
        for data in self.browse(cr, uid, ids, context=context):
            emp_id = data.emp_id.id
            emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'action', 'action_date':data.date})
            self._write(cr, uid, data, emp_id, context=context)
        return {'type': 'ir.actions.act_window_close'}
class hr_si_project(osv.osv_memory):
    """Transient wizard: sign the current employee in on a project."""
    _name = 'hr.sign.in.project'
    _description = 'Sign In By Project'
    _columns = {
        'name': fields.char('Employee\'s Name', readonly=True),
        'state': fields.related('emp_id', 'state', string='Current Status', type='selection', selection=[('present', 'Present'), ('absent', 'Absent')], required=True, readonly=True),
        'date': fields.datetime('Starting Date'),
        'server_date': fields.datetime('Current Date', readonly=True),
        'emp_id': fields.many2one('hr.employee', 'Employee ID')
    }

    def view_init(self, cr, uid, fields, context=None):
        """
        This function checks for precondition before wizard executes
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param fields: List of fields for default value
        @param context: A standard dictionary for contextual values
        """
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if not emp_id:
            raise osv.except_osv(_('User Error!'), _('Please define employee for your user.'))
        return False

    def check_state(self, cr, uid, ids, context=None):
        """Open the sign-in or sign-out form depending on the employee's
        latest attendance action (sign-out last => offer sign-in)."""
        obj_model = self.pool.get('ir.model.data')
        emp_id = self.default_get(cr, uid, ['emp_id'], context)['emp_id']
        # get the latest action (sign_in or out) for this employee
        cr.execute('select action from hr_attendance where employee_id=%s and action in (\'sign_in\',\'sign_out\') order by name desc limit 1', (emp_id,))
        # No attendance yet is treated as "signed out".
        res = (cr.fetchone() or ('sign_out',))[0]
        in_out = (res == 'sign_out') and 'in' or 'out'
        #TODO: invert sign_in et sign_out
        model_data_ids = obj_model.search(cr,uid,[('model','=','ir.ui.view'),('name','=','view_hr_timesheet_sign_%s' % in_out)], context=context)
        resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        return {
            'name': _('Sign in / Sign out'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'hr.sign.%s.project' % in_out,
            'views': [(resource_id,'form')],
            'type': 'ir.actions.act_window',
            'target': 'new'
        }

    def sign_in_result(self, cr, uid, ids, context=None):
        """Record the sign-in attendance and close the wizard."""
        emp_obj = self.pool.get('hr.employee')
        for data in self.browse(cr, uid, ids, context=context):
            emp_id = data.emp_id.id
            emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'sign_in', 'action_date':data.date})
        return {'type': 'ir.actions.act_window_close'}

    def default_get(self, cr, uid, fields_list, context=None):
        """Pre-fill the wizard with the current user's employee data."""
        res = super(hr_si_project, self).default_get(cr, uid, fields_list, context=context)
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if emp_id:
            # Only the first matching employee is used.
            for employee in emp_obj.browse(cr, uid, emp_id, context=context):
                res.update({'name': employee.name, 'state': employee.state, 'emp_id': emp_id[0], 'server_date':time.strftime('%Y-%m-%d %H:%M:%S')})
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
priyaganti/rockstor-core | refs/heads/master | src/rockstor/smart_manager/views/generic_sprobe.py | 2 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf import settings
from django.db.models import Count
import rest_framework_custom as rfc
class GenericSProbeView(rfc.GenericView):
    """Read-only view over a smart-probe model, filtered by query params."""
    content_negotiation_class = rfc.IgnoreClient

    def get_queryset(self):
        """Build the probe queryset.

        Query parameters, checked in priority order:
          * ``group``         -- newest ``limit`` rows per distinct value
          * ``t1`` and ``t2`` -- rows with ``t1 < ts <= t2``
          * ``sortby``        -- delegate to ``_sorted_results``
          * otherwise         -- newest ``limit`` rows overall
        """
        params = self.request.query_params
        limit = int(params.get('limit', settings.REST_FRAMEWORK['MAX_LIMIT']))
        t1 = params.get('t1', None)
        t2 = params.get('t2', None)
        group_field = params.get('group', None)

        if group_field is not None:
            # One slice of the newest rows for every distinct group value.
            distinct_fields = self.model_obj.objects.values(
                group_field).annotate(c=Count(group_field))
            filter_field = '%s__exact' % group_field
            rows = []
            for entry in distinct_fields:
                rows.extend(self.model_obj.objects.filter(
                    **{filter_field: entry[group_field]}).order_by('-ts')[0:limit])
            return rows

        if t1 is not None and t2 is not None:
            return self.model_obj.objects.filter(ts__gt=t1, ts__lte=t2)

        sort_col = params.get('sortby', None)
        if sort_col is not None:
            reverse = params.get('reverse', 'no') == 'yes'
            return self._sorted_results(sort_col, reverse)

        return self.model_obj.objects.all().order_by('-ts')[0:limit]
|
rgommers/scipy | refs/heads/master | scipy/signal/tests/test_filter_design.py | 7 | import warnings
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import (assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_equal, assert_, assert_approx_equal,
assert_allclose, assert_warns, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from numpy import array, spacing, sin, pi, sort, sqrt
from scipy.signal import (argrelextrema, BadCoefficients, bessel, besselap, bilinear,
buttap, butter, buttord, cheb1ap, cheb1ord, cheb2ap,
cheb2ord, cheby1, cheby2, ellip, ellipap, ellipord,
firwin, freqs_zpk, freqs, freqz, freqz_zpk,
gammatone, group_delay, iircomb, iirdesign, iirfilter,
iirnotch, iirpeak, lp2bp, lp2bs, lp2hp, lp2lp, normalize,
sos2tf, sos2zpk, sosfreqz, tf2sos, tf2zpk, zpk2sos,
zpk2tf, bilinear_zpk, lp2lp_zpk, lp2hp_zpk, lp2bp_zpk,
lp2bs_zpk)
from scipy.signal.filter_design import (_cplxreal, _cplxpair, _norm_factor,
_bessel_poly, _bessel_zeros)
try:
import mpmath
except ImportError:
mpmath = None
def mpmath_check(min_ver):
    """Return a pytest skip-marker requiring mpmath >= *min_ver*.

    Skips when mpmath is missing entirely or older than *min_ver*.
    """
    too_old = (mpmath is None or
               LooseVersion(mpmath.__version__) < LooseVersion(min_ver))
    return pytest.mark.skipif(
        too_old, reason="mpmath version >= %s required" % min_ver)
class TestCplxPair:
    """Tests for the private helper ``_cplxpair``: sorting an array into
    complex-conjugate pairs followed by the real values."""

    def test_trivial_input(self):
        # Empty and scalar inputs pass straight through.
        assert_equal(_cplxpair([]).size, 0)
        assert_equal(_cplxpair(1), 1)

    def test_output_order(self):
        """Pairs come out conjugate-adjacent and sorted; reals at the end."""
        assert_allclose(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j])

        a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2]
        b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2]
        assert_allclose(_cplxpair(a), b)

        # points spaced around the unit circle
        z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7)
        z1 = np.copy(z)
        # Result must be independent of the input ordering.
        np.random.shuffle(z)
        assert_allclose(_cplxpair(z), z1)
        np.random.shuffle(z)
        assert_allclose(_cplxpair(z), z1)
        np.random.shuffle(z)
        assert_allclose(_cplxpair(z), z1)

        # Should be able to pair up all the conjugates
        x = np.random.rand(10000) + 1j * np.random.rand(10000)
        y = x.conj()
        z = np.random.rand(10000)
        x = np.concatenate((x, y, z))
        np.random.shuffle(x)
        c = _cplxpair(x)

        # Every other element of head should be conjugates:
        assert_allclose(c[0:20000:2], np.conj(c[1:20000:2]))
        # Real parts of head should be in sorted order:
        assert_allclose(c[0:20000:2].real, np.sort(c[0:20000:2].real))
        # Tail should be sorted real numbers:
        assert_allclose(c[20000:], np.sort(c[20000:]))

    def test_real_integer_input(self):
        # Purely real integer input is just sorted.
        assert_array_equal(_cplxpair([2, 0, 1]), [0, 1, 2])

    def test_tolerances(self):
        """Values within `tol` of a conjugate/of the real axis still pair."""
        eps = spacing(1)
        assert_allclose(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps),
                        [-1j, 1j, 1+1j*eps])

        # sorting close to 0
        assert_allclose(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j])
        assert_allclose(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j])
        assert_allclose(_cplxpair([+1j, -1j]), [-1j, +1j])

    def test_unmatched_conjugates(self):
        """Any complex value without an exact conjugate partner raises."""
        # 1+2j is unmatched
        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j])

        # 1+2j and 1-3j are unmatched
        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j])

        # 1+3j is unmatched
        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j])

        # Not conjugates
        assert_raises(ValueError, _cplxpair, [4+5j, 4+5j])
        assert_raises(ValueError, _cplxpair, [1-7j, 1-7j])

        # No pairs
        assert_raises(ValueError, _cplxpair, [1+3j])
        assert_raises(ValueError, _cplxpair, [1-3j])
class TestCplxReal:
    """Tests for the private helper ``_cplxreal``: splitting an array into
    (complex values from the upper half-plane, real values)."""

    def test_trivial_input(self):
        # Empty input -> two empty parts; real scalar -> goes to reals.
        assert_equal(_cplxreal([]), ([], []))
        assert_equal(_cplxreal(1), ([], [1]))

    def test_output_order(self):
        """Complex part keeps one of each conjugate pair, sorted; reals sorted."""
        zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1])))
        assert_allclose(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1])

        eps = spacing(1)

        a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, -eps - 1j,
             1, 4, 2, 3, 0, 0,
             2+3j, 2-3j,
             1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j,  # sorts out of order
             3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j,
             2-3j, 2+3j]
        zc, zr = _cplxreal(a)
        assert_allclose(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j,
                             3+1j])
        assert_allclose(zr, [0, 0, 1, 2, 3, 4])

        z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j,
                   0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j,
                   4+eps-2j, 4-1j, 4-eps+2j])
        zc, zr = _cplxreal(z)
        assert_allclose(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j,
                             4+2j])
        assert_equal(zr, [])

    def test_unmatched_conjugates(self):
        """Any complex value without a conjugate partner raises ValueError."""
        # 1+2j is unmatched
        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j])

        # 1+2j and 1-3j are unmatched
        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j])

        # 1+3j is unmatched
        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j])

        # No pairs
        assert_raises(ValueError, _cplxreal, [1+3j])
        assert_raises(ValueError, _cplxreal, [1-3j])

    def test_real_integer_input(self):
        # All-real integer input: empty complex part, sorted real part.
        zc, zr = _cplxreal([2, 0, 1, 4])
        assert_array_equal(zc, [])
        assert_array_equal(zr, [0, 1, 2, 4])
class TestTf2zpk:
    """Tests for tf2zpk (transfer function -> zeros/poles/gain)."""

    @pytest.mark.parametrize('dt', (np.float64, np.complex128))
    def test_simple(self, dt):
        """Round-trip: polynomials built from known z/p recover them."""
        z_r = np.array([0.5, -0.5])
        p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
        # Sort the zeros/poles so that we don't fail the test if the order
        # changes
        z_r.sort()
        p_r.sort()
        b = np.poly(z_r).astype(dt)
        a = np.poly(p_r).astype(dt)
        z, p, k = tf2zpk(b, a)
        z.sort()
        # The real part of `p` is ~0.0, so sort by imaginary part
        p = p[np.argsort(p.imag)]
        assert_array_almost_equal(z, z_r)
        assert_array_almost_equal(p, p_r)
        assert_array_almost_equal(k, 1.)
        # Gain dtype must follow the input coefficient dtype.
        assert k.dtype == dt

    def test_bad_filter(self):
        # Regression test for #651: better handling of badly conditioned
        # filter coefficients.
        with suppress_warnings():
            # Escalate the warning so the bad input is detected as an error.
            warnings.simplefilter("error", BadCoefficients)
            assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0])
class TestZpk2Tf:
    """Tests for zpk2tf (zeros/poles/gain -> transfer function)."""

    def test_identity(self):
        """zpk2tf of no zeros, no poles and unit gain is b = a = [1.]."""
        num, den = zpk2tf([], [], 1.)
        expected = np.array([1.])  # desired result for both polynomials
        # Checking the *type* of the return values is a regression test
        # for ticket #1095. In the case p=[], zpk2tf used to return the
        # scalar 1.0 instead of array([1.0]).
        assert_array_equal(num, expected)
        assert_(isinstance(num, np.ndarray))
        assert_array_equal(den, expected)
        assert_(isinstance(den, np.ndarray))
class TestSos2Zpk:
    """Tests for sos2zpk (second-order sections -> zeros/poles/gain)."""

    def test_basic(self):
        """Known SOS matrices map to known zero/pole/gain sets."""
        sos = [[1, 0, 1, 1, 0, -0.81],
               [1, 0, 0, 1, 0, +0.49]]
        z, p, k = sos2zpk(sos)
        z2 = [1j, -1j, 0, 0]
        p2 = [0.9, -0.9, 0.7j, -0.7j]
        k2 = 1
        # Sorted comparison: ordering of z/p is not part of the contract.
        assert_array_almost_equal(sort(z), sort(z2), decimal=4)
        assert_array_almost_equal(sort(p), sort(p2), decimal=4)
        assert_array_almost_equal(k, k2)

        sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873],
               [1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873],
               [1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]]
        z, p, k = sos2zpk(sos)
        z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
              0.8090 - 0.5878j, -1.0000 + 0.0000j, 0]
        p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
              0.7922 - 0.5755j, -0.9791 + 0.0000j, 0]
        k2 = 1
        assert_array_almost_equal(sort(z), sort(z2), decimal=4)
        assert_array_almost_equal(sort(p), sort(p2), decimal=4)

        sos = array([[1, 2, 3, 1, 0.2, 0.3],
                     [4, 5, 6, 1, 0.4, 0.5]])
        z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j,
                   -0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j])
        p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j,
                   -0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j])
        k = 4
        z2, p2, k2 = sos2zpk(sos)
        # _cplxpair puts both sides in a canonical conjugate-pair order.
        assert_allclose(_cplxpair(z2), z)
        assert_allclose(_cplxpair(p2), p)
        assert_allclose(k2, k)

    def test_fewer_zeros(self):
        """Test not the expected number of p/z (effectively at origin)."""
        sos = butter(3, 0.1, output='sos')
        z, p, k = sos2zpk(sos)
        assert len(z) == 4
        assert len(p) == 4

        sos = butter(12, [5., 30.], 'bandpass', fs=1200., analog=False,
                     output='sos')
        # A badly conditioned design: warning expected, counts still match.
        with pytest.warns(BadCoefficients, match='Badly conditioned'):
            z, p, k = sos2zpk(sos)
        assert len(z) == 24
        assert len(p) == 24
class TestSos2Tf:
    """Tests for sos2tf (second-order sections -> transfer function)."""

    def test_basic(self):
        """The num/den polynomials are the products of the two sections."""
        sections = [[1, 1, 1, 1, 0, -1],
                    [-2, 3, 1, 1, 10, 1]]
        num, den = sos2tf(sections)
        assert_array_almost_equal(num, [-2, 1, 2, 4, 1])
        assert_array_almost_equal(den, [1, 10, 0, -10, -1])
class TestTf2Sos:
    """Tests for tf2sos (transfer function -> second-order sections)."""

    def test_basic(self):
        num = [2, 16, 44, 56, 32]
        den = [3, 3, -15, 18, -12]
        sos = tf2sos(num, den)
        sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000],
                [1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]]
        assert_array_almost_equal(sos, sos2, decimal=4)

        b = [1, -3, 11, -27, 18]
        a = [16, 12, 2, -4, -1]
        sos = tf2sos(b, a)
        sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250],
                [1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]]
        # NOTE(review): the assertion below is commented out, so this
        # second case is effectively untested — presumably because the
        # section ordering produced by tf2sos differs from sos2; confirm
        # and either fix the expectation or remove the dead fixture.
        # assert_array_almost_equal(sos, sos2, decimal=4)
class TestZpk2Sos:
    """Tests for zpk2sos (zeros/poles/gain -> second-order sections),
    including both pole/zero pairing strategies and comparisons against
    Octave/MATLAB reference output."""

    @pytest.mark.parametrize('dt', 'fdgFDG')
    @pytest.mark.parametrize('pairing', ('nearest', 'keep_odd'))
    def test_dtypes(self, dt, pairing):
        """All real/complex float dtypes produce the same sections."""
        z = np.array([-1, -1]).astype(dt)
        ct = dt.upper()  # the poles have to be complex
        p = np.array([0.57149 + 0.29360j, 0.57149 - 0.29360j]).astype(ct)
        k = np.array(1).astype(dt)
        sos = zpk2sos(z, p, k, pairing=pairing)
        sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]]  # octave & MATLAB
        assert_array_almost_equal(sos, sos2, decimal=4)

    def test_basic(self):
        for pairing in ('nearest', 'keep_odd'):
            #
            # Cases that match octave
            #

            z = [-1, -1]
            p = [0.57149 + 0.29360j, 0.57149 - 0.29360j]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]]  # octave & MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = [1j, -1j]
            p = [0.9, -0.9, 0.7j, -0.7j]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1, 0, 1, 1, 0, +0.49],
                    [1, 0, 0, 1, 0, -0.81]]  # octave
            # sos2 = [[0, 0, 1, 1, -0.9, 0],
            #         [1, 0, 1, 1, 0.9, 0]]  # MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = []
            p = [0.8, -0.5+0.25j, -0.5-0.25j]
            k = 1.
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1., 0., 0., 1., 1., 0.3125],
                    [1., 0., 0., 1., -0.8, 0.]]  # octave, MATLAB fails
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = [1., 1., 0.9j, -0.9j]
            p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1, 0, 0.81, 1, -0.2, 0.82],
                    [1, -2, 1, 1, -1.98, 0.9802]]  # octave
            # sos2 = [[1, -2, 1, 1, -0.2, 0.82],
            #         [1, 0, 0.81, 1, -1.98, 0.9802]]  # MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            # Odd-order case: the two pairing strategies differ here.
            z = [0.9+0.1j, 0.9-0.1j, -0.9]
            p = [0.75+0.25j, 0.75-0.25j, 0.9]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            if pairing == 'keep_odd':
                sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625],
                        [1, 0.9, 0, 1, -0.9, 0]]  # octave; MATLAB fails
                assert_array_almost_equal(sos, sos2, decimal=4)
            else:  # pairing == 'nearest'
                sos2 = [[1, 0.9, 0, 1, -1.5, 0.625],
                        [1, -1.8, 0.82, 1, -0.9, 0]]  # our algorithm
                assert_array_almost_equal(sos, sos2, decimal=4)

            #
            # Cases that differ from octave:
            #

            z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
                 +0.8090 - 0.5878j, -1.0000 + 0.0000j]
            p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
                 +0.7922 - 0.5755j, -0.9791 + 0.0000j]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            # sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870],
            #         [1, -1.618, 1, 1, -1.5844, 0.95878],
            #         [1, 1, 0, 1, 0.9791, 0]]  # octave, MATLAB fails
            sos2 = [[1, 1, 0, 1, +0.97915, 0],
                    [1, 0.61803, 1, 1, +0.60515, 0.95873],
                    [1, -1.61803, 1, 1, -1.58430, 0.95873]]
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = [-1 - 1.4142j, -1 + 1.4142j,
                 -0.625 - 1.0533j, -0.625 + 1.0533j]
            p = [-0.2 - 0.6782j, -0.2 + 0.6782j,
                 -0.1 - 0.5385j, -0.1 + 0.5385j]
            k = 4
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[4, 8, 12, 1, 0.2, 0.3],
                    [1, 1.25, 1.5, 1, 0.4, 0.5]]  # MATLAB
            # sos2 = [[4, 8, 12, 1, 0.4, 0.5],
            #         [1, 1.25, 1.5, 1, 0.2, 0.3]]  # octave
            assert_allclose(sos, sos2, rtol=1e-4, atol=1e-4)

            z = []
            p = [0.2, -0.5+0.25j, -0.5-0.25j]
            k = 1.
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1., 0., 0., 1., -0.2, 0.],
                    [1., 0., 0., 1., 1., 0.3125]]
            # sos2 = [[1., 0., 0., 1., 1., 0.3125],
            #         [1., 0., 0., 1., -0.2, 0]]  # octave, MATLAB fails
            assert_array_almost_equal(sos, sos2, decimal=4)

            # The next two examples are adapted from Leland B. Jackson,
            # "Digital Filters and Signal Processing (1995) p.400:
            # http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false

            deg2rad = np.pi / 180.
            k = 1.

            # first example
            thetas = [22.5, 45, 77.5]
            mags = [0.8, 0.6, 0.9]
            z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas])
            z = np.concatenate((z, np.conj(z)))
            p = np.array([mag * np.exp(theta * deg2rad * 1j)
                          for theta, mag in zip(thetas, mags)])
            p = np.concatenate((p, np.conj(p)))
            sos = zpk2sos(z, p, k)
            # sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81],  # octave,
            #         [1, -1.41421, 1, 1, -0.84853, 0.36],  # MATLAB fails
            #         [1, -1.84776, 1, 1, -1.47821, 0.64]]
            # Note that pole-zero pairing matches, but ordering is different
            sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36],
                    [1, -1.84776, 1, 1, -1.47821, 0.64],
                    [1, -0.43288, 1, 1, -0.38959, 0.81]]
            assert_array_almost_equal(sos, sos2, decimal=4)

            # second example
            z = np.array([np.exp(theta * deg2rad * 1j)
                          for theta in (85., 10.)])
            z = np.concatenate((z, np.conj(z), [1, -1]))
            sos = zpk2sos(z, p, k)

            # sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81],  # octave "wrong",
            #         [1, -1.96962, 1, 1, -0.84853, 0.36],  # MATLAB fails
            #         [1, 0, -1, 1, -1.47821, 0.64000]]
            # Our pole-zero pairing matches the text, Octave does not
            sos2 = [[1, 0, -1, 1, -0.84853, 0.36],
                    [1, -1.96962, 1, 1, -1.47821, 0.64],
                    [1, -0.17431, 1, 1, -0.38959, 0.81]]
            assert_array_almost_equal(sos, sos2, decimal=4)
class TestFreqs:
    """Tests for freqs (analog frequency response from b/a coefficients)."""

    def test_basic(self):
        # A flat (all-ones) response for a unity transfer function.
        _, h = freqs([1.0], [1.0], worN=8)
        assert_array_almost_equal(h, np.ones(8))

    def test_output(self):
        # 1st order low-pass filter: H(s) = 1 / (s + 1)
        w = [0.1, 1, 10, 100]
        num = [1]
        den = [1, 1]
        w, H = freqs(num, den, worN=w)
        s = w * 1j
        expected = 1 / (s + 1)
        assert_array_almost_equal(H.real, expected.real)
        assert_array_almost_equal(H.imag, expected.imag)

    def test_freq_range(self):
        # Test that freqresp() finds a reasonable frequency range.
        # 1st order low-pass filter: H(s) = 1 / (s + 1)
        # Expected range is from 0.01 to 10.
        num = [1]
        den = [1, 1]
        n = 10
        expected_w = np.logspace(-2, 1, n)
        w, H = freqs(num, den, worN=n)
        assert_array_almost_equal(w, expected_w)

    def test_plot(self):
        """The `plot` callback receives the computed (w, h) arrays."""

        def plot(w, h):
            assert_array_almost_equal(h, np.ones(8))

        # Exceptions raised inside the callback must propagate.
        assert_raises(ZeroDivisionError, freqs, [1.0], [1.0], worN=8,
                      plot=lambda w, h: 1 / 0)
        freqs([1.0], [1.0], worN=8, plot=plot)

    def test_backward_compat(self):
        # For backward compatibility, test if None act as a wrapper for default
        w1, h1 = freqs([1.0], [1.0])
        w2, h2 = freqs([1.0], [1.0], None)
        assert_array_almost_equal(w1, w2)
        assert_array_almost_equal(h1, h2)

    def test_w_or_N_types(self):
        """`worN` accepts all int flavors (count) and floats (frequency)."""
        # Measure at 8 equally-spaced points
        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
                  np.array(8)):
            w, h = freqs([1.0], [1.0], worN=N)
            assert_equal(len(w), 8)
            assert_array_almost_equal(h, np.ones(8))

        # Measure at frequency 8 rad/sec
        for w in (8.0, 8.0+0j):
            w_out, h = freqs([1.0], [1.0], worN=w)
            assert_array_almost_equal(w_out, [8])
            assert_array_almost_equal(h, [1])
class TestFreqs_zpk:
    """Tests for freqs_zpk (analog frequency response from z/p/k form)."""

    def test_basic(self):
        # Flat response for a pole/zero pair that cancels, unit gain.
        _, h = freqs_zpk([1.0], [1.0], [1.0], worN=8)
        assert_array_almost_equal(h, np.ones(8))

    def test_output(self):
        # 1st order low-pass filter: H(s) = 1 / (s + 1)
        w = [0.1, 1, 10, 100]
        z = []
        p = [-1]
        k = 1
        w, H = freqs_zpk(z, p, k, worN=w)
        s = w * 1j
        expected = 1 / (s + 1)
        assert_array_almost_equal(H.real, expected.real)
        assert_array_almost_equal(H.imag, expected.imag)

    def test_freq_range(self):
        # Test that freqresp() finds a reasonable frequency range.
        # 1st order low-pass filter: H(s) = 1 / (s + 1)
        # Expected range is from 0.01 to 10.
        z = []
        p = [-1]
        k = 1
        n = 10
        expected_w = np.logspace(-2, 1, n)
        w, H = freqs_zpk(z, p, k, worN=n)
        assert_array_almost_equal(w, expected_w)

    def test_vs_freqs(self):
        """freqs_zpk must agree with freqs for the same filter."""
        b, a = cheby1(4, 5, 100, analog=True, output='ba')
        z, p, k = cheby1(4, 5, 100, analog=True, output='zpk')

        w1, h1 = freqs(b, a)
        w2, h2 = freqs_zpk(z, p, k)
        assert_allclose(w1, w2)
        assert_allclose(h1, h2, rtol=1e-6)

    def test_backward_compat(self):
        # For backward compatibility, test if None act as a wrapper for default
        w1, h1 = freqs_zpk([1.0], [1.0], [1.0])
        w2, h2 = freqs_zpk([1.0], [1.0], [1.0], None)
        assert_array_almost_equal(w1, w2)
        assert_array_almost_equal(h1, h2)

    def test_w_or_N_types(self):
        """`worN` accepts all int flavors (count) and floats (frequency)."""
        # Measure at 8 equally-spaced points
        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
                  np.array(8)):
            w, h = freqs_zpk([], [], 1, worN=N)
            assert_equal(len(w), 8)
            assert_array_almost_equal(h, np.ones(8))

        # Measure at frequency 8 rad/sec
        for w in (8.0, 8.0+0j):
            w_out, h = freqs_zpk([], [], 1, worN=w)
            assert_array_almost_equal(w_out, [8])
            assert_array_almost_equal(h, [1])
class TestFreqz:
    def test_ticket1441(self):
        """Regression test for ticket 1441."""
        # Because freqz previously used arange instead of linspace,
        # when N was large, it would return one more point than
        # requested.
        N = 100000
        w, h = freqz([1.0], worN=N)
        assert_equal(w.shape, (N,))
    def test_basic(self):
        """Default half-circle grid, worN=0 edge case, and whole=True."""
        w, h = freqz([1.0], worN=8)
        assert_array_almost_equal(w, np.pi * np.arange(8) / 8.)
        assert_array_almost_equal(h, np.ones(8))
        w, h = freqz([1.0], worN=9)
        assert_array_almost_equal(w, np.pi * np.arange(9) / 9.)
        assert_array_almost_equal(h, np.ones(9))

        for a in [1, np.ones(2)]:
            # worN=0 must return empty arrays with complex dtype for h.
            w, h = freqz(np.ones(2), a, worN=0)
            assert_equal(w.shape, (0,))
            assert_equal(h.shape, (0,))
            assert_equal(h.dtype, np.dtype('complex128'))

        t = np.linspace(0, 1, 4, endpoint=False)
        for b, a, h_whole in zip(
                ([1., 0, 0, 0], np.sin(2 * np.pi * t)),
                ([1., 0, 0, 0], [0.5, 0, 0, 0]),
                ([1., 1., 1., 1.], [0, -4j, 0, 4j])):
            w, h = freqz(b, a, worN=4, whole=True)
            expected_w = np.linspace(0, 2 * np.pi, 4, endpoint=False)
            assert_array_almost_equal(w, expected_w)
            assert_array_almost_equal(h, h_whole)

            # simultaneously check int-like support
            w, h = freqz(b, a, worN=np.int32(4), whole=True)
            assert_array_almost_equal(w, expected_w)
            assert_array_almost_equal(h, h_whole)

            # Passing an explicit frequency array gives the same result.
            w, h = freqz(b, a, worN=w, whole=True)
            assert_array_almost_equal(w, expected_w)
            assert_array_almost_equal(h, h_whole)
    def test_basic_whole(self):
        """whole=True spans the full circle [0, 2*pi)."""
        w, h = freqz([1.0], worN=8, whole=True)
        assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8)
        assert_array_almost_equal(h, np.ones(8))
    def test_plot(self):
        """The `plot` callback receives the computed (w, h) arrays."""

        def plot(w, h):
            assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
            assert_array_almost_equal(h, np.ones(8))

        # Exceptions raised inside the callback must propagate.
        assert_raises(ZeroDivisionError, freqz, [1.0], worN=8,
                      plot=lambda w, h: 1 / 0)
        freqz([1.0], worN=8, plot=plot)
    def test_fft_wrapping(self):
        """The FFT fast path (worN == number of taps) must match polyval."""
        # Some simple real FIR filters
        bs = list()  # filters
        as_ = list()
        hs_whole = list()
        hs_half = list()
        # 3 taps
        t = np.linspace(0, 1, 3, endpoint=False)
        bs.append(np.sin(2 * np.pi * t))
        as_.append(3.)
        hs_whole.append([0, -0.5j, 0.5j])
        hs_half.append([0, np.sqrt(1./12.), -0.5j])
        # 4 taps
        t = np.linspace(0, 1, 4, endpoint=False)
        bs.append(np.sin(2 * np.pi * t))
        as_.append(0.5)
        hs_whole.append([0, -4j, 0, 4j])
        hs_half.append([0, np.sqrt(8), -4j, -np.sqrt(8)])
        del t
        for ii, b in enumerate(bs):
            # whole
            a = as_[ii]
            expected_w = np.linspace(0, 2 * np.pi, len(b), endpoint=False)
            w, h = freqz(b, a, worN=expected_w, whole=True)  # polyval
            err_msg = 'b = %s, a=%s' % (b, a)
            assert_array_almost_equal(w, expected_w, err_msg=err_msg)
            assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg)
            w, h = freqz(b, a, worN=len(b), whole=True)  # FFT
            assert_array_almost_equal(w, expected_w, err_msg=err_msg)
            assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg)
            # non-whole
            expected_w = np.linspace(0, np.pi, len(b), endpoint=False)
            w, h = freqz(b, a, worN=expected_w, whole=False)  # polyval
            assert_array_almost_equal(w, expected_w, err_msg=err_msg)
            assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg)
            w, h = freqz(b, a, worN=len(b), whole=False)  # FFT
            assert_array_almost_equal(w, expected_w, err_msg=err_msg)
            assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg)

        # some random FIR filters (real + complex)
        # assume polyval is accurate
        rng = np.random.RandomState(0)
        for ii in range(2, 10):  # number of taps
            b = rng.randn(ii)
            for kk in range(2):
                a = rng.randn(1) if kk == 0 else rng.randn(3)
                for jj in range(2):
                    if jj == 1:
                        # Second pass: make the taps complex.
                        b = b + rng.randn(ii) * 1j
                    # whole
                    expected_w = np.linspace(0, 2 * np.pi, ii, endpoint=False)
                    w, expected_h = freqz(b, a, worN=expected_w, whole=True)
                    assert_array_almost_equal(w, expected_w)
                    w, h = freqz(b, a, worN=ii, whole=True)
                    assert_array_almost_equal(w, expected_w)
                    assert_array_almost_equal(h, expected_h)
                    # half
                    expected_w = np.linspace(0, np.pi, ii, endpoint=False)
                    w, expected_h = freqz(b, a, worN=expected_w, whole=False)
                    assert_array_almost_equal(w, expected_w)
                    w, h = freqz(b, a, worN=ii, whole=False)
                    assert_array_almost_equal(w, expected_w)
                    assert_array_almost_equal(h, expected_h)
    def test_broadcasting1(self):
        # Test broadcasting with worN an integer or a 1-D array,
        # b and a are n-dimensional arrays.
        np.random.seed(123)
        b = np.random.rand(3, 5, 1)
        a = np.random.rand(2, 1)
        for whole in [False, True]:
            # Test with worN being integers (one fast for FFT and one not),
            # a 1-D array, and an empty array.
            for worN in [16, 17, np.linspace(0, 1, 10), np.array([])]:
                w, h = freqz(b, a, worN=worN, whole=whole)
                for k in range(b.shape[1]):
                    # Each broadcast slice must equal the 1-D computation.
                    bk = b[:, k, 0]
                    ak = a[:, 0]
                    ww, hh = freqz(bk, ak, worN=worN, whole=whole)
                    assert_allclose(ww, w)
                    assert_allclose(hh, h[k])
    def test_broadcasting2(self):
        # Test broadcasting with worN an integer or a 1-D array,
        # b is an n-dimensional array, and a is left at the default value.
        np.random.seed(123)
        b = np.random.rand(3, 5, 1)
        for whole in [False, True]:
            for worN in [16, 17, np.linspace(0, 1, 10)]:
                w, h = freqz(b, worN=worN, whole=whole)
                for k in range(b.shape[1]):
                    # Each broadcast slice must equal the 1-D computation.
                    bk = b[:, k, 0]
                    ww, hh = freqz(bk, worN=worN, whole=whole)
                    assert_allclose(ww, w)
                    assert_allclose(hh, h[k])
def test_broadcasting3(self):
# Test broadcasting where b.shape[-1] is the same length
# as worN, and a is left at the default value.
np.random.seed(123)
N = 16
b = np.random.rand(3, N)
for whole in [False, True]:
for worN in [N, np.linspace(0, 1, N)]:
w, h = freqz(b, worN=worN, whole=whole)
assert_equal(w.size, N)
for k in range(N):
bk = b[:, k]
ww, hh = freqz(bk, worN=w[k], whole=whole)
assert_allclose(ww, w[k])
assert_allclose(hh, h[k])
    def test_broadcasting4(self):
        """freqz with a 2-D worN array: w keeps worN's shape and h gains a
        leading broadcast axis."""
        # Test broadcasting with worN a 2-D array.
        np.random.seed(123)
        b = np.random.rand(4, 2, 1, 1)
        a = np.random.rand(5, 2, 1, 1)
        for whole in [False, True]:
            # Include an empty 2-D worN to exercise the degenerate case.
            for worN in [np.random.rand(6, 7), np.empty((6, 0))]:
                w, h = freqz(b, a, worN=worN, whole=whole)
                # Returned frequencies keep worN's 2-D shape; h has one
                # extra leading axis for the broadcast filter dimension.
                assert_allclose(w, worN, rtol=1e-14)
                assert_equal(h.shape, (2,) + worN.shape)
                for k in range(2):
                    # Flattened evaluation of each scalar filter must agree
                    # with the matching slice of the broadcast result.
                    ww, hh = freqz(b[:, k, 0, 0], a[:, k, 0, 0],
                                   worN=worN.ravel(),
                                   whole=whole)
                    assert_allclose(ww, worN.ravel(), rtol=1e-14)
                    assert_allclose(hh, h[k, :, :].ravel())
def test_backward_compat(self):
# For backward compatibility, test if None act as a wrapper for default
w1, h1 = freqz([1.0], 1)
w2, h2 = freqz([1.0], 1, None)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(h1, h2)
    def test_fs_param(self):
        """With fs given, freqz must return the same h but with w expressed
        in units of fs (Hz) rather than rad/sample."""
        fs = 900
        b = [0.039479155677484369, 0.11843746703245311, 0.11843746703245311,
             0.039479155677484369]
        a = [1.0, -1.3199152021838287, 0.80341991081938424,
             -0.16767146321568049]

        # N = None, whole=False
        w1, h1 = freqz(b, a, fs=fs)
        w2, h2 = freqz(b, a)
        assert_allclose(h1, h2)
        # Default grid scales to [0, fs/2) with 512 points.
        assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False))

        # N = None, whole=True
        w1, h1 = freqz(b, a, whole=True, fs=fs)
        w2, h2 = freqz(b, a, whole=True)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False))

        # N = 5, whole=False
        w1, h1 = freqz(b, a, 5, fs=fs)
        w2, h2 = freqz(b, a, 5)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False))

        # N = 5, whole=True
        w1, h1 = freqz(b, a, 5, whole=True, fs=fs)
        w2, h2 = freqz(b, a, 5, whole=True)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False))

        # w is an array_like
        for w in ([123], (123,), np.array([123]), (50, 123, 230),
                  np.array([50, 123, 230])):
            # Explicit frequencies in Hz must match evaluation at the
            # equivalent normalized frequencies 2*pi*w/fs.
            w1, h1 = freqz(b, a, w, fs=fs)
            w2, h2 = freqz(b, a, 2*pi*np.array(w)/fs)
            assert_allclose(h1, h2)
            assert_allclose(w, w1)
    def test_w_or_N_types(self):
        """worN must accept any integer type as a point count and any
        real/complex scalar as a single frequency."""
        # Measure at 7 (polyval) or 8 (fft) equally-spaced points
        for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7),
                  np.array(7),
                  8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
                  np.array(8)):
            w, h = freqz([1.0], worN=N)
            assert_array_almost_equal(w, np.pi * np.arange(N) / N)
            assert_array_almost_equal(h, np.ones(N))

            w, h = freqz([1.0], worN=N, fs=100)
            assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False))
            assert_array_almost_equal(h, np.ones(N))

        # Measure at frequency 8 Hz
        for w in (8.0, 8.0+0j):
            # Only makes sense when fs is specified
            w_out, h = freqz([1.0], worN=w, fs=100)
            assert_array_almost_equal(w_out, [8])
            assert_array_almost_equal(h, [1])
def test_nyquist(self):
w, h = freqz([1.0], worN=8, include_nyquist=True)
assert_array_almost_equal(w, np.pi * np.arange(8) / 7.)
assert_array_almost_equal(h, np.ones(8))
w, h = freqz([1.0], worN=9, include_nyquist=True)
assert_array_almost_equal(w, np.pi * np.arange(9) / 8.)
assert_array_almost_equal(h, np.ones(9))
for a in [1, np.ones(2)]:
w, h = freqz(np.ones(2), a, worN=0, include_nyquist=True)
assert_equal(w.shape, (0,))
assert_equal(h.shape, (0,))
assert_equal(h.dtype, np.dtype('complex128'))
w1, h1 = freqz([1.0], worN=8, whole = True, include_nyquist=True)
w2, h2 = freqz([1.0], worN=8, whole = True, include_nyquist=False)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(h1, h2)
class TestSOSFreqz:
    """Tests for sosfreqz (frequency response of second-order sections)."""
    def test_sosfreqz_basic(self):
        """sosfreqz must agree with freqz for equivalent low-order designs."""
        # Compare the results of freqz and sosfreqz for a low order
        # Butterworth filter.
        N = 500
        b, a = butter(4, 0.2)
        sos = butter(4, 0.2, output='sos')
        w, h = freqz(b, a, worN=N)
        w2, h2 = sosfreqz(sos, worN=N)
        assert_equal(w2, w)
        assert_allclose(h2, h, rtol=1e-10, atol=1e-14)
        b, a = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass')
        sos = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass', output='sos')
        w, h = freqz(b, a, worN=N)
        w2, h2 = sosfreqz(sos, worN=N)
        assert_equal(w2, w)
        assert_allclose(h2, h, rtol=1e-10, atol=1e-14)
        # must have at least one section
        assert_raises(ValueError, sosfreqz, sos[:0])
    def test_sosfrez_design(self):
        """Check designed SOS filters' responses against their specs."""
        # NOTE(review): method name has a typo ("sosfrez"); kept as-is so
        # selecting the test by name keeps working.
        # Compare sosfreqz output against expected values for different
        # filter types
        # from cheb2ord
        N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
        sos = cheby2(N, 60, Wn, 'stop', output='sos')
        w, h = sosfreqz(sos)
        h = np.abs(h)
        w /= np.pi
        # Passband ripple within 3 dB; stopband magnitude below -60 dB.
        assert_allclose(20 * np.log10(h[w <= 0.1]), 0, atol=3.01)
        assert_allclose(20 * np.log10(h[w >= 0.6]), 0., atol=3.01)
        assert_allclose(h[(w >= 0.2) & (w <= 0.5)], 0., atol=1e-3)  # <= -60 dB
        N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 150)
        sos = cheby2(N, 150, Wn, 'stop', output='sos')
        w, h = sosfreqz(sos)
        dB = 20*np.log10(np.abs(h))
        w /= np.pi
        assert_allclose(dB[w <= 0.1], 0, atol=3.01)
        assert_allclose(dB[w >= 0.6], 0., atol=3.01)
        assert_array_less(dB[(w >= 0.2) & (w <= 0.5)], -149.9)
        # from cheb1ord
        N, Wn = cheb1ord(0.2, 0.3, 3, 40)
        sos = cheby1(N, 3, Wn, 'low', output='sos')
        w, h = sosfreqz(sos)
        h = np.abs(h)
        w /= np.pi
        assert_allclose(20 * np.log10(h[w <= 0.2]), 0, atol=3.01)
        assert_allclose(h[w >= 0.3], 0., atol=1e-2)  # <= -40 dB
        N, Wn = cheb1ord(0.2, 0.3, 1, 150)
        sos = cheby1(N, 1, Wn, 'low', output='sos')
        w, h = sosfreqz(sos)
        dB = 20*np.log10(np.abs(h))
        w /= np.pi
        assert_allclose(dB[w <= 0.2], 0, atol=1.01)
        assert_array_less(dB[w >= 0.3], -149.9)
        # adapted from ellipord
        N, Wn = ellipord(0.3, 0.2, 3, 60)
        sos = ellip(N, 0.3, 60, Wn, 'high', output='sos')
        w, h = sosfreqz(sos)
        h = np.abs(h)
        w /= np.pi
        assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01)
        assert_allclose(h[w <= 0.1], 0., atol=1.5e-3)  # <= -60 dB (approx)
        # adapted from buttord
        N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 40)
        sos = butter(N, Wn, 'band', output='sos')
        w, h = sosfreqz(sos)
        h = np.abs(h)
        w /= np.pi
        assert_allclose(h[w <= 0.14], 0., atol=1e-2)  # <= -40 dB
        assert_allclose(h[w >= 0.6], 0., atol=1e-2)  # <= -40 dB
        assert_allclose(20 * np.log10(h[(w >= 0.2) & (w <= 0.5)]),
                        0, atol=3.01)
        N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 100)
        sos = butter(N, Wn, 'band', output='sos')
        w, h = sosfreqz(sos)
        # Clamp |h| before log10 to avoid -inf in very deep stopbands.
        dB = 20*np.log10(np.maximum(np.abs(h), 1e-10))
        w /= np.pi
        assert_array_less(dB[(w > 0) & (w <= 0.14)], -99.9)
        assert_array_less(dB[w >= 0.6], -99.9)
        assert_allclose(dB[(w >= 0.2) & (w <= 0.5)], 0, atol=3.01)
    def test_sosfreqz_design_ellip(self):
        """Elliptic highpass designs, including a very deep (150 dB) stop."""
        N, Wn = ellipord(0.3, 0.1, 3, 60)
        sos = ellip(N, 0.3, 60, Wn, 'high', output='sos')
        w, h = sosfreqz(sos)
        h = np.abs(h)
        w /= np.pi
        assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01)
        assert_allclose(h[w <= 0.1], 0., atol=1.5e-3)  # <= -60 dB (approx)
        N, Wn = ellipord(0.3, 0.2, .5, 150)
        sos = ellip(N, .5, 150, Wn, 'high', output='sos')
        w, h = sosfreqz(sos)
        dB = 20*np.log10(np.maximum(np.abs(h), 1e-10))
        w /= np.pi
        assert_allclose(dB[w >= 0.3], 0, atol=.55)
        assert_array_less(dB[w <= 0.2], -150)
    @mpmath_check("0.10")
    def test_sos_freqz_against_mp(self):
        # Compare the result of sosfreqz applied to a high order Butterworth
        # filter against the result computed using mpmath. (signal.freqz fails
        # miserably with such high order filters.)
        from . import mpsig
        N = 500
        order = 25
        Wn = 0.15
        # 80 decimal digits of working precision for the reference values.
        with mpmath.workdps(80):
            z_mp, p_mp, k_mp = mpsig.butter_lp(order, Wn)
            w_mp, h_mp = mpsig.zpkfreqz(z_mp, p_mp, k_mp, N)
        w_mp = np.array([float(x) for x in w_mp])
        h_mp = np.array([complex(x) for x in h_mp])
        sos = butter(order, Wn, output='sos')
        w, h = sosfreqz(sos, worN=N)
        assert_allclose(w, w_mp, rtol=1e-12, atol=1e-14)
        assert_allclose(h, h_mp, rtol=1e-12, atol=1e-14)
    def test_fs_param(self):
        """With fs given, sosfreqz returns w scaled to fs units (Hz)."""
        fs = 900
        sos = [[0.03934683014103762, 0.07869366028207524, 0.03934683014103762,
                1.0, -0.37256600288916636, 0.0],
               [1.0, 1.0, 0.0, 1.0, -0.9495739996946778, 0.45125966317124144]]

        # N = None, whole=False
        w1, h1 = sosfreqz(sos, fs=fs)
        w2, h2 = sosfreqz(sos)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False))

        # N = None, whole=True
        w1, h1 = sosfreqz(sos, whole=True, fs=fs)
        w2, h2 = sosfreqz(sos, whole=True)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False))

        # N = 5, whole=False
        w1, h1 = sosfreqz(sos, 5, fs=fs)
        w2, h2 = sosfreqz(sos, 5)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False))

        # N = 5, whole=True
        w1, h1 = sosfreqz(sos, 5, whole=True, fs=fs)
        w2, h2 = sosfreqz(sos, 5, whole=True)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False))

        # w is an array_like
        for w in ([123], (123,), np.array([123]), (50, 123, 230),
                  np.array([50, 123, 230])):
            # Hz frequencies must match the equivalent normalized grid.
            w1, h1 = sosfreqz(sos, w, fs=fs)
            w2, h2 = sosfreqz(sos, 2*pi*np.array(w)/fs)
            assert_allclose(h1, h2)
            assert_allclose(w, w1)
    def test_w_or_N_types(self):
        """worN must accept any integer type and real/complex scalars."""
        # Measure at 7 (polyval) or 8 (fft) equally-spaced points
        for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7),
                  np.array(7),
                  8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
                  np.array(8)):
            w, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=N)
            assert_array_almost_equal(w, np.pi * np.arange(N) / N)
            assert_array_almost_equal(h, np.ones(N))

            w, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=N, fs=100)
            assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False))
            assert_array_almost_equal(h, np.ones(N))

        # Measure at frequency 8 Hz
        for w in (8.0, 8.0+0j):
            # Only makes sense when fs is specified
            w_out, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=w, fs=100)
            assert_array_almost_equal(w_out, [8])
            assert_array_almost_equal(h, [1])
class TestFreqz_zpk:
    """Tests for freqz_zpk (frequency response from zeros/poles/gain)."""
    def test_ticket1441(self):
        """Regression test for ticket 1441."""
        # Because freqz previously used arange instead of linspace,
        # when N was large, it would return one more point than
        # requested.
        N = 100000
        w, h = freqz_zpk([0.5], [0.5], 1.0, worN=N)
        assert_equal(w.shape, (N,))
    def test_basic(self):
        """Default half-circle grid: N points from 0 up to (excl.) pi."""
        w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8)
        assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
        # Identical zero and pole cancel, so the response is flat unity.
        assert_array_almost_equal(h, np.ones(8))
    def test_basic_whole(self):
        """whole=True grid: N points from 0 up to (excl.) 2*pi."""
        w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8, whole=True)
        assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8)
        assert_array_almost_equal(h, np.ones(8))
    def test_vs_freqz(self):
        """freqz_zpk must agree with freqz on an equivalent design."""
        b, a = cheby1(4, 5, 0.5, analog=False, output='ba')
        z, p, k = cheby1(4, 5, 0.5, analog=False, output='zpk')
        w1, h1 = freqz(b, a)
        w2, h2 = freqz_zpk(z, p, k)
        assert_allclose(w1, w2)
        assert_allclose(h1, h2, rtol=1e-6)
    def test_backward_compat(self):
        # For backward compatibility, test if None act as a wrapper for default
        w1, h1 = freqz_zpk([0.5], [0.5], 1.0)
        w2, h2 = freqz_zpk([0.5], [0.5], 1.0, None)
        assert_array_almost_equal(w1, w2)
        assert_array_almost_equal(h1, h2)
    def test_fs_param(self):
        """With fs given, w is returned scaled to fs units (Hz)."""
        fs = 900
        z = [-1, -1, -1]
        p = [0.4747869998473389+0.4752230717749344j, 0.37256600288916636,
             0.4747869998473389-0.4752230717749344j]
        k = 0.03934683014103762

        # N = None, whole=False
        w1, h1 = freqz_zpk(z, p, k, whole=False, fs=fs)
        w2, h2 = freqz_zpk(z, p, k, whole=False)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False))

        # N = None, whole=True
        w1, h1 = freqz_zpk(z, p, k, whole=True, fs=fs)
        w2, h2 = freqz_zpk(z, p, k, whole=True)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False))

        # N = 5, whole=False
        w1, h1 = freqz_zpk(z, p, k, 5, fs=fs)
        w2, h2 = freqz_zpk(z, p, k, 5)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False))

        # N = 5, whole=True
        w1, h1 = freqz_zpk(z, p, k, 5, whole=True, fs=fs)
        w2, h2 = freqz_zpk(z, p, k, 5, whole=True)
        assert_allclose(h1, h2)
        assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False))

        # w is an array_like
        for w in ([123], (123,), np.array([123]), (50, 123, 230),
                  np.array([50, 123, 230])):
            # Hz frequencies must match the equivalent normalized grid.
            w1, h1 = freqz_zpk(z, p, k, w, fs=fs)
            w2, h2 = freqz_zpk(z, p, k, 2*pi*np.array(w)/fs)
            assert_allclose(h1, h2)
            assert_allclose(w, w1)
    def test_w_or_N_types(self):
        """worN must accept any integer type and real/complex scalars."""
        # Measure at 8 equally-spaced points
        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
                  np.array(8)):
            w, h = freqz_zpk([], [], 1, worN=N)
            assert_array_almost_equal(w, np.pi * np.arange(8) / 8.)
            assert_array_almost_equal(h, np.ones(8))

            w, h = freqz_zpk([], [], 1, worN=N, fs=100)
            assert_array_almost_equal(w, np.linspace(0, 50, 8, endpoint=False))
            assert_array_almost_equal(h, np.ones(8))

        # Measure at frequency 8 Hz
        for w in (8.0, 8.0+0j):
            # Only makes sense when fs is specified
            w_out, h = freqz_zpk([], [], 1, worN=w, fs=100)
            assert_array_almost_equal(w_out, [8])
            assert_array_almost_equal(h, [1])
class TestNormalize:
    """Tests for normalize (transfer-function coefficient normalization)."""
    def test_allclose(self):
        """Test for false positive on allclose in normalize() in
        filter_design.py"""
        # Test to make sure the allclose call within signal.normalize does not
        # choose false positives. Then check against a known output from MATLAB
        # to make sure the fix doesn't break anything.

        # These are the coefficients returned from
        #   `[b,a] = cheby1(8, 0.5, 0.048)'
        # in MATLAB. There are at least 15 significant figures in each
        # coefficient, so it makes sense to test for errors on the order of
        # 1e-13 (this can always be relaxed if different platforms have
        # different rounding errors)
        b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10,
                             6.022052805239190e-10, 1.204410561047838e-09,
                             1.505513201309798e-09, 1.204410561047838e-09,
                             6.022052805239190e-10, 1.720586515782626e-10,
                             2.150733144728282e-11])
        a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00,
                             2.654354569747454e+01, -5.182182531666387e+01,
                             6.334127355102684e+01, -4.963358186631157e+01,
                             2.434862182949389e+01, -6.836925348604676e+00,
                             8.412934944449140e-01])

        # This is the input to signal.normalize after passing through the
        # equivalent steps in signal.iirfilter as was done for MATLAB
        b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05,
                              4.3520780422820447e-05, 8.7041560845640893e-05,
                              1.0880195105705122e-04, 8.7041560845640975e-05,
                              4.3520780422820447e-05, 1.2434508692234413e-05,
                              1.5543135865293012e-06])
        a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05,
                              1.9182761917308895e+06, -3.7451128364682454e+06,
                              4.5776121393762771e+06, -3.5869706138592605e+06,
                              1.7596511818472347e+06, -4.9409793515707983e+05,
                              6.0799461347219651e+04])

        b_output, a_output = normalize(b_norm_in, a_norm_in)

        # The test on b works for decimal=14 but the one for a does not. For
        # the sake of consistency, both of these are decimal=13. If something
        # breaks on another platform, it is probably fine to relax this lower.
        assert_array_almost_equal(b_matlab, b_output, decimal=13)
        assert_array_almost_equal(a_matlab, a_output, decimal=13)
    def test_errors(self):
        """Test the error cases."""
        # all zero denominator
        assert_raises(ValueError, normalize, [1, 2], 0)

        # denominator not 1 dimensional
        assert_raises(ValueError, normalize, [1, 2], [[1]])

        # numerator too many dimensions
        assert_raises(ValueError, normalize, [[[1, 2]]], 1)
class TestLp2lp:

    def test_basic(self):
        """lp2lp of a 2nd-order Butterworth prototype: coefficients must
        match the 4-decimal reference values."""
        num, den = [1], [1, np.sqrt(2), 1]
        num_lp, den_lp = lp2lp(num, den, 0.38574256627112119)
        assert_array_almost_equal(num_lp, [0.1488], decimal=4)
        assert_array_almost_equal(den_lp, [1, 0.5455, 0.1488], decimal=4)
class TestLp2hp:

    def test_basic(self):
        """lp2hp of a 3rd-order lowpass prototype with a 5 kHz corner:
        coefficients must match the reference values."""
        num = [0.25059432325190018]
        den = [1, 0.59724041654134863, 0.92834805757524175,
               0.25059432325190018]
        num_hp, den_hp = lp2hp(num, den, 2 * np.pi * 5000)
        assert_allclose(num_hp, [1, 0, 0, 0])
        assert_allclose(den_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14],
                        rtol=1e-4)
class TestLp2bp:

    def test_basic(self):
        """lp2bp of a 3rd-order prototype (center 4 kHz, bandwidth 2 kHz):
        coefficients must match the reference values."""
        num, den = [1], [1, 2, 2, 1]
        num_bp, den_bp = lp2bp(num, den, 2 * np.pi * 4000, 2 * np.pi * 2000)
        assert_allclose(num_bp, [1.9844e12, 0, 0, 0], rtol=1e-6)
        assert_allclose(den_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13,
                                 1.3965e18, 1.0028e22, 2.5202e26],
                        rtol=1e-4)
class TestLp2bs:

    def test_basic(self):
        """lp2bs of a 1st-order prototype: coefficients must match the
        5-decimal reference values."""
        num, den = [1], [1, 1]
        num_bs, den_bs = lp2bs(num, den, 0.41722257286366754,
                               0.18460575326152251)
        assert_array_almost_equal(num_bs, [1, 0, 0.17407], decimal=5)
        assert_array_almost_equal(den_bs, [1, 0.18461, 0.17407], decimal=5)
class TestBilinear:

    def test_basic(self):
        """bilinear transform of two analog filters at fs=0.5: digital
        coefficients must match the reference values."""
        # First filter: 2nd-order all-pole prototype.
        num = [0.14879732743343033]
        den = [1, 0.54552236880522209, 0.14879732743343033]
        num_z, den_z = bilinear(num, den, 0.5)
        assert_array_almost_equal(num_z, [0.087821, 0.17564, 0.087821],
                                  decimal=5)
        assert_array_almost_equal(den_z, [1, -1.0048, 0.35606], decimal=4)
        # Second filter: 2nd-order with a pair of imaginary zeros.
        num = [1, 0, 0.17407467530697837]
        den = [1, 0.18460575326152251, 0.17407467530697837]
        num_z, den_z = bilinear(num, den, 0.5)
        assert_array_almost_equal(num_z, [0.86413, -1.2158, 0.86413],
                                  decimal=4)
        assert_array_almost_equal(den_z, [1, -1.2158, 0.72826],
                                  decimal=4)
class TestLp2lp_zpk:

    def test_basic(self):
        """Spot-check lp2lp_zpk against hand-computed scaled roots."""
        # Butterworth-style prototype: no zeros, a conjugate pole pair.
        p = [(-1 + 1j) / np.sqrt(2), (-1 - 1j) / np.sqrt(2)]
        z_lp, p_lp, k_lp = lp2lp_zpk([], p, 1, 5)
        assert_array_equal(z_lp, [])
        assert_allclose(sort(p_lp), sort(p) * 5)
        assert_allclose(k_lp, 25)
        # Pseudo-Chebyshev prototype with both poles and zeros.
        z = [-2j, +2j]
        p = [-0.75, -0.5 - 0.5j, -0.5 + 0.5j]
        z_lp, p_lp, k_lp = lp2lp_zpk(z, p, 3, 20)
        assert_allclose(sort(z_lp), sort([-40j, +40j]))
        assert_allclose(sort(p_lp), sort([-15, -10 - 10j, -10 + 10j]))
        assert_allclose(k_lp, 60)
class TestLp2hp_zpk:

    def test_basic(self):
        """Spot-check lp2hp_zpk against hand-computed transformed roots."""
        # Butterworth-style prototype: no zeros, a conjugate pole pair.
        p = [(-1 + 1j) / np.sqrt(2), (-1 - 1j) / np.sqrt(2)]
        z_hp, p_hp, k_hp = lp2hp_zpk([], p, 1, 5)
        # The transform produces two zeros at the origin here.
        assert_array_equal(z_hp, [0, 0])
        assert_allclose(sort(p_hp), sort(p) * 5)
        assert_allclose(k_hp, 1)
        # Prototype with both poles and zeros.
        z = [-2j, +2j]
        p = [-0.75, -0.5 - 0.5j, -0.5 + 0.5j]
        z_hp, p_hp, k_hp = lp2hp_zpk(z, p, 3, 6)
        assert_allclose(sort(z_hp), sort([-3j, 0, +3j]))
        assert_allclose(sort(p_hp), sort([-8, -6 - 6j, -6 + 6j]))
        assert_allclose(k_hp, 32)
class TestLp2bp_zpk:

    def test_basic(self):
        """Spot-check lp2bp_zpk against analytically derived roots."""
        z = [-2j, +2j]
        p = [-0.75, -0.5 - 0.5j, -0.5 + 0.5j]
        z_bp, p_bp, k_bp = lp2bp_zpk(z, p, 3, 15, 8)
        assert_allclose(sort(z_bp), sort([-25j, -9j, 0, +9j, +25j]))
        expected_p = [-3 + 6j * sqrt(6),
                      -3 - 6j * sqrt(6),
                      +2j + sqrt(-8j - 225) - 2,
                      -2j + sqrt(+8j - 225) - 2,
                      +2j - sqrt(-8j - 225) - 2,
                      -2j - sqrt(+8j - 225) - 2]
        assert_allclose(sort(p_bp), sort(expected_p))
        assert_allclose(k_bp, 24)
class TestLp2bs_zpk:

    def test_basic(self):
        """Spot-check lp2bs_zpk against analytically derived roots."""
        z = [-2j, +2j]
        p = [-0.75, -0.5 - 0.5j, -0.5 + 0.5j]
        z_bs, p_bs, k_bs = lp2bs_zpk(z, p, 3, 35, 12)
        expected_z = [+35j, -35j,
                      +3j + sqrt(1234) * 1j,
                      -3j + sqrt(1234) * 1j,
                      +3j - sqrt(1234) * 1j,
                      -3j - sqrt(1234) * 1j]
        assert_allclose(sort(z_bs), sort(expected_z))
        expected_p = [+3j * sqrt(129) - 8,
                      -3j * sqrt(129) - 8,
                      (-6 + 6j) - sqrt(-1225 - 72j),
                      (-6 - 6j) - sqrt(-1225 + 72j),
                      (-6 + 6j) + sqrt(-1225 - 72j),
                      (-6 - 6j) + sqrt(-1225 + 72j)]
        assert_allclose(sort(p_bs), sort(expected_p))
        assert_allclose(k_bs, 32)
class TestBilinear_zpk:

    def test_basic(self):
        """bilinear_zpk of a 3-pole/2-zero analog filter at fs=10:
        check against hand-computed digital roots and gain."""
        z = [-2j, +2j]
        p = [-0.75, -0.5 - 0.5j, -0.5 + 0.5j]
        z_d, p_d, k_d = bilinear_zpk(z, p, 3, 10)
        # Expected values match s -> (2*fs + s)/(2*fs - s) applied to each
        # root, with an extra digital zero at -1.
        assert_allclose(sort(z_d),
                        sort([(20 - 2j) / (20 + 2j), (20 + 2j) / (20 - 2j),
                              -1]))
        assert_allclose(sort(p_d),
                        sort([77 / 83,
                              (1j / 2 + 39 / 2) / (41 / 2 - 1j / 2),
                              (39 / 2 - 1j / 2) / (1j / 2 + 41 / 2)]))
        assert_allclose(k_d, 9696 / 69803)
class TestPrototypeType:

    def test_output_type(self):
        """All analog prototype functions must return ndarrays, not lists.

        Regression test for https://github.com/scipy/scipy/pull/441.
        """
        prototypes = (buttap,
                      besselap,
                      lambda N: cheb1ap(N, 1),
                      lambda N: cheb2ap(N, 20),
                      lambda N: ellipap(N, 1, 20))
        for make_prototype in prototypes:
            for order in range(7):
                z, p, k = make_prototype(order)
                assert_(isinstance(z, np.ndarray))
                assert_(isinstance(p, np.ndarray))
def dB(x):
    """Return the magnitude of *x* in decibels.

    The magnitude is clamped to the smallest positive normal float64 so
    that zeros don't trigger divide-by-zero warnings (and -inf doesn't
    cause "not less-ordered" comparison errors downstream).
    """
    magnitude = np.maximum(np.abs(x), np.finfo(np.float64).tiny)
    return 20 * np.log10(magnitude)
class TestButtord:
    """Tests for buttord (Butterworth filter order selection)."""
    def test_lowpass(self):
        """Lowpass design meets specs; order and Wn match references."""
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 60
        N, Wn = buttord(wp, ws, rp, rs, False)
        b, a = butter(N, Wn, 'lowpass', False)
        w, h = freqz(b, a)
        w /= np.pi
        # Passband droop within rp dB, stopband below -rs dB.
        assert_array_less(-rp, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs)

        assert_equal(N, 16)
        assert_allclose(Wn, 2.0002776782743284e-01, rtol=1e-15)
    def test_highpass(self):
        """Highpass design meets specs; order and Wn match references."""
        wp = 0.3
        ws = 0.2
        rp = 3
        rs = 70
        N, Wn = buttord(wp, ws, rp, rs, False)
        b, a = butter(N, Wn, 'highpass', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs)

        assert_equal(N, 18)
        assert_allclose(Wn, 2.9996603079132672e-01, rtol=1e-15)
    def test_bandpass(self):
        """Bandpass design meets specs; order and Wn match references."""
        wp = [0.2, 0.5]
        ws = [0.1, 0.6]
        rp = 3
        rs = 80
        N, Wn = buttord(wp, ws, rp, rs, False)
        b, a = butter(N, Wn, 'bandpass', False)
        w, h = freqz(b, a)
        w /= np.pi
        # 0.1 dB slack on both bounds for numerical tolerance.
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 18)
        assert_allclose(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01],
                        rtol=1e-15)
    def test_bandstop(self):
        """Bandstop design meets specs; order and Wn match references."""
        wp = [0.1, 0.6]
        ws = [0.2, 0.5]
        rp = 3
        rs = 90
        N, Wn = buttord(wp, ws, rp, rs, False)
        b, a = butter(N, Wn, 'bandstop', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs)

        assert_equal(N, 20)
        assert_allclose(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01],
                        rtol=1e-6)
    def test_analog(self):
        """Analog (s-domain) designs meet specs; extra spot-checks."""
        wp = 200
        ws = 600
        rp = 3
        rs = 60
        N, Wn = buttord(wp, ws, rp, rs, True)
        b, a = butter(N, Wn, 'lowpass', True)
        w, h = freqs(b, a)
        assert_array_less(-rp, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs)

        assert_equal(N, 7)
        assert_allclose(Wn, 2.0006785355671877e+02, rtol=1e-15)

        n, Wn = buttord(1, 550/450, 1, 26, analog=True)
        assert_equal(n, 19)
        assert_allclose(Wn, 1.0361980524629517, rtol=1e-15)

        assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55)
    def test_fs_param(self):
        """Same bandpass check as above, but specified in Hz via fs."""
        wp = [4410, 11025]
        ws = [2205, 13230]
        rp = 3
        rs = 80
        fs = 44100
        N, Wn = buttord(wp, ws, rp, rs, False, fs=fs)
        b, a = butter(N, Wn, 'bandpass', False, fs=fs)
        w, h = freqz(b, a, fs=fs)
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 18)
        assert_allclose(Wn, [4409.722701715714, 11025.47178084662],
                        rtol=1e-15)
    def test_invalid_input(self):
        """Invalid gpass/gstop combinations must raise ValueError."""
        with pytest.raises(ValueError) as exc_info:
            buttord([20, 50], [14, 60], 3, 2)
        assert "gpass should be smaller than gstop" in str(exc_info.value)

        with pytest.raises(ValueError) as exc_info:
            buttord([20, 50], [14, 60], -1, 2)
        assert "gpass should be larger than 0.0" in str(exc_info.value)

        with pytest.raises(ValueError) as exc_info:
            buttord([20, 50], [14, 60], 1, -2)
        assert "gstop should be larger than 0.0" in str(exc_info.value)
class TestCheb1ord:
    """Tests for cheb1ord (Chebyshev type I filter order selection)."""
    def test_lowpass(self):
        """Lowpass design meets specs; order and Wn match references."""
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 60
        N, Wn = cheb1ord(wp, ws, rp, rs, False)
        b, a = cheby1(N, rp, Wn, 'low', False)
        w, h = freqz(b, a)
        w /= np.pi
        # 0.1 dB slack on both bounds for numerical tolerance.
        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs + 0.1)

        assert_equal(N, 8)
        assert_allclose(Wn, 0.2, rtol=1e-15)
    def test_highpass(self):
        """Highpass design meets specs; order and Wn match references."""
        wp = 0.3
        ws = 0.2
        rp = 3
        rs = 70
        N, Wn = cheb1ord(wp, ws, rp, rs, False)
        b, a = cheby1(N, rp, Wn, 'high', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, 0.3, rtol=1e-15)
    def test_bandpass(self):
        """Bandpass design meets specs; order and Wn match references."""
        wp = [0.2, 0.5]
        ws = [0.1, 0.6]
        rp = 3
        rs = 80
        N, Wn = cheb1ord(wp, ws, rp, rs, False)
        b, a = cheby1(N, rp, Wn, 'band', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)
    def test_bandstop(self):
        """Bandstop design meets specs; order and Wn match references."""
        wp = [0.1, 0.6]
        ws = [0.2, 0.5]
        rp = 3
        rs = 90
        N, Wn = cheb1ord(wp, ws, rp, rs, False)
        b, a = cheby1(N, rp, Wn, 'stop', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 10)
        assert_allclose(Wn, [0.14758232569947785, 0.6], rtol=1e-5)
    def test_analog(self):
        """Analog (s-domain) highpass design meets specs."""
        wp = 700
        ws = 100
        rp = 3
        rs = 70
        N, Wn = cheb1ord(wp, ws, rp, rs, True)
        b, a = cheby1(N, rp, Wn, 'high', True)
        w, h = freqs(b, a)
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 4)
        assert_allclose(Wn, 700, rtol=1e-15)

        assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17)
    def test_fs_param(self):
        """Same lowpass check as above, but specified in Hz via fs."""
        wp = 4800
        ws = 7200
        rp = 3
        rs = 60
        fs = 48000
        N, Wn = cheb1ord(wp, ws, rp, rs, False, fs=fs)
        b, a = cheby1(N, rp, Wn, 'low', False, fs=fs)
        w, h = freqz(b, a, fs=fs)
        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs + 0.1)

        assert_equal(N, 8)
        assert_allclose(Wn, 4800, rtol=1e-15)
    def test_invalid_input(self):
        """Invalid gpass/gstop combinations must raise ValueError."""
        with pytest.raises(ValueError) as exc_info:
            cheb1ord(0.2, 0.3, 3, 2)
        assert "gpass should be smaller than gstop" in str(exc_info.value)

        with pytest.raises(ValueError) as exc_info:
            cheb1ord(0.2, 0.3, -1, 2)
        assert "gpass should be larger than 0.0" in str(exc_info.value)

        with pytest.raises(ValueError) as exc_info:
            cheb1ord(0.2, 0.3, 1, -2)
        assert "gstop should be larger than 0.0" in str(exc_info.value)
class TestCheb2ord:
    """Tests for cheb2ord (Chebyshev type II filter order selection)."""
    def test_lowpass(self):
        """Lowpass design meets specs; order and Wn match references."""
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 60
        N, Wn = cheb2ord(wp, ws, rp, rs, False)
        b, a = cheby2(N, rs, Wn, 'lp', False)
        w, h = freqz(b, a)
        w /= np.pi
        # 0.1 dB slack on both bounds for numerical tolerance.
        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs + 0.1)

        assert_equal(N, 8)
        assert_allclose(Wn, 0.28647639976553163, rtol=1e-15)
    def test_highpass(self):
        """Highpass design meets specs; order and Wn match references."""
        wp = 0.3
        ws = 0.2
        rp = 3
        rs = 70
        N, Wn = cheb2ord(wp, ws, rp, rs, False)
        b, a = cheby2(N, rs, Wn, 'hp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, 0.20697492182903282, rtol=1e-15)
    def test_bandpass(self):
        """Bandpass design meets specs; order and Wn match references."""
        wp = [0.2, 0.5]
        ws = [0.1, 0.6]
        rp = 3
        rs = 80
        N, Wn = cheb2ord(wp, ws, rp, rs, False)
        b, a = cheby2(N, rs, Wn, 'bp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, [0.14876937565923479, 0.59748447842351482],
                        rtol=1e-15)
    def test_bandstop(self):
        """Bandstop design meets specs; order and Wn match references."""
        wp = [0.1, 0.6]
        ws = [0.2, 0.5]
        rp = 3
        rs = 90
        N, Wn = cheb2ord(wp, ws, rp, rs, False)
        b, a = cheby2(N, rs, Wn, 'bs', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 10)
        assert_allclose(Wn, [0.19926249974781743, 0.50125246585567362],
                        rtol=1e-6)
    def test_analog(self):
        """Analog (s-domain) bandpass design meets specs."""
        wp = [20, 50]
        ws = [10, 60]
        rp = 3
        rs = 80
        N, Wn = cheb2ord(wp, ws, rp, rs, True)
        b, a = cheby2(N, rs, Wn, 'bp', True)
        w, h = freqs(b, a)
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 11)
        assert_allclose(Wn, [1.673740595370124e+01, 5.974641487254268e+01],
                        rtol=1e-15)
    def test_fs_param(self):
        """Same highpass check as above, but specified in Hz via fs."""
        wp = 150
        ws = 100
        rp = 3
        rs = 70
        fs = 1000
        N, Wn = cheb2ord(wp, ws, rp, rs, False, fs=fs)
        b, a = cheby2(N, rs, Wn, 'hp', False, fs=fs)
        w, h = freqz(b, a, fs=fs)
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, 103.4874609145164, rtol=1e-15)
    def test_invalid_input(self):
        """Invalid gpass/gstop combinations must raise ValueError."""
        with pytest.raises(ValueError) as exc_info:
            cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 2)
        assert "gpass should be smaller than gstop" in str(exc_info.value)

        with pytest.raises(ValueError) as exc_info:
            cheb2ord([0.1, 0.6], [0.2, 0.5], -1, 2)
        assert "gpass should be larger than 0.0" in str(exc_info.value)

        with pytest.raises(ValueError) as exc_info:
            cheb2ord([0.1, 0.6], [0.2, 0.5], 1, -2)
        assert "gstop should be larger than 0.0" in str(exc_info.value)
class TestEllipord:
    """Tests for ellipord (elliptic filter order selection)."""
    def test_lowpass(self):
        """Lowpass design meets specs; order and Wn match references."""
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 60
        N, Wn = ellipord(wp, ws, rp, rs, False)
        b, a = ellip(N, rp, rs, Wn, 'lp', False)
        w, h = freqz(b, a)
        w /= np.pi
        # 0.1 dB slack on both bounds for numerical tolerance.
        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs + 0.1)

        assert_equal(N, 5)
        assert_allclose(Wn, 0.2, rtol=1e-15)
    def test_lowpass_1000dB(self):
        """Extreme 1000 dB stopband case (SOS form, since ba overflows)."""
        # failed when ellipkm1 wasn't used in ellipord and ellipap
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 1000
        N, Wn = ellipord(wp, ws, rp, rs, False)
        sos = ellip(N, rp, rs, Wn, 'lp', False, output='sos')
        w, h = sosfreqz(sos)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs + 0.1)
    def test_highpass(self):
        """Highpass design meets specs; order and Wn match references."""
        wp = 0.3
        ws = 0.2
        rp = 3
        rs = 70
        N, Wn = ellipord(wp, ws, rp, rs, False)
        b, a = ellip(N, rp, rs, Wn, 'hp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 6)
        assert_allclose(Wn, 0.3, rtol=1e-15)
    def test_bandpass(self):
        """Bandpass design meets specs; order and Wn match references."""
        wp = [0.2, 0.5]
        ws = [0.1, 0.6]
        rp = 3
        rs = 80
        N, Wn = ellipord(wp, ws, rp, rs, False)
        b, a = ellip(N, rp, rs, Wn, 'bp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 6)
        assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)
    def test_bandstop(self):
        """Bandstop design meets specs; order and Wn match references."""
        wp = [0.1, 0.6]
        ws = [0.2, 0.5]
        rp = 3
        rs = 90
        N, Wn = ellipord(wp, ws, rp, rs, False)
        b, a = ellip(N, rp, rs, Wn, 'bs', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 7)
        assert_allclose(Wn, [0.14758232794342988, 0.6], rtol=1e-5)
    def test_analog(self):
        """Analog (s-domain) bandstop design meets specs."""
        wp = [1000, 6000]
        ws = [2000, 5000]
        rp = 3
        rs = 90
        N, Wn = ellipord(wp, ws, rp, rs, True)
        b, a = ellip(N, rp, rs, Wn, 'bs', True)
        w, h = freqs(b, a)
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 8)
        assert_allclose(Wn, [1666.6666, 6000])

        assert_equal(ellipord(1, 1.2, 1, 80, analog=True)[0], 9)
    def test_fs_param(self):
        """Same bandstop check as above, but specified in Hz via fs."""
        wp = [400, 2400]
        ws = [800, 2000]
        rp = 3
        rs = 90
        fs = 8000
        N, Wn = ellipord(wp, ws, rp, rs, False, fs=fs)
        b, a = ellip(N, rp, rs, Wn, 'bs', False, fs=fs)
        w, h = freqz(b, a, fs=fs)
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 7)
        assert_allclose(Wn, [590.3293117737195, 2400], rtol=1e-5)
    def test_invalid_input(self):
        """Invalid gpass/gstop combinations must raise ValueError."""
        with pytest.raises(ValueError) as exc_info:
            ellipord(0.2, 0.5, 3, 2)
        assert "gpass should be smaller than gstop" in str(exc_info.value)

        with pytest.raises(ValueError) as exc_info:
            ellipord(0.2, 0.5, -1, 2)
        assert "gpass should be larger than 0.0" in str(exc_info.value)

        with pytest.raises(ValueError) as exc_info:
            ellipord(0.2, 0.5, 1, -2)
        assert "gstop should be larger than 0.0" in str(exc_info.value)
class TestBessel:
    def test_degenerate(self):
        """Orders 0 and 1 must give the same result for every norm."""
        for norm in ('delay', 'phase', 'mag'):
            # 0-order filter is just a passthrough
            b, a = bessel(0, 1, analog=True, norm=norm)
            assert_array_equal(b, [1])
            assert_array_equal(a, [1])

            # 1-order filter is same for all types
            b, a = bessel(1, 1, analog=True, norm=norm)
            assert_allclose(b, [1], rtol=1e-15)
            assert_allclose(a, [1, 1], rtol=1e-15)

            # zpk output for order 1: single real pole at -Wn, gain Wn.
            z, p, k = bessel(1, 0.3, analog=True, output='zpk', norm=norm)
            assert_array_equal(z, [])
            assert_allclose(p, [-0.3], rtol=1e-14)
            assert_allclose(k, 0.3, rtol=1e-14)
def test_high_order(self):
# high even order, 'phase'
z, p, k = bessel(24, 100, analog=True, output='zpk')
z2 = []
p2 = [
-9.055312334014323e+01 + 4.844005815403969e+00j,
-8.983105162681878e+01 + 1.454056170018573e+01j,
-8.837357994162065e+01 + 2.426335240122282e+01j,
-8.615278316179575e+01 + 3.403202098404543e+01j,
-8.312326467067703e+01 + 4.386985940217900e+01j,
-7.921695461084202e+01 + 5.380628489700191e+01j,
-7.433392285433246e+01 + 6.388084216250878e+01j,
-6.832565803501586e+01 + 7.415032695116071e+01j,
-6.096221567378025e+01 + 8.470292433074425e+01j,
-5.185914574820616e+01 + 9.569048385258847e+01j,
-4.027853855197555e+01 + 1.074195196518679e+02j,
-2.433481337524861e+01 + 1.207298683731973e+02j,
]
k2 = 9.999999999999989e+47
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
assert_allclose(k, k2, rtol=1e-14)
# high odd order, 'phase'
z, p, k = bessel(23, 1000, analog=True, output='zpk')
z2 = []
p2 = [
-2.497697202208956e+02 + 1.202813187870698e+03j,
-4.126986617510172e+02 + 1.065328794475509e+03j,
-5.304922463809596e+02 + 9.439760364018479e+02j,
-9.027564978975828e+02 + 1.010534334242318e+02j,
-8.909283244406079e+02 + 2.023024699647598e+02j,
-8.709469394347836e+02 + 3.039581994804637e+02j,
-8.423805948131370e+02 + 4.062657947488952e+02j,
-8.045561642249877e+02 + 5.095305912401127e+02j,
-7.564660146766259e+02 + 6.141594859516342e+02j,
-6.965966033906477e+02 + 7.207341374730186e+02j,
-6.225903228776276e+02 + 8.301558302815096e+02j,
-9.066732476324988e+02]
k2 = 9.999999999999983e+68
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
assert_allclose(k, k2, rtol=1e-14)
# high even order, 'delay' (Orchard 1965 "The Roots of the
# Maximally Flat-Delay Polynomials" Table 1)
z, p, k = bessel(31, 1, analog=True, output='zpk', norm='delay')
p2 = [-20.876706,
-20.826543 + 1.735732j,
-20.675502 + 3.473320j,
-20.421895 + 5.214702j,
-20.062802 + 6.961982j,
-19.593895 + 8.717546j,
-19.009148 + 10.484195j,
-18.300400 + 12.265351j,
-17.456663 + 14.065350j,
-16.463032 + 15.889910j,
-15.298849 + 17.746914j,
-13.934466 + 19.647827j,
-12.324914 + 21.610519j,
-10.395893 + 23.665701j,
- 8.005600 + 25.875019j,
- 4.792045 + 28.406037j,
]
assert_allclose(sorted(p, key=np.imag),
sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
# high odd order, 'delay'
z, p, k = bessel(30, 1, analog=True, output='zpk', norm='delay')
p2 = [-20.201029 + 0.867750j,
-20.097257 + 2.604235j,
-19.888485 + 4.343721j,
-19.572188 + 6.088363j,
-19.144380 + 7.840570j,
-18.599342 + 9.603147j,
-17.929195 + 11.379494j,
-17.123228 + 13.173901j,
-16.166808 + 14.992008j,
-15.039580 + 16.841580j,
-13.712245 + 18.733902j,
-12.140295 + 20.686563j,
-10.250119 + 22.729808j,
- 7.901170 + 24.924391j,
- 4.734679 + 27.435615j,
]
assert_allclose(sorted(p, key=np.imag),
sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
def test_refs(self):
# Compare to http://www.crbond.com/papers/bsf2.pdf
# "Delay Normalized Bessel Polynomial Coefficients"
bond_b = 10395
bond_a = [1, 21, 210, 1260, 4725, 10395, 10395]
b, a = bessel(6, 1, norm='delay', analog=True)
assert_allclose(bond_b, b)
assert_allclose(bond_a, a)
# "Delay Normalized Bessel Pole Locations"
bond_poles = {
1: [-1.0000000000],
2: [-1.5000000000 + 0.8660254038j],
3: [-1.8389073227 + 1.7543809598j, -2.3221853546],
4: [-2.1037893972 + 2.6574180419j, -2.8962106028 + 0.8672341289j],
5: [-2.3246743032 + 3.5710229203j, -3.3519563992 + 1.7426614162j,
-3.6467385953],
6: [-2.5159322478 + 4.4926729537j, -3.7357083563 + 2.6262723114j,
-4.2483593959 + 0.8675096732j],
7: [-2.6856768789 + 5.4206941307j, -4.0701391636 + 3.5171740477j,
-4.7582905282 + 1.7392860611j, -4.9717868585],
8: [-2.8389839489 + 6.3539112986j, -4.3682892172 + 4.4144425005j,
-5.2048407906 + 2.6161751526j, -5.5878860433 + 0.8676144454j],
9: [-2.9792607982 + 7.2914636883j, -4.6384398872 + 5.3172716754j,
-5.6044218195 + 3.4981569179j, -6.1293679043 + 1.7378483835j,
-6.2970191817],
10: [-3.1089162336 + 8.2326994591j, -4.8862195669 + 6.2249854825j,
-5.9675283286 + 4.3849471889j, -6.6152909655 + 2.6115679208j,
-6.9220449054 + 0.8676651955j]
}
for N in range(1, 11):
p1 = np.sort(bond_poles[N])
p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'delay')[1])))
assert_array_almost_equal(p1, p2, decimal=10)
# "Frequency Normalized Bessel Pole Locations"
bond_poles = {
1: [-1.0000000000],
2: [-1.1016013306 + 0.6360098248j],
3: [-1.0474091610 + 0.9992644363j, -1.3226757999],
4: [-0.9952087644 + 1.2571057395j, -1.3700678306 + 0.4102497175j],
5: [-0.9576765486 + 1.4711243207j, -1.3808773259 + 0.7179095876j,
-1.5023162714],
6: [-0.9306565229 + 1.6618632689j, -1.3818580976 + 0.9714718907j,
-1.5714904036 + 0.3208963742j],
7: [-0.9098677806 + 1.8364513530j, -1.3789032168 + 1.1915667778j,
-1.6120387662 + 0.5892445069j, -1.6843681793],
8: [-0.8928697188 + 1.9983258436j, -1.3738412176 + 1.3883565759j,
-1.6369394181 + 0.8227956251j, -1.7574084004 + 0.2728675751j],
9: [-0.8783992762 + 2.1498005243j, -1.3675883098 + 1.5677337122j,
-1.6523964846 + 1.0313895670j, -1.8071705350 + 0.5123837306j,
-1.8566005012],
10: [-0.8657569017 + 2.2926048310j, -1.3606922784 + 1.7335057427j,
-1.6618102414 + 1.2211002186j, -1.8421962445 + 0.7272575978j,
-1.9276196914 + 0.2416234710j]
}
for N in range(1, 11):
p1 = np.sort(bond_poles[N])
p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'mag')[1])))
assert_array_almost_equal(p1, p2, decimal=10)
# Compare to https://www.ranecommercial.com/legacy/note147.html
# "Table 1 - Bessel Crossovers of Second, Third, and Fourth-Order"
a = [1, 1, 1/3]
b2, a2 = bessel(2, 1, norm='delay', analog=True)
assert_allclose(a[::-1], a2/b2)
a = [1, 1, 2/5, 1/15]
b2, a2 = bessel(3, 1, norm='delay', analog=True)
assert_allclose(a[::-1], a2/b2)
a = [1, 1, 9/21, 2/21, 1/105]
b2, a2 = bessel(4, 1, norm='delay', analog=True)
assert_allclose(a[::-1], a2/b2)
a = [1, np.sqrt(3), 1]
b2, a2 = bessel(2, 1, norm='phase', analog=True)
assert_allclose(a[::-1], a2/b2)
# TODO: Why so inaccurate? Is reference flawed?
a = [1, 2.481, 2.463, 1.018]
b2, a2 = bessel(3, 1, norm='phase', analog=True)
assert_array_almost_equal(a[::-1], a2/b2, decimal=1)
# TODO: Why so inaccurate? Is reference flawed?
a = [1, 3.240, 4.5, 3.240, 1.050]
b2, a2 = bessel(4, 1, norm='phase', analog=True)
assert_array_almost_equal(a[::-1], a2/b2, decimal=1)
# Table of -3 dB factors:
N, scale = 2, 1.272
scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
assert_array_almost_equal(scale, scale2, decimal=3)
# TODO: Why so inaccurate? Is reference flawed?
N, scale = 3, 1.413
scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
assert_array_almost_equal(scale, scale2, decimal=2)
# TODO: Why so inaccurate? Is reference flawed?
N, scale = 4, 1.533
scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
assert_array_almost_equal(scale, scale2, decimal=1)
def test_hardcoded(self):
# Compare to values from original hardcoded implementation
originals = {
0: [],
1: [-1],
2: [-.8660254037844386467637229 + .4999999999999999999999996j],
3: [-.9416000265332067855971980,
-.7456403858480766441810907 + .7113666249728352680992154j],
4: [-.6572111716718829545787788 + .8301614350048733772399715j,
-.9047587967882449459642624 + .2709187330038746636700926j],
5: [-.9264420773877602247196260,
-.8515536193688395541722677 + .4427174639443327209850002j,
-.5905759446119191779319432 + .9072067564574549539291747j],
6: [-.9093906830472271808050953 + .1856964396793046769246397j,
-.7996541858328288520243325 + .5621717346937317988594118j,
-.5385526816693109683073792 + .9616876881954277199245657j],
7: [-.9194871556490290014311619,
-.8800029341523374639772340 + .3216652762307739398381830j,
-.7527355434093214462291616 + .6504696305522550699212995j,
-.4966917256672316755024763 + 1.002508508454420401230220j],
8: [-.9096831546652910216327629 + .1412437976671422927888150j,
-.8473250802359334320103023 + .4259017538272934994996429j,
-.7111381808485399250796172 + .7186517314108401705762571j,
-.4621740412532122027072175 + 1.034388681126901058116589j],
9: [-.9154957797499037686769223,
-.8911217017079759323183848 + .2526580934582164192308115j,
-.8148021112269012975514135 + .5085815689631499483745341j,
-.6743622686854761980403401 + .7730546212691183706919682j,
-.4331415561553618854685942 + 1.060073670135929666774323j],
10: [-.9091347320900502436826431 + .1139583137335511169927714j,
-.8688459641284764527921864 + .3430008233766309973110589j,
-.7837694413101441082655890 + .5759147538499947070009852j,
-.6417513866988316136190854 + .8175836167191017226233947j,
-.4083220732868861566219785 + 1.081274842819124562037210j],
11: [-.9129067244518981934637318,
-.8963656705721166099815744 + .2080480375071031919692341j,
-.8453044014712962954184557 + .4178696917801248292797448j,
-.7546938934722303128102142 + .6319150050721846494520941j,
-.6126871554915194054182909 + .8547813893314764631518509j,
-.3868149510055090879155425 + 1.099117466763120928733632j],
12: [-.9084478234140682638817772 + 95506365213450398415258360e-27j,
-.8802534342016826507901575 + .2871779503524226723615457j,
-.8217296939939077285792834 + .4810212115100676440620548j,
-.7276681615395159454547013 + .6792961178764694160048987j,
-.5866369321861477207528215 + .8863772751320727026622149j,
-.3679640085526312839425808 + 1.114373575641546257595657j],
13: [-.9110914665984182781070663,
-.8991314665475196220910718 + .1768342956161043620980863j,
-.8625094198260548711573628 + .3547413731172988997754038j,
-.7987460692470972510394686 + .5350752120696801938272504j,
-.7026234675721275653944062 + .7199611890171304131266374j,
-.5631559842430199266325818 + .9135900338325109684927731j,
-.3512792323389821669401925 + 1.127591548317705678613239j],
14: [-.9077932138396487614720659 + 82196399419401501888968130e-27j,
-.8869506674916445312089167 + .2470079178765333183201435j,
-.8441199160909851197897667 + .4131653825102692595237260j,
-.7766591387063623897344648 + .5819170677377608590492434j,
-.6794256425119233117869491 + .7552857305042033418417492j,
-.5418766775112297376541293 + .9373043683516919569183099j,
-.3363868224902037330610040 + 1.139172297839859991370924j],
15: [-.9097482363849064167228581,
-.9006981694176978324932918 + .1537681197278439351298882j,
-.8731264620834984978337843 + .3082352470564267657715883j,
-.8256631452587146506294553 + .4642348752734325631275134j,
-.7556027168970728127850416 + .6229396358758267198938604j,
-.6579196593110998676999362 + .7862895503722515897065645j,
-.5224954069658330616875186 + .9581787261092526478889345j,
-.3229963059766444287113517 + 1.149416154583629539665297j],
16: [-.9072099595087001356491337 + 72142113041117326028823950e-27j,
-.8911723070323647674780132 + .2167089659900576449410059j,
-.8584264231521330481755780 + .3621697271802065647661080j,
-.8074790293236003885306146 + .5092933751171800179676218j,
-.7356166304713115980927279 + .6591950877860393745845254j,
-.6379502514039066715773828 + .8137453537108761895522580j,
-.5047606444424766743309967 + .9767137477799090692947061j,
-.3108782755645387813283867 + 1.158552841199330479412225j],
17: [-.9087141161336397432860029,
-.9016273850787285964692844 + .1360267995173024591237303j,
-.8801100704438627158492165 + .2725347156478803885651973j,
-.8433414495836129204455491 + .4100759282910021624185986j,
-.7897644147799708220288138 + .5493724405281088674296232j,
-.7166893842372349049842743 + .6914936286393609433305754j,
-.6193710717342144521602448 + .8382497252826992979368621j,
-.4884629337672704194973683 + .9932971956316781632345466j,
-.2998489459990082015466971 + 1.166761272925668786676672j],
18: [-.9067004324162775554189031 + 64279241063930693839360680e-27j,
-.8939764278132455733032155 + .1930374640894758606940586j,
-.8681095503628830078317207 + .3224204925163257604931634j,
-.8281885016242836608829018 + .4529385697815916950149364j,
-.7726285030739558780127746 + .5852778162086640620016316j,
-.6987821445005273020051878 + .7204696509726630531663123j,
-.6020482668090644386627299 + .8602708961893664447167418j,
-.4734268069916151511140032 + 1.008234300314801077034158j,
-.2897592029880489845789953 + 1.174183010600059128532230j],
19: [-.9078934217899404528985092,
-.9021937639390660668922536 + .1219568381872026517578164j,
-.8849290585034385274001112 + .2442590757549818229026280j,
-.8555768765618421591093993 + .3672925896399872304734923j,
-.8131725551578197705476160 + .4915365035562459055630005j,
-.7561260971541629355231897 + .6176483917970178919174173j,
-.6818424412912442033411634 + .7466272357947761283262338j,
-.5858613321217832644813602 + .8801817131014566284786759j,
-.4595043449730988600785456 + 1.021768776912671221830298j,
-.2804866851439370027628724 + 1.180931628453291873626003j],
20: [-.9062570115576771146523497 + 57961780277849516990208850e-27j,
-.8959150941925768608568248 + .1740317175918705058595844j,
-.8749560316673332850673214 + .2905559296567908031706902j,
-.8427907479956670633544106 + .4078917326291934082132821j,
-.7984251191290606875799876 + .5264942388817132427317659j,
-.7402780309646768991232610 + .6469975237605228320268752j,
-.6658120544829934193890626 + .7703721701100763015154510j,
-.5707026806915714094398061 + .8982829066468255593407161j,
-.4465700698205149555701841 + 1.034097702560842962315411j,
-.2719299580251652601727704 + 1.187099379810885886139638j],
21: [-.9072262653142957028884077,
-.9025428073192696303995083 + .1105252572789856480992275j,
-.8883808106664449854431605 + .2213069215084350419975358j,
-.8643915813643204553970169 + .3326258512522187083009453j,
-.8299435470674444100273463 + .4448177739407956609694059j,
-.7840287980408341576100581 + .5583186348022854707564856j,
-.7250839687106612822281339 + .6737426063024382240549898j,
-.6506315378609463397807996 + .7920349342629491368548074j,
-.5564766488918562465935297 + .9148198405846724121600860j,
-.4345168906815271799687308 + 1.045382255856986531461592j,
-.2640041595834031147954813 + 1.192762031948052470183960j],
22: [-.9058702269930872551848625 + 52774908289999045189007100e-27j,
-.8972983138153530955952835 + .1584351912289865608659759j,
-.8799661455640176154025352 + .2644363039201535049656450j,
-.8534754036851687233084587 + .3710389319482319823405321j,
-.8171682088462720394344996 + .4785619492202780899653575j,
-.7700332930556816872932937 + .5874255426351153211965601j,
-.7105305456418785989070935 + .6982266265924524000098548j,
-.6362427683267827226840153 + .8118875040246347267248508j,
-.5430983056306302779658129 + .9299947824439872998916657j,
-.4232528745642628461715044 + 1.055755605227545931204656j,
-.2566376987939318038016012 + 1.197982433555213008346532j],
23: [-.9066732476324988168207439,
-.9027564979912504609412993 + .1010534335314045013252480j,
-.8909283242471251458653994 + .2023024699381223418195228j,
-.8709469395587416239596874 + .3039581993950041588888925j,
-.8423805948021127057054288 + .4062657948237602726779246j,
-.8045561642053176205623187 + .5095305912227258268309528j,
-.7564660146829880581478138 + .6141594859476032127216463j,
-.6965966033912705387505040 + .7207341374753046970247055j,
-.6225903228771341778273152 + .8301558302812980678845563j,
-.5304922463810191698502226 + .9439760364018300083750242j,
-.4126986617510148836149955 + 1.065328794475513585531053j,
-.2497697202208956030229911 + 1.202813187870697831365338j],
24: [-.9055312363372773709269407 + 48440066540478700874836350e-27j,
-.8983105104397872954053307 + .1454056133873610120105857j,
-.8837358034555706623131950 + .2426335234401383076544239j,
-.8615278304016353651120610 + .3403202112618624773397257j,
-.8312326466813240652679563 + .4386985933597305434577492j,
-.7921695462343492518845446 + .5380628490968016700338001j,
-.7433392285088529449175873 + .6388084216222567930378296j,
-.6832565803536521302816011 + .7415032695091650806797753j,
-.6096221567378335562589532 + .8470292433077202380020454j,
-.5185914574820317343536707 + .9569048385259054576937721j,
-.4027853855197518014786978 + 1.074195196518674765143729j,
-.2433481337524869675825448 + 1.207298683731972524975429j],
25: [-.9062073871811708652496104,
-.9028833390228020537142561 + 93077131185102967450643820e-27j,
-.8928551459883548836774529 + .1863068969804300712287138j,
-.8759497989677857803656239 + .2798521321771408719327250j,
-.8518616886554019782346493 + .3738977875907595009446142j,
-.8201226043936880253962552 + .4686668574656966589020580j,
-.7800496278186497225905443 + .5644441210349710332887354j,
-.7306549271849967721596735 + .6616149647357748681460822j,
-.6704827128029559528610523 + .7607348858167839877987008j,
-.5972898661335557242320528 + .8626676330388028512598538j,
-.5073362861078468845461362 + .9689006305344868494672405j,
-.3934529878191079606023847 + 1.082433927173831581956863j,
-.2373280669322028974199184 + 1.211476658382565356579418j],
}
for N in originals:
p1 = sorted(np.union1d(originals[N],
np.conj(originals[N])), key=np.imag)
p2 = sorted(besselap(N)[1], key=np.imag)
assert_allclose(p1, p2, rtol=1e-14)
def test_norm_phase(self):
# Test some orders and frequencies and see that they have the right
# phase at w0
for N in (1, 2, 3, 4, 5, 51, 72):
for w0 in (1, 100):
b, a = bessel(N, w0, analog=True, norm='phase')
w = np.linspace(0, w0, 100)
w, h = freqs(b, a, w)
phase = np.unwrap(np.angle(h))
assert_allclose(phase[[0, -1]], (0, -N*pi/4), rtol=1e-1)
def test_norm_mag(self):
# Test some orders and frequencies and see that they have the right
# mag at w0
for N in (1, 2, 3, 4, 5, 51, 72):
for w0 in (1, 100):
b, a = bessel(N, w0, analog=True, norm='mag')
w = (0, w0)
w, h = freqs(b, a, w)
mag = abs(h)
assert_allclose(mag, (1, 1/np.sqrt(2)))
def test_norm_delay(self):
# Test some orders and frequencies and see that they have the right
# delay at DC
for N in (1, 2, 3, 4, 5, 51, 72):
for w0 in (1, 100):
b, a = bessel(N, w0, analog=True, norm='delay')
w = np.linspace(0, 10*w0, 1000)
w, h = freqs(b, a, w)
delay = -np.diff(np.unwrap(np.angle(h)))/np.diff(w)
assert_allclose(delay[0], 1/w0, rtol=1e-4)
def test_norm_factor(self):
mpmath_values = {
1: 1, 2: 1.361654128716130520, 3: 1.755672368681210649,
4: 2.113917674904215843, 5: 2.427410702152628137,
6: 2.703395061202921876, 7: 2.951722147038722771,
8: 3.179617237510651330, 9: 3.391693138911660101,
10: 3.590980594569163482, 11: 3.779607416439620092,
12: 3.959150821144285315, 13: 4.130825499383535980,
14: 4.295593409533637564, 15: 4.454233021624377494,
16: 4.607385465472647917, 17: 4.755586548961147727,
18: 4.899289677284488007, 19: 5.038882681488207605,
20: 5.174700441742707423, 21: 5.307034531360917274,
22: 5.436140703250035999, 23: 5.562244783787878196,
24: 5.685547371295963521, 25: 5.806227623775418541,
50: 8.268963160013226298, 51: 8.352374541546012058,
}
for N in mpmath_values:
z, p, k = besselap(N, 'delay')
assert_allclose(mpmath_values[N], _norm_factor(p, k), rtol=1e-13)
def test_bessel_poly(self):
assert_array_equal(_bessel_poly(5), [945, 945, 420, 105, 15, 1])
assert_array_equal(_bessel_poly(4, True), [1, 10, 45, 105, 105])
def test_bessel_zeros(self):
assert_array_equal(_bessel_zeros(0), [])
def test_invalid(self):
assert_raises(ValueError, besselap, 5, 'nonsense')
assert_raises(ValueError, besselap, -5)
assert_raises(ValueError, besselap, 3.2)
assert_raises(ValueError, _bessel_poly, -3)
assert_raises(ValueError, _bessel_poly, 3.3)
def test_fs_param(self):
for norm in ('phase', 'mag', 'delay'):
for fs in (900, 900.1, 1234.567):
for N in (0, 1, 2, 3, 10):
for fc in (100, 100.1, 432.12345):
for btype in ('lp', 'hp'):
ba1 = bessel(N, fc, btype, fs=fs)
ba2 = bessel(N, fc/(fs/2), btype)
assert_allclose(ba1, ba2)
for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
for btype in ('bp', 'bs'):
ba1 = bessel(N, fc, btype, fs=fs)
for seq in (list, tuple, array):
fcnorm = seq([f/(fs/2) for f in fc])
ba2 = bessel(N, fcnorm, btype)
assert_allclose(ba1, ba2)
class TestButter:
    """Tests for Butterworth filter design (`butter`). High-order reference
    poles/gains are hardcoded values from a prior trusted run."""
    def test_degenerate(self):
        """Orders 0 and 1 are special cases: passthrough and one real pole."""
        # 0-order filter is just a passthrough
        b, a = butter(0, 1, analog=True)
        assert_array_equal(b, [1])
        assert_array_equal(a, [1])
        # 1-order filter is same for all types
        b, a = butter(1, 1, analog=True)
        assert_array_almost_equal(b, [1])
        assert_array_almost_equal(a, [1, 1])
        z, p, k = butter(1, 0.3, output='zpk')
        assert_array_equal(z, [-1])
        assert_allclose(p, [3.249196962329063e-01], rtol=1e-14)
        assert_allclose(k, 3.375401518835469e-01, rtol=1e-14)
    def test_basic(self):
        """Structural properties (pole placement, stability) plus coefficient
        comparisons for several representative designs."""
        # analog s-plane
        for N in range(25):
            wn = 0.01
            z, p, k = butter(N, wn, 'low', analog=True, output='zpk')
            assert_array_almost_equal([], z)
            assert_(len(p) == N)
            # All poles should be at distance wn from origin
            assert_array_almost_equal(wn, abs(p))
            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane
            assert_array_almost_equal(wn**N, k)
        # digital z-plane
        for N in range(25):
            wn = 0.01
            z, p, k = butter(N, wn, 'high', analog=False, output='zpk')
            assert_array_equal(np.ones(N), z)  # All zeros exactly at DC
            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle
        b1, a1 = butter(2, 1, analog=True)
        assert_array_almost_equal(b1, [1])
        assert_array_almost_equal(a1, [1, np.sqrt(2), 1])
        b2, a2 = butter(5, 1, analog=True)
        assert_array_almost_equal(b2, [1])
        assert_array_almost_equal(a2, [1, 3.2361, 5.2361,
                                       5.2361, 3.2361, 1], decimal=4)
        b3, a3 = butter(10, 1, analog=True)
        assert_array_almost_equal(b3, [1])
        assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824,
                                       74.2334, 64.8824, 42.8021, 20.4317,
                                       6.3925, 1], decimal=4)
        b2, a2 = butter(19, 1.0441379169150726, analog=True)
        assert_array_almost_equal(b2, [2.2720], decimal=4)
        assert_array_almost_equal(a2, 1.0e+004 * np.array([
                        0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570,
                        0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044,
                        1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153,
                        0.0026, 0.0002]), decimal=0)
        b, a = butter(5, 0.4)
        assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194,
                                      0.2194, 0.1097, 0.0219], decimal=4)
        assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738,
                                      -0.3864, 0.1112, -0.0113], decimal=4)
    def test_highpass(self):
        """High-order digital highpass designs against hardcoded references."""
        # highpass, high even order
        z, p, k = butter(28, 0.43, 'high', output='zpk')
        z2 = np.ones(28)
        p2 = [
            2.068257195514592e-01 + 9.238294351481734e-01j,
            2.068257195514592e-01 - 9.238294351481734e-01j,
            1.874933103892023e-01 + 8.269455076775277e-01j,
            1.874933103892023e-01 - 8.269455076775277e-01j,
            1.717435567330153e-01 + 7.383078571194629e-01j,
            1.717435567330153e-01 - 7.383078571194629e-01j,
            1.588266870755982e-01 + 6.564623730651094e-01j,
            1.588266870755982e-01 - 6.564623730651094e-01j,
            1.481881532502603e-01 + 5.802343458081779e-01j,
            1.481881532502603e-01 - 5.802343458081779e-01j,
            1.394122576319697e-01 + 5.086609000582009e-01j,
            1.394122576319697e-01 - 5.086609000582009e-01j,
            1.321840881809715e-01 + 4.409411734716436e-01j,
            1.321840881809715e-01 - 4.409411734716436e-01j,
            1.262633413354405e-01 + 3.763990035551881e-01j,
            1.262633413354405e-01 - 3.763990035551881e-01j,
            1.214660449478046e-01 + 3.144545234797277e-01j,
            1.214660449478046e-01 - 3.144545234797277e-01j,
            1.104868766650320e-01 + 2.771505404367791e-02j,
            1.104868766650320e-01 - 2.771505404367791e-02j,
            1.111768629525075e-01 + 8.331369153155753e-02j,
            1.111768629525075e-01 - 8.331369153155753e-02j,
            1.125740630842972e-01 + 1.394219509611784e-01j,
            1.125740630842972e-01 - 1.394219509611784e-01j,
            1.147138487992747e-01 + 1.963932363793666e-01j,
            1.147138487992747e-01 - 1.963932363793666e-01j,
            1.176516491045901e-01 + 2.546021573417188e-01j,
            1.176516491045901e-01 - 2.546021573417188e-01j,
            ]
        k2 = 1.446671081817286e-06
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-7)
        assert_allclose(k, k2, rtol=1e-10)
        # highpass, high odd order
        z, p, k = butter(27, 0.56, 'high', output='zpk')
        z2 = np.ones(27)
        p2 = [
            -1.772572785680147e-01 + 9.276431102995948e-01j,
            -1.772572785680147e-01 - 9.276431102995948e-01j,
            -1.600766565322114e-01 + 8.264026279893268e-01j,
            -1.600766565322114e-01 - 8.264026279893268e-01j,
            -1.461948419016121e-01 + 7.341841939120078e-01j,
            -1.461948419016121e-01 - 7.341841939120078e-01j,
            -1.348975284762046e-01 + 6.493235066053785e-01j,
            -1.348975284762046e-01 - 6.493235066053785e-01j,
            -1.256628210712206e-01 + 5.704921366889227e-01j,
            -1.256628210712206e-01 - 5.704921366889227e-01j,
            -1.181038235962314e-01 + 4.966120551231630e-01j,
            -1.181038235962314e-01 - 4.966120551231630e-01j,
            -1.119304913239356e-01 + 4.267938916403775e-01j,
            -1.119304913239356e-01 - 4.267938916403775e-01j,
            -1.069237739782691e-01 + 3.602914879527338e-01j,
            -1.069237739782691e-01 - 3.602914879527338e-01j,
            -1.029178030691416e-01 + 2.964677964142126e-01j,
            -1.029178030691416e-01 - 2.964677964142126e-01j,
            -9.978747500816100e-02 + 2.347687643085738e-01j,
            -9.978747500816100e-02 - 2.347687643085738e-01j,
            -9.743974496324025e-02 + 1.747028739092479e-01j,
            -9.743974496324025e-02 - 1.747028739092479e-01j,
            -9.580754551625957e-02 + 1.158246860771989e-01j,
            -9.580754551625957e-02 - 1.158246860771989e-01j,
            -9.484562207782568e-02 + 5.772118357151691e-02j,
            -9.484562207782568e-02 - 5.772118357151691e-02j,
            -9.452783117928215e-02
            ]
        k2 = 9.585686688851069e-09
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-8)
        assert_allclose(k, k2)
    def test_bandpass(self):
        """Digital and analog bandpass designs against hardcoded references."""
        z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk')
        z2 = [1, 1, 1, 1, 1, 1, 1, 1,
              -1, -1, -1, -1, -1, -1, -1, -1]
        p2 = [
            4.979909925436156e-01 + 8.367609424799387e-01j,
            4.979909925436156e-01 - 8.367609424799387e-01j,
            4.913338722555539e-01 + 7.866774509868817e-01j,
            4.913338722555539e-01 - 7.866774509868817e-01j,
            5.035229361778706e-01 + 7.401147376726750e-01j,
            5.035229361778706e-01 - 7.401147376726750e-01j,
            5.307617160406101e-01 + 7.029184459442954e-01j,
            5.307617160406101e-01 - 7.029184459442954e-01j,
            5.680556159453138e-01 + 6.788228792952775e-01j,
            5.680556159453138e-01 - 6.788228792952775e-01j,
            6.100962560818854e-01 + 6.693849403338664e-01j,
            6.100962560818854e-01 - 6.693849403338664e-01j,
            6.904694312740631e-01 + 6.930501690145245e-01j,
            6.904694312740631e-01 - 6.930501690145245e-01j,
            6.521767004237027e-01 + 6.744414640183752e-01j,
            6.521767004237027e-01 - 6.744414640183752e-01j,
            ]
        k2 = 3.398854055800844e-08
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-13)
        # bandpass analog
        z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk')
        z2 = np.zeros(4)
        p2 = [
            -4.179137760733086e+00 + 1.095935899082837e+02j,
            -4.179137760733086e+00 - 1.095935899082837e+02j,
            -9.593598668443835e+00 + 1.034745398029734e+02j,
            -9.593598668443835e+00 - 1.034745398029734e+02j,
            -8.883991981781929e+00 + 9.582087115567160e+01j,
            -8.883991981781929e+00 - 9.582087115567160e+01j,
            -3.474530886568715e+00 + 9.111599925805801e+01j,
            -3.474530886568715e+00 - 9.111599925805801e+01j,
            ]
        k2 = 1.600000000000001e+05
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
        assert_allclose(k, k2, rtol=1e-15)
    def test_bandstop(self):
        """Digital bandstop design against hardcoded reference zpk values."""
        z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk')
        z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j]
        p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j,
              -1.766850742887729e-01 - 9.466951258673900e-01j,
               1.467897662432886e-01 + 9.515917126462422e-01j,
               1.467897662432886e-01 - 9.515917126462422e-01j,
              -1.370083529426906e-01 + 8.880376681273993e-01j,
              -1.370083529426906e-01 - 8.880376681273993e-01j,
               1.086774544701390e-01 + 8.915240810704319e-01j,
               1.086774544701390e-01 - 8.915240810704319e-01j,
              -7.982704457700891e-02 + 8.506056315273435e-01j,
              -7.982704457700891e-02 - 8.506056315273435e-01j,
               5.238812787110331e-02 + 8.524011102699969e-01j,
               5.238812787110331e-02 - 8.524011102699969e-01j,
              -1.357545000491310e-02 + 8.382287744986582e-01j,
              -1.357545000491310e-02 - 8.382287744986582e-01j]
        k2 = 4.577122512960063e-01
        assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag))
        assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
        assert_allclose(k, k2, rtol=1e-14)
    def test_ba_output(self):
        """Transfer-function ('ba') output of an analog bandpass design."""
        b, a = butter(4, [100, 300], 'bandpass', analog=True)
        b2 = [1.6e+09, 0, 0, 0, 0]
        a2 = [1.000000000000000e+00, 5.226251859505511e+02,
              2.565685424949238e+05, 6.794127417357160e+07,
              1.519411254969542e+10, 2.038238225207147e+12,
              2.309116882454312e+14, 1.411088002066486e+16,
              8.099999999999991e+17]
        assert_allclose(b, b2, rtol=1e-14)
        assert_allclose(a, a2, rtol=1e-14)
    def test_fs_param(self):
        """Designs specified in Hz via fs match equivalent normalized-
        frequency designs for all band types and sequence types."""
        for fs in (900, 900.1, 1234.567):
            for N in (0, 1, 2, 3, 10):
                for fc in (100, 100.1, 432.12345):
                    for btype in ('lp', 'hp'):
                        ba1 = butter(N, fc, btype, fs=fs)
                        ba2 = butter(N, fc/(fs/2), btype)
                        assert_allclose(ba1, ba2)
                for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
                    for btype in ('bp', 'bs'):
                        ba1 = butter(N, fc, btype, fs=fs)
                        for seq in (list, tuple, array):
                            fcnorm = seq([f/(fs/2) for f in fc])
                            ba2 = butter(N, fcnorm, btype)
                            assert_allclose(ba1, ba2)
class TestCheby1:
def test_degenerate(self):
# 0-order filter is just a passthrough
# Even-order filters have DC gain of -rp dB
b, a = cheby1(0, 10*np.log10(2), 1, analog=True)
assert_array_almost_equal(b, [1/np.sqrt(2)])
assert_array_equal(a, [1])
# 1-order filter is same for all types
b, a = cheby1(1, 10*np.log10(2), 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = cheby1(1, 0.1, 0.3, output='zpk')
assert_array_equal(z, [-1])
assert_allclose(p, [-5.390126972799615e-01], rtol=1e-14)
assert_allclose(k, 7.695063486399808e-01, rtol=1e-14)
def test_basic(self):
for N in range(25):
wn = 0.01
z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk')
assert_array_almost_equal([], z)
assert_(len(p) == N)
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
for N in range(25):
wn = 0.01
z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk')
assert_array_equal(np.ones(N), z) # All zeros exactly at DC
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
# Same test as TestNormalize
b, a = cheby1(8, 0.5, 0.048)
assert_array_almost_equal(b, [
2.150733144728282e-11, 1.720586515782626e-10,
6.022052805239190e-10, 1.204410561047838e-09,
1.505513201309798e-09, 1.204410561047838e-09,
6.022052805239190e-10, 1.720586515782626e-10,
2.150733144728282e-11], decimal=14)
assert_array_almost_equal(a, [
1.000000000000000e+00, -7.782402035027959e+00,
2.654354569747454e+01, -5.182182531666387e+01,
6.334127355102684e+01, -4.963358186631157e+01,
2.434862182949389e+01, -6.836925348604676e+00,
8.412934944449140e-01], decimal=14)
b, a = cheby1(4, 1, [0.4, 0.7], btype='band')
assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0,
-0.0335, 0, 0.0084], decimal=4)
assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137,
1.8653, 1.8982, 0.5676, 0.4103],
decimal=4)
b2, a2 = cheby1(5, 3, 1, analog=True)
assert_array_almost_equal(b2, [0.0626], decimal=4)
assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080,
0.0626], decimal=4)
b, a = cheby1(8, 0.5, 0.1)
assert_array_almost_equal(b, 1.0e-006 * np.array([
0.00703924326028, 0.05631394608227, 0.19709881128793,
0.39419762257586, 0.49274702821983, 0.39419762257586,
0.19709881128793, 0.05631394608227, 0.00703924326028]),
decimal=13)
assert_array_almost_equal(a, [
1.00000000000000, -7.44912258934158, 24.46749067762108,
-46.27560200466141, 55.11160187999928, -42.31640010161038,
20.45543300484147, -5.69110270561444, 0.69770374759022],
decimal=13)
b, a = cheby1(8, 0.5, 0.25)
assert_array_almost_equal(b, 1.0e-003 * np.array([
0.00895261138923, 0.07162089111382, 0.25067311889837,
0.50134623779673, 0.62668279724591, 0.50134623779673,
0.25067311889837, 0.07162089111382, 0.00895261138923]),
decimal=13)
assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545,
16.58122329202101, -27.71423273542923,
30.39509758355313, -22.34729670426879,
10.74509800434910, -3.08924633697497,
0.40707685889802], decimal=13)
    def test_highpass(self):
        """Check cheby1 high-pass zpk output against precomputed references.

        Covers an even-order digital design, an odd-order digital design,
        and an analog design.  Poles are compared after sorting by imaginary
        part since pole ordering is not part of the contract.
        """
        # high even order
        z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk')
        # All 24 zeros of a digital high-pass sit at z = 1 (DC).
        z2 = np.ones(24)
        p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j,
              -6.136558509657073e-01 - 2.700091504942893e-01j,
              -3.303348340927516e-01 + 6.659400861114254e-01j,
              -3.303348340927516e-01 - 6.659400861114254e-01j,
              8.779713780557169e-03 + 8.223108447483040e-01j,
              8.779713780557169e-03 - 8.223108447483040e-01j,
              2.742361123006911e-01 + 8.356666951611864e-01j,
              2.742361123006911e-01 - 8.356666951611864e-01j,
              4.562984557158206e-01 + 7.954276912303594e-01j,
              4.562984557158206e-01 - 7.954276912303594e-01j,
              5.777335494123628e-01 + 7.435821817961783e-01j,
              5.777335494123628e-01 - 7.435821817961783e-01j,
              6.593260977749194e-01 + 6.955390907990932e-01j,
              6.593260977749194e-01 - 6.955390907990932e-01j,
              7.149590948466562e-01 + 6.559437858502012e-01j,
              7.149590948466562e-01 - 6.559437858502012e-01j,
              7.532432388188739e-01 + 6.256158042292060e-01j,
              7.532432388188739e-01 - 6.256158042292060e-01j,
              7.794365244268271e-01 + 6.042099234813333e-01j,
              7.794365244268271e-01 - 6.042099234813333e-01j,
              7.967253874772997e-01 + 5.911966597313203e-01j,
              7.967253874772997e-01 - 5.911966597313203e-01j,
              8.069756417293870e-01 + 5.862214589217275e-01j,
              8.069756417293870e-01 - 5.862214589217275e-01j]
        k2 = 6.190427617192018e-04
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-10)
        assert_allclose(k, k2, rtol=1e-10)

        # high odd order
        z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk')
        z2 = np.ones(23)
        p2 = [-7.676400532011010e-01,
              -6.754621070166477e-01 + 3.970502605619561e-01j,
              -6.754621070166477e-01 - 3.970502605619561e-01j,
              -4.528880018446727e-01 + 6.844061483786332e-01j,
              -4.528880018446727e-01 - 6.844061483786332e-01j,
              -1.986009130216447e-01 + 8.382285942941594e-01j,
              -1.986009130216447e-01 - 8.382285942941594e-01j,
              2.504673931532608e-02 + 8.958137635794080e-01j,
              2.504673931532608e-02 - 8.958137635794080e-01j,
              2.001089429976469e-01 + 9.010678290791480e-01j,
              2.001089429976469e-01 - 9.010678290791480e-01j,
              3.302410157191755e-01 + 8.835444665962544e-01j,
              3.302410157191755e-01 - 8.835444665962544e-01j,
              4.246662537333661e-01 + 8.594054226449009e-01j,
              4.246662537333661e-01 - 8.594054226449009e-01j,
              4.919620928120296e-01 + 8.366772762965786e-01j,
              4.919620928120296e-01 - 8.366772762965786e-01j,
              5.385746917494749e-01 + 8.191616180796720e-01j,
              5.385746917494749e-01 - 8.191616180796720e-01j,
              5.855636993537203e-01 + 8.060680937701062e-01j,
              5.855636993537203e-01 - 8.060680937701062e-01j,
              5.688812849391721e-01 + 8.086497795114683e-01j,
              5.688812849391721e-01 - 8.086497795114683e-01j]
        k2 = 1.941697029206324e-05
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-10)
        assert_allclose(k, k2, rtol=1e-10)

        # Analog high-pass: all zeros at s = 0 instead of z = 1.
        z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk')
        z2 = np.zeros(10)
        p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j,
              -3.144743169501551e+03 - 3.511680029092744e+03j,
              -5.633065604514602e+02 + 2.023615191183945e+03j,
              -5.633065604514602e+02 - 2.023615191183945e+03j,
              -1.946412183352025e+02 + 1.372309454274755e+03j,
              -1.946412183352025e+02 - 1.372309454274755e+03j,
              -7.987162953085479e+01 + 1.105207708045358e+03j,
              -7.987162953085479e+01 - 1.105207708045358e+03j,
              -2.250315039031946e+01 + 1.001723931471477e+03j,
              -2.250315039031946e+01 - 1.001723931471477e+03j]
        k2 = 8.912509381337453e-01
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-15)
    def test_bandpass(self):
        """Check cheby1 band-pass zpk output against precomputed references."""
        z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk')
        # Band-pass of order 8 doubles to 16 zeros: half at DC, half at Nyquist.
        z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1]
        p2 = [3.077784854851463e-01 + 9.453307017592942e-01j,
              3.077784854851463e-01 - 9.453307017592942e-01j,
              3.280567400654425e-01 + 9.272377218689016e-01j,
              3.280567400654425e-01 - 9.272377218689016e-01j,
              3.677912763284301e-01 + 9.038008865279966e-01j,
              3.677912763284301e-01 - 9.038008865279966e-01j,
              4.194425632520948e-01 + 8.769407159656157e-01j,
              4.194425632520948e-01 - 8.769407159656157e-01j,
              4.740921994669189e-01 + 8.496508528630974e-01j,
              4.740921994669189e-01 - 8.496508528630974e-01j,
              5.234866481897429e-01 + 8.259608422808477e-01j,
              5.234866481897429e-01 - 8.259608422808477e-01j,
              5.844717632289875e-01 + 8.052901363500210e-01j,
              5.844717632289875e-01 - 8.052901363500210e-01j,
              5.615189063336070e-01 + 8.100667803850766e-01j,
              5.615189063336070e-01 - 8.100667803850766e-01j]
        k2 = 5.007028718074307e-09
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-13)
    def test_bandstop(self):
        """Check cheby1 band-stop zpk output against precomputed references."""
        z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk')
        # All 14 zeros sit at the same conjugate pair inside the stop band.
        z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j]
        p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j,
              -8.942974551472813e-02 - 3.482480481185926e-01j,
              1.293775154041798e-01 + 8.753499858081858e-01j,
              1.293775154041798e-01 - 8.753499858081858e-01j,
              3.399741945062013e-02 + 9.690316022705607e-01j,
              3.399741945062013e-02 - 9.690316022705607e-01j,
              4.167225522796539e-04 + 9.927338161087488e-01j,
              4.167225522796539e-04 - 9.927338161087488e-01j,
              -3.912966549550960e-01 + 8.046122859255742e-01j,
              -3.912966549550960e-01 - 8.046122859255742e-01j,
              -3.307805547127368e-01 + 9.133455018206508e-01j,
              -3.307805547127368e-01 - 9.133455018206508e-01j,
              -3.072658345097743e-01 + 9.443589759799366e-01j,
              -3.072658345097743e-01 - 9.443589759799366e-01j]
        k2 = 3.619438310405028e-01
        assert_allclose(sorted(z, key=np.imag),
                        sorted(z2, key=np.imag), rtol=1e-13)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-15)
    def test_ba_output(self):
        """Check cheby1 analog band-stop transfer-function (b, a) output."""
        # with transfer function conversion, without digital conversion
        b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True)
        b2 = [1.000000000000006e+00, 0,
              3.255000000000020e+05, 0,
              4.238010000000026e+10, 0,
              2.758944510000017e+15, 0,
              8.980364380050052e+19, 0,
              1.169243442282517e+24
              ]
        a2 = [1.000000000000000e+00, 4.630555945694342e+02,
              4.039266454794788e+05, 1.338060988610237e+08,
              5.844333551294591e+10, 1.357346371637638e+13,
              3.804661141892782e+15, 5.670715850340080e+17,
              1.114411200988328e+20, 8.316815934908471e+21,
              1.169243442282517e+24
              ]
        assert_allclose(b, b2, rtol=1e-14)
        assert_allclose(a, a2, rtol=1e-14)
def test_fs_param(self):
for fs in (900, 900.1, 1234.567):
for N in (0, 1, 2, 3, 10):
for fc in (100, 100.1, 432.12345):
for btype in ('lp', 'hp'):
ba1 = cheby1(N, 1, fc, btype, fs=fs)
ba2 = cheby1(N, 1, fc/(fs/2), btype)
assert_allclose(ba1, ba2)
for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
for btype in ('bp', 'bs'):
ba1 = cheby1(N, 1, fc, btype, fs=fs)
for seq in (list, tuple, array):
fcnorm = seq([f/(fs/2) for f in fc])
ba2 = cheby1(N, 1, fcnorm, btype)
assert_allclose(ba1, ba2)
class TestCheby2:
    """Tests for the Chebyshev type II (inverse Chebyshev) design function."""

    def test_degenerate(self):
        """Orders 0 and 1 reduce to trivial/first-order filters."""
        # 0-order filter is just a passthrough
        # Stopband ripple factor doesn't matter
        b, a = cheby2(0, 123.456, 1, analog=True)
        assert_array_equal(b, [1])
        assert_array_equal(a, [1])

        # 1-order filter is same for all types
        b, a = cheby2(1, 10*np.log10(2), 1, analog=True)
        assert_array_almost_equal(b, [1])
        assert_array_almost_equal(a, [1, 1])

        z, p, k = cheby2(1, 50, 0.3, output='zpk')
        assert_array_equal(z, [-1])
        assert_allclose(p, [9.967826460175649e-01], rtol=1e-14)
        assert_allclose(k, 1.608676991217512e-03, rtol=1e-14)

    def test_basic(self):
        """Stability checks over a range of orders plus one (b, a) reference."""
        for N in range(25):
            wn = 0.01
            z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk')
            assert_(len(p) == N)
            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane

        for N in range(25):
            wn = 0.01
            z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk')
            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle

        B, A = cheby2(18, 100, 0.5)
        assert_array_almost_equal(B, [
            0.00167583914216, 0.01249479541868, 0.05282702120282,
            0.15939804265706, 0.37690207631117, 0.73227013789108,
            1.20191856962356, 1.69522872823393, 2.07598674519837,
            2.21972389625291, 2.07598674519838, 1.69522872823395,
            1.20191856962359, 0.73227013789110, 0.37690207631118,
            0.15939804265707, 0.05282702120282, 0.01249479541868,
            0.00167583914216], decimal=13)
        assert_array_almost_equal(A, [
            1.00000000000000, -0.27631970006174, 3.19751214254060,
            -0.15685969461355, 4.13926117356269, 0.60689917820044,
            2.95082770636540, 0.89016501910416, 1.32135245849798,
            0.51502467236824, 0.38906643866660, 0.15367372690642,
            0.07255803834919, 0.02422454070134, 0.00756108751837,
            0.00179848550988, 0.00033713574499, 0.00004258794833,
            0.00000281030149], decimal=13)

    def test_highpass(self):
        """Check cheby2 high-pass zpk output against precomputed references."""
        # high even order
        z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk')
        z2 = [9.981088955489852e-01 + 6.147058341984388e-02j,
              9.981088955489852e-01 - 6.147058341984388e-02j,
              9.832702870387426e-01 + 1.821525257215483e-01j,
              9.832702870387426e-01 - 1.821525257215483e-01j,
              9.550760158089112e-01 + 2.963609353922882e-01j,
              9.550760158089112e-01 - 2.963609353922882e-01j,
              9.162054748821922e-01 + 4.007087817803773e-01j,
              9.162054748821922e-01 - 4.007087817803773e-01j,
              8.700619897368064e-01 + 4.929423232136168e-01j,
              8.700619897368064e-01 - 4.929423232136168e-01j,
              5.889791753434985e-01 + 8.081482110427953e-01j,
              5.889791753434985e-01 - 8.081482110427953e-01j,
              5.984900456570295e-01 + 8.011302423760501e-01j,
              5.984900456570295e-01 - 8.011302423760501e-01j,
              6.172880888914629e-01 + 7.867371958365343e-01j,
              6.172880888914629e-01 - 7.867371958365343e-01j,
              6.448899971038180e-01 + 7.642754030030161e-01j,
              6.448899971038180e-01 - 7.642754030030161e-01j,
              6.804845629637927e-01 + 7.327624168637228e-01j,
              6.804845629637927e-01 - 7.327624168637228e-01j,
              8.202619107108660e-01 + 5.719881098737678e-01j,
              8.202619107108660e-01 - 5.719881098737678e-01j,
              7.228410452536148e-01 + 6.910143437705678e-01j,
              7.228410452536148e-01 - 6.910143437705678e-01j,
              7.702121399578629e-01 + 6.377877856007792e-01j,
              7.702121399578629e-01 - 6.377877856007792e-01j]
        p2 = [7.365546198286450e-01 + 4.842085129329526e-02j,
              7.365546198286450e-01 - 4.842085129329526e-02j,
              7.292038510962885e-01 + 1.442201672097581e-01j,
              7.292038510962885e-01 - 1.442201672097581e-01j,
              7.151293788040354e-01 + 2.369925800458584e-01j,
              7.151293788040354e-01 - 2.369925800458584e-01j,
              6.955051820787286e-01 + 3.250341363856910e-01j,
              6.955051820787286e-01 - 3.250341363856910e-01j,
              6.719122956045220e-01 + 4.070475750638047e-01j,
              6.719122956045220e-01 - 4.070475750638047e-01j,
              6.461722130611300e-01 + 4.821965916689270e-01j,
              6.461722130611300e-01 - 4.821965916689270e-01j,
              5.528045062872224e-01 + 8.162920513838372e-01j,
              5.528045062872224e-01 - 8.162920513838372e-01j,
              5.464847782492791e-01 + 7.869899955967304e-01j,
              5.464847782492791e-01 - 7.869899955967304e-01j,
              5.488033111260949e-01 + 7.520442354055579e-01j,
              5.488033111260949e-01 - 7.520442354055579e-01j,
              6.201874719022955e-01 + 5.500894392527353e-01j,
              6.201874719022955e-01 - 5.500894392527353e-01j,
              5.586478152536709e-01 + 7.112676877332921e-01j,
              5.586478152536709e-01 - 7.112676877332921e-01j,
              5.958145844148228e-01 + 6.107074340842115e-01j,
              5.958145844148228e-01 - 6.107074340842115e-01j,
              5.747812938519067e-01 + 6.643001536914696e-01j,
              5.747812938519067e-01 - 6.643001536914696e-01j]
        k2 = 9.932997786497189e-02
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-13)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-12)
        assert_allclose(k, k2, rtol=1e-11)

        # high odd order
        z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk')
        z2 = [9.690690376586687e-01 + 2.467897896011971e-01j,
              9.690690376586687e-01 - 2.467897896011971e-01j,
              9.999999999999492e-01,
              8.835111277191199e-01 + 4.684101698261429e-01j,
              8.835111277191199e-01 - 4.684101698261429e-01j,
              7.613142857900539e-01 + 6.483830335935022e-01j,
              7.613142857900539e-01 - 6.483830335935022e-01j,
              6.232625173626231e-01 + 7.820126817709752e-01j,
              6.232625173626231e-01 - 7.820126817709752e-01j,
              4.864456563413621e-01 + 8.737108351316745e-01j,
              4.864456563413621e-01 - 8.737108351316745e-01j,
              3.618368136816749e-01 + 9.322414495530347e-01j,
              3.618368136816749e-01 - 9.322414495530347e-01j,
              2.549486883466794e-01 + 9.669545833752675e-01j,
              2.549486883466794e-01 - 9.669545833752675e-01j,
              1.676175432109457e-01 + 9.858520980390212e-01j,
              1.676175432109457e-01 - 9.858520980390212e-01j,
              1.975218468277521e-03 + 9.999980492540941e-01j,
              1.975218468277521e-03 - 9.999980492540941e-01j,
              1.786959496651858e-02 + 9.998403260399917e-01j,
              1.786959496651858e-02 - 9.998403260399917e-01j,
              9.967933660557139e-02 + 9.950196127985684e-01j,
              9.967933660557139e-02 - 9.950196127985684e-01j,
              5.013970951219547e-02 + 9.987422137518890e-01j,
              5.013970951219547e-02 - 9.987422137518890e-01j]
        p2 = [4.218866331906864e-01,
              4.120110200127552e-01 + 1.361290593621978e-01j,
              4.120110200127552e-01 - 1.361290593621978e-01j,
              3.835890113632530e-01 + 2.664910809911026e-01j,
              3.835890113632530e-01 - 2.664910809911026e-01j,
              3.399195570456499e-01 + 3.863983538639875e-01j,
              3.399195570456499e-01 - 3.863983538639875e-01j,
              2.855977834508353e-01 + 4.929444399540688e-01j,
              2.855977834508353e-01 - 4.929444399540688e-01j,
              2.255765441339322e-01 + 5.851631870205766e-01j,
              2.255765441339322e-01 - 5.851631870205766e-01j,
              1.644087535815792e-01 + 6.637356937277153e-01j,
              1.644087535815792e-01 - 6.637356937277153e-01j,
              -7.293633845273095e-02 + 9.739218252516307e-01j,
              -7.293633845273095e-02 - 9.739218252516307e-01j,
              1.058259206358626e-01 + 7.304739464862978e-01j,
              1.058259206358626e-01 - 7.304739464862978e-01j,
              -5.703971947785402e-02 + 9.291057542169088e-01j,
              -5.703971947785402e-02 - 9.291057542169088e-01j,
              5.263875132656864e-02 + 7.877974334424453e-01j,
              5.263875132656864e-02 - 7.877974334424453e-01j,
              -3.007943405982616e-02 + 8.846331716180016e-01j,
              -3.007943405982616e-02 - 8.846331716180016e-01j,
              6.857277464483946e-03 + 8.383275456264492e-01j,
              6.857277464483946e-03 - 8.383275456264492e-01j]
        k2 = 6.507068761705037e-03
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-13)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-12)
        assert_allclose(k, k2, rtol=1e-11)

    def test_bandpass(self):
        """Check cheby2 band-pass zpk output against precomputed references."""
        z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk')
        z2 = [-9.999999999999999e-01,
              3.676588029658514e-01 + 9.299607543341383e-01j,
              3.676588029658514e-01 - 9.299607543341383e-01j,
              7.009689684982283e-01 + 7.131917730894889e-01j,
              7.009689684982283e-01 - 7.131917730894889e-01j,
              7.815697973765858e-01 + 6.238178033919218e-01j,
              7.815697973765858e-01 - 6.238178033919218e-01j,
              8.063793628819866e-01 + 5.913986160941200e-01j,
              8.063793628819866e-01 - 5.913986160941200e-01j,
              1.000000000000001e+00,
              9.944493019920448e-01 + 1.052168511576739e-01j,
              9.944493019920448e-01 - 1.052168511576739e-01j,
              9.854674703367308e-01 + 1.698642543566085e-01j,
              9.854674703367308e-01 - 1.698642543566085e-01j,
              9.762751735919308e-01 + 2.165335665157851e-01j,
              9.762751735919308e-01 - 2.165335665157851e-01j,
              9.792277171575134e-01 + 2.027636011479496e-01j,
              9.792277171575134e-01 - 2.027636011479496e-01j]
        p2 = [8.143803410489621e-01 + 5.411056063397541e-01j,
              8.143803410489621e-01 - 5.411056063397541e-01j,
              7.650769827887418e-01 + 5.195412242095543e-01j,
              7.650769827887418e-01 - 5.195412242095543e-01j,
              6.096241204063443e-01 + 3.568440484659796e-01j,
              6.096241204063443e-01 - 3.568440484659796e-01j,
              6.918192770246239e-01 + 4.770463577106911e-01j,
              6.918192770246239e-01 - 4.770463577106911e-01j,
              6.986241085779207e-01 + 1.146512226180060e-01j,
              6.986241085779207e-01 - 1.146512226180060e-01j,
              8.654645923909734e-01 + 1.604208797063147e-01j,
              8.654645923909734e-01 - 1.604208797063147e-01j,
              9.164831670444591e-01 + 1.969181049384918e-01j,
              9.164831670444591e-01 - 1.969181049384918e-01j,
              9.630425777594550e-01 + 2.317513360702271e-01j,
              9.630425777594550e-01 - 2.317513360702271e-01j,
              9.438104703725529e-01 + 2.193509900269860e-01j,
              9.438104703725529e-01 - 2.193509900269860e-01j]
        k2 = 9.345352824659604e-03
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-13)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-11)

    def test_bandstop(self):
        """Check cheby2 band-stop zpk output against precomputed references."""
        z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk')
        z2 = [6.230544895101009e-01 + 7.821784343111114e-01j,
              6.230544895101009e-01 - 7.821784343111114e-01j,
              9.086608545660115e-01 + 4.175349702471991e-01j,
              9.086608545660115e-01 - 4.175349702471991e-01j,
              9.478129721465802e-01 + 3.188268649763867e-01j,
              9.478129721465802e-01 - 3.188268649763867e-01j,
              -6.230544895100982e-01 + 7.821784343111109e-01j,
              -6.230544895100982e-01 - 7.821784343111109e-01j,
              -9.086608545660116e-01 + 4.175349702472088e-01j,
              -9.086608545660116e-01 - 4.175349702472088e-01j,
              -9.478129721465784e-01 + 3.188268649763897e-01j,
              -9.478129721465784e-01 - 3.188268649763897e-01j]
        p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j,
              -9.464094036167638e-01 - 1.720048695084344e-01j,
              -8.715844103386737e-01 + 1.370665039509297e-01j,
              -8.715844103386737e-01 - 1.370665039509297e-01j,
              -8.078751204586425e-01 + 5.729329866682983e-02j,
              -8.078751204586425e-01 - 5.729329866682983e-02j,
              9.464094036167665e-01 + 1.720048695084332e-01j,
              9.464094036167665e-01 - 1.720048695084332e-01j,
              8.078751204586447e-01 + 5.729329866683007e-02j,
              8.078751204586447e-01 - 5.729329866683007e-02j,
              8.715844103386721e-01 + 1.370665039509331e-01j,
              8.715844103386721e-01 - 1.370665039509331e-01j]
        k2 = 2.917823332763358e-03
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-13)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-11)

    def test_ba_output(self):
        """Check cheby2 analog band-stop transfer-function (b, a) output."""
        # with transfer function conversion, without digital conversion
        b, a = cheby2(5, 20, [2010, 2100], 'stop', True)
        b2 = [1.000000000000000e+00, 0,  # Matlab: 6.683253076978249e-12,
              2.111512500000000e+07, 0,  # Matlab: 1.134325604589552e-04,
              1.782966433781250e+14, 0,  # Matlab: 7.216787944356781e+02,
              7.525901316990656e+20, 0,  # Matlab: 2.039829265789886e+09,
              1.587960565565748e+27, 0,  # Matlab: 2.161236218626134e+15,
              1.339913493808585e+33]
        a2 = [1.000000000000000e+00, 1.849550755473371e+02,
              2.113222918998538e+07, 3.125114149732283e+09,
              1.785133457155609e+14, 1.979158697776348e+16,
              7.535048322653831e+20, 5.567966191263037e+22,
              1.589246884221346e+27, 5.871210648525566e+28,
              1.339913493808590e+33]
        assert_allclose(b, b2, rtol=1e-14)
        assert_allclose(a, a2, rtol=1e-14)

    def test_fs_param(self):
        """Designs given in Hz (``fs=``) must equal normalized designs."""
        for fs in (900, 900.1, 1234.567):
            for N in (0, 1, 2, 3, 10):
                for fc in (100, 100.1, 432.12345):
                    for btype in ('lp', 'hp'):
                        ba1 = cheby2(N, 20, fc, btype, fs=fs)
                        ba2 = cheby2(N, 20, fc/(fs/2), btype)
                        assert_allclose(ba1, ba2)
                for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
                    for btype in ('bp', 'bs'):
                        ba1 = cheby2(N, 20, fc, btype, fs=fs)
                        for seq in (list, tuple, array):
                            fcnorm = seq([f/(fs/2) for f in fc])
                            ba2 = cheby2(N, 20, fcnorm, btype)
                            assert_allclose(ba1, ba2)
class TestEllip:
    """Tests for the elliptic (Cauer) filter design function."""

    def test_degenerate(self):
        """Orders 0 and 1 reduce to trivial/first-order filters."""
        # 0-order filter is just a passthrough
        # Even-order filters have DC gain of -rp dB
        # Stopband ripple factor doesn't matter
        b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True)
        assert_array_almost_equal(b, [1/np.sqrt(2)])
        assert_array_equal(a, [1])

        # 1-order filter is same for all types
        b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True)
        assert_array_almost_equal(b, [1])
        assert_array_almost_equal(a, [1, 1])

        z, p, k = ellip(1, 1, 55, 0.3, output='zpk')
        assert_allclose(z, [-9.999999999999998e-01], rtol=1e-14)
        assert_allclose(p, [-6.660721153525525e-04], rtol=1e-10)
        assert_allclose(k, 5.003330360576763e-01, rtol=1e-14)

    def test_basic(self):
        """Stability checks over a range of orders plus (b, a) references."""
        for N in range(25):
            wn = 0.01
            z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk')
            assert_(len(p) == N)
            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane

        for N in range(25):
            wn = 0.01
            z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk')
            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle

        b3, a3 = ellip(5, 3, 26, 1, analog=True)
        assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0,
                                       0.2409], decimal=4)
        assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012,
                                       0.2409], decimal=4)

        b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop')
        assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042,
                                      0.3469, 0.3310], decimal=4)
        assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323,
                                      0.1131, -0.0060], decimal=4)

    def test_highpass(self):
        """Check ellip high-pass zpk output against precomputed references."""
        # high even order
        z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk')
        z2 = [9.761875332501075e-01 + 2.169283290099910e-01j,
              9.761875332501075e-01 - 2.169283290099910e-01j,
              8.413503353963494e-01 + 5.404901600661900e-01j,
              8.413503353963494e-01 - 5.404901600661900e-01j,
              7.160082576305009e-01 + 6.980918098681732e-01j,
              7.160082576305009e-01 - 6.980918098681732e-01j,
              6.456533638965329e-01 + 7.636306264739803e-01j,
              6.456533638965329e-01 - 7.636306264739803e-01j,
              6.127321820971366e-01 + 7.902906256703928e-01j,
              6.127321820971366e-01 - 7.902906256703928e-01j,
              5.983607817490196e-01 + 8.012267936512676e-01j,
              5.983607817490196e-01 - 8.012267936512676e-01j,
              5.922577552594799e-01 + 8.057485658286990e-01j,
              5.922577552594799e-01 - 8.057485658286990e-01j,
              5.896952092563588e-01 + 8.076258788449631e-01j,
              5.896952092563588e-01 - 8.076258788449631e-01j,
              5.886248765538837e-01 + 8.084063054565607e-01j,
              5.886248765538837e-01 - 8.084063054565607e-01j,
              5.881802711123132e-01 + 8.087298490066037e-01j,
              5.881802711123132e-01 - 8.087298490066037e-01j,
              5.879995719101164e-01 + 8.088612386766461e-01j,
              5.879995719101164e-01 - 8.088612386766461e-01j,
              5.879354086709576e-01 + 8.089078780868164e-01j,
              5.879354086709576e-01 - 8.089078780868164e-01j]
        p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j,
              -3.184805259081650e-01 - 4.206951906775851e-01j,
              1.417279173459985e-01 + 7.903955262836452e-01j,
              1.417279173459985e-01 - 7.903955262836452e-01j,
              4.042881216964651e-01 + 8.309042239116594e-01j,
              4.042881216964651e-01 - 8.309042239116594e-01j,
              5.128964442789670e-01 + 8.229563236799665e-01j,
              5.128964442789670e-01 - 8.229563236799665e-01j,
              5.569614712822724e-01 + 8.155957702908510e-01j,
              5.569614712822724e-01 - 8.155957702908510e-01j,
              5.750478870161392e-01 + 8.118633973883931e-01j,
              5.750478870161392e-01 - 8.118633973883931e-01j,
              5.825314018170804e-01 + 8.101960910679270e-01j,
              5.825314018170804e-01 - 8.101960910679270e-01j,
              5.856397379751872e-01 + 8.094825218722543e-01j,
              5.856397379751872e-01 - 8.094825218722543e-01j,
              5.869326035251949e-01 + 8.091827531557583e-01j,
              5.869326035251949e-01 - 8.091827531557583e-01j,
              5.874697218855733e-01 + 8.090593298213502e-01j,
              5.874697218855733e-01 - 8.090593298213502e-01j,
              5.876904783532237e-01 + 8.090127161018823e-01j,
              5.876904783532237e-01 - 8.090127161018823e-01j,
              5.877753105317594e-01 + 8.090050577978136e-01j,
              5.877753105317594e-01 - 8.090050577978136e-01j]
        k2 = 4.918081266957108e-02
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-4)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-4)
        assert_allclose(k, k2, rtol=1e-3)

        # high odd order
        z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk')
        z2 = [9.999999999998661e-01,
              6.603717261750994e-01 + 7.509388678638675e-01j,
              6.603717261750994e-01 - 7.509388678638675e-01j,
              2.788635267510325e-01 + 9.603307416968041e-01j,
              2.788635267510325e-01 - 9.603307416968041e-01j,
              1.070215532544218e-01 + 9.942567008268131e-01j,
              1.070215532544218e-01 - 9.942567008268131e-01j,
              4.049427369978163e-02 + 9.991797705105507e-01j,
              4.049427369978163e-02 - 9.991797705105507e-01j,
              1.531059368627931e-02 + 9.998827859909265e-01j,
              1.531059368627931e-02 - 9.998827859909265e-01j,
              5.808061438534933e-03 + 9.999831330689181e-01j,
              5.808061438534933e-03 - 9.999831330689181e-01j,
              2.224277847754599e-03 + 9.999975262909676e-01j,
              2.224277847754599e-03 - 9.999975262909676e-01j,
              8.731857107534554e-04 + 9.999996187732845e-01j,
              8.731857107534554e-04 - 9.999996187732845e-01j,
              3.649057346914968e-04 + 9.999999334218996e-01j,
              3.649057346914968e-04 - 9.999999334218996e-01j,
              1.765538109802615e-04 + 9.999999844143768e-01j,
              1.765538109802615e-04 - 9.999999844143768e-01j,
              1.143655290967426e-04 + 9.999999934602630e-01j,
              1.143655290967426e-04 - 9.999999934602630e-01j]
        p2 = [-6.322017026545028e-01,
              -4.648423756662754e-01 + 5.852407464440732e-01j,
              -4.648423756662754e-01 - 5.852407464440732e-01j,
              -2.249233374627773e-01 + 8.577853017985717e-01j,
              -2.249233374627773e-01 - 8.577853017985717e-01j,
              -9.234137570557621e-02 + 9.506548198678851e-01j,
              -9.234137570557621e-02 - 9.506548198678851e-01j,
              -3.585663561241373e-02 + 9.821494736043981e-01j,
              -3.585663561241373e-02 - 9.821494736043981e-01j,
              -1.363917242312723e-02 + 9.933844128330656e-01j,
              -1.363917242312723e-02 - 9.933844128330656e-01j,
              -5.131505238923029e-03 + 9.975221173308673e-01j,
              -5.131505238923029e-03 - 9.975221173308673e-01j,
              -1.904937999259502e-03 + 9.990680819857982e-01j,
              -1.904937999259502e-03 - 9.990680819857982e-01j,
              -6.859439885466834e-04 + 9.996492201426826e-01j,
              -6.859439885466834e-04 - 9.996492201426826e-01j,
              -2.269936267937089e-04 + 9.998686250679161e-01j,
              -2.269936267937089e-04 - 9.998686250679161e-01j,
              -5.687071588789117e-05 + 9.999527573294513e-01j,
              -5.687071588789117e-05 - 9.999527573294513e-01j,
              -6.948417068525226e-07 + 9.999882737700173e-01j,
              -6.948417068525226e-07 - 9.999882737700173e-01j]
        k2 = 1.220910020289434e-02
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-4)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-4)
        assert_allclose(k, k2, rtol=1e-3)

    def test_bandpass(self):
        """Check ellip band-pass zpk output (digital and analog cases)."""
        z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk')
        z2 = [-9.999999999999991e-01,
              6.856610961780020e-01 + 7.279209168501619e-01j,
              6.856610961780020e-01 - 7.279209168501619e-01j,
              7.850346167691289e-01 + 6.194518952058737e-01j,
              7.850346167691289e-01 - 6.194518952058737e-01j,
              7.999038743173071e-01 + 6.001281461922627e-01j,
              7.999038743173071e-01 - 6.001281461922627e-01j,
              9.999999999999999e-01,
              9.862938983554124e-01 + 1.649980183725925e-01j,
              9.862938983554124e-01 - 1.649980183725925e-01j,
              9.788558330548762e-01 + 2.045513580850601e-01j,
              9.788558330548762e-01 - 2.045513580850601e-01j,
              9.771155231720003e-01 + 2.127093189691258e-01j,
              9.771155231720003e-01 - 2.127093189691258e-01j]
        p2 = [8.063992755498643e-01 + 5.858071374778874e-01j,
              8.063992755498643e-01 - 5.858071374778874e-01j,
              8.050395347071724e-01 + 5.639097428109795e-01j,
              8.050395347071724e-01 - 5.639097428109795e-01j,
              8.113124936559144e-01 + 4.855241143973142e-01j,
              8.113124936559144e-01 - 4.855241143973142e-01j,
              8.665595314082394e-01 + 3.334049560919331e-01j,
              8.665595314082394e-01 - 3.334049560919331e-01j,
              9.412369011968871e-01 + 2.457616651325908e-01j,
              9.412369011968871e-01 - 2.457616651325908e-01j,
              9.679465190411238e-01 + 2.228772501848216e-01j,
              9.679465190411238e-01 - 2.228772501848216e-01j,
              9.747235066273385e-01 + 2.178937926146544e-01j,
              9.747235066273385e-01 - 2.178937926146544e-01j]
        k2 = 8.354782670263239e-03
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-4)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-4)
        assert_allclose(k, k2, rtol=1e-3)

        z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk')
        z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j,
              -5.583607317695175e-14 - 1.433755965989225e+02j,
              5.740106416459296e-14 + 1.261678754570291e+02j,
              5.740106416459296e-14 - 1.261678754570291e+02j,
              -2.199676239638652e-14 + 6.974861996895196e+01j,
              -2.199676239638652e-14 - 6.974861996895196e+01j,
              -3.372595657044283e-14 + 7.926145989044531e+01j,
              -3.372595657044283e-14 - 7.926145989044531e+01j,
              0]
        p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j,
              -8.814960004852743e-01 - 1.104124501436066e+02j,
              -2.477372459140184e+00 + 1.065638954516534e+02j,
              -2.477372459140184e+00 - 1.065638954516534e+02j,
              -3.072156842945799e+00 + 9.995404870405324e+01j,
              -3.072156842945799e+00 - 9.995404870405324e+01j,
              -2.180456023925693e+00 + 9.379206865455268e+01j,
              -2.180456023925693e+00 - 9.379206865455268e+01j,
              -7.230484977485752e-01 + 9.056598800801140e+01j,
              -7.230484977485752e-01 - 9.056598800801140e+01j]
        k2 = 3.774571622827070e-02
        assert_allclose(sorted(z, key=np.imag),
                        sorted(z2, key=np.imag), rtol=1e-4)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-6)
        assert_allclose(k, k2, rtol=1e-3)

    def test_bandstop(self):
        """Check ellip band-stop zpk output against precomputed references."""
        z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk')
        z2 = [3.528578094286510e-01 + 9.356769561794296e-01j,
              3.528578094286510e-01 - 9.356769561794296e-01j,
              3.769716042264783e-01 + 9.262248159096587e-01j,
              3.769716042264783e-01 - 9.262248159096587e-01j,
              4.406101783111199e-01 + 8.976985411420985e-01j,
              4.406101783111199e-01 - 8.976985411420985e-01j,
              5.539386470258847e-01 + 8.325574907062760e-01j,
              5.539386470258847e-01 - 8.325574907062760e-01j,
              6.748464963023645e-01 + 7.379581332490555e-01j,
              6.748464963023645e-01 - 7.379581332490555e-01j,
              7.489887970285254e-01 + 6.625826604475596e-01j,
              7.489887970285254e-01 - 6.625826604475596e-01j,
              7.913118471618432e-01 + 6.114127579150699e-01j,
              7.913118471618432e-01 - 6.114127579150699e-01j,
              7.806804740916381e-01 + 6.249303940216475e-01j,
              7.806804740916381e-01 - 6.249303940216475e-01j]
        p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j,
              -1.025299146693730e-01 - 5.662682444754943e-01j,
              1.698463595163031e-01 + 8.926678667070186e-01j,
              1.698463595163031e-01 - 8.926678667070186e-01j,
              2.750532687820631e-01 + 9.351020170094005e-01j,
              2.750532687820631e-01 - 9.351020170094005e-01j,
              3.070095178909486e-01 + 9.457373499553291e-01j,
              3.070095178909486e-01 - 9.457373499553291e-01j,
              7.695332312152288e-01 + 2.792567212705257e-01j,
              7.695332312152288e-01 - 2.792567212705257e-01j,
              8.083818999225620e-01 + 4.990723496863960e-01j,
              8.083818999225620e-01 - 4.990723496863960e-01j,
              8.066158014414928e-01 + 5.649811440393374e-01j,
              8.066158014414928e-01 - 5.649811440393374e-01j,
              8.062787978834571e-01 + 5.855780880424964e-01j,
              8.062787978834571e-01 - 5.855780880424964e-01j]
        k2 = 2.068622545291259e-01
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-6)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-5)
        assert_allclose(k, k2, rtol=1e-5)

    def test_ba_output(self):
        """Check ellip analog band-stop transfer-function (b, a) output."""
        # with transfer function conversion, without digital conversion
        b, a = ellip(5, 1, 40, [201, 240], 'stop', True)
        b2 = [
             1.000000000000000e+00, 0,  # Matlab: 1.743506051190569e-13,
             2.426561778314366e+05, 0,  # Matlab: 3.459426536825722e-08,
             2.348218683400168e+10, 0,  # Matlab: 2.559179747299313e-03,
             1.132780692872241e+15, 0,  # Matlab: 8.363229375535731e+01,
             2.724038554089566e+19, 0,  # Matlab: 1.018700994113120e+06,
             2.612380874940186e+23
             ]
        a2 = [
             1.000000000000000e+00, 1.337266601804649e+02,
             2.486725353510667e+05, 2.628059713728125e+07,
             2.436169536928770e+10, 1.913554568577315e+12,
             1.175208184614438e+15, 6.115751452473410e+16,
             2.791577695211466e+19, 7.241811142725384e+20,
             2.612380874940182e+23
             ]
        assert_allclose(b, b2, rtol=1e-6)
        assert_allclose(a, a2, rtol=1e-4)

    def test_fs_param(self):
        """Designs given in Hz (``fs=``) must equal normalized designs."""
        for fs in (900, 900.1, 1234.567):
            for N in (0, 1, 2, 3, 10):
                for fc in (100, 100.1, 432.12345):
                    for btype in ('lp', 'hp'):
                        ba1 = ellip(N, 1, 20, fc, btype, fs=fs)
                        ba2 = ellip(N, 1, 20, fc/(fs/2), btype)
                        assert_allclose(ba1, ba2)
                for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
                    for btype in ('bp', 'bs'):
                        ba1 = ellip(N, 1, 20, fc, btype, fs=fs)
                        for seq in (list, tuple, array):
                            fcnorm = seq([f/(fs/2) for f in fc])
                            ba2 = ellip(N, 1, 20, fcnorm, btype)
                            assert_allclose(ba1, ba2)
def test_sos_consistency():
    """output='sos' of each IIR design helper must agree with 'ba'/'zpk'."""
    cases = [(bessel, (0.1,)),
             (butter, (0.1,)),
             (cheby1, (45.0, 0.1)),
             (cheby2, (0.087, 0.1)),
             (ellip, (0.087, 45, 0.1))]
    for design, extra in cases:
        label = design.__name__
        # Order 2: a single second-order section is just (b, a) side by side.
        b, a = design(2, *extra, output='ba')
        sections = design(2, *extra, output='sos')
        assert_allclose(sections, [np.hstack((b, a))],
                        err_msg="%s(2,...)" % label)
        # Orders 3 and 4: sections must match the zpk2sos conversion of
        # the zpk output.
        for order in (3, 4):
            zpk = design(order, *extra, output='zpk')
            sections = design(order, *extra, output='sos')
            assert_allclose(sections, zpk2sos(*zpk),
                            err_msg="%s(%d,...)" % (label, order))
class TestIIRNotch:
    """Tests for the iirnotch second-order notch filter design."""

    def test_ba_output(self):
        """Coefficients must match the equivalent MATLAB design."""
        num, den = iirnotch(0.06, 30)
        expected_num = [9.9686824e-01, -1.9584219e+00, 9.9686824e-01]
        expected_den = [1.0000000e+00, -1.9584219e+00, 9.9373647e-01]
        assert_allclose(num, expected_num, rtol=1e-8)
        assert_allclose(den, expected_den, rtol=1e-8)

    def test_frequency_response(self):
        """Gain ~1 away from w0, ~-3 dB at the band edges, ~0 at w0."""
        num, den = iirnotch(0.3, 30)
        w, h = freqz(num, den, 1000)
        # Sample the magnitude response at five normalized frequencies:
        # 0.200, 0.295, 0.300, 0.305 and 0.400 (indices into the 1000-point
        # grid returned by freqz).
        samples = abs(h[[200, 295, 300, 305, 400]])
        # Far from the notch (w0 = 0.3) the gain is essentially unity.
        assert_allclose(samples[0], 1, rtol=1e-2)
        assert_allclose(samples[4], 1, rtol=1e-2)
        # Near the band edges the gain is approximately -3 dB.
        assert_allclose(samples[1], 1 / np.sqrt(2), rtol=1e-2)
        assert_allclose(samples[3], 1 / np.sqrt(2), rtol=1e-2)
        # The notch frequency itself is rejected almost completely.
        assert_allclose(samples[2], 0, atol=1e-10)

    def test_errors(self):
        """Invalid w0 or Q values must raise."""
        # w0 outside (0, 1) is rejected for digital designs.
        assert_raises(ValueError, iirnotch, w0=2, Q=30)
        assert_raises(ValueError, iirnotch, w0=-1, Q=30)
        # Parameters that are not convertible to float are rejected too.
        assert_raises(ValueError, iirnotch, w0="blabla", Q=30)
        assert_raises(TypeError, iirnotch, w0=-1, Q=[1, 2, 3])

    def test_fs_param(self):
        """Same frequency-response checks with frequencies given in Hz."""
        num, den = iirnotch(1500, 30, fs=10000)
        w, h = freqz(num, den, 1000, fs=10000)
        # Sample at 1000, 1475, 1500, 1525 and 2000 Hz.
        samples = abs(h[[200, 295, 300, 305, 400]])
        # Far from the notch (w0 = 1500 Hz) the gain is essentially unity.
        assert_allclose(samples[0], 1, rtol=1e-2)
        assert_allclose(samples[4], 1, rtol=1e-2)
        # Near the band edges the gain is approximately -3 dB.
        assert_allclose(samples[1], 1 / np.sqrt(2), rtol=1e-2)
        assert_allclose(samples[3], 1 / np.sqrt(2), rtol=1e-2)
        # The notch frequency itself is rejected almost completely.
        assert_allclose(samples[2], 0, atol=1e-10)
class TestIIRPeak:
    """Tests for the iirpeak second-order peaking (resonator) design."""

    def test_ba_output(self):
        """Coefficients must match the equivalent MATLAB design."""
        # Compare coefficients with Matlab ones
        # for the equivalent input:
        b, a = iirpeak(0.06, 30)
        b2 = [
            3.131764229e-03, 0,
            -3.131764229e-03
        ]
        a2 = [
            1.0000000e+00, -1.958421917e+00,
            9.9373647e-01
        ]
        assert_allclose(b, b2, rtol=1e-8)
        assert_allclose(a, a2, rtol=1e-8)

    def test_frequency_response(self):
        """Gain ~0 away from w0, ~-3 dB at band edges, ~1 at w0."""
        # Get filter coefficients
        b, a = iirpeak(0.3, 30)
        # Get frequency response
        w, h = freqz(b, a, 1000)
        # Pick 5 points
        p = [30,  # w0 = 0.030
             295,  # w0 = 0.295
             300,  # w0 = 0.300
             305,  # w0 = 0.305
             800]  # w0 = 0.800
        # Get frequency response corresponding to each of those points
        hp = h[p]
        # Check if the frequency response fulfills the specifications:
        # hp[0] and hp[4] correspond to frequencies distant from
        # w0 = 0.3 and should be close to 0
        assert_allclose(abs(hp[0]), 0, atol=1e-2)
        assert_allclose(abs(hp[4]), 0, atol=1e-2)
        # hp[1] and hp[3] correspond to frequencies approximately
        # on the edges of the passband and should be close to 10**(-3/20)
        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
        # hp[2] corresponds to the frequency that should be retained and
        # the frequency response should be very close to 1
        assert_allclose(abs(hp[2]), 1, rtol=1e-10)

    def test_errors(self):
        """Invalid w0 or Q values must raise."""
        # Exception should be raised if w0 > 1 or w0 < 0
        assert_raises(ValueError, iirpeak, w0=2, Q=30)
        assert_raises(ValueError, iirpeak, w0=-1, Q=30)
        # Exception should be raised if any of the parameters
        # are not float (or cannot be converted to one)
        assert_raises(ValueError, iirpeak, w0="blabla", Q=30)
        assert_raises(TypeError, iirpeak, w0=-1, Q=[1, 2, 3])

    def test_fs_param(self):
        """Same frequency-response checks with frequencies given in Hz."""
        # Get filter coefficients
        b, a = iirpeak(1200, 30, fs=8000)
        # Get frequency response
        w, h = freqz(b, a, 1000, fs=8000)
        # Pick 5 points
        p = [30,  # w0 = 120
             295,  # w0 = 1180
             300,  # w0 = 1200
             305,  # w0 = 1220
             800]  # w0 = 3200
        # Get frequency response corresponding to each of those points
        hp = h[p]
        # Check if the frequency response fulfills the specifications:
        # hp[0] and hp[4] correspond to frequencies distant from
        # w0 = 1200 and should be close to 0
        assert_allclose(abs(hp[0]), 0, atol=1e-2)
        assert_allclose(abs(hp[4]), 0, atol=1e-2)
        # hp[1] and hp[3] correspond to frequencies approximately
        # on the edges of the passband and should be close to 10**(-3/20)
        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
        # hp[2] corresponds to the frequency that should be retained and
        # the frequency response should be very close to 1
        assert_allclose(abs(hp[2]), 1, rtol=1e-10)
class TestIIRComb:
    """Tests for the iircomb notching/peaking comb filter design."""

    # Test erroneous input cases
    def test_invalid_input(self):
        """Out-of-range w0, non-divisible fs and bad ftype must raise."""
        # w0 is <= 0 or >= fs / 2
        fs = 1000
        for args in [(-fs, 30), (0, 35), (fs / 2, 40), (fs, 35)]:
            with pytest.raises(ValueError, match='w0 must be between '):
                iircomb(*args, fs=fs)
        # fs is not divisible by w0
        for args in [(120, 30), (157, 35)]:
            with pytest.raises(ValueError, match='fs must be divisible '):
                iircomb(*args, fs=fs)
        # Filter type is not notch or peak
        for args in [(0.2, 30, 'natch'), (0.5, 35, 'comb')]:
            with pytest.raises(ValueError, match='ftype must be '):
                iircomb(*args)

    # Verify that the filter's frequency response contains a
    # notch at the cutoff frequency
    @pytest.mark.parametrize('ftype', ('notch', 'peak'))
    def test_frequency_response(self, ftype):
        """The first magnitude minimum must sit exactly at w0 (1000 Hz)."""
        # Create a notching or peaking comb filter at 1000 Hz
        b, a = iircomb(1000, 30, ftype=ftype, fs=10000)
        # Compute the frequency response
        freqs, response = freqz(b, a, 1000, fs=10000)
        # Find the notch using argrelextrema
        comb_points = argrelextrema(abs(response), np.less)[0]
        # Verify that the first notch sits at 1000 Hz
        comb1 = comb_points[0]
        assert_allclose(freqs[comb1], 1000)

    # All built-in IIR filters are real, so should have perfectly
    # symmetrical poles and zeros. Then ba representation (using
    # numpy.poly) will be purely real instead of having negligible
    # imaginary parts.
    def test_iir_symmetry(self):
        """Zeros/poles come in conjugate pairs; (b, a) and k are real."""
        b, a = iircomb(400, 30, fs=24000)
        z, p, k = tf2zpk(b, a)
        assert_array_equal(sorted(z), sorted(z.conj()))
        assert_array_equal(sorted(p), sorted(p.conj()))
        assert_equal(k, np.real(k))
        assert issubclass(b.dtype.type, np.floating)
        assert issubclass(a.dtype.type, np.floating)

    # Verify filter coefficients with MATLAB's iircomb function
    def test_ba_output(self):
        """Notch and peak coefficients must match MATLAB's iircomb."""
        b_notch, a_notch = iircomb(60, 35, ftype='notch', fs=600)
        b_notch2 = [0.957020174408697, 0.0, 0.0, 0.0, 0.0, 0.0,
                    0.0, 0.0, 0.0, 0.0, -0.957020174408697]
        a_notch2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                    0.0, 0.0, 0.0, 0.0, -0.914040348817395]
        assert_allclose(b_notch, b_notch2)
        assert_allclose(a_notch, a_notch2)
        b_peak, a_peak = iircomb(60, 35, ftype='peak', fs=600)
        b_peak2 = [0.0429798255913026, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, -0.0429798255913026]
        a_peak2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.914040348817395]
        assert_allclose(b_peak, b_peak2)
        assert_allclose(a_peak, a_peak2)
class TestIIRDesign:
def test_exceptions(self):
with pytest.raises(ValueError, match="the same shape"):
iirdesign(0.2, [0.1, 0.3], 1, 40)
with pytest.raises(ValueError, match="the same shape"):
iirdesign(np.array([[0.3, 0.6], [0.3, 0.6]]),
np.array([[0.4, 0.5], [0.4, 0.5]]), 1, 40)
with pytest.raises(ValueError, match="can't be negative"):
iirdesign([0.1, 0.3], [-0.1, 0.5], 1, 40)
with pytest.raises(ValueError, match="can't be larger than 1"):
iirdesign([0.1, 1.3], [0.1, 0.5], 1, 40)
with pytest.raises(ValueError, match="strictly inside stopband"):
iirdesign([0.1, 0.4], [0.5, 0.6], 1, 40)
with pytest.raises(ValueError, match="strictly inside stopband"):
iirdesign([0.5, 0.6], [0.1, 0.4], 1, 40)
with pytest.raises(ValueError, match="strictly inside stopband"):
iirdesign([0.3, 0.6], [0.4, 0.7], 1, 40)
with pytest.raises(ValueError, match="strictly inside stopband"):
iirdesign([0.4, 0.7], [0.3, 0.6], 1, 40)
class TestIIRFilter:
    """Behaviour shared by every iirfilter design prototype."""

    def test_symmetry(self):
        """Real filters: conjugate-paired zeros/poles, real (b, a) and k."""
        # All built-in IIR filters are real, so their zeros and poles come
        # in perfect conjugate pairs, and the ba representation (built with
        # numpy.poly) is purely real rather than carrying negligible
        # imaginary parts.
        for order in np.arange(1, 26):
            for prototype in ('butter', 'bessel', 'cheby1', 'cheby2',
                              'ellip'):
                z, p, k = iirfilter(order, 1.1, 1, 20, 'low', analog=True,
                                    ftype=prototype, output='zpk')
                assert_array_equal(sorted(z), sorted(z.conj()))
                assert_array_equal(sorted(p), sorted(p.conj()))
                assert_equal(k, np.real(k))
                b, a = iirfilter(order, 1.1, 1, 20, 'low', analog=True,
                                 ftype=prototype, output='ba')
                assert_(issubclass(b.dtype.type, np.floating))
                assert_(issubclass(a.dtype.type, np.floating))

    def test_int_inputs(self):
        """Integer frequencies with large N must not wrap to negatives."""
        gain = iirfilter(24, 100, btype='low', analog=True, ftype='bessel',
                         output='zpk')[2]
        assert_allclose(gain, 9.999999999999989e+47)

    def test_invalid_wn_size(self):
        """low/high need one critical frequency; band/stop need two."""
        for wn, kwargs in [([0.1, 0.9], dict(btype='low')),
                           ([0.2, 0.5], dict(btype='high')),
                           (0.2, dict(btype='bp')),
                           (400, dict(btype='bs', analog=True))]:
            assert_raises(ValueError, iirfilter, 1, wn, **kwargs)

    def test_invalid_wn_range(self):
        """Digital critical frequencies must satisfy 0 < Wn < 1."""
        for wn, kind in [(2, 'low'), ([0.5, 1], 'band'), ([0., 0.5], 'band'),
                         (-1, 'high'), ([1, 2], 'band'), ([10, 20], 'stop')]:
            assert_raises(ValueError, iirfilter, 1, wn, btype=kind)
class TestGroupDelay:
    """Tests for group_delay."""

    def test_identity_filter(self):
        """An identity filter has zero group delay at every frequency."""
        w, gd = group_delay((1, 1))
        assert_array_almost_equal(w, pi * np.arange(512) / 512)
        assert_array_almost_equal(gd, np.zeros(512))
        w, gd = group_delay((1, 1), whole=True)
        assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512)
        assert_array_almost_equal(gd, np.zeros(512))

    def test_fir(self):
        """A linear-phase FIR filter has constant group delay N/2."""
        # Let's design linear phase FIR and check that the group delay
        # is constant.
        N = 100
        b = firwin(N + 1, 0.1)
        w, gd = group_delay((b, 1))
        assert_allclose(gd, 0.5 * N)

    def test_iir(self):
        """Butterworth group delay must match the MATLAB reference."""
        # Let's design Butterworth filter and test the group delay at
        # some points against MATLAB answer.
        b, a = butter(4, 0.1)
        w = np.linspace(0, pi, num=10, endpoint=False)
        w, gd = group_delay((b, a), w=w)
        matlab_gd = np.array([8.249313898506037, 11.958947880907104,
                              2.452325615326005, 1.048918665702008,
                              0.611382575635897, 0.418293269460578,
                              0.317932917836572, 0.261371844762525,
                              0.229038045801298, 0.212185774208521])
        assert_array_almost_equal(gd, matlab_gd)

    def test_singular(self):
        """Zeros/poles on the unit circle: warn and report zero delay."""
        # Let's create a filter with zeros and poles on the unit circle and
        # check if warning is raised and the group delay is set to zero at
        # these frequencies.
        z1 = np.exp(1j * 0.1 * pi)
        z2 = np.exp(1j * 0.25 * pi)
        p1 = np.exp(1j * 0.5 * pi)
        p2 = np.exp(1j * 0.8 * pi)
        b = np.convolve([1, -z1], [1, -z2])
        a = np.convolve([1, -p1], [1, -p2])
        w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi])
        w, gd = assert_warns(UserWarning, group_delay, (b, a), w=w)
        assert_allclose(gd, 0)

    def test_backward_compat(self):
        """Passing None for w must behave like the default."""
        # For backward compatibility, test if None acts as a wrapper for
        # the default.
        w1, gd1 = group_delay((1, 1))
        w2, gd2 = group_delay((1, 1), None)
        assert_array_almost_equal(w1, w2)
        assert_array_almost_equal(gd1, gd2)

    def test_fs_param(self):
        """Hz-based design/evaluation must match the normalized answer."""
        # Let's design Butterworth filter and test the group delay at
        # some points against the normalized frequency answer.
        b, a = butter(4, 4800, fs=96000)
        w = np.linspace(0, 96000/2, num=10, endpoint=False)
        w, gd = group_delay((b, a), w=w, fs=96000)
        norm_gd = np.array([8.249313898506037, 11.958947880907104,
                            2.452325615326005, 1.048918665702008,
                            0.611382575635897, 0.418293269460578,
                            0.317932917836572, 0.261371844762525,
                            0.229038045801298, 0.212185774208521])
        assert_array_almost_equal(gd, norm_gd)

    def test_w_or_N_types(self):
        """The second argument accepts int-like counts or scalar freqs."""
        # Measure at 8 equally-spaced points
        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
                  np.array(8)):
            w, gd = group_delay((1, 1), N)
            assert_array_almost_equal(w, pi * np.arange(8) / 8)
            assert_array_almost_equal(gd, np.zeros(8))
        # Measure at frequency 8 rad/sec
        for w in (8.0, 8.0+0j):
            w_out, gd = group_delay((1, 1), w)
            assert_array_almost_equal(w_out, [8])
            assert_array_almost_equal(gd, [0])
class TestGammatone:
    """Tests for the gammatone auditory filter design."""

    # Test erroneous input cases.
    def test_invalid_input(self):
        """Out-of-range frequency, bad ftype or bad FIR order must raise."""
        # Cutoff frequency is <= 0 or >= fs / 2.
        fs = 16000
        for args in [(-fs, 'iir'), (0, 'fir'), (fs / 2, 'iir'), (fs, 'fir')]:
            with pytest.raises(ValueError, match='The frequency must be '
                               'between '):
                gammatone(*args, fs=fs)
        # Filter type is not fir or iir
        for args in [(440, 'fie'), (220, 'it')]:
            with pytest.raises(ValueError, match='ftype must be '):
                gammatone(*args, fs=fs)
        # Order is <= 0 or > 24 for FIR filter.
        for args in [(440, 'fir', -50), (220, 'fir', 0), (110, 'fir', 25),
                     (55, 'fir', 50)]:
            with pytest.raises(ValueError, match='Invalid order: '):
                gammatone(*args, numtaps=None, fs=fs)

    # Verify that the filter's frequency response is approximately
    # 1 at the cutoff frequency.
    def test_frequency_response(self):
        """Peak gain ~1 at the 1000 Hz center frequency for both ftypes."""
        fs = 16000
        ftypes = ['fir', 'iir']
        for ftype in ftypes:
            # Create a gammatone filter centered at 1000 Hz.
            b, a = gammatone(1000, ftype, fs=fs)
            # Calculate the frequency response.
            freqs, response = freqz(b, a)
            # Determine peak magnitude of the response
            # and corresponding frequency.
            response_max = np.max(np.abs(response))
            freq_hz = freqs[np.argmax(np.abs(response))] / ((2 * np.pi) / fs)
            # BUG FIX: these two checks were previously bare comparison
            # expressions whose boolean results were discarded, so the test
            # could never fail. They must be asserted.
            assert response_max == pytest.approx(1, rel=1e-2)
            assert freq_hz == pytest.approx(1000, rel=1e-2)

    # All built-in IIR filters are real, so should have perfectly
    # symmetrical poles and zeros. Then ba representation (using
    # numpy.poly) will be purely real instead of having negligible
    # imaginary parts.
    def test_iir_symmetry(self):
        """Zeros/poles come in conjugate pairs; (b, a) and k are real."""
        b, a = gammatone(440, 'iir', fs=24000)
        z, p, k = tf2zpk(b, a)
        assert_array_equal(sorted(z), sorted(z.conj()))
        assert_array_equal(sorted(p), sorted(p.conj()))
        assert_equal(k, np.real(k))
        assert_(issubclass(b.dtype.type, np.floating))
        assert_(issubclass(a.dtype.type, np.floating))

    # Verify FIR filter coefficients with the paper's
    # Mathematica implementation
    def test_fir_ba_output(self):
        """FIR coefficients must match the reference implementation."""
        b, _ = gammatone(15, 'fir', fs=1000)
        b2 = [0.0, 2.2608075649884e-04,
              1.5077903981357e-03, 4.2033687753998e-03,
              8.1508962726503e-03, 1.2890059089154e-02,
              1.7833890391666e-02, 2.2392613558564e-02,
              2.6055195863104e-02, 2.8435872863284e-02,
              2.9293319149544e-02, 2.852976858014e-02,
              2.6176557156294e-02, 2.2371510270395e-02,
              1.7332485267759e-02]
        assert_allclose(b, b2)

    # Verify IIR filter coefficients with the paper's MATLAB implementation
    def test_iir_ba_output(self):
        """IIR coefficients must match the reference implementation."""
        b, a = gammatone(440, 'iir', fs=16000)
        b2 = [1.31494461367464e-06, -5.03391196645395e-06,
              7.00649426000897e-06, -4.18951968419854e-06,
              9.02614910412011e-07]
        a2 = [1.0, -7.65646235454218,
              25.7584699322366, -49.7319214483238,
              60.2667361289181, -46.9399590980486,
              22.9474798808461, -6.43799381299034,
              0.793651554625368]
        assert_allclose(b, b2)
        assert_allclose(a, a2)
|
watonyweng/horizon | refs/heads/master | horizon/tables/base.py | 8 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import json
import logging
from operator import attrgetter
import sys
from django.core import exceptions as core_exceptions
from django.core import urlresolvers
from django import forms
from django.http import HttpResponse # noqa
from django import template
from django.template.defaultfilters import slugify # noqa
from django.template.defaultfilters import truncatechars # noqa
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from django.utils.html import escape
from django.utils import http
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils import termcolors
from django.utils.translation import ugettext_lazy as _
import six
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon.tables.actions import FilterAction # noqa
from horizon.tables.actions import LinkAction # noqa
from horizon.utils import html
LOG = logging.getLogger(__name__)
PALETTE = termcolors.PALETTES[termcolors.DEFAULT_PALETTE]
STRING_SEPARATOR = "__"
class Column(html.HTMLElement):
    """A class which represents a single column in a :class:`.DataTable`.

    .. attribute:: transform

        A string or callable. If ``transform`` is a string, it should be the
        name of the attribute on the underlying data class which
        should be displayed in this column. If it is a callable, it
        will be passed the current row's data at render-time and should
        return the contents of the cell. Required.

    .. attribute:: verbose_name

        The name for this column which should be used for display purposes.
        Defaults to the value of ``transform`` with the first letter
        of each word capitalized if the ``transform`` is not callable,
        otherwise it defaults to an empty string (``""``).

    .. attribute:: sortable

        Boolean to determine whether this column should be sortable or not.
        Defaults to ``True``.

    .. attribute:: hidden

        Boolean to determine whether or not this column should be displayed
        when rendering the table. Default: ``False``.

    .. attribute:: link

        A string or callable which returns a URL which will be wrapped around
        this column's text as a link.

    .. attribute:: allowed_data_types

        A list of data types for which the link should be created.
        Default is an empty list (``[]``).
        When the list is empty and the ``link`` attribute is not None, all the
        rows under this column will be links.

    .. attribute:: status

        Boolean designating whether or not this column represents a status
        (i.e. "enabled/disabled", "up/down", "active/inactive").
        Default: ``False``.

    .. attribute:: status_choices

        A tuple of tuples representing the possible data values for the
        status column and their associated boolean equivalent. Positive
        states should equate to ``True``, negative states should equate
        to ``False``, and indeterminate states should be ``None``.
        Values are compared in a case-insensitive manner.

        Example (these are also the default values)::

            status_choices = (
                ('enabled', True),
                ('true', True),
                ('up', True),
                ('active', True),
                ('yes', True),
                ('on', True),
                ('none', None),
                ('unknown', None),
                ('', None),
                ('disabled', False),
                ('down', False),
                ('false', False),
                ('inactive', False),
                ('no', False),
                ('off', False),
            )

    .. attribute:: display_choices

        A tuple of tuples representing the possible values to substitute
        the data when displayed in the column cell.

    .. attribute:: empty_value

        A string or callable to be used for cells which have no data.
        Defaults to the string ``"-"``.

    .. attribute:: summation

        A string containing the name of a summation method to be used in
        the generation of a summary row for this column. By default the
        options are ``"sum"`` or ``"average"``, which behave as expected.
        Optional.

    .. attribute:: filters

        A list of functions (often template filters) to be applied to the
        value of the data for this column prior to output. This is effectively
        a shortcut for writing a custom ``transform`` function in simple
        cases.

    .. attribute:: classes

        An iterable of CSS classes which should be added to this column.
        Example: ``classes=('foo', 'bar')``.

    .. attribute:: attrs

        A dict of HTML attribute strings which should be added to this column.
        Example: ``attrs={"data-foo": "bar"}``.

    .. attribute:: cell_attributes_getter

        A callable to get the HTML attributes of a column cell depending
        on the data. For example, to add additional description or help
        information for data in a column cell (e.g. in Images panel, for the
        column 'format')::

            helpText = {
                'ARI': 'Amazon Ramdisk Image',
                'QCOW2': 'QEMU Emulator',
            }

            def getHoverHelp(data):
                text = helpText.get(data, None)
                if text:
                    return {'title': text}
                else:
                    return {}
            ...
            cell_attributes_getter = getHoverHelp

    .. attribute:: truncate

        An integer for the maximum length of the string in this column. If the
        length of the data in this column is larger than the supplied number,
        the data for this column will be truncated and an ellipsis will be
        appended to the truncated data.
        Defaults to ``None``.

    .. attribute:: link_classes

        An iterable of CSS classes which will be added when the column's text
        is displayed as a link.
        This is left for backward compatibility. Deprecated in favor of the
        link_attributes attribute.
        Example: ``link_classes=('link-foo', 'link-bar')``.
        Defaults to ``None``.

    .. attribute:: wrap_list

        Boolean value indicating whether the contents of this cell should be
        wrapped in a ``<ul></ul>`` tag. Useful in conjunction with Django's
        ``unordered_list`` template filter. Defaults to ``False``.

    .. attribute:: form_field

        A form field used for inline editing of the column. A django
        forms.Field can be used or django form.Widget can be used.
        Example: ``form_field=forms.CharField(required=True)``.
        Defaults to ``None``.

    .. attribute:: form_field_attributes

        The additional html attributes that will be rendered to form_field.
        Example: ``form_field_attributes={'class': 'bold_input_field'}``.
        Defaults to ``None``.

    .. attribute:: update_action

        The class that inherits from tables.actions.UpdateAction, update_cell
        method takes care of saving inline edited data. The tables.base.Row
        get_data method needs to be connected to table for obtaining the data.
        Example: ``update_action=UpdateCell``.
        Defaults to ``None``.

    .. attribute:: link_attrs

        A dict of HTML attribute strings which should be added when the
        column's text is displayed as a link.
        Examples:
        ``link_attrs={"data-foo": "bar"}``.
        ``link_attrs={"target": "_blank", "class": "link-foo link-bar"}``.
        Defaults to ``None``.

    .. attribute:: help_text

        A string of simple help text displayed in a tooltip when you hover
        over the help icon beside the Column name. Defaults to ``None``.
    """
    summation_methods = {
        "sum": sum,
        "average": lambda data: sum(data, 0.0) / len(data)
    }
    # Used to retain order when instantiating columns on a table
    creation_counter = 0
    transform = None
    name = None
    verbose_name = None
    status_choices = (
        ('enabled', True),
        ('true', True),
        ('up', True),
        ('yes', True),
        ('active', True),
        ('on', True),
        ('none', None),
        ('unknown', None),
        ('', None),
        ('disabled', False),
        ('down', False),
        ('false', False),
        ('inactive', False),
        ('no', False),
        ('off', False),
    )

    # NOTE: ``allowed_data_types`` previously defaulted to a mutable ``[]``,
    # which is shared between every call of ``__init__``. Use ``None`` as the
    # sentinel and normalize inside the body instead (behavior unchanged for
    # all callers, since the list was never mutated).
    def __init__(self, transform, verbose_name=None, sortable=True,
                 link=None, allowed_data_types=None, hidden=False, attrs=None,
                 status=False, status_choices=None, display_choices=None,
                 empty_value=None, filters=None, classes=None, summation=None,
                 auto=None, truncate=None, link_classes=None, wrap_list=False,
                 form_field=None, form_field_attributes=None,
                 update_action=None, link_attrs=None,
                 cell_attributes_getter=None, help_text=None):
        self.classes = list(classes or getattr(self, "classes", []))
        super(Column, self).__init__()
        self.attrs.update(attrs or {})
        if callable(transform):
            self.transform = transform
            self.name = "<%s callable>" % transform.__name__
        else:
            self.transform = six.text_type(transform)
            self.name = self.transform
        # Empty string is a valid value for verbose_name
        if verbose_name is None:
            if callable(transform):
                self.verbose_name = ''
            else:
                self.verbose_name = self.transform.title()
        else:
            self.verbose_name = verbose_name
        self.auto = auto
        self.sortable = sortable
        self.link = link
        self.allowed_data_types = ([] if allowed_data_types is None
                                   else allowed_data_types)
        self.hidden = hidden
        self.status = status
        self.empty_value = empty_value or _('-')
        self.filters = filters or []
        self.truncate = truncate
        self.wrap_list = wrap_list
        self.form_field = form_field
        self.form_field_attributes = form_field_attributes or {}
        self.update_action = update_action
        self.link_attrs = link_attrs or {}
        self.help_text = help_text
        if link_classes:
            self.link_attrs['class'] = ' '.join(link_classes)
        self.cell_attributes_getter = cell_attributes_getter
        if status_choices:
            self.status_choices = status_choices
        self.display_choices = display_choices
        if summation is not None and summation not in self.summation_methods:
            raise ValueError("Summation method %s must be one of %s."
                             % (summation,
                                ", ".join(self.summation_methods.keys())))
        self.summation = summation
        # Record creation order so tables can sort their columns the way
        # they were declared on the table class.
        self.creation_counter = Column.creation_counter
        Column.creation_counter += 1
        if self.sortable and not self.auto:
            self.classes.append("sortable")
        if self.hidden:
            self.classes.append("hide")
        if self.link is not None:
            self.classes.append('anchor')

    def __unicode__(self):
        return six.text_type(self.verbose_name)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.name)

    def get_raw_data(self, datum):
        """Returns the raw data for this column, before any filters or
        formatting are applied to it. This is useful when doing calculations
        on data in the table.
        """
        # Callable transformations
        if callable(self.transform):
            data = self.transform(datum)
        # Dict lookups
        elif isinstance(datum, collections.Mapping) and \
                self.transform in datum:
            data = datum.get(self.transform)
        else:
            # Basic object lookups
            try:
                data = getattr(datum, self.transform)
            except AttributeError:
                msg = _("The attribute %(attr)s doesn't exist on "
                        "%(obj)s.") % {'attr': self.transform, 'obj': datum}
                msg = termcolors.colorize(msg, **PALETTE['ERROR'])
                LOG.warning(msg)
                data = None
        return data

    def get_data(self, datum):
        """Returns the final display data for this column from the given
        inputs.

        The return value will be either the attribute specified for this
        column or the return value of the
        attr:`~horizon.tables.Column.transform` method for this column.
        """
        datum_id = self.table.get_object_id(datum)
        if datum_id in self.table._data_cache[self]:
            return self.table._data_cache[self][datum_id]
        data = self.get_raw_data(datum)
        display_value = None
        if self.display_choices:
            display_value = [display for (value, display) in
                             self.display_choices
                             if value.lower() == (data or '').lower()]
        if display_value:
            data = display_value[0]
        else:
            for filter_func in self.filters:
                try:
                    data = filter_func(data)
                except Exception:
                    msg = ("Filter '%(filter)s' failed with data "
                           "'%(data)s' on column '%(col_name)s'")
                    # BUG FIX: ``func_name`` exists only on Python 2;
                    # ``__name__`` is available on functions in both
                    # Python 2 and 3.
                    args = {'filter': filter_func.__name__,
                            'data': data,
                            'col_name': six.text_type(self.verbose_name)}
                    LOG.warning(msg, args)
        if data and self.truncate:
            data = truncatechars(data, self.truncate)
        self.table._data_cache[self][datum_id] = data
        return self.table._data_cache[self][datum_id]

    def get_link_url(self, datum):
        """Returns the final value for the column's ``link`` property.

        If ``allowed_data_types`` of this column is not empty and the datum
        has an assigned type, check if the datum's type is in the
        ``allowed_data_types`` list. If not, the datum won't be displayed
        as a link.

        If ``link`` is a callable, it will be passed the current data object
        and should return a URL. Otherwise ``get_link_url`` will attempt to
        call ``reverse`` on ``link`` with the object's id as a parameter.
        Failing that, it will simply return the value of ``link``.
        """
        if self.allowed_data_types:
            data_type_name = self.table._meta.data_type_name
            data_type = getattr(datum, data_type_name, None)
            if data_type and (data_type not in self.allowed_data_types):
                return None
        obj_id = self.table.get_object_id(datum)
        if callable(self.link):
            return self.link(datum)
        try:
            return urlresolvers.reverse(self.link, args=(obj_id,))
        except urlresolvers.NoReverseMatch:
            return self.link

    def get_summation(self):
        """Returns the summary value for the data in this column if a
        valid summation method is specified for it. Otherwise returns
        ``None``.
        """
        if self.summation not in self.summation_methods:
            return None
        summation_function = self.summation_methods[self.summation]
        data = [self.get_raw_data(datum) for datum in self.table.data]
        # BUG FIX: use a list comprehension instead of ``filter`` so this
        # also works on Python 3, where ``filter`` returns a one-shot
        # iterator with no ``len()``. Behavior on Python 2 is identical.
        data = [datum for datum in data if datum is not None]
        if data:
            try:
                summation = summation_function(data)
                for filter_func in self.filters:
                    summation = filter_func(summation)
                return summation
            except TypeError:
                # Non-numeric data (e.g. strings) cannot be summed; show
                # no summary rather than crashing the table render.
                pass
        return None
class Row(html.HTMLElement):
    """Represents a row in the table.
    When iterated, the ``Row`` instance will yield each of its cells.
    Rows are capable of AJAX updating, with a little added work:
    The ``ajax`` property needs to be set to ``True``, and
    subclasses need to define a ``get_data`` method which returns a data
    object appropriate for consumption by the table (effectively the "get"
    lookup versus the table's "list" lookup).
    The automatic update interval is configurable by setting the key
    ``ajax_poll_interval`` in the ``HORIZON_CONFIG`` dictionary.
    Default: ``2500`` (measured in milliseconds).
    .. attribute:: table
    The table which this row belongs to.
    .. attribute:: datum
    The data object which this row represents.
    .. attribute:: id
    A string uniquely representing this row composed of the table name
    and the row data object's identifier.
    .. attribute:: cells
    The cells belonging to this row stored in a ``SortedDict`` object.
    This attribute is populated during instantiation.
    .. attribute:: status
    Boolean value representing the status of this row calculated from
    the values of the table's ``status_columns`` if they are set.
    .. attribute:: status_class
    Returns a css class for the status of the row based on ``status``.
    .. attribute:: ajax
    Boolean value to determine whether ajax updating for this row is
    enabled.
    .. attribute:: ajax_action_name
    String that is used for the query parameter key to request AJAX
    updates. Generally you won't need to change this value.
    Default: ``"row_update"``.
    .. attribute:: ajax_cell_action_name
    String that is used for the query parameter key to request AJAX
    updates of cell. Generally you won't need to change this value.
    It is also used for inline edit of the cell.
    Default: ``"cell_update"``.
    """
    # Class-level defaults; subclasses flip ``ajax`` to True (and may
    # override the action names) to enable AJAX updating for their rows.
    ajax = False
    ajax_action_name = "row_update"
    ajax_cell_action_name = "cell_update"
    def __init__(self, table, datum=None):
        """Bind this row to ``table``; load cells now if ``datum`` is given.
        Omitting ``datum`` supports the two-step loading pattern described
        in :meth:`load_cells`.
        """
        super(Row, self).__init__()
        self.table = table
        self.datum = datum
        self.selected = False
        if self.datum:
            self.load_cells()
        else:
            # No data yet: id/cells stay empty until load_cells() is called.
            self.id = None
            self.cells = []
    def load_cells(self, datum=None):
        """Load the row's data (either provided at initialization or as an
        argument to this function), initialize all the cells contained
        by this row, and set the appropriate row properties which require
        the row's data to be determined.
        This function is called automatically by
        :meth:`~horizon.tables.Row.__init__` if the ``datum`` argument is
        provided. However, by not providing the data during initialization
        this function allows for the possibility of a two-step loading
        pattern when you need a row instance but don't yet have the data
        available.
        """
        # Compile all the cells on instantiation.
        table = self.table
        if datum:
            self.datum = datum
        else:
            datum = self.datum
        cells = []
        for column in table.columns.values():
            # Cell class is configurable via Meta (table._meta.cell_class).
            cell = table._meta.cell_class(datum, column, self)
            cells.append((column.name or column.auto, cell))
        self.cells = SortedDict(cells)
        if self.ajax:
            # Attributes consumed by the client-side row polling machinery.
            interval = conf.HORIZON_CONFIG['ajax_poll_interval']
            self.attrs['data-update-interval'] = interval
            self.attrs['data-update-url'] = self.get_ajax_update_url()
            self.classes.append("ajax-update")
        self.attrs['data-object-id'] = table.get_object_id(datum)
        # Add the row's status class and id to the attributes to be rendered.
        self.classes.append(self.status_class)
        id_vals = {"table": self.table.name,
                   "sep": STRING_SEPARATOR,
                   "id": table.get_object_id(datum)}
        self.id = "%(table)s%(sep)srow%(sep)s%(id)s" % id_vals
        self.attrs['id'] = self.id
        # Add the row's display name if available
        display_name = table.get_object_display(datum)
        if display_name:
            self.attrs['data-display'] = escape(display_name)
    def __repr__(self):
        """Return a debugging representation including the row id."""
        return '<%s: %s>' % (self.__class__.__name__, self.id)
    def __iter__(self):
        """Iterating a row yields each of its cells, in column order."""
        return iter(self.cells.values())
    @property
    def status(self):
        """Aggregate status computed from the table's ``status_columns``.
        Implicitly returns None when no status columns are configured.
        """
        column_names = self.table._meta.status_columns
        if column_names:
            statuses = dict([(column_name, self.cells[column_name].status) for
                             column_name in column_names])
            return self.table.calculate_row_status(statuses)
    @property
    def status_class(self):
        """CSS class reflecting :attr:`status`; empty when not applicable."""
        column_names = self.table._meta.status_columns
        if column_names:
            return self.table.get_row_status_class(self.status)
        else:
            return ''
    def render(self):
        """Render this row through the shared row template."""
        return render_to_string("horizon/common/_data_table_row.html",
                                {"row": self})
    def get_cells(self):
        """Returns the bound cells for this row in order."""
        return self.cells.values()
    def get_ajax_update_url(self):
        """Build the URL polled by the client to refresh this row."""
        table_url = self.table.get_absolute_url()
        params = urlencode(SortedDict([
            ("action", self.ajax_action_name),
            ("table", self.table.name),
            ("obj_id", self.table.get_object_id(self.datum))
        ]))
        return "%s?%s" % (table_url, params)
    def can_be_selected(self, datum):
        """By default if multiselect enabled return True. You can remove the
        checkbox after an ajax update here if required.
        """
        return True
    def get_data(self, request, obj_id):
        """Fetches the updated data for the row based on the object id
        passed in. Must be implemented by a subclass to allow AJAX updating.
        """
        return {}
class Cell(html.HTMLElement):
    """Represents a single cell in the table.
    A cell knows its backing ``datum``, its ``column`` and its ``row``,
    and derives both its display data and its HTML attributes from them.
    """
    def __init__(self, datum, column, row, attrs=None, classes=None):
        self.classes = classes or getattr(self, "classes", [])
        super(Cell, self).__init__()
        self.attrs.update(attrs or {})
        self.datum = datum
        self.column = column
        self.row = row
        self.wrap_list = column.wrap_list
        self.inline_edit_available = self.column.update_action is not None
        # initialize the update action if available
        if self.inline_edit_available:
            self.update_action = self.column.update_action()
            self.attrs['data-cell-name'] = column.name
            self.attrs['data-update-url'] = self.get_ajax_update_url()
        self.inline_edit_mod = False
        # add tooltip to cells if the truncate variable is set
        if column.truncate:
            data = getattr(datum, column.name, '') or ''
            if len(data) > column.truncate:
                self.attrs['data-toggle'] = 'tooltip'
                self.attrs['title'] = data
        self.data = self.get_data(datum, column, row)
    def get_data(self, datum, column, row):
        """Fetches the data to be displayed in this cell.
        Auto-generated columns (``multi_select`` / ``form_field`` /
        ``actions``) render widgets and cache the result per object id on
        the table; ordinary columns defer to ``column.get_data``.
        """
        table = row.table
        if column.auto == "multi_select":
            data = ""
            if row.can_be_selected(datum):
                widget = forms.CheckboxInput(check_test=lambda value: False)
                # Convert value to string to avoid accidental type conversion
                data = widget.render('object_ids',
                                     six.text_type(table.get_object_id(datum)),
                                     {'class': 'table-row-multi-select'})
            table._data_cache[column][table.get_object_id(datum)] = data
        elif column.auto == "form_field":
            widget = column.form_field
            if issubclass(widget.__class__, forms.Field):
                widget = widget.widget
            widget_name = "%s__%s" % \
                (column.name,
                 six.text_type(table.get_object_id(datum)))
            # Create local copy of attributes, so it don't change column
            # class form_field_attributes
            form_field_attributes = {}
            form_field_attributes.update(column.form_field_attributes)
            # Adding id of the input so it pairs with label correctly
            form_field_attributes['id'] = widget_name
            # When the urlize filter is configured for this column, render
            # the raw value so the form field holds the unfiltered data.
            if template.defaultfilters.urlize in column.filters:
                data = widget.render(widget_name,
                                     column.get_raw_data(datum),
                                     form_field_attributes)
            else:
                data = widget.render(widget_name,
                                     column.get_data(datum),
                                     form_field_attributes)
            table._data_cache[column][table.get_object_id(datum)] = data
        elif column.auto == "actions":
            data = table.render_row_actions(datum, pull_right=False)
            table._data_cache[column][table.get_object_id(datum)] = data
        else:
            data = column.get_data(datum)
            if column.cell_attributes_getter:
                cell_attributes = column.cell_attributes_getter(data) or {}
                self.attrs.update(cell_attributes)
        return data
    def __repr__(self):
        """Return a debugging representation with column and row ids."""
        return '<%s: %s, %s>' % (self.__class__.__name__,
                                 self.column.name,
                                 self.row.id)
    @property
    def id(self):
        """DOM id built from the column name and the datum's object id."""
        return ("%s__%s" % (self.column.name,
                six.text_type(self.row.table.get_object_id(self.datum))))
    @property
    def value(self):
        """Returns a formatted version of the data for final output.
        This takes into consideration the
        :attr:`~horizon.tables.Column.link`` and
        :attr:`~horizon.tables.Column.empty_value`
        attributes.
        """
        try:
            data = self.column.get_data(self.datum)
            if data is None:
                if callable(self.column.empty_value):
                    data = self.column.empty_value(self.datum)
                else:
                    data = self.column.empty_value
        except Exception:
            data = None
            exc_info = sys.exc_info()
            # six.reraise() raises internally and never returns; the
            # previous ``raise six.reraise(...)`` form carried a dead,
            # misleading outer ``raise``.
            six.reraise(template.TemplateSyntaxError, exc_info[1],
                        exc_info[2])
        if self.url and self.column.auto != "form_field":
            link_attrs = ' '.join(['%s="%s"' % (k, v) for (k, v) in
                                   self.column.link_attrs.items()])
            # Escape the data inside while allowing our HTML to render
            data = mark_safe('<a href="%s" %s>%s</a>' % (
                escape(self.url),
                link_attrs,
                escape(six.text_type(data))))
        return data
    @property
    def url(self):
        """Link target for this cell, or None when the column has no link."""
        if self.column.link:
            url = self.column.get_link_url(self.datum)
            if url:
                return url
        else:
            return None
    @property
    def status(self):
        """Gets the status for the column based on the cell's data."""
        # Deal with status column mechanics based in this cell's data
        if hasattr(self, '_status'):
            return self._status
        if self.column.status or \
                self.column.name in self.column.table._meta.status_columns:
            # returns the first matching status found
            data_status_lower = six.text_type(
                self.column.get_raw_data(self.datum)).lower()
            for status_name, status_value in self.column.status_choices:
                if six.text_type(status_name).lower() == data_status_lower:
                    self._status = status_value
                    return self._status
        self._status = None
        return self._status
    def get_status_class(self, status):
        """Returns a css class name determined by the status value."""
        if status is True:
            return "status_up"
        elif status is False:
            return "status_down"
        else:
            return "status_unknown"
    def get_default_classes(self):
        """Returns a flattened string of the cell's CSS classes."""
        if not self.url:
            self.column.classes = [cls for cls in self.column.classes
                                   if cls != "anchor"]
        column_class_string = self.column.get_final_attrs().get('class', "")
        classes = set(column_class_string.split(" "))
        if self.column.status:
            classes.add(self.get_status_class(self.status))
        if self.inline_edit_available:
            classes.add("inline_edit_available")
        return list(classes)
    def get_ajax_update_url(self):
        """Build the URL used for AJAX (inline-edit) updates of this cell."""
        column = self.column
        table_url = column.table.get_absolute_url()
        params = urlencode(SortedDict([
            ("action", self.row.ajax_cell_action_name),
            ("table", column.table.name),
            ("cell_name", column.name),
            ("obj_id", column.table.get_object_id(self.datum))
        ]))
        return "%s?%s" % (table_url, params)
    @property
    def update_allowed(self):
        """Determines whether update of given cell is allowed.
        Calls allowed action of defined UpdateAction of the Column.
        """
        return self.update_action.allowed(self.column.table.request,
                                          self.datum,
                                          self)
    def render(self):
        """Render this cell through the shared cell template."""
        return render_to_string("horizon/common/_data_table_cell.html",
                                {"cell": self})
class DataTableOptions(object):
    """Contains options for :class:`.DataTable` objects.
    .. attribute:: name
    A short name or slug for the table.
    .. attribute:: verbose_name
    A more verbose name for the table meant for display purposes.
    .. attribute:: columns
    A list of column objects or column names. Controls ordering/display
    of the columns in the table.
    .. attribute:: table_actions
    A list of action classes derived from the
    :class:`~horizon.tables.Action` class. These actions will handle tasks
    such as bulk deletion, etc. for multiple objects at once.
    .. attribute:: table_actions_menu
    A list of action classes similar to ``table_actions`` except these
    will be displayed in a menu instead of as individual buttons. Actions
    from this list will take precedence over actions from the
    ``table_actions`` list.
    .. attribute:: row_actions
    A list similar to ``table_actions`` except tailored to appear for
    each row. These actions act on a single object at a time.
    .. attribute:: actions_column
    Boolean value to control rendering of an additional column containing
    the various actions for each row. Defaults to ``True`` if any actions
    are specified in the ``row_actions`` option.
    .. attribute:: multi_select
    Boolean value to control rendering of an extra column with checkboxes
    for selecting multiple objects in the table. Defaults to ``True`` if
    any actions are specified in the ``table_actions`` option.
    .. attribute:: filter
    Boolean value to control the display of the "filter" search box
    in the table actions. By default it checks whether or not an instance
    of :class:`.FilterAction` is in :attr:`.table_actions`.
    .. attribute:: template
    String containing the template which should be used to render the
    table. Defaults to ``"horizon/common/_data_table.html"``.
    .. attribute:: context_var_name
    The name of the context variable which will contain the table when
    it is rendered. Defaults to ``"table"``.
    .. attribute:: prev_pagination_param
    The name of the query string parameter which will be used when
    paginating backward in this table. When using multiple tables in a
    single view this will need to be changed to differentiate between the
    tables. Default: ``"prev_marker"``.
    .. attribute:: pagination_param
    The name of the query string parameter which will be used when
    paginating forward in this table. When using multiple tables in a
    single view this will need to be changed to differentiate between the
    tables. Default: ``"marker"``.
    .. attribute:: status_columns
    A list or tuple of column names which represents the "state"
    of the data object being represented.
    If ``status_columns`` is set, when the rows are rendered the value
    of this column will be used to add an extra class to the row in
    the form of ``"status_up"`` or ``"status_down"`` for that row's
    data.
    The row status is used by other Horizon components to trigger tasks
    such as dynamic AJAX updating.
    .. attribute:: cell_class
    The class which should be used for rendering the cells of this table.
    Optional. Default: :class:`~horizon.tables.Cell`.
    .. attribute:: row_class
    The class which should be used for rendering the rows of this table.
    Optional. Default: :class:`~horizon.tables.Row`.
    .. attribute:: column_class
    The class which should be used for handling the columns of this table.
    Optional. Default: :class:`~horizon.tables.Column`.
    .. attribute:: css_classes
    A custom CSS class or classes to add to the ``<table>`` tag of the
    rendered table, for when the particular table requires special styling.
    Default: ``""``.
    .. attribute:: mixed_data_type
    A toggle to indicate if the table accepts two or more types of data.
    Optional. Default: ``False``
    .. attribute:: data_types
    A list of data types that this table would accept. Default to be an
    empty list, but if the attribute ``mixed_data_type`` is set to
    ``True``, then this list must have at least one element.
    .. attribute:: data_type_name
    The name of an attribute to assign to data passed to the table when it
    accepts mix data. Default: ``"_table_data_type"``
    .. attribute:: footer
    Boolean to control whether or not to show the table's footer.
    Default: ``True``.
    .. attribute:: hidden_title
    Boolean to control whether or not to show the table's title.
    Default: ``True``.
    .. attribute:: permissions
    A list of permission names which this table requires in order to be
    displayed. Defaults to an empty list (``[]``).
    """
    def __init__(self, options):
        # ``options`` is the table's inner ``Meta`` class (or None);
        # every attribute is read defensively with a sensible default.
        self.name = getattr(options, 'name', self.__class__.__name__)
        verbose_name = (getattr(options, 'verbose_name', None)
                        or self.name.title())
        self.verbose_name = verbose_name
        self.columns = getattr(options, 'columns', None)
        self.status_columns = getattr(options, 'status_columns', [])
        self.table_actions = getattr(options, 'table_actions', [])
        self.row_actions = getattr(options, 'row_actions', [])
        self.table_actions_menu = getattr(options, 'table_actions_menu', [])
        self.cell_class = getattr(options, 'cell_class', Cell)
        self.row_class = getattr(options, 'row_class', Row)
        self.column_class = getattr(options, 'column_class', Column)
        self.css_classes = getattr(options, 'css_classes', '')
        self.prev_pagination_param = getattr(options,
                                             'prev_pagination_param',
                                             'prev_marker')
        self.pagination_param = getattr(options, 'pagination_param', 'marker')
        self.browser_table = getattr(options, 'browser_table', None)
        self.footer = getattr(options, 'footer', True)
        self.hidden_title = getattr(options, 'hidden_title', True)
        self.no_data_message = getattr(options,
                                       "no_data_message",
                                       _("No items to display."))
        self.permissions = getattr(options, 'permissions', [])
        # Set self.filter if we have any FilterActions
        filter_actions = [action for action in self.table_actions if
                          issubclass(action, FilterAction)]
        if len(filter_actions) > 1:
            raise NotImplementedError("Multiple filter actions are not "
                                      "currently supported.")
        self.filter = getattr(options, 'filter', len(filter_actions) > 0)
        if len(filter_actions) == 1:
            self._filter_action = filter_actions.pop()
        else:
            self._filter_action = None
        self.template = getattr(options,
                                'template',
                                'horizon/common/_data_table.html')
        self.row_actions_dropdown_template = ('horizon/common/_data_table_'
                                              'row_actions_dropdown.html')
        self.row_actions_row_template = ('horizon/common/_data_table_'
                                         'row_actions_row.html')
        self.table_actions_template = \
            'horizon/common/_data_table_table_actions.html'
        self.context_var_name = six.text_type(getattr(options,
                                                      'context_var_name',
                                                      'table'))
        self.actions_column = getattr(options,
                                      'actions_column',
                                      len(self.row_actions) > 0)
        self.multi_select = getattr(options,
                                    'multi_select',
                                    len(self.table_actions) > 0)
        # Set runtime table defaults; not configurable.
        self.has_prev_data = False
        self.has_more_data = False
        # Set mixed data type table attr
        self.mixed_data_type = getattr(options, 'mixed_data_type', False)
        self.data_types = getattr(options, 'data_types', [])
        # If data_types has more than one element, set mixed_data_type
        # to True automatically.
        if len(self.data_types) > 1:
            self.mixed_data_type = True
        # However, if mixed_data_type was set to True manually while
        # data_types has one element or fewer, raise an error.
        if self.mixed_data_type and len(self.data_types) <= 1:
            raise ValueError("If mixed_data_type is set to True in class %s, "
                             "data_types should have more than one type" %
                             self.name)
        self.data_type_name = getattr(options,
                                      'data_type_name',
                                      "_table_data_type")
class DataTableMetaclass(type):
    """Metaclass to add options to DataTable class and collect columns."""
    def __new__(mcs, name, bases, attrs):
        # Process options from Meta
        class_name = name
        attrs["_meta"] = opts = DataTableOptions(attrs.get("Meta", None))
        # Gather columns; this prevents the column from being an attribute
        # on the DataTable class and avoids naming conflicts.
        columns = []
        # Snapshot the items up front: we pop from ``attrs`` while
        # iterating, which raises RuntimeError on a live dict view (Py3).
        for attr_name, obj in list(attrs.items()):
            if issubclass(type(obj), (opts.column_class, Column)):
                column_instance = attrs.pop(attr_name)
                column_instance.name = attr_name
                column_instance.classes.append('normal_column')
                columns.append((attr_name, column_instance))
        columns.sort(key=lambda x: x[1].creation_counter)
        # Iterate in reverse to preserve final order
        for base in bases[::-1]:
            if hasattr(base, 'base_columns'):
                # list() keeps this concatenation working when items()
                # returns a view instead of a list (Py3).
                columns = list(base.base_columns.items()) + columns
        attrs['base_columns'] = SortedDict(columns)
        # If the table is in a ResourceBrowser, the column number must meet
        # these limits because of the width of the browser.
        if opts.browser_table == "navigation" and len(columns) > 3:
            raise ValueError("You can only assign three columns to %s."
                             % class_name)
        if opts.browser_table == "content" and len(columns) > 2:
            raise ValueError("You can only assign two columns to %s."
                             % class_name)
        if opts.columns:
            # Remove any columns that weren't declared if we're being explicit
            # NOTE: we're iterating a COPY of the list here!
            for column_data in columns[:]:
                if column_data[0] not in opts.columns:
                    columns.remove(column_data)
            # Re-order based on declared columns
            columns.sort(key=lambda x: attrs['_meta'].columns.index(x[0]))
        # Add in our auto-generated columns
        if opts.multi_select and opts.browser_table != "navigation":
            multi_select = opts.column_class("multi_select",
                                             verbose_name="",
                                             auto="multi_select")
            multi_select.classes.append('multi_select_column')
            columns.insert(0, ("multi_select", multi_select))
        if opts.actions_column:
            actions_column = opts.column_class("actions",
                                               verbose_name=_("Actions"),
                                               auto="actions")
            actions_column.classes.append('actions_column')
            columns.append(("actions", actions_column))
        # Store this set of columns internally so we can copy them per-instance
        attrs['_columns'] = SortedDict(columns)
        # Gather and register actions for later access since we only want
        # to instantiate them once.
        # (list() call gives deterministic sort order, which sets don't have.)
        actions = list(set(opts.row_actions) | set(opts.table_actions) |
                       set(opts.table_actions_menu))
        actions.sort(key=attrgetter('name'))
        actions_dict = SortedDict([(action.name, action())
                                   for action in actions])
        attrs['base_actions'] = actions_dict
        if opts._filter_action:
            # Replace our filter action with the instantiated version
            opts._filter_action = actions_dict[opts._filter_action.name]
        # Create our new class!
        return type.__new__(mcs, name, bases, attrs)
@six.add_metaclass(DataTableMetaclass)
class DataTable(object):
"""A class which defines a table with all data and associated actions.
.. attribute:: name
String. Read-only access to the name specified in the
table's Meta options.
.. attribute:: multi_select
Boolean. Read-only access to whether or not this table
should display a column for multi-select checkboxes.
.. attribute:: data
Read-only access to the data this table represents.
.. attribute:: filtered_data
Read-only access to the data this table represents, filtered by
the :meth:`~horizon.tables.FilterAction.filter` method of the table's
:class:`~horizon.tables.FilterAction` class (if one is provided)
using the current request's query parameters.
"""
    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        """Bind the table to ``request`` and make per-instance column copies.
        :param request: the current request, used for rendering, filtering
            and action dispatch.
        :param data: the dataset this table presents (may be None).
        :param needs_form_wrapper: explicit override for whether the table
            is wrapped in a ``<form>``; None means decide from
            :attr:`has_actions`.
        """
        self.request = request
        self.data = data
        self.kwargs = kwargs
        self._needs_form_wrapper = needs_form_wrapper
        self._no_data_message = self._meta.no_data_message
        self.breadcrumb = None
        self.current_item_id = None
        self.permissions = self._meta.permissions
        # Create a new set
        columns = []
        for key, _column in self._columns.items():
            # Shallow-copy each declared column so per-instance state (e.g.
            # the back-reference to this table) isn't shared between table
            # instances of the same class.
            column = copy.copy(_column)
            column.table = self
            columns.append((key, column))
        self.columns = SortedDict(columns)
        self._populate_data_cache()
        # Associate these actions with this table
        for action in self.base_actions.values():
            action.associate_with_table(self)
        self.needs_summary_row = any([col.summation
                                      for col in self.columns.values()])
def __unicode__(self):
return six.text_type(self._meta.verbose_name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._meta.name)
    @property
    def name(self):
        """Shortcut for the table name declared in ``Meta``."""
        return self._meta.name
    @property
    def footer(self):
        """Whether the table renders a footer (``Meta.footer``)."""
        return self._meta.footer
    @property
    def multi_select(self):
        """Whether the multi-select checkbox column is enabled."""
        return self._meta.multi_select
    @property
    def filtered_data(self):
        """The table's data, run through the filter action when applicable.
        The result is computed once and memoized on ``_filtered_data``.
        """
        # This function should be using django.utils.functional.cached_property
        # decorator, but unfortunately due to bug in Django
        # https://code.djangoproject.com/ticket/19872 it would make it fail
        # when being mocked by mox in tests.
        if not hasattr(self, '_filtered_data'):
            self._filtered_data = self.data
            if self._meta.filter and self._meta._filter_action:
                action = self._meta._filter_action
                filter_string = self.get_filter_string()
                filter_field = self.get_filter_field()
                request_method = self.request.method
                # Apply the filter client-side when the action requests
                # preloading, when the request method matches the action's,
                # or when the filter field isn't handled by the API backend.
                needs_preloading = (not filter_string
                                    and request_method == 'GET'
                                    and action.needs_preloading)
                valid_method = (request_method == action.method)
                not_api_filter = (filter_string
                                  and not action.is_api_filter(filter_field))
                if valid_method or needs_preloading or not_api_filter:
                    if self._meta.mixed_data_type:
                        self._filtered_data = action.data_type_filter(
                            self, self.data, filter_string)
                    else:
                        self._filtered_data = action.filter(
                            self, self.data, filter_string)
        return self._filtered_data
def slugify_name(self):
return str(slugify(self._meta.name))
def get_filter_string(self):
"""Get the filter string value. For 'server' type filters this is
saved in the session so that it gets persisted across table loads.
For other filter types this is obtained from the POST dict.
"""
filter_action = self._meta._filter_action
param_name = filter_action.get_param_name()
filter_string = ''
if filter_action.filter_type == 'server':
filter_string = self.request.session.get(param_name, '')
else:
filter_string = self.request.POST.get(param_name, '')
return filter_string
def get_filter_field(self):
"""Get the filter field value used for 'server' type filters. This
is the value from the filter action's list of filter choices.
"""
filter_action = self._meta._filter_action
param_name = '%s_field' % filter_action.get_param_name()
filter_field = self.request.session.get(param_name, '')
return filter_field
def _populate_data_cache(self):
self._data_cache = {}
# Set up hash tables to store data points for each column
for column in self.get_columns():
self._data_cache[column] = {}
def _filter_action(self, action, request, datum=None):
try:
# Catch user errors in permission functions here
row_matched = True
if self._meta.mixed_data_type:
row_matched = action.data_type_matched(datum)
return action._allowed(request, datum) and row_matched
except Exception:
LOG.exception("Error while checking action permissions.")
return None
def is_browser_table(self):
if self._meta.browser_table:
return True
return False
    def render(self):
        """Renders the table using the template from the table options."""
        table_template = template.loader.get_template(self._meta.template)
        # The table is exposed to the template under Meta.context_var_name
        # (default "table").
        extra_context = {self._meta.context_var_name: self,
                         'hidden_title': self._meta.hidden_title}
        context = template.RequestContext(self.request, extra_context)
        return table_template.render(context)
def get_absolute_url(self):
"""Returns the canonical URL for this table.
This is used for the POST action attribute on the form element
wrapping the table. In many cases it is also useful for redirecting
after a successful action on the table.
For convenience it defaults to the value of
``request.get_full_path()`` with any query string stripped off,
e.g. the path at which the table was requested.
"""
return self.request.get_full_path().partition('?')[0]
    def get_full_url(self):
        """Returns the full URL path for this table.
        This is used for the POST action attribute on the form element
        wrapping the table. We use this method to persist the
        pagination marker.
        """
        # Unlike get_absolute_url(), the query string is kept here.
        return self.request.get_full_path()
    def get_empty_message(self):
        """Returns the message to be displayed when there is no data."""
        return self._no_data_message
    def get_object_by_id(self, lookup):
        """Returns the data object from the table's dataset which matches
        the ``lookup`` parameter specified. An error will be raised if
        the match is not a single data object.
        We will convert the object id and ``lookup`` to unicode before
        comparison.
        Uses :meth:`~horizon.tables.DataTable.get_object_id` internally.
        Raises ``ValueError`` on multiple matches and redirects via
        ``exceptions.Http302`` when nothing matches.
        """
        if not isinstance(lookup, six.text_type):
            # Coerce via str() first so non-string ids are stringified,
            # then decode as UTF-8 for a like-for-like comparison.
            # NOTE(review): the two-argument text_type(str(x), 'utf-8')
            # form is Python 2 specific.
            lookup = six.text_type(str(lookup), 'utf-8')
        matches = []
        for datum in self.data:
            obj_id = self.get_object_id(datum)
            if not isinstance(obj_id, six.text_type):
                obj_id = six.text_type(str(obj_id), 'utf-8')
            if obj_id == lookup:
                matches.append(datum)
        if len(matches) > 1:
            raise ValueError("Multiple matches were returned for that id: %s."
                             % matches)
        if not matches:
            raise exceptions.Http302(self.get_absolute_url(),
                                     _('No match returned for the id "%s".')
                                     % lookup)
        return matches[0]
    @property
    def has_actions(self):
        """Boolean. Indicates whether there are any available actions on this
        table.
        """
        if not self.base_actions:
            return False
        return any(self.get_table_actions()) or any(self._meta.row_actions)
    @property
    def needs_form_wrapper(self):
        """Boolean. Indicates whether this table should be rendered wrapped in
        a ``<form>`` tag or not.
        """
        # If needs_form_wrapper is explicitly set, defer to that.
        if self._needs_form_wrapper is not None:
            return self._needs_form_wrapper
        # Otherwise calculate whether or not we need a form element.
        return self.has_actions
def get_table_actions(self):
"""Returns a list of the action instances for this table."""
button_actions = [self.base_actions[action.name] for action in
self._meta.table_actions if
action not in self._meta.table_actions_menu]
menu_actions = [self.base_actions[action.name] for
action in self._meta.table_actions_menu]
bound_actions = button_actions + menu_actions
return [action for action in bound_actions if
self._filter_action(action, self.request)]
    def get_row_actions(self, datum):
        """Returns a list of the action instances for a specific row."""
        bound_actions = []
        for action in self._meta.row_actions:
            # Copy to allow modifying properties per row
            bound_action = copy.copy(self.base_actions[action.name])
            # attrs is also copied so per-row attribute changes don't leak
            # into the shared base action instance.
            bound_action.attrs = copy.copy(bound_action.attrs)
            bound_action.datum = datum
            # Remove disallowed actions.
            if not self._filter_action(bound_action,
                                       self.request,
                                       datum):
                continue
            # Hook for modifying actions based on data. No-op by default.
            bound_action.update(self.request, datum)
            # Pre-create the URL for this link with appropriate parameters
            if issubclass(bound_action.__class__, LinkAction):
                bound_action.bound_url = bound_action.get_link_url(datum)
            bound_actions.append(bound_action)
        return bound_actions
def set_multiselect_column_visibility(self, visible=True):
"""hide checkbox column if no current table action is allowed."""
if not self.multi_select:
return
select_column = self.columns.values()[0]
# Try to find if the hidden class need to be
# removed or added based on visible flag.
hidden_found = 'hidden' in select_column.classes
if hidden_found and visible:
select_column.classes.remove('hidden')
elif not hidden_found and not visible:
select_column.classes.append('hidden')
    def render_table_actions(self):
        """Renders the actions specified in ``Meta.table_actions``."""
        template_path = self._meta.table_actions_template
        table_actions_template = template.loader.get_template(template_path)
        bound_actions = self.get_table_actions()
        extra_context = {"table_actions": bound_actions,
                         "table_actions_buttons": [],
                         "table_actions_menu": []}
        # The filter action is rendered separately from regular buttons.
        if self._meta.filter and (
                self._filter_action(self._meta._filter_action, self.request)):
            extra_context["filter"] = self._meta._filter_action
        # Partition the remaining actions into menu entries and buttons.
        for action in bound_actions:
            if action.__class__ in self._meta.table_actions_menu:
                extra_context['table_actions_menu'].append(action)
            elif action != extra_context.get('filter'):
                extra_context['table_actions_buttons'].append(action)
        context = template.RequestContext(self.request, extra_context)
        # Hide the multi-select checkboxes when no action is allowed.
        self.set_multiselect_column_visibility(len(bound_actions) > 0)
        return table_actions_template.render(context)
    def render_row_actions(self, datum, pull_right=True, row=False):
        """Renders the actions specified in ``Meta.row_actions`` using the
        current row data. If `row` is True, the actions are rendered in a row
        of buttons. Otherwise they are rendered in a dropdown box.
        """
        if row:
            template_path = self._meta.row_actions_row_template
        else:
            template_path = self._meta.row_actions_dropdown_template
        row_actions_template = template.loader.get_template(template_path)
        bound_actions = self.get_row_actions(datum)
        extra_context = {"row_actions": bound_actions,
                         "row_id": self.get_object_id(datum),
                         "pull_right": pull_right}
        context = template.RequestContext(self.request, extra_context)
        return row_actions_template.render(context)
@staticmethod
def parse_action(action_string):
"""Parses the ``action`` parameter (a string) sent back with the
POST data. By default this parses a string formatted as
``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns
each of the pieces. The ``row_id`` is optional.
"""
if action_string:
bits = action_string.split(STRING_SEPARATOR)
table = bits[0]
action = bits[1]
try:
object_id = STRING_SEPARATOR.join(bits[2:])
if object_id == '':
object_id = None
except IndexError:
object_id = None
return table, action, object_id
    def take_action(self, action_name, obj_id=None, obj_ids=None):
        """Locates the appropriate action and routes the object
        data to it. The action should return an HTTP redirect
        if successful, or a value which evaluates to ``False``
        if unsuccessful.
        """
        # See if we have a list of ids
        obj_ids = obj_ids or self.request.POST.getlist('object_ids')
        action = self.base_actions.get(action_name, None)
        if not action or action.method != self.request.method:
            # We either didn't get an action or we're being hacked. Goodbye.
            return None
        # Meanwhile, back in Gotham...
        if not action.requires_input or obj_id or obj_ids:
            if obj_id:
                obj_id = self.sanitize_id(obj_id)
            if obj_ids:
                obj_ids = [self.sanitize_id(i) for i in obj_ids]
            # Single handling is easy
            if not action.handles_multiple:
                response = action.single(self, self.request, obj_id)
            # Otherwise figure out what to pass along
            else:
                # Preference given to a specific id, since that implies
                # the user selected an action for just one row.
                if obj_id:
                    obj_ids = [obj_id]
                response = action.multiple(self, self.request, obj_ids)
            return response
        elif action and action.requires_input and not (obj_id or obj_ids):
            # Input-requiring action invoked without a selection: inform
            # the user rather than failing.
            messages.info(self.request,
                          _("Please select a row before taking that action."))
        return None
@classmethod
def check_handler(cls, request):
"""Determine whether the request should be handled by this table."""
if request.method == "POST" and "action" in request.POST:
table, action, obj_id = cls.parse_action(request.POST["action"])
elif "table" in request.GET and "action" in request.GET:
table = request.GET["table"]
action = request.GET["action"]
obj_id = request.GET.get("obj_id", None)
else:
table = action = obj_id = None
return table, action, obj_id
    def maybe_preempt(self):
        """Determine whether the request should be handled by a preemptive
        action on this table or by an AJAX row update before loading any data.
        """
        request = self.request
        table_name, action_name, obj_id = self.check_handler(request)
        if table_name == self.name:
            # Handle AJAX row updating.
            new_row = self._meta.row_class(self)
            if new_row.ajax and new_row.ajax_action_name == action_name:
                try:
                    datum = new_row.get_data(request, obj_id)
                    # Re-mark the currently selected row after the refresh.
                    if self.get_object_id(datum) == self.current_item_id:
                        self.selected = True
                        new_row.classes.append('current_selected')
                    new_row.load_cells(datum)
                    error = False
                except Exception:
                    datum = None
                    error = exceptions.handle(request, ignore=True)
                if request.is_ajax():
                    if not error:
                        return HttpResponse(new_row.render())
                    else:
                        return HttpResponse(status=error.status_code)
            elif new_row.ajax_cell_action_name == action_name:
                # inline edit of the cell actions
                return self.inline_edit_handle(request, table_name,
                                               action_name, obj_id,
                                               new_row)
        # Otherwise give preemptive actions a chance to handle the request
        # before any table data is loaded.
        preemptive_actions = [action for action in
                              self.base_actions.values() if action.preempt]
        if action_name:
            for action in preemptive_actions:
                if action.name == action_name:
                    handled = self.take_action(action_name, obj_id)
                    if handled:
                        return handled
        return None
    def inline_edit_handle(self, request, table_name, action_name, obj_id,
                           new_row):
        """Inline edit handler.

        Showing form or handling update by POST of the cell: on GET this
        renders the single cell (optionally as a form field when
        ``inline_edit_mod`` is requested); on POST it delegates to
        :meth:`inline_update_action`.
        """
        try:
            cell_name = request.GET['cell_name']
            datum = new_row.get_data(request, obj_id)
            # TODO(lsmola) extract load cell logic to Cell and load
            # only 1 cell. This is kind of ugly.
            if request.GET.get('inline_edit_mod') == "true":
                # Switch the column to render as an editable form field.
                new_row.table.columns[cell_name].auto = "form_field"
                inline_edit_mod = True
            else:
                inline_edit_mod = False
            # Load the cell and set the inline_edit_mod.
            new_row.load_cells(datum)
            cell = new_row.cells[cell_name]
            cell.inline_edit_mod = inline_edit_mod
            # If not allowed, neither edit mod or updating is allowed.
            if not cell.update_allowed:
                datum_display = (self.get_object_display(datum) or
                                 _("N/A"))
                LOG.info('Permission denied to %s: "%s"' %
                         ("Update Action", datum_display))
                return HttpResponse(status=401)
            # If it is post request, we are updating the cell.
            if request.method == "POST":
                return self.inline_update_action(request,
                                                 datum,
                                                 cell,
                                                 obj_id,
                                                 cell_name)
            error = False
        except Exception:
            datum = None
            error = exceptions.handle(request, ignore=True)
        # NOTE(review): if the try block raised before ``cell`` was bound
        # and ``error`` is falsy, ``cell.render()`` would raise NameError —
        # presumably exceptions.handle always returns an error object here;
        # confirm.
        if request.is_ajax():
            if not error:
                return HttpResponse(cell.render())
            else:
                return HttpResponse(status=error.status_code)
    def inline_update_action(self, request, datum, cell, obj_id, cell_name):
        """Handling update by POST of the cell.

        Parses and validates the submitted value with the column's Django
        form field, runs the cell's update action, and returns a JSON
        response indicating success (200) or validation failure (400).
        """
        # Inline-edit inputs are named "<cell_name>__<obj_id>" in the form.
        new_cell_value = request.POST.get(
            cell_name + '__' + obj_id, None)
        if issubclass(cell.column.form_field.__class__,
                      forms.Field):
            try:
                # using Django Form Field to parse the
                # right value from POST and to validate it
                new_cell_value = (
                    cell.column.form_field.clean(
                        new_cell_value))
                cell.update_action.action(
                    self.request, datum, obj_id, cell_name, new_cell_value)
                response = {
                    'status': 'updated',
                    'message': ''
                }
                return HttpResponse(
                    json.dumps(response),
                    status=200,
                    content_type="application/json")
            except core_exceptions.ValidationError:
                # if there is a validation error, I will
                # return the message to the client
                exc_type, exc_value, exc_traceback = (
                    sys.exc_info())
                response = {
                    'status': 'validation_error',
                    'message': ' '.join(exc_value.messages)}
                return HttpResponse(
                    json.dumps(response),
                    status=400,
                    content_type="application/json")
        # NOTE(review): returns None implicitly when the column's
        # form_field is not a forms.Field — confirm callers expect this.
def maybe_handle(self):
"""Determine whether the request should be handled by any action on
this table after data has been loaded.
"""
request = self.request
table_name, action_name, obj_id = self.check_handler(request)
if table_name == self.name and action_name:
action_names = [action.name for action in
self.base_actions.values() if not action.preempt]
# do not run preemptive actions here
if action_name in action_names:
return self.take_action(action_name, obj_id)
return None
    def sanitize_id(self, obj_id):
        """Override to modify an incoming obj_id to match existing
        API data types or modify the format.

        The default implementation returns the id unchanged.
        """
        return obj_id
    def get_object_id(self, datum):
        """Returns the identifier for the object this row will represent.

        By default this returns an ``id`` attribute on the given object,
        but this can be overridden to return other values.

        .. warning::

            Make sure that the value returned is a unique value for the id
            otherwise rendering issues can occur.
        """
        return datum.id
def get_object_display(self, datum):
"""Returns a display name that identifies this object.
By default, this returns a ``name`` attribute from the given object,
but this can be overridden to return other values.
"""
if hasattr(datum, 'name'):
return datum.name
return None
    def has_prev_data(self):
        """Returns a boolean value indicating whether there is previous data
        available to this table from the source (generally an API).

        The method is largely meant for internal use, but if you want to
        override it to provide custom behavior you can do so at your own risk.
        """
        # Delegates to the table's Meta options, set during data loading.
        return self._meta.has_prev_data
    def has_more_data(self):
        """Returns a boolean value indicating whether there is more data
        available to this table from the source (generally an API).

        The method is largely meant for internal use, but if you want to
        override it to provide custom behavior you can do so at your own risk.
        """
        # Delegates to the table's Meta options, set during data loading.
        return self._meta.has_more_data
def get_prev_marker(self):
"""Returns the identifier for the first object in the current data set
for APIs that use marker/limit-based paging.
"""
return http.urlquote_plus(self.get_object_id(self.data[0])) \
if self.data else ''
def get_marker(self):
"""Returns the identifier for the last object in the current data set
for APIs that use marker/limit-based paging.
"""
return http.urlquote_plus(self.get_object_id(self.data[-1])) \
if self.data else ''
def get_prev_pagination_string(self):
"""Returns the query parameter string to paginate this table
to the previous page.
"""
return "=".join([self._meta.prev_pagination_param,
self.get_prev_marker()])
def get_pagination_string(self):
"""Returns the query parameter string to paginate this table
to the next page.
"""
return "=".join([self._meta.pagination_param, self.get_marker()])
def calculate_row_status(self, statuses):
"""Returns a boolean value determining the overall row status
based on the dictionary of column name to status mappings passed in.
By default, it uses the following logic:
#. If any statuses are ``False``, return ``False``.
#. If no statuses are ``False`` but any or ``None``, return ``None``.
#. If all statuses are ``True``, return ``True``.
This provides the greatest protection against false positives without
weighting any particular columns.
The ``statuses`` parameter is passed in as a dictionary mapping
column names to their statuses in order to allow this function to
be overridden in such a way as to weight one column's status over
another should that behavior be desired.
"""
values = statuses.values()
if any([status is False for status in values]):
return False
elif any([status is None for status in values]):
return None
else:
return True
def get_row_status_class(self, status):
"""Returns a css class name determined by the status value. This class
name is used to indicate the status of the rows in the table if
any ``status_columns`` have been specified.
"""
if status is True:
return "status_up"
elif status is False:
return "status_down"
else:
return "status_unknown"
    def get_columns(self):
        """Returns this table's columns including auto-generated ones."""
        # ``self.columns`` holds all column instances for this table.
        return self.columns.values()
def get_rows(self):
"""Return the row data for this table broken out by columns."""
rows = []
try:
for datum in self.filtered_data:
row = self._meta.row_class(self, datum)
if self.get_object_id(datum) == self.current_item_id:
self.selected = True
row.classes.append('current_selected')
rows.append(row)
except Exception:
# Exceptions can be swallowed at the template level here,
# re-raising as a TemplateSyntaxError makes them visible.
LOG.exception("Error while rendering table rows.")
exc_info = sys.exc_info()
raise six.reraise(template.TemplateSyntaxError, exc_info[1],
exc_info[2])
return rows
    def css_classes(self):
        """Returns the additional CSS class to be added to <table> tag."""
        # Configured via the table's Meta options.
        return self._meta.css_classes
|
KaranToor/MA450 | refs/heads/master | google-cloud-sdk/lib/surface/resource_manager/folders/__init__.py | 3 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The group for the Cloud Folders CLI."""
from googlecloudsdk.calliope import base
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Folders(base.Group):
  """Manage Cloud Folders.

  Commands to query and update your Cloud Folders. This command group is
  hidden and only available on the alpha release track.
  """
|
vongazman/libcloud | refs/heads/trunk | docs/examples/compute/ntta/instantiate_driver.py | 30 | from pprint import pprint
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.NTTA)
driver = cls('my username', 'my password', region='ntta-na')
pprint(driver.list_nodes())
|
openego/oeplatform | refs/heads/develop | modelview/migrations/0034_auto_20160426_1106.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-26 09:06
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration splitting study metadata out of
    ``Energyscenario`` into a new ``Energystudy`` model.

    Creates ``Energystudy``, removes the corresponding fields from
    ``Energyscenario``, adjusts several verbose names / choices, and wires
    the new foreign-key relations.
    """

    dependencies = [("modelview", "0033_auto_20160407_1635")]

    operations = [
        # 1. New model holding the study-level metadata previously stored
        #    on Energyscenario.
        migrations.CreateModel(
            name="Energystudy",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "name_of_the_study",
                    models.CharField(
                        help_text="What is the name of the study?",
                        max_length=1000,
                        verbose_name="Name of the study",
                    ),
                ),
                (
                    "author_Institution",
                    models.CharField(
                        help_text="Who are the authors of the study and for which institution do they work?",
                        max_length=1000,
                        verbose_name="Author, Institution",
                    ),
                ),
                (
                    "client",
                    models.CharField(
                        help_text="Who are the customers requesting the study?",
                        max_length=1000,
                        null=True,
                        verbose_name="Client",
                    ),
                ),
                ("funding_private", models.BooleanField(verbose_name="private")),
                ("funding_public", models.BooleanField(verbose_name="public")),
                ("funding_no_funding", models.BooleanField(verbose_name="no funding")),
                (
                    "citation_reference",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=1000),
                        null=True,
                        size=None,
                        verbose_name="Citation reference",
                    ),
                ),
                (
                    "citation_doi",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=1000),
                        null=True,
                        size=None,
                        verbose_name="Citation doi",
                    ),
                ),
                (
                    "aim",
                    models.CharField(
                        help_text="What is the purpose (hypothesis) and research question of the study?",
                        max_length=1000,
                        null=True,
                        verbose_name="Aim",
                    ),
                ),
                (
                    "new_aspects",
                    models.CharField(
                        help_text="What is new? (beyond state of research)",
                        max_length=1000,
                        null=True,
                        verbose_name="New aspects",
                    ),
                ),
                (
                    "spatial_Geographical_coverage",
                    models.CharField(
                        help_text="Which geographical region is adressed in the study?",
                        max_length=1000,
                        null=True,
                        verbose_name="Spatial / Geographical coverage",
                    ),
                ),
                ("time_frame_2020", models.BooleanField(verbose_name="2020")),
                ("time_frame_2030", models.BooleanField(verbose_name="2030")),
                ("time_frame_2050", models.BooleanField(verbose_name="2050")),
                ("time_frame_other", models.BooleanField(verbose_name="other")),
                ("time_frame_other_text", models.CharField(max_length=1000, null=True)),
                (
                    "time_frame_2_target_year",
                    models.BooleanField(verbose_name="target year"),
                ),
                (
                    "time_frame_2_transformation_path",
                    models.BooleanField(verbose_name="transformation path"),
                ),
                (
                    "tools_other",
                    models.CharField(
                        help_text="Which model(s) and other tools have been used?",
                        max_length=1000,
                        null=True,
                        verbose_name="Tools",
                    ),
                ),
                (
                    "modeled_energy_sectors_electricity",
                    models.BooleanField(verbose_name="electricity"),
                ),
                (
                    "modeled_energy_sectors_heat",
                    models.BooleanField(verbose_name="heat"),
                ),
                (
                    "modeled_energy_sectors_liquid_fuels",
                    models.BooleanField(verbose_name="liquid fuels"),
                ),
                ("modeled_energy_sectors_gas", models.BooleanField(verbose_name="gas")),
                (
                    "modeled_energy_sectors_others",
                    models.BooleanField(verbose_name="others"),
                ),
                (
                    "modeled_energy_sectors_others_text",
                    models.CharField(max_length=1000, null=True),
                ),
                (
                    "modeled_demand_sectors_households",
                    models.BooleanField(verbose_name="households"),
                ),
                (
                    "modeled_demand_sectors_industry",
                    models.BooleanField(verbose_name="industry"),
                ),
                (
                    "modeled_demand_sectors_commercial_sector",
                    models.BooleanField(verbose_name="commercial sector"),
                ),
                (
                    "modeled_demand_sectors_transport",
                    models.BooleanField(verbose_name="transport"),
                ),
                (
                    "economic_behavioral_perfect",
                    models.BooleanField(
                        verbose_name="single fictive decision-maker with perfect knowledge (perfect foresight optimization)"
                    ),
                ),
                (
                    "economic_behavioral_myopic",
                    models.BooleanField(
                        verbose_name="single fictive decision-maker with myopic foresight (time-step optimization)"
                    ),
                ),
                (
                    "economic_behavioral_qualitative",
                    models.BooleanField(
                        verbose_name="decisions simulated by modeller due to qualitative criteria (spread-sheet simulation)"
                    ),
                ),
                (
                    "economic_behavioral_agentbased",
                    models.BooleanField(
                        verbose_name="representation of heterogenous decision rules for multiple agents (agent-based approach)"
                    ),
                ),
                (
                    "economic_behavioral_other",
                    models.BooleanField(verbose_name="other"),
                ),
                (
                    "economic_behavioral_other_text",
                    models.CharField(max_length=1000, null=True),
                ),
                ("renewables_PV", models.BooleanField(verbose_name="PV")),
                ("renewables_wind", models.BooleanField(verbose_name="wind")),
                ("renewables_hydro", models.BooleanField(verbose_name="hydro")),
                ("renewables_biomass", models.BooleanField(verbose_name="biomass")),
                ("renewables_biogas", models.BooleanField(verbose_name="biogas")),
                ("renewables_solar", models.BooleanField(verbose_name="solar thermal")),
                ("renewables_others", models.BooleanField(verbose_name="others")),
                (
                    "renewables_others_text",
                    models.CharField(max_length=1000, null=True),
                ),
                (
                    "conventional_generation_gas",
                    models.BooleanField(verbose_name="gas"),
                ),
                (
                    "conventional_generation_coal",
                    models.BooleanField(verbose_name="coal"),
                ),
                (
                    "conventional_generation_oil",
                    models.BooleanField(verbose_name="oil"),
                ),
                (
                    "conventional_generation_liquid",
                    models.BooleanField(verbose_name="liquid fuels"),
                ),
                (
                    "conventional_generation_nuclear",
                    models.BooleanField(verbose_name="nuclear"),
                ),
                ("CHP", models.BooleanField(verbose_name="CHP")),
                (
                    "networks_electricity_gas_electricity",
                    models.BooleanField(verbose_name="electricity"),
                ),
                (
                    "networks_electricity_gas_gas",
                    models.BooleanField(verbose_name="gas"),
                ),
                (
                    "networks_electricity_gas_heat",
                    models.BooleanField(verbose_name="heat"),
                ),
                ("storages_battery", models.BooleanField(verbose_name="battery")),
                ("storages_kinetic", models.BooleanField(verbose_name="kinetic")),
                ("storages_CAES", models.BooleanField(verbose_name="compressed air")),
                ("storages_PHS", models.BooleanField(verbose_name="pump hydro")),
                ("storages_chemical", models.BooleanField(verbose_name="chemical")),
                (
                    "economic_focuses_included",
                    models.CharField(
                        help_text="Have there been economic focusses/sectors included?",
                        max_length=1000,
                        null=True,
                        verbose_name="Economic focuses included",
                    ),
                ),
                (
                    "social_focuses_included",
                    models.CharField(
                        help_text="Have there been social focusses/sectors included? ",
                        max_length=1000,
                        null=True,
                        verbose_name="Social focuses included",
                    ),
                ),
                (
                    "endogenous_variables",
                    models.CharField(
                        help_text="Which time series and variables are generated inside the model?",
                        max_length=1000,
                        null=True,
                        verbose_name="Endogenous variables",
                    ),
                ),
                (
                    "sensitivities",
                    models.BooleanField(
                        help_text="Have there been sensitivities?",
                        verbose_name="Sensitivities",
                    ),
                ),
                ("time_steps_anual", models.BooleanField(verbose_name="anual")),
                ("time_steps_hour", models.BooleanField(verbose_name="hour")),
                ("time_steps_15_min", models.BooleanField(verbose_name="15 min")),
                ("time_steps_1_min", models.BooleanField(verbose_name="1 min")),
                ("time_steps_sec", models.BooleanField(verbose_name="sec")),
                ("time_steps_other", models.BooleanField(verbose_name="other")),
                ("time_steps_other_text", models.CharField(max_length=1000, null=True)),
            ],
        ),
        # 2. Remove the study-level fields that moved to Energystudy.
        migrations.RemoveField(model_name="energyscenario", name="CHP"),
        migrations.RemoveField(model_name="energyscenario", name="aim"),
        migrations.RemoveField(model_name="energyscenario", name="author_Institution"),
        migrations.RemoveField(model_name="energyscenario", name="citation_doi"),
        migrations.RemoveField(model_name="energyscenario", name="citation_reference"),
        migrations.RemoveField(model_name="energyscenario", name="client"),
        migrations.RemoveField(
            model_name="energyscenario", name="conventional_generation_coal"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="conventional_generation_gas"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="conventional_generation_liquid"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="conventional_generation_nuclear"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="conventional_generation_oil"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="economic_behavioral_agentbased"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="economic_behavioral_myopic"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="economic_behavioral_other"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="economic_behavioral_other_text"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="economic_behavioral_perfect"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="economic_behavioral_qualitative"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="economic_focuses_included"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="endogenous_variables"
        ),
        migrations.RemoveField(model_name="energyscenario", name="funding_no_funding"),
        migrations.RemoveField(model_name="energyscenario", name="funding_private"),
        migrations.RemoveField(model_name="energyscenario", name="funding_public"),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_demand_sectors_commercial_sector"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_demand_sectors_households"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_demand_sectors_industry"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_demand_sectors_transport"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_energy_sectors_electricity"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_energy_sectors_gas"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_energy_sectors_heat"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_energy_sectors_liquid_fuels"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_energy_sectors_others"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="modeled_energy_sectors_others_text"
        ),
        migrations.RemoveField(model_name="energyscenario", name="name_of_the_study"),
        migrations.RemoveField(
            model_name="energyscenario", name="networks_electricity_gas_electricity"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="networks_electricity_gas_gas"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="networks_electricity_gas_heat"
        ),
        migrations.RemoveField(model_name="energyscenario", name="new_aspects"),
        migrations.RemoveField(model_name="energyscenario", name="renewables_PV"),
        migrations.RemoveField(model_name="energyscenario", name="renewables_biogas"),
        migrations.RemoveField(model_name="energyscenario", name="renewables_biomass"),
        migrations.RemoveField(model_name="energyscenario", name="renewables_hydro"),
        migrations.RemoveField(model_name="energyscenario", name="renewables_others"),
        migrations.RemoveField(
            model_name="energyscenario", name="renewables_others_text"
        ),
        migrations.RemoveField(model_name="energyscenario", name="renewables_solar"),
        migrations.RemoveField(model_name="energyscenario", name="renewables_wind"),
        migrations.RemoveField(model_name="energyscenario", name="sensitivities"),
        migrations.RemoveField(
            model_name="energyscenario", name="social_focuses_included"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="spatial_Geographical_coverage"
        ),
        migrations.RemoveField(model_name="energyscenario", name="storages_CAES"),
        migrations.RemoveField(model_name="energyscenario", name="storages_PHS"),
        migrations.RemoveField(model_name="energyscenario", name="storages_battery"),
        migrations.RemoveField(model_name="energyscenario", name="storages_chemical"),
        migrations.RemoveField(model_name="energyscenario", name="storages_kinetic"),
        migrations.RemoveField(model_name="energyscenario", name="time_frame_2020"),
        migrations.RemoveField(model_name="energyscenario", name="time_frame_2030"),
        migrations.RemoveField(model_name="energyscenario", name="time_frame_2050"),
        migrations.RemoveField(
            model_name="energyscenario", name="time_frame_2_target_year"
        ),
        migrations.RemoveField(
            model_name="energyscenario", name="time_frame_2_transformation_path"
        ),
        migrations.RemoveField(model_name="energyscenario", name="time_frame_other"),
        migrations.RemoveField(
            model_name="energyscenario", name="time_frame_other_text"
        ),
        migrations.RemoveField(model_name="energyscenario", name="time_steps_15_min"),
        migrations.RemoveField(model_name="energyscenario", name="time_steps_1_min"),
        migrations.RemoveField(model_name="energyscenario", name="time_steps_anual"),
        migrations.RemoveField(model_name="energyscenario", name="time_steps_hour"),
        migrations.RemoveField(model_name="energyscenario", name="time_steps_other"),
        migrations.RemoveField(
            model_name="energyscenario", name="time_steps_other_text"
        ),
        migrations.RemoveField(model_name="energyscenario", name="time_steps_sec"),
        migrations.RemoveField(model_name="energyscenario", name="tools_models"),
        migrations.RemoveField(model_name="energyscenario", name="tools_other"),
        # 3. Field alterations: verbose-name and choices updates.
        migrations.AlterField(
            model_name="energymodel",
            name="transfer_electricity_transition",
            field=models.BooleanField(default=False, verbose_name="transmission"),
        ),
        migrations.AlterField(
            model_name="energymodel",
            name="transfer_gas_transition",
            field=models.BooleanField(default=False, verbose_name="transmission"),
        ),
        migrations.AlterField(
            model_name="energymodel",
            name="transfer_heat_transition",
            field=models.BooleanField(default=False, verbose_name="transmission"),
        ),
        migrations.AlterField(
            model_name="energyscenario",
            name="emission_reductions_amount",
            field=models.SmallIntegerField(
                help_text="Development of emissions",
                null=True,
                verbose_name="Emission reductions",
            ),
        ),
        migrations.AlterField(
            model_name="energyscenario",
            name="emission_reductions_kind",
            field=models.CharField(
                choices=[
                    ("until", "until"),
                    ("per", "per"),
                    ("not estimated", "not estimated"),
                ],
                default="not estimated",
                max_length=15,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="energyscenario",
            name="energy_saving_kind",
            field=models.CharField(
                choices=[
                    ("until", "until"),
                    ("per", "per"),
                    ("not estimated", "not estimated"),
                ],
                default="not estimated",
                max_length=15,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="energyscenario",
            name="potential_energy_savings_kind",
            field=models.CharField(
                choices=[
                    ("until", "until"),
                    ("per", "per"),
                    ("not estimated", "not estimated"),
                ],
                default="not estimated",
                max_length=15,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="energyscenario",
            name="share_RE_heat_kind",
            field=models.CharField(
                choices=[
                    ("until", "until"),
                    ("per", "per"),
                    ("not estimated", "not estimated"),
                ],
                default="not estimated",
                max_length=15,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="energyscenario",
            name="share_RE_mobility_kind",
            field=models.CharField(
                choices=[
                    ("until", "until"),
                    ("per", "per"),
                    ("not estimated", "not estimated"),
                ],
                default="not estimated",
                max_length=15,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="energyscenario",
            name="share_RE_power_kind",
            field=models.CharField(
                choices=[
                    ("until", "until"),
                    ("per", "per"),
                    ("not estimated", "not estimated"),
                ],
                default="not estimated",
                max_length=15,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="energyscenario",
            name="share_RE_total_kind",
            field=models.CharField(
                choices=[
                    ("until", "until"),
                    ("per", "per"),
                    ("not estimated", "not estimated"),
                ],
                default="not estimated",
                max_length=15,
                null=True,
            ),
        ),
        # 4. Wire the new relations between study, model and scenario.
        migrations.AddField(
            model_name="energystudy",
            name="tools_models",
            field=models.ForeignKey(
                help_text="Which model(s) and other tools have been used?",
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="modelview.Energymodel",
                verbose_name="Tools",
            ),
        ),
        migrations.AddField(
            model_name="energyscenario",
            name="study",
            field=models.ForeignKey(
                blank=True,
                db_column="name_of_the_study_id",
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="modelview.Energystudy",
            ),
        ),
    ]
|
migonzalvar/alpha | refs/heads/master | webapp/tests/test_functional.py | 2 | import pytest
from . import pages
@pytest.mark.selenium
def test_main(selenium, live_server, admin_user):
    """End-to-end smoke test: log in through the UI and log out again.

    Relies on the ``selenium``, ``live_server`` and ``admin_user``
    pytest fixtures; the page-object helpers live in ``pages``.
    """
    selenium.maximize_window()
    selenium.get(live_server.url)
    # Page-object chain: login as the admin fixture user, then log out.
    (
        pages
        .Login(selenium)
        .login('admin', 'password')
        .logout()
    )
    selenium.quit()
|
washort/zamboni | refs/heads/master | mkt/features/tests/test_views.py | 21 | from nose.tools import eq_
from django.core.urlresolvers import reverse
from mkt.api.tests.test_oauth import RestOAuth
from mkt.constants.features import APP_FEATURES, FeatureProfile
class TestConfig(RestOAuth):
    """API tests for the app-features listing endpoint."""

    def setUp(self):
        super(TestConfig, self).setUp()
        self.url = reverse('api-features-feature-list')

    def _test_response(self, res):
        # Shared assertions: 200 status, one entry per app feature keyed by
        # the lower-cased feature name, each with a 1-based position.
        eq_(res.status_code, 200)
        data = res.json
        eq_(len(data), len(APP_FEATURES))
        self.assertSetEqual(data.keys(),
                            [f.lower() for f in APP_FEATURES.keys()])
        for i, feature in enumerate(APP_FEATURES.items()):
            name = feature[0].lower()
            eq_(i + 1, data[name]['position'])

    def test_with_profile(self):
        # A profile signature with only "apps" enabled should mark that
        # feature present and the others absent.
        profile = FeatureProfile(apps=True).to_signature()
        res = self.anon.get(self.url, {'pro': profile})
        self._test_response(res)
        eq_(res.json['apps']['present'], True)
        eq_(res.json['audio']['present'], False)

    def test_anon(self):
        res = self.anon.get(self.url)
        self._test_response(res)

    def test_authenticated(self):
        res = self.client.get(self.url)
        self._test_response(res)

    def test_post(self):
        # The endpoint is read-only; POST must be rejected.
        res = self.client.post(self.url)
        eq_(res.status_code, 405)
|
LibrIT/passhport | refs/heads/master | passhportd/migrations/versions/7e10f8660fa5_.py | 3 | """empty message
Revision ID: 7e10f8660fa5
Revises: a468530a43fa
Create Date: 2018-05-11 21:59:01.031948
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7e10f8660fa5'
down_revision = 'a468530a43fa'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``passentry`` table and the ``target_pass`` association
    table linking targets to password entries.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('passentry',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('connectiondate', sa.String(length=20), nullable=True),
    sa.Column('password', sa.LargeBinary(length=500), nullable=True),
    sa.Column('salt', sa.LargeBinary(length=500), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_passentry_connectiondate'), 'passentry', ['connectiondate'], unique=False)
    op.create_index(op.f('ix_passentry_password'), 'passentry', ['password'], unique=False)
    op.create_index(op.f('ix_passentry_salt'), 'passentry', ['salt'], unique=False)
    op.create_table('target_pass',
    sa.Column('target_id', sa.Integer(), nullable=False),
    sa.Column('logentry_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['logentry_id'], ['passentry.id'], ),
    sa.ForeignKeyConstraint(['target_id'], ['target.id'], ),
    sa.PrimaryKeyConstraint('target_id', 'logentry_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``target_pass`` and ``passentry`` tables (reverse of
    :func:`upgrade`).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('target_pass')
    op.drop_index(op.f('ix_passentry_salt'), table_name='passentry')
    op.drop_index(op.f('ix_passentry_password'), table_name='passentry')
    op.drop_index(op.f('ix_passentry_connectiondate'), table_name='passentry')
    op.drop_table('passentry')
    # ### end Alembic commands ###
|
DistrictDataLabs/minimum-entropy | refs/heads/master | tagging/models.py | 1 | # tagging.models
# Models for the tagging app
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Wed Jul 06 15:34:02 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: models.py [c5d00aa] benjamin@bengfort.com $
"""
Models for the tagging app
"""
##########################################################################
## Imports
##########################################################################
from django.db import models
from autoslug import AutoSlugField
from model_utils.models import TimeStampedModel
from minent.utils import nullable, notnullable
from tagging.managers import TagManager
from slugify import slugify
##########################################################################
## Question Tags
##########################################################################
class Tag(TimeStampedModel):
    """A user-created tag that can be attached to questions.

    A tag may be a synonym of another tag, in which case ``head_word``
    points to the canonical tag it aliases.
    """

    text = models.CharField(max_length=100, unique=True)
    # URL-safe identifier automatically derived from ``text``.
    slug = AutoSlugField(populate_from='text', slugify=slugify, unique=True)
    description = models.CharField(max_length=255, **nullable)
    creator = models.ForeignKey('auth.User', related_name='tags')

    # Synonym handling: when ``is_synonym`` is set, ``head_word`` is the
    # canonical tag this one is an alias for.
    is_synonym = models.BooleanField(default=False)
    head_word = models.ForeignKey('self', related_name='synonyms', **nullable)

    ## Set custom tag manager
    objects = TagManager()

    class Meta:
        db_table = "tags"
        get_latest_by = "created"

    def __str__(self):
        return self.text
|
neoareslinux/neutron | refs/heads/master | neutron/plugins/oneconvergence/agent/__init__.py | 12133432 | |
emetsger/osf.io | refs/heads/develop | api_tests/institutions/__init__.py | 12133432 | |
socialwifi/dila | refs/heads/master | tests/__init__.py | 12133432 | |
caseyrollins/osf.io | refs/heads/develop | api_tests/guids/__init__.py | 12133432 | |
vdmann/cse-360-image-hosting-website | refs/heads/master | lib/python2.7/site-packages/django/conf/locale/bs/__init__.py | 12133432 | |
pygeek/django | refs/heads/master | django/conf/locale/pt_BR/__init__.py | 12133432 | |
gannetson/django | refs/heads/master | tests/admin_custom_urls/__init__.py | 12133432 | |
andrei4ka/fuel-web-redhat | refs/heads/master | nailgun/nailgun/plugins/attr_plugin.py | 1 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
from urlparse import urljoin
import yaml
from nailgun.logger import logger
from nailgun.settings import settings
class ClusterAttributesPlugin(object):
"""Implements wrapper for plugin db model to provide
logic related to configuration files.
1. Uploading plugin provided cluster attributes
2. Uploading tasks
3. Enabling/Disabling of plugin based on cluster attributes
4. Providing repositories/deployment scripts related info to clients
"""
environment_config_name = 'environment_config.yaml'
task_config_name = 'tasks.yaml'
    def __init__(self, plugin):
        # ``plugin`` is the plugin db model instance this wrapper adapts.
        self.plugin = plugin
        # Root directory where this plugin's files were installed.
        self.plugin_path = os.path.join(
            settings.PLUGINS_PATH,
            self.full_name)
        # Path of the plugin-provided environment configuration file.
        self.config_file = os.path.join(
            self.plugin_path,
            self.environment_config_name)
        self.tasks = []
    def _load_config(self, config):
        """Read and parse a YAML config file.

        Returns the parsed document, or ``None`` (after logging a warning)
        when the file is not readable.
        """
        if os.access(config, os.R_OK):
            with open(config, "r") as conf:
                # NOTE(review): yaml.load without an explicit Loader can
                # construct arbitrary Python objects. If plugin configs are
                # not fully trusted, yaml.safe_load should be used instead.
                return yaml.load(conf.read())
        else:
            logger.warning("Config {0} is not readable.".format(config))
    def get_plugin_attributes(self, cluster):
        """Should be used for initial configuration uploading to
        custom storage. Will be invoked in 2 cases:
        1. Cluster is created but there was no plugins in system
        on that time, so when plugin is uploaded we need to iterate
        over all clusters and decide if plugin should be applied
        2. Plugins is uploaded before cluster creation, in this case
        we will iterate over all plugins and upload configuration for them
        In this case attributes will be added to same cluster attributes
        model and stored in editable field
        """
        config = {}
        if os.path.exists(self.config_file):
            # NOTE(review): _load_config may return None for an unreadable
            # file, which would make config.get() below raise — confirm the
            # config file is always readable when it exists.
            config = self._load_config(self.config_file)
        # Only expose attributes when the plugin is compatible with the
        # cluster (os / mode / release version).
        if self.validate_cluster_compatibility(cluster):
            attrs = config.get("attributes", {})
            # Inject UI metadata defaults (enabled flag, label, version).
            self.update_metadata(attrs)
            return {self.plugin.name: attrs}
        return {}
def validate_cluster_compatibility(self, cluster):
"""Validates if plugin is compatible with cluster.
- validates operating systems
- modes of clusters (simple or ha)
- release version
"""
for release in self.plugin.releases:
os_compat = (cluster.release.operating_system.lower()
== release['os'].lower())
# plugin writer should be able to specify ha in release['mode']
# and know nothing about ha_compact
mode_compat = any(mode in cluster.mode for mode in release['mode'])
release_version_compat = (
cluster.release.version == release['version'])
if all((os_compat, mode_compat, release_version_compat)):
return True
return False
def process_cluster_attributes(self, cluster, cluster_attrs):
"""Checks cluster attributes for plugin related metadata.
Then enable or disable plugin for cluster based on metadata
enabled field.
"""
custom_attrs = cluster_attrs.get(self.plugin.name, {})
if custom_attrs:
# Skip if it's wrong plugin version
attr_plugin_version = custom_attrs['metadata']['plugin_version']
if attr_plugin_version != self.plugin.version:
return
enable = custom_attrs['metadata']['enabled']
# value is true and plugin is not enabled for this cluster
# that means plugin was enabled on this request
if enable and cluster not in self.plugin.clusters:
self.plugin.clusters.append(cluster)
# value is false and plugin is enabled for this cluster
# that means plugin was disabled on this request
elif not enable and cluster in self.plugin.clusters:
self.plugin.clusters.remove(cluster)
def update_metadata(self, attributes):
"""Overwrights only default values in metadata.
Plugin should be able to provide UI "native" conditions
to enable/disable plugin on UI itself
"""
attributes.setdefault('metadata', {})
attributes['metadata'].update(self.default_metadata)
return attributes
@property
def default_metadata(self):
return {u'enabled': False, u'toggleable': True,
u'weight': 70, u'label': self.plugin.title,
'plugin_version': self.plugin.version}
def set_cluster_tasks(self, cluster):
"""Loads plugins provided tasks from tasks config file and
sets them to instance tasks variable.
"""
task_yaml = os.path.join(
self.plugin_path,
self.task_config_name)
if os.path.exists(task_yaml):
self.tasks = self._load_config(task_yaml)
def filter_tasks(self, tasks, stage):
filtered = []
for task in tasks:
if stage and stage == task.get('stage'):
filtered.append(task)
return filtered
@property
def plugin_release_versions(self):
if not self.plugin.releases:
return set()
return set([rel['version'] for rel in self.plugin.releases])
@property
def full_name(self):
return u'{0}-{1}'.format(self.plugin.name, self.plugin.version)
def get_release_info(self, release):
"""Returns plugin release information which corresponds to
a provided release.
"""
os = release.operating_system.lower()
version = release.version
release_info = filter(
lambda r: (r['os'] == os and
r['version'] == version),
self.plugin.releases)
return release_info[0]
@property
def slaves_scripts_path(self):
return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format(
plugin_name=self.full_name)
def repo_files(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_path = os.path.join(
settings.PLUGINS_PATH,
self.full_name,
release_info['repository_path'],
'*')
return glob.glob(repo_path)
def repo_url(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_base = settings.PLUGINS_REPO_URL.format(
master_ip=settings.MASTER_IP,
plugin_name=self.full_name)
return urljoin(repo_base, release_info['repository_path'])
def master_scripts_path(self, cluster):
release_info = self.get_release_info(cluster.release)
# NOTE(eli): we cannot user urljoin here, because it
# works wrong in case, if protocol is rsync
base_url = settings.PLUGINS_SLAVES_RSYNC.format(
master_ip=settings.MASTER_IP,
plugin_name=self.full_name)
return '{0}{1}'.format(
base_url,
release_info['deployment_scripts_path'])
|
calebfoss/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/ops/categorical.py | 9 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Categorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
class Categorical(distribution.Distribution):
  """Categorical distribution.

  The categorical distribution is parameterized by the log-probabilities
  of a set of classes.

  #### Examples

  Creates a 3-class distribution, with the 2nd class, the most likely to be
  drawn from.

  ```python
  p = [0.1, 0.5, 0.4]
  dist = Categorical(p=p)
  ```

  Creates a 3-class distribution, with the 2nd class the most likely to be
  drawn from, using logits.

  ```python
  logits = [-50, 400, 40]
  dist = Categorical(logits=logits)
  ```

  Creates a 3-class distribution, with the 3rd class the most likely to be
  drawn. The distribution functions can be evaluated on counts.

  ```python
  # counts is a scalar.
  p = [0.1, 0.4, 0.5]
  dist = Categorical(p=p)
  dist.pmf(0)  # Shape []

  # p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts.
  counts = [1, 0]
  dist.pmf(counts)  # Shape [2]

  # p will be broadcast to shape [3, 5, 7, 3] to match counts.
  counts = [[...]] # Shape [5, 7, 3]
  dist.pmf(counts)  # Shape [5, 7, 3]
  ```
  """

  def __init__(
      self,
      logits=None,
      p=None,
      dtype=dtypes.int32,
      validate_args=False,
      allow_nan_stats=True,
      name="Categorical"):
    """Initialize Categorical distributions using class log-probabilities.

    Args:
      logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
          of a set of Categorical distributions. The first `N - 1` dimensions
          index into a batch of independent distributions and the last dimension
          represents a vector of logits for each class. Only one of `logits` or
          `p` should be passed in.
      p: An N-D `Tensor`, `N >= 1`, representing the probabilities
          of a set of Categorical distributions. The first `N - 1` dimensions
          index into a batch of independent distributions and the last dimension
          represents a vector of probabilities for each class. Only one of
          `logits` or `p` should be passed in.
      dtype: The type of the event samples (default: int32).
      validate_args: Unused in this distribution.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: A name for this distribution (optional).
    """
    # Capture the constructor arguments for the base class `parameters`
    # bookkeeping before any locals are added.
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[logits]) as ns:
      # Exactly one of logits/p is supplied; the helper derives the other.
      self._logits, self._p = distribution_util.get_logits_and_prob(
          name=name, logits=logits, p=p, validate_args=validate_args,
          multidimensional=True)

      # For each of batch_rank / num_classes / batch_shape below, prefer a
      # statically-known constant and fall back to a dynamic graph op when
      # the shape is not fully known at graph-construction time.
      logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
      if logits_shape_static.ndims is not None:
        self._batch_rank = ops.convert_to_tensor(
            logits_shape_static.ndims - 1,
            dtype=dtypes.int32,
            name="batch_rank")
      else:
        with ops.name_scope(name="batch_rank"):
          self._batch_rank = array_ops.rank(self._logits) - 1

      logits_shape = array_ops.shape(self._logits, name="logits_shape")
      # The last dimension of logits is the number of classes.
      if logits_shape_static[-1].value is not None:
        self._num_classes = ops.convert_to_tensor(
            logits_shape_static[-1].value,
            dtype=dtypes.int32,
            name="num_classes")
      else:
        self._num_classes = array_ops.gather(logits_shape,
                                             self._batch_rank,
                                             name="num_classes")

      # All leading dimensions of logits form the batch shape.
      if logits_shape_static[:-1].is_fully_defined():
        self._batch_shape_val = constant_op.constant(
            logits_shape_static[:-1].as_list(),
            dtype=dtypes.int32,
            name="batch_shape")
      else:
        with ops.name_scope(name="batch_shape"):
          self._batch_shape_val = logits_shape[:-1]
    super(Categorical, self).__init__(
        dtype=dtype,
        is_continuous=False,
        is_reparameterized=False,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._logits, self._num_classes],
        name=ns)

  @property
  def num_classes(self):
    """Scalar `int32` tensor: the number of classes."""
    return self._num_classes

  @property
  def logits(self):
    """Vector of coordinatewise logits."""
    return self._logits

  @property
  def p(self):
    """Vector of probabilities summing to one.

    Each element is the probability of drawing that coordinate."""
    return self._p

  def _batch_shape(self):
    # Use identity to inherit callers "name".
    return array_ops.identity(self._batch_shape_val)

  def _get_batch_shape(self):
    return self.logits.get_shape()[:-1]

  def _event_shape(self):
    # Each draw is a scalar class index, so the event shape is [].
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # multinomial expects 2-D logits; flatten any extra batch dims first.
    if self.logits.get_shape().ndims == 2:
      logits_2d = self.logits
    else:
      logits_2d = array_ops.reshape(self.logits, [-1, self.num_classes])
    samples = random_ops.multinomial(logits_2d, n, seed=seed)
    samples = math_ops.cast(samples, self.dtype)
    # Transpose so the sample dimension comes first, then restore the
    # batch dims: final shape is [n] + batch_shape.
    ret = array_ops.reshape(
        array_ops.transpose(samples),
        array_ops.concat(([n], self.batch_shape()), 0))
    return ret

  def _log_prob(self, k):
    k = ops.convert_to_tensor(k, name="k")
    if self.logits.get_shape()[:-1] == k.get_shape():
      logits = self.logits
    else:
      # Broadcast logits and k against each other so their shapes agree
      # before computing the cross entropy.
      logits = self.logits * array_ops.ones_like(
          array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
      logits_shape = array_ops.shape(logits)[:-1]
      k *= array_ops.ones(logits_shape, dtype=k.dtype)
      k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
    # log p(k) is minus the sparse softmax cross entropy of k vs logits.
    return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
                                                            logits=logits)

  def _prob(self, k):
    return math_ops.exp(self._log_prob(k))

  def _entropy(self):
    # Flatten to 2-D as in _sample_n, then use the identity that a
    # distribution's entropy is its softmax cross entropy with itself.
    if self.logits.get_shape().ndims == 2:
      logits_2d = self.logits
    else:
      logits_2d = array_ops.reshape(self.logits, [-1, self.num_classes])
    histogram_2d = nn_ops.softmax(logits_2d)
    ret = array_ops.reshape(
        nn_ops.softmax_cross_entropy_with_logits(labels=histogram_2d,
                                                 logits=logits_2d),
        self.batch_shape())
    ret.set_shape(self.get_batch_shape())
    return ret

  def _mode(self):
    # The mode is the argmax over the class (last) dimension; _batch_rank
    # indexes that dimension.
    ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
    ret = math_ops.cast(ret, self.dtype)
    ret.set_shape(self.get_batch_shape())
    return ret
@kullback_leibler.RegisterKL(Categorical, Categorical)
def _kl_categorical_categorical(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Categorical.

  Args:
    a: instance of a Categorical distribution object.
    b: instance of a Categorical distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_categorical_categorical".

  Returns:
    Batchwise KL(a || b)
  """
  with ops.name_scope(
      name, "kl_categorical_categorical", [a.logits, b.logits]):
    # KL(a || b) = sum_k p_a(k) * (log p_a(k) - log p_b(k)), computed
    # from logits via softmax/log_softmax for numerical stability.
    prob_a = nn_ops.softmax(a.logits)
    log_prob_a = nn_ops.log_softmax(a.logits)
    log_prob_b = nn_ops.log_softmax(b.logits)
    delta_log_prob = log_prob_a - log_prob_b
    return math_ops.reduce_sum(
        prob_a * delta_log_prob, reduction_indices=[-1])
|
sergiocorato/partner-contact | refs/heads/10.0 | partner_multi_relation/models/res_partner_relation.py | 5 | # -*- coding: utf-8 -*-
# © 2013-2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
"""Store relations (connections) between partners."""
from openerp import _, api, fields, models
from openerp.exceptions import ValidationError
class ResPartnerRelation(models.Model):
    """Model res.partner.relation is used to describe all links or relations
    between partners in the database.

    This model is actually only used to store the data. The model
    res.partner.relation.all, based on a view that contains each record
    two times, once for the normal relation, once for the inverse relation,
    will be used to maintain the data.
    """
    _name = 'res.partner.relation'
    _description = 'Partner relation'

    # One side of the relation ("left" in the relation-type definition).
    left_partner_id = fields.Many2one(
        comodel_name='res.partner',
        string='Source Partner',
        required=True,
        auto_join=True,
        ondelete='cascade',
    )
    # The other side ("right" in the relation-type definition).
    right_partner_id = fields.Many2one(
        comodel_name='res.partner',
        string='Destination Partner',
        required=True,
        auto_join=True,
        ondelete='cascade',
    )
    type_id = fields.Many2one(
        comodel_name='res.partner.relation.type',
        string='Type',
        required=True,
        auto_join=True,
    )
    # Optional validity window; either bound may be left empty (open-ended).
    date_start = fields.Date('Starting date')
    date_end = fields.Date('Ending date')

    @api.model
    def create(self, vals):
        """Override create to correct values, before being stored.

        When created from a partner's context (active_id present) without
        an explicit left partner, default the left partner to that record.
        """
        context = self.env.context
        if 'left_partner_id' not in vals and context.get('active_id'):
            vals['left_partner_id'] = context.get('active_id')
        return super(ResPartnerRelation, self).create(vals)

    @api.one
    @api.constrains('date_start', 'date_end')
    def _check_dates(self):
        """End date should not be before start date, if not filled

        :raises ValidationError: When constraint is violated
        """
        if (self.date_start and self.date_end and
                self.date_start > self.date_end):
            raise ValidationError(
                _('The starting date cannot be after the ending date.')
            )

    @api.one
    @api.constrains('left_partner_id', 'type_id')
    def _check_partner_left(self):
        """Check left partner for required company or person

        :raises ValidationError: When constraint is violated
        """
        self._check_partner("left")

    @api.one
    @api.constrains('right_partner_id', 'type_id')
    def _check_partner_right(self):
        """Check right partner for required company or person

        :raises ValidationError: When constraint is violated
        """
        self._check_partner("right")

    @api.one
    def _check_partner(self, side):
        """Check partner for required company or person, and for category

        :param str side: left or right
        :raises ValidationError: When constraint is violated
        """
        assert side in ['left', 'right']
        # contact_type_<side> on the relation type is 'c' (company) or
        # 'p' (person); the partner on that side must match it.
        ptype = getattr(self.type_id, "contact_type_%s" % side)
        partner = getattr(self, '%s_partner_id' % side)
        if ((ptype == 'c' and not partner.is_company) or
                (ptype == 'p' and partner.is_company)):
            raise ValidationError(
                _('The %s partner is not applicable for this relation type.') %
                side
            )
        # If the relation type restricts this side to a partner category,
        # the partner must carry that category.
        category = getattr(self.type_id, "partner_category_%s" % side)
        if category and category.id not in partner.category_id.ids:
            raise ValidationError(
                _('The %s partner does not have category %s.') %
                (side, category.name)
            )

    @api.one
    @api.constrains('left_partner_id', 'right_partner_id')
    def _check_not_with_self(self):
        """Not allowed to link partner to same partner

        :raises ValidationError: When constraint is violated
        """
        if self.left_partner_id == self.right_partner_id:
            # Self-relations are only allowed when the relation type
            # explicitly opts in via allow_self.
            if not (self.type_id and self.type_id.allow_self):
                raise ValidationError(
                    _('Partners cannot have a relation with themselves.')
                )

    @api.one
    @api.constrains(
        'left_partner_id',
        'type_id',
        'right_partner_id',
        'date_start',
        'date_end',
    )
    def _check_relation_uniqueness(self):
        """Forbid multiple active relations of the same type between the same
        partners

        :raises ValidationError: When constraint is violated
        """
        # pylint: disable=no-member
        # pylint: disable=no-value-for-parameter
        domain = [
            ('type_id', '=', self.type_id.id),
            ('id', '!=', self.id),
            ('left_partner_id', '=', self.left_partner_id.id),
            ('right_partner_id', '=', self.right_partner_id.id),
        ]
        # Only records whose validity window overlaps this one count as
        # duplicates; an empty date on either record overlaps everything
        # on that side.
        if self.date_start:
            domain += [
                '|',
                ('date_end', '=', False),
                ('date_end', '>=', self.date_start),
            ]
        if self.date_end:
            domain += [
                '|',
                ('date_start', '=', False),
                ('date_start', '<=', self.date_end),
            ]
        if self.search(domain):
            raise ValidationError(
                _('There is already a similar relation with overlapping dates')
            )
|
vaultah/L | refs/heads/master | app/tests/test_notifications.py | 1 | import pytest
from app.el import notifications as no
from app.el.accounts.records import Record
# Notification class names exposed by app.el.notifications; each one is
# exercised by the parametrized test below.
types = ('MentionNotification', 'ReplyPostNotification', 'ReplyImageNotification',
         'SharedPostNotification', 'SharedImageNotification', 'FriendNotification',
         'FollowerNotification')


@pytest.mark.parametrize('nt', types, ids=types)
def test_notifications(nt):
    """Create one notification of each type, check it loads, then delete it."""
    this, other = Record.new(), Record.new()
    klass = getattr(no, nt)
    o = klass.new(this, other=other)
    # A freshly created notification is well-formed and has an id.
    assert o.good()
    assert o.id
    # The recipient's feed now contains at least one notification.
    assert list(no.load(this))
    # Deleting by id empties the recipient's feed again.
    no.Notification.delete(this, [o.id])
    assert not list(no.load(this)) |
pietern/caffe2 | refs/heads/master | caffe2/python/operator_test/counter_ops_test.py | 4 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import tempfile
class TestCounterOps(TestCase):
    """Exercises the counter operators (CreateCounter, CountDown, CountUp,
    RetrieveCount, ResetCounter) and counter serialization via Save/Load."""

    def test_counter_ops(self):
        workspace.RunOperatorOnce(core.CreateOperator(
            'CreateCounter', [], ['c'], init_count=1))

        # Per the assertions below, CountDown's output appears to be a
        # "depleted" flag: falsy while the counter was still positive
        # before the decrement, truthy once it has run out.
        workspace.RunOperatorOnce(core.CreateOperator(
            'CountDown', ['c'], ['t1']))  # 1 -> 0
        assert not workspace.FetchBlob('t1')
        workspace.RunOperatorOnce(core.CreateOperator(
            'CountDown', ['c'], ['t2']))  # 0 -> -1
        assert workspace.FetchBlob('t2')

        # CountUp outputs the counter's previous value (-1 here).
        workspace.RunOperatorOnce(core.CreateOperator(
            'CountUp', ['c'], ['t21']))  # -1 -> 0
        assert workspace.FetchBlob('t21') == -1
        workspace.RunOperatorOnce(core.CreateOperator(
            'RetrieveCount', ['c'], ['t22']))
        assert workspace.FetchBlob('t22') == 0

        workspace.RunOperatorOnce(core.CreateOperator(
            'ResetCounter', ['c'], [], init_count=1))  # -> 1
        workspace.RunOperatorOnce(core.CreateOperator(
            'CountDown', ['c'], ['t3']))  # 1 -> 0
        assert not workspace.FetchBlob('t3')

        # When given an output, ResetCounter emits the previous count.
        workspace.RunOperatorOnce(core.CreateOperator(
            'ResetCounter', ['c'], ['t31'], init_count=5))  # 0 -> 5
        assert workspace.FetchBlob('t31') == 0
        workspace.RunOperatorOnce(core.CreateOperator(
            'ResetCounter', ['c'], ['t32']))  # 5 -> 0
        assert workspace.FetchBlob('t32') == 5

        # The CountDown flags compare equal to plain bool scalars and can
        # be combined with the And operator.
        workspace.RunOperatorOnce(core.CreateOperator(
            'ConstantFill', [], ['t4'], value=False, shape=[],
            dtype=core.DataType.BOOL))
        assert workspace.FetchBlob('t4') == workspace.FetchBlob('t1')
        workspace.RunOperatorOnce(core.CreateOperator(
            'ConstantFill', [], ['t5'], value=True, shape=[],
            dtype=core.DataType.BOOL))
        assert workspace.FetchBlob('t5') == workspace.FetchBlob('t2')
        assert workspace.RunOperatorOnce(core.CreateOperator(
            'And', ['t1', 't2'], ['t6']))
        assert not workspace.FetchBlob('t6')  # True && False
        assert workspace.RunOperatorOnce(core.CreateOperator(
            'And', ['t2', 't5'], ['t7']))
        assert workspace.FetchBlob('t7')  # True && True

        # Round-trip a counter through a minidb file: the loaded counter
        # must restore the count it had when saved (22), not the value
        # the live counter reached afterwards (12).
        workspace.RunOperatorOnce(core.CreateOperator(
            'CreateCounter', [], ['serialized_c'], init_count=22))
        with tempfile.NamedTemporaryFile() as tmp:
            workspace.RunOperatorOnce(core.CreateOperator(
                'Save', ['serialized_c'], [], absolute_path=1,
                db_type='minidb', db=tmp.name))
            for i in range(10):
                workspace.RunOperatorOnce(core.CreateOperator(
                    'CountDown', ['serialized_c'], ['t8']))
            workspace.RunOperatorOnce(core.CreateOperator(
                'RetrieveCount', ['serialized_c'], ['t8']))
            assert workspace.FetchBlob('t8') == 12
            workspace.RunOperatorOnce(core.CreateOperator(
                'Load', [], ['serialized_c'], absolute_path=1,
                db_type='minidb', db=tmp.name))
            workspace.RunOperatorOnce(core.CreateOperator(
                'RetrieveCount', ['serialized_c'], ['t8']))
            assert workspace.FetchBlob('t8') == 22
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.