| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
import pytest
from zeroless import (Server, Client)
class TestClientServer:
def test_server_port_property(self):
port = 1050
server = Server(port=port)
assert server.port == port
def test_client_addresses_property(self):
client = Client()
addresses = (('10.0.0.1', 1567), ('10.0.0.2', 1568), ('10.0.0.3', 1569))
for ip, port in addresses:
client.connect(ip, port)
assert client.addresses == addresses
|
x8lucas8x/python-zeroless
|
tests/test_client_server.py
|
Python
|
lgpl-2.1
| 483
|
#!/usr/bin/python3 -u
#---- Includes ----#
from ..army_architect import Bot_blueprint, Army_blueprint
#---- General Settings ----#
army_name = "The Hungry Zombie Kittens"
army_description = "Previously these kittens ate cat food. But now they wan't to eat your freakin' soul! (And your body to of course, after they ripped it asunder ;,,,;)"
#---- Bot Patterns ----#
class Zombie_kitten(Bot_blueprint):
name = "Zombie Kitten"
skin = "HK-KittyZombie.png"
ai_diff = 0
ai_diff_dynamic = True
can_use_ninja = 0
can_use_ninja_dynamic = False
shield_factor = 0.6
shield_factor_dynamic = True
damage_factor = 1
damage_factor_dynamic = False
speed_factor = 0.6
speed_factor_dynamic = False
class Franken_kitten(Bot_blueprint):
name = "Franken Kitten"
skin = "HK-FrankenKitty.png"
ai_diff = 0
ai_diff_dynamic = True
can_use_ninja = 1
can_use_ninja_dynamic = True
shield_factor = 0.6
shield_factor_dynamic = False
damage_factor = 1
damage_factor_dynamic = False
speed_factor = 0.8
speed_factor_dynamic = True
class Ghoul_kitten(Bot_blueprint):
name = "Ghoul Kitten"
skin = "HK-KittyMime.png"
ai_diff = 2
ai_diff_dynamic = True
can_use_ninja = 1
can_use_ninja_dynamic = True
shield_factor = 0.5
shield_factor_dynamic = True
damage_factor = 1.0
damage_factor_dynamic = False
speed_factor = 1.6
speed_factor_dynamic = True
def generate_blueprint(challenge_amount, lives, team):
global army_name, army_description
#---- Generate a list of bot blueprints ----#
blueprints = []
# How many of each bot per challenge amount:
blueprint_weights = {
Zombie_kitten: 2,
Franken_kitten: 1,
Ghoul_kitten: 0.5,
}
for blueprint, weight in blueprint_weights.items():
num_bots = round(weight * challenge_amount)
for x in range(num_bots):
blueprints.append(blueprint(lives, team))
#---- Scale the blueprints ----#
# Sum the current default_challenge_amount
default_challenge_amount_sum = 0
for blueprint in blueprints:
default_challenge_amount_sum += blueprint.default_challenge_amount
scale_factor = challenge_amount / default_challenge_amount_sum
for blueprint in blueprints:
blueprint.scale_challenge_amount_with(scale_factor)
#---- Return the army ----#
return Army_blueprint(army_name, army_description, blueprints)
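# Worked example (illustrative; assumes Bot_blueprint supplies the
# default_challenge_amount and scale_challenge_amount_with used above):
# with challenge_amount=4 the weights yield round(2*4)=8 Zombie,
# round(1*4)=4 Franken and round(0.5*4)=2 Ghoul kittens, after which each
# blueprint is scaled by challenge_amount / default_challenge_amount_sum
# so the army's summed challenge matches the requested amount.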
|
ProfessorKaos64/openlierox
|
share/gamedir/scripts/tools/army_patterns/hungry_zombie_kittens.py
|
Python
|
lgpl-2.1
| 2,288
|
###############################################################################
# volumina: volume slicing and editing library
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
from PyQt4.QtDesigner import QPyDesignerCustomWidgetPlugin
from PyQt4.QtGui import QPixmap, QIcon, QColor
from volumina.widgets.layerwidget import LayerWidget
from volumina.layerstack import LayerStackModel, Layer
class PyLayerWidgetPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
QPyDesignerCustomWidgetPlugin.__init__(self)
self.initialized = False
def initialize(self, core):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
model = LayerStackModel()
o1 = Layer()
o1.name = "Fancy Layer"
o1.opacity = 0.5
model.append(o1)
o2 = Layer()
o2.name = "Some other Layer"
o2.opacity = 0.25
model.append(o2)
o3 = Layer()
o3.name = "Invisible Layer"
o3.opacity = 0.15
o3.visible = False
model.append(o3)
o4 = Layer()
o4.name = "Fancy Layer II"
o4.opacity = 0.95
model.append(o4)
o5 = Layer()
o5.name = "Fancy Layer III"
o5.opacity = 0.65
model.append(o5)
view = LayerWidget(parent, model)
view.updateGeometry()
return view
def name(self):
return "LayerWidget"
def group(self):
return "ilastik widgets"
def icon(self):
return QIcon(QPixmap(16,16))
def toolTip(self):
return ""
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return (
'<widget class="LayerWidget" name=\"layerWidget\">\n'
"</widget>\n"
)
def includeFile(self):
return "volumina.widgets.layerwidget"
|
jakirkham/volumina
|
volumina/layerwidget_plugin.py
|
Python
|
lgpl-3.0
| 3,021
|
from rez.exceptions import ReleaseVCSError
from rez.packages_ import get_developer_package
from rez.util import which
from rez.utils.system import popen
from rez.utils.logging_ import print_debug
from rez.utils.filesystem import walk_up_dirs
from pipes import quote
import subprocess
def get_release_vcs_types():
"""Returns the available VCS implementations - git, hg etc."""
from rez.plugin_managers import plugin_manager
return plugin_manager.get_plugins('release_vcs')
def create_release_vcs(path, vcs_name=None):
"""Return a new release VCS that can release from this source path."""
from rez.plugin_managers import plugin_manager
vcs_types = get_release_vcs_types()
if vcs_name:
if vcs_name not in vcs_types:
raise ReleaseVCSError("Unknown version control system: %r" % vcs_name)
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
return cls(path)
classes_by_level = {}
for vcs_name in vcs_types:
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
result = cls.find_vcs_root(path)
if not result:
continue
vcs_path, levels_up = result
classes_by_level.setdefault(levels_up, []).append((cls, vcs_path))
if not classes_by_level:
raise ReleaseVCSError("No version control system for package "
"releasing is associated with the path %s" % path)
# it's ok to have multiple results, as long as there is only one at the
# "closest" directory up from this dir - ie, if we start at:
# /blah/foo/pkg_root
# and these dirs exist:
# /blah/.hg
# /blah/foo/.git
# ...then this is ok, because /blah/foo/.git is "closer" to the original
# dir, and will be picked. However, if these two directories exist:
# /blah/foo/.git
# /blah/foo/.hg
# ...then we error, because we can't decide which to use
lowest_level = sorted(classes_by_level)[0]
clss = classes_by_level[lowest_level]
if len(clss) > 1:
clss_str = ", ".join(x[0].name() for x in clss)
raise ReleaseVCSError("Several version control systems are associated "
"with the path %s: %s. Use rez-release --vcs to "
"choose." % (path, clss_str))
else:
cls, vcs_root = clss[0]
return cls(pkg_root=path, vcs_root=vcs_root)
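# Hypothetical resolution example (paths are illustrative): given /blah/.hg
# and /blah/foo/.git, create_release_vcs("/blah/foo/pkg_root") returns the
# git plugin, because .git sits fewer levels up from the package root than
# .hg does; two roots at the same level would raise ReleaseVCSError instead.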
class ReleaseVCS(object):
"""A version control system (VCS) used to release Rez packages.
"""
def __init__(self, pkg_root, vcs_root=None):
if vcs_root is None:
result = self.find_vcs_root(pkg_root)
if not result:
raise ReleaseVCSError("Could not find %s repository for the "
"path %s" % (self.name(), pkg_root))
vcs_root = result[0]
else:
assert(self.is_valid_root(vcs_root))
self.vcs_root = vcs_root
self.pkg_root = pkg_root
self.package = get_developer_package(pkg_root)
self.type_settings = self.package.config.plugins.release_vcs
self.settings = self.type_settings.get(self.name())
@classmethod
def name(cls):
"""Return the name of the VCS type, eg 'git'."""
raise NotImplementedError
@classmethod
def find_executable(cls, name):
exe = which(name)
if not exe:
raise ReleaseVCSError("Couldn't find executable '%s' for VCS '%s'"
% (name, cls.name()))
return exe
@classmethod
def is_valid_root(cls, path):
"""Return True if the given path is a valid root directory for this
version control system.
Note that this is different from whether the path is under the
control of this type of VCS; to answer that question,
use find_vcs_root.
"""
raise NotImplementedError
@classmethod
def search_parents_for_root(cls):
"""Return True if this vcs type should check parent directories to
find the root directory
"""
raise NotImplementedError
@classmethod
def find_vcs_root(cls, path):
"""Try to find a version control root directory of this type for the
given path.
If successful, returns (vcs_root, levels_up), where vcs_root is the
path to the version control root directory it found, and levels_up is an
integer indicating how many parent directories it had to search through
to find it, where 0 means it was found in the indicated path, 1 means it
was found in that path's parent, etc. If not successful, returns None.
"""
if cls.search_parents_for_root():
valid_dirs = walk_up_dirs(path)
else:
valid_dirs = [path]
for i, current_path in enumerate(valid_dirs):
if cls.is_valid_root(current_path):
return current_path, i
return None
def validate_repostate(self):
"""Ensure that the VCS working copy is up-to-date."""
raise NotImplementedError
def get_current_revision(self):
"""Get the current revision, this can be any type (str, dict etc)
appropriate to your VCS implementation.
Note:
You must ensure that a revision contains enough information to
clone/export/checkout the repo elsewhere - otherwise you will not
be able to implement `export`.
"""
raise NotImplementedError
def get_changelog(self, previous_revision=None, max_revisions=None):
"""Get the changelog text since the given revision.
If previous_revision is not an ancestor (for example, the last release
was from a different branch) you should still return a meaningful
changelog - perhaps include a warning, and give changelog back to the
last common ancestor.
Args:
previous_revision: The revision to give the changelog since. If
None, give the entire changelog.
Returns:
Changelog, as a string.
"""
raise NotImplementedError
def tag_exists(self, tag_name):
"""Test if a tag exists in the repo.
Args:
tag_name (str): Tag name to check for.
Returns:
bool: True if the tag exists, False otherwise.
"""
raise NotImplementedError
def create_release_tag(self, tag_name, message=None):
"""Create a tag in the repo.
Create a tag in the repository representing the release of the
given version.
Args:
tag_name (str): Tag name to write to the repo.
message (str): Message string to associate with the release.
"""
raise NotImplementedError
@classmethod
def export(cls, revision, path):
"""Export the repository to the given path at the given revision.
Note:
The directory at `path` must not exist, but the parent directory
must exist.
Args:
revision (object): Revision to export; current revision if None.
path (str): Directory to export the repository to.
"""
raise NotImplementedError
def _cmd(self, *nargs):
"""Convenience function for executing a program such as 'git' etc."""
cmd_str = ' '.join(map(quote, nargs))
if self.package.config.debug("package_release"):
print_debug("Running command: %s" % cmd_str)
p = popen(nargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=self.pkg_root)
out, err = p.communicate()
if p.returncode:
print_debug("command stdout:")
print_debug(out)
print_debug("command stderr:")
print_debug(err)
raise ReleaseVCSError("command failed: %s\n%s" % (cmd_str, err))
out = out.strip()
if out:
return [x.rstrip() for x in out.split('\n')]
else:
return []
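# Minimal concrete-plugin sketch (a hedged illustration only, not a bundled
# plugin; real implementations live in rez's release_vcs plugin packages and
# would also need an `import os` plus the revision/changelog/tag methods):
#
#   class TarballVCS(ReleaseVCS):
#       @classmethod
#       def name(cls):
#           return "tarball"
#       @classmethod
#       def is_valid_root(cls, path):
#           return os.path.isdir(os.path.join(path, ".tarball"))
#       @classmethod
#       def search_parents_for_root(cls):
#           return False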
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
cwmartin/rez
|
src/rez/release_vcs.py
|
Python
|
lgpl-3.0
| 8,750
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationradiuspolicy_authenticationvserver_binding(base_resource) :
""" Binding class showing the authenticationvserver that can be bound to authenticationradiuspolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._name = ""
self.___count = 0
@property
def name(self) :
ur"""Name of the RADIUS authentication policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the RADIUS authentication policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
ur"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
ur"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationradiuspolicy_authenticationvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationradiuspolicy_authenticationvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch authenticationradiuspolicy_authenticationvserver_binding resources.
"""
try :
obj = authenticationradiuspolicy_authenticationvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of authenticationradiuspolicy_authenticationvserver_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationradiuspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count authenticationradiuspolicy_authenticationvserver_binding resources configued on NetScaler.
"""
try :
obj = authenticationradiuspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of authenticationradiuspolicy_authenticationvserver_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationradiuspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class authenticationradiuspolicy_authenticationvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.authenticationradiuspolicy_authenticationvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationradiuspolicy_authenticationvserver_binding = [authenticationradiuspolicy_authenticationvserver_binding() for _ in range(length)]
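# Hedged usage sketch (address and credentials are placeholders; a logged-in
# nitro_service session is assumed):
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.10.10.10", "http")
#   client.login("nsroot", "nsroot")
#   bindings = authenticationradiuspolicy_authenticationvserver_binding.get(
#       client, "radius_policy_1")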
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationradiuspolicy_authenticationvserver_binding.py
|
Python
|
apache-2.0
| 5,569
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python functions which run only within a Jupyter notebook."""
# internal imports
import IPython
from magenta.music import midi_synth
_DEFAULT_SAMPLE_RATE = 44100
def play_sequence(sequence,
synth=midi_synth.synthesize,
sample_rate=_DEFAULT_SAMPLE_RATE,
**synth_args):
"""Creates an interactive player for a synthesized note sequence.
This function should only be called from a Jupyter notebook.
Args:
sequence: A music_pb2.NoteSequence to synthesize and play.
synth: A synthesis function that takes a sequence and sample rate as input.
sample_rate: The sample rate at which to synthesize.
**synth_args: Additional keyword arguments to pass to the synth function.
"""
array_of_floats = synth(sequence, sample_rate=sample_rate, **synth_args)
IPython.display.display(IPython.display.Audio(array_of_floats,
rate=sample_rate))
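# Hedged notebook usage (assumes `seq` is a music_pb2.NoteSequence, e.g.
# loaded via magenta.music.midi_io.midi_file_to_note_sequence):
#
#   from magenta.music import notebook_utils
#   notebook_utils.play_sequence(seq, sample_rate=22050)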
|
YoshikawaMasashi/magenta
|
magenta/music/notebook_utils.py
|
Python
|
apache-2.0
| 1,557
|
import sys
sys.path.insert(1, "../../../")
import h2o
def ozoneKM(ip, port):
# Connect to a pre-existing cluster
# connect to localhost:54321
train = h2o.import_file(path=h2o.locate("smalldata/glm_test/ozone.csv"))
# See that the data is ready
print train.describe()
# Run KMeans
my_km = h2o.kmeans(x=train,
k=10,
init = "PlusPlus",
max_iterations = 100)
my_km.show()
my_km.summary()
my_pred = my_km.predict(train)
my_pred.describe()
if __name__ == "__main__":
h2o.run_test(sys.argv, ozoneKM)
|
PawarPawan/h2o-v3
|
h2o-py/tests/testdir_algos/kmeans/pyunit_ozoneKmeans.py
|
Python
|
apache-2.0
| 589
|
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""this module contains a set of functions to create astroid trees from scratch
(build_* functions) or from living objects (object_build_* functions)
"""
import inspect
import logging
import os
import sys
import types
import six
from astroid import bases
from astroid import manager
from astroid import node_classes
from astroid import nodes
MANAGER = manager.AstroidManager()
# the keys of CONST_CLS, e.g. python builtin types
_CONSTANTS = tuple(node_classes.CONST_CLS)
_JYTHON = os.name == 'java'
_BUILTINS = vars(six.moves.builtins)
_LOG = logging.getLogger(__name__)
def _io_discrepancy(member):
# _io module names itself `io`: http://bugs.python.org/issue18602
member_self = getattr(member, '__self__', None)
return (member_self and
inspect.ismodule(member_self) and
member_self.__name__ == '_io' and
member.__module__ == 'io')
def _attach_local_node(parent, node, name):
node.name = name # needed by add_local_node
parent.add_local_node(node)
def _add_dunder_class(func, member):
"""Add a __class__ member to the given func node, if we can determine it."""
python_cls = member.__class__
cls_name = getattr(python_cls, '__name__', None)
if not cls_name:
return
cls_bases = [ancestor.__name__ for ancestor in python_cls.__bases__]
ast_klass = build_class(cls_name, cls_bases, python_cls.__doc__)
func.instance_attrs['__class__'] = [ast_klass]
_marker = object()
def attach_dummy_node(node, name, runtime_object=_marker):
"""create a dummy node and register it in the locals of the given
node with the specified name
"""
enode = nodes.EmptyNode()
enode.object = runtime_object
_attach_local_node(node, enode, name)
def _has_underlying_object(self):
return self.object is not None and self.object is not _marker
nodes.EmptyNode.has_underlying_object = _has_underlying_object
def attach_const_node(node, name, value):
"""create a Const node and register it in the locals of the given
node with the specified name
"""
if name not in node.special_attributes:
_attach_local_node(node, nodes.const_factory(value), name)
def attach_import_node(node, modname, membername):
"""create a ImportFrom node and register it in the locals of the given
node with the specified name
"""
from_node = nodes.ImportFrom(modname, [(membername, None)])
_attach_local_node(node, from_node, membername)
def build_module(name, doc=None):
"""create and initialize a astroid Module node"""
node = nodes.Module(name, doc, pure_python=False)
node.package = False
node.parent = None
return node
def build_class(name, basenames=(), doc=None):
"""create and initialize a astroid ClassDef node"""
node = nodes.ClassDef(name, doc)
for base in basenames:
basenode = nodes.Name()
basenode.name = base
node.bases.append(basenode)
basenode.parent = node
return node
def build_function(name, args=None, defaults=None, doc=None):
"""create and initialize a astroid FunctionDef node"""
args, defaults = args or [], defaults or []
# first argument is now a list of decorators
func = nodes.FunctionDef(name, doc)
func.args = argsnode = nodes.Arguments()
argsnode.args = []
for arg in args:
argsnode.args.append(nodes.Name())
argsnode.args[-1].name = arg
argsnode.args[-1].parent = argsnode
argsnode.defaults = []
for default in defaults:
argsnode.defaults.append(nodes.const_factory(default))
argsnode.defaults[-1].parent = argsnode
argsnode.kwarg = None
argsnode.vararg = None
argsnode.parent = func
if args:
register_arguments(func)
return func
def build_from_import(fromname, names):
"""create and initialize an astroid ImportFrom import statement"""
return nodes.ImportFrom(fromname, [(name, None) for name in names])
def register_arguments(func, args=None):
"""add given arguments to local
args is a list that may contain nested lists
(i.e. def func(a, (b, c, d)): ...)
"""
if args is None:
args = func.args.args
if func.args.vararg:
func.set_local(func.args.vararg, func.args)
if func.args.kwarg:
func.set_local(func.args.kwarg, func.args)
for arg in args:
if isinstance(arg, nodes.Name):
func.set_local(arg.name, arg)
else:
register_arguments(func, arg.elts)
def object_build_class(node, member, localname):
"""create astroid for a living class object"""
basenames = [base.__name__ for base in member.__bases__]
return _base_class_object_build(node, member, basenames,
localname=localname)
def object_build_function(node, member, localname):
"""create astroid for a living function object"""
# pylint: disable=deprecated-method; completely removed in 2.0
args, varargs, varkw, defaults = inspect.getargspec(member)
if varargs is not None:
args.append(varargs)
if varkw is not None:
args.append(varkw)
func = build_function(getattr(member, '__name__', None) or localname, args,
defaults, member.__doc__)
node.add_local_node(func, localname)
def object_build_datadescriptor(node, member, name):
"""create astroid for a living data descriptor object"""
return _base_class_object_build(node, member, [], name)
def object_build_methoddescriptor(node, member, localname):
"""create astroid for a living method descriptor object"""
# FIXME get arguments ?
func = build_function(getattr(member, '__name__', None) or localname,
doc=member.__doc__)
# set node's arguments to None to notice that we have no information, not
# an empty argument list
func.args.args = None
node.add_local_node(func, localname)
_add_dunder_class(func, member)
def _base_class_object_build(node, member, basenames, name=None, localname=None):
"""create astroid for a living class object, with a given set of base names
(e.g. ancestors)
"""
klass = build_class(name or getattr(member, '__name__', None) or localname,
basenames, member.__doc__)
klass._newstyle = isinstance(member, type)
node.add_local_node(klass, localname)
try:
# limit the instantiation trick since it's too dangerous
# (such as infinite test execution...)
# this at least resolves common case such as Exception.args,
# OSError.errno
if issubclass(member, Exception):
instdict = member().__dict__
else:
raise TypeError
except: # pylint: disable=bare-except
pass
else:
for item_name, obj in instdict.items():
valnode = nodes.EmptyNode()
valnode.object = obj
valnode.parent = klass
valnode.lineno = 1
klass.instance_attrs[item_name] = [valnode]
return klass
def _build_from_function(node, name, member, module):
# verify this is not an imported function
try:
code = six.get_function_code(member)
except AttributeError:
# Some implementations don't provide the code object,
# such as Jython.
code = None
filename = getattr(code, 'co_filename', None)
if filename is None:
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif filename != getattr(module, '__file__', None):
attach_dummy_node(node, name, member)
else:
object_build_function(node, member, name)
class InspectBuilder(object):
"""class for building nodes from living object
this is actually a really minimal representation, including only Module,
FunctionDef and ClassDef nodes and some others as guessed.
"""
# astroid from living objects ###############################################
def __init__(self):
self._done = {}
self._module = None
def inspect_build(self, module, modname=None, path=None):
"""build astroid from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
if modname is None:
modname = module.__name__
try:
node = build_module(modname, module.__doc__)
except AttributeError:
# in jython, java modules have no __doc__ (see #109562)
node = build_module(modname)
node.file = node.path = os.path.abspath(path) if path else path
node.name = modname
MANAGER.cache_module(node)
node.package = hasattr(module, '__path__')
self._done = {}
self.object_build(node, module)
return node
def object_build(self, node, obj):
"""recursive method which create a partial ast from real objects
(only function, class, and method are handled)
"""
if obj in self._done:
return self._done[obj]
self._done[obj] = node
for name in dir(obj):
try:
member = getattr(obj, name)
except AttributeError:
# damned ExtensionClass.Base, I know you're there !
attach_dummy_node(node, name)
continue
if inspect.ismethod(member):
member = six.get_method_function(member)
if inspect.isfunction(member):
_build_from_function(node, name, member, self._module)
elif inspect.isbuiltin(member):
if (not _io_discrepancy(member) and
self.imported_member(node, member, name)):
continue
object_build_methoddescriptor(node, member, name)
elif inspect.isclass(member):
if self.imported_member(node, member, name):
continue
if member in self._done:
class_node = self._done[member]
if class_node not in node.locals.get(name, ()):
node.add_local_node(class_node, name)
else:
class_node = object_build_class(node, member, name)
# recursion
self.object_build(class_node, member)
if name == '__class__' and class_node.parent is None:
class_node.parent = self._done[self._module]
elif inspect.ismethoddescriptor(member):
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif inspect.isdatadescriptor(member):
assert isinstance(member, object)
object_build_datadescriptor(node, member, name)
elif isinstance(member, _CONSTANTS):
attach_const_node(node, name, member)
elif inspect.isroutine(member):
# This should be called for Jython, where some builtin
# methods aren't caught by isbuiltin branch.
_build_from_function(node, name, member, self._module)
else:
# create an empty node so that the name is actually defined
attach_dummy_node(node, name, member)
def imported_member(self, node, member, name):
"""verify this is not an imported class or handle it"""
# /!\ some classes like ExtensionClass don't have a __module__
# attribute! Also, this may trigger an exception on badly built modules
# (see http://www.logilab.org/ticket/57299 for instance)
try:
modname = getattr(member, '__module__', None)
except: # pylint: disable=bare-except
_LOG.exception('unexpected error while building '
'astroid from living object')
modname = None
if modname is None:
if (name in ('__new__', '__subclasshook__')
or (name in _BUILTINS and _JYTHON)):
# Python 2.5.1 (r251:54863, Sep 1 2010, 22:03:14)
# >>> print object.__new__.__module__
# None
modname = six.moves.builtins.__name__
else:
attach_dummy_node(node, name, member)
return True
real_name = {
'gtk': 'gtk_gtk',
'_io': 'io',
}.get(modname, modname)
if real_name != self._module.__name__:
# check if it sounds valid and then add an import node, else use a
# dummy node
try:
getattr(sys.modules[modname], name)
except (KeyError, AttributeError):
attach_dummy_node(node, name, member)
else:
attach_import_node(node, modname, name)
return True
return False
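# Illustrative use of InspectBuilder on a living C module (the module and
# attribute below are standard library, not astroid fixtures):
#
#   import math
#   math_node = InspectBuilder().inspect_build(math)
#   assert 'sqrt' in math_node.locals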
### astroid bootstrapping ######################################################
Astroid_BUILDER = InspectBuilder()
_CONST_PROXY = {}
def _astroid_bootstrapping(astroid_builtin=None):
"""astroid boot strapping the builtins module"""
# this boot strapping is necessary since we need the Const nodes to
# inspect_build builtins, and then we can proxy Const
if astroid_builtin is None:
from six.moves import builtins
astroid_builtin = Astroid_BUILDER.inspect_build(builtins)
# pylint: disable=redefined-outer-name
for cls, node_cls in node_classes.CONST_CLS.items():
if cls is type(None):
proxy = build_class('NoneType')
proxy.parent = astroid_builtin
elif cls is type(NotImplemented):
proxy = build_class('NotImplementedType')
proxy.parent = astroid_builtin
else:
proxy = astroid_builtin.getattr(cls.__name__)[0]
if cls in (dict, list, set, tuple):
node_cls._proxied = proxy
else:
_CONST_PROXY[cls] = proxy
_astroid_bootstrapping()
# TODO : find a nicer way to handle this situation;
# However __proxied introduced an
# infinite recursion (see https://bugs.launchpad.net/pylint/+bug/456870)
def _set_proxied(const):
return _CONST_PROXY[const.value.__class__]
nodes.Const._proxied = property(_set_proxied)
_GeneratorType = nodes.ClassDef(types.GeneratorType.__name__, types.GeneratorType.__doc__)
_GeneratorType.parent = MANAGER.astroid_cache[six.moves.builtins.__name__]
bases.Generator._proxied = _GeneratorType
Astroid_BUILDER.object_build(bases.Generator._proxied, types.GeneratorType)
_builtins = MANAGER.astroid_cache[six.moves.builtins.__name__]
BUILTIN_TYPES = (types.GetSetDescriptorType, types.GeneratorType,
types.MemberDescriptorType, type(None), type(NotImplemented),
types.FunctionType, types.MethodType,
types.BuiltinFunctionType, types.ModuleType, types.TracebackType)
for _type in BUILTIN_TYPES:
if _type.__name__ not in _builtins:
cls = nodes.ClassDef(_type.__name__, _type.__doc__)
cls.parent = MANAGER.astroid_cache[six.moves.builtins.__name__]
Astroid_BUILDER.object_build(cls, _type)
_builtins[_type.__name__] = cls
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/astroid/raw_building.py
|
Python
|
apache-2.0
| 15,733
|
# coding:utf-8
import logging
import regex as re
import email.quoprimime
import email.base64mime
from base64 import b64encode
from flanker.mime.message import charsets, errors
log = logging.getLogger(__name__)
#deal with unfolding
foldingWhiteSpace = re.compile(r"(\n\r?|\r\n?)(\s*)")
def unfold(value):
"""
Unfolding is accomplished by simply removing any CRLF
that is immediately followed by WSP. Each header field should be
treated in its unfolded form for further syntactic and semantic
evaluation.
"""
return re.sub(foldingWhiteSpace, r"\2", value)
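# e.g. unfold("Subject: long\r\n value") == "Subject: long value" - the CRLF
# is removed while the folding whitespace that follows it is kept.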
def decode(header):
return mime_to_unicode(header)
def mime_to_unicode(header):
"""
Takes a header value and returns a fully decoded unicode string.
It differs from standard Python's email.header.decode_header() because:
- it is higher level, i.e. returns a unicode string instead of
an array of tuples
- it accepts Unicode and non-ASCII strings as well
>>> header_to_unicode("=?UTF-8?B?UmVbMl06INCX0LXQvNC70Y/QutC4?=")
u"Земляки"
>>> header_to_unicode("hello")
u"Hello"
"""
try:
header = unfold(header)
decoded = [] # decoded parts
while header:
match = encodedWord.search(header)
if match:
start = match.start()
if start != 0:
# decodes unencoded ascii part to unicode
value = charsets.convert_to_unicode(ascii, header[0:start])
if value.strip():
decoded.append(value)
# decode a header =?...?= of encoding
charset, value = decode_part(
match.group('charset').lower(),
match.group('encoding').lower(),
match.group('encoded'))
decoded.append(charsets.convert_to_unicode(charset, value))
header = header[match.end():]
else:
# no match? append the remainder
# of the string to the list of chunks
decoded.append(charsets.convert_to_unicode(ascii, header))
break
return u"".join(decoded)
except Exception:
try:
log.warning(
u"HEADER-DECODE-FAIL: ({0}) - b64encoded".format(
b64encode(header)))
except Exception:
log.exception("Failed to log exception")
return header
ascii = 'ascii'
#this spec refers to
#http://tools.ietf.org/html/rfc2047
encodedWord = re.compile(r'''(?P<encodedWord>
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
)''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
def decode_part(charset, encoding, value):
"""
Attempts to decode part, understands
'q' - quoted encoding
'b' - base64 mime encoding
Returns (charset, decoded-string)
"""
if encoding == 'q':
return (charset, email.quoprimime.header_decode(str(value)))
elif encoding == 'b':
# Postel's law: add missing padding
paderr = len(value) % 4
if paderr:
value += '==='[:4 - paderr]
return (charset, email.base64mime.decode(value))
elif not encoding:
return (charset, value)
else:
raise errors.DecodingError(
"Unknown encoding: {0}".format(encoding))
|
alex/flanker
|
flanker/mime/message/headers/encodedword.py
|
Python
|
apache-2.0
| 3,690
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for providing semantic segmentation video data."""
import tensorflow as tf
from feelvos import input_preprocess
from feelvos import model
from feelvos.utils import mask_damaging
from feelvos.utils import train_utils
slim = tf.contrib.slim
dataset_data_provider = slim.dataset_data_provider
MIN_LABEL_COUNT = 10
def decode_image_sequence(tensor, image_format='jpeg', shape=None,
channels=3, raw_dtype=tf.uint8):
"""Decodes a sequence of images.
Args:
tensor: the tensor of strings to decode, shape: [num_images]
image_format: a string (possibly tensor) with the format of the image.
Options include 'jpeg', 'png', and 'raw'.
shape: a list or tensor of the decoded image shape for a single image.
channels: if 'shape' is None, the third dimension of the image is set to
this value.
raw_dtype: if the image is encoded as raw bytes, this is the method of
decoding the bytes into values.
Returns:
The decoded images with shape [time, height, width, channels].
"""
handler = slim.tfexample_decoder.Image(
shape=shape, channels=channels, dtype=raw_dtype, repeated=True)
return handler.tensors_to_item({'image/encoded': tensor,
'image/format': image_format})
def _get_data(data_provider, dataset_split, video_frames_are_decoded):
"""Gets data from data provider.
Args:
data_provider: An object of slim.data_provider.
dataset_split: Dataset split.
video_frames_are_decoded: Boolean, whether the video frames are already
decoded
Returns:
image: Image Tensor.
label: Label Tensor storing segmentation annotations.
object_label: An integer refers to object_label according to labelmap. If
the example has more than one object_label, take the first one.
image_name: Image name.
height: Image height.
width: Image width.
video_id: String tensor representing the name of the video.
Raises:
ValueError: Failed to find label.
"""
if video_frames_are_decoded:
image, = data_provider.get(['image'])
else:
image, = data_provider.get(['image/encoded'])
# Some datasets do not contain image_name.
if 'image_name' in data_provider.list_items():
image_name, = data_provider.get(['image_name'])
else:
image_name = tf.constant('')
height, width = data_provider.get(['height', 'width'])
label = None
if dataset_split != 'test':
if video_frames_are_decoded:
if 'labels_class' not in data_provider.list_items():
raise ValueError('Failed to find labels.')
label, = data_provider.get(['labels_class'])
else:
key = 'segmentation/object/encoded'
if key not in data_provider.list_items():
raise ValueError('Failed to find labels.')
label, = data_provider.get([key])
object_label = None
video_id, = data_provider.get(['video_id'])
return image, label, object_label, image_name, height, width, video_id
def _has_foreground_and_background_in_first_frame(label, subsampling_factor):
"""Checks if the labels have foreground and background in the first frame.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
subsampling_factor: Integer, the subsampling factor.
Returns:
Boolean, whether the labels have foreground and background in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis],
[h // subsampling_factor,
w // subsampling_factor],
align_corners=True),
axis=0)
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
# Just using reduce_any was not robust enough, so let's make sure the count
# is above MIN_LABEL_COUNT.
fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
has_fg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
has_bg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
return tf.logical_and(has_bg, has_fg)
def _has_foreground_and_background_in_first_frame_2(label,
decoder_output_stride):
"""Checks if the labels have foreground and background in the first frame.
Second attempt, this time we use the actual output dimension for resizing.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
decoder_output_stride: Integer, the stride of the decoder output.
Returns:
Boolean, whether the labels have foreground and background in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
align_corners=True), axis=0)
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
# Just using reduce_any was not robust enough, so let's make sure the count
# is above MIN_LABEL_COUNT.
fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
has_fg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
has_bg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
return tf.logical_and(has_bg, has_fg)
def _has_enough_pixels_of_each_object_in_first_frame(
label, decoder_output_stride):
"""Checks if for each object (incl. background) enough pixels are visible.
During test time, we will usually not see a reference frame in which only
very few pixels of one object are visible. These cases can be problematic
during training, especially if more than the 1-nearest neighbor is used.
That's why this function can be used to detect and filter these cases.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
decoder_output_stride: Integer, the stride of the decoder output.
Returns:
Boolean, whether the labels have enough pixels of each object in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
align_corners=True), axis=0)
_, _, counts = tf.unique_with_counts(
tf.reshape(label_downscaled, [-1]))
has_enough_pixels_per_object = tf.reduce_all(
tf.greater_equal(counts, MIN_LABEL_COUNT))
return has_enough_pixels_per_object
def get(dataset,
num_frames_per_video,
crop_size,
batch_size,
min_resize_value=None,
max_resize_value=None,
resize_factor=None,
min_scale_factor=1.,
max_scale_factor=1.,
scale_factor_step_size=0,
preprocess_image_and_label=True,
num_readers=1,
num_threads=1,
dataset_split=None,
is_training=True,
model_variant=None,
batch_capacity_factor=32,
video_frames_are_decoded=False,
decoder_output_stride=None,
first_frame_finetuning=False,
sample_only_first_frame_for_finetuning=False,
sample_adjacent_and_consistent_query_frames=False,
remap_labels_to_reference_frame=True,
generate_prev_frame_mask_by_mask_damaging=False,
three_frame_dataset=False,
add_prev_frame_label=True):
"""Gets the dataset split for semantic segmentation.
This functions gets the dataset split for semantic segmentation. In
particular, it is a wrapper of (1) dataset_data_provider which returns the raw
dataset split, (2) input_preprocess which preprocesses the raw data, and (3) the
TensorFlow operation of batching the preprocessed data. Then, the output could
be directly used by training, evaluation or visualization.
Args:
dataset: An instance of slim Dataset.
num_frames_per_video: The number of frames used per video
crop_size: Image crop size [height, width].
batch_size: Batch size.
min_resize_value: Desired size of the smaller image side.
max_resize_value: Maximum allowed size of the larger image side.
resize_factor: Resized dimensions are multiple of factor plus one.
min_scale_factor: Minimum scale factor value.
max_scale_factor: Maximum scale factor value.
scale_factor_step_size: The step size from min scale factor to max scale
factor. The input is randomly scaled based on the value of
(min_scale_factor, max_scale_factor, scale_factor_step_size).
preprocess_image_and_label: Boolean variable specifies if preprocessing of
image and label will be performed or not.
num_readers: Number of readers for data provider.
num_threads: Number of threads for batching data.
dataset_split: Dataset split.
is_training: Is training or not.
model_variant: Model variant (string) for choosing how to mean-subtract the
images. See feature_extractor.network_map for supported model variants.
batch_capacity_factor: Batch capacity factor affecting the training queue
batch capacity.
video_frames_are_decoded: Boolean, whether the video frames are already
decoded
decoder_output_stride: Integer, the stride of the decoder output.
first_frame_finetuning: Boolean, whether to only sample the first frame
for fine-tuning.
sample_only_first_frame_for_finetuning: Boolean, whether to only sample the
first frame during fine-tuning. This should be False when using lucid or
wonderland data, but true when fine-tuning on the first frame only.
Only has an effect if first_frame_finetuning is True.
sample_adjacent_and_consistent_query_frames: Boolean, if true, the query
frames (all but the first frame which is the reference frame) will be
sampled such that they are adjacent video frames and have the same
crop coordinates and flip augmentation.
remap_labels_to_reference_frame: Boolean, whether to remap the labels of
the query frames to match the labels of the (downscaled) reference frame.
If a query frame contains a label which is not present in the reference,
it will be mapped to background.
generate_prev_frame_mask_by_mask_damaging: Boolean, whether to generate
the masks used as guidance from the previous frame by damaging the
ground truth mask.
three_frame_dataset: Boolean, whether the dataset has exactly three frames
per video of which the first is to be used as reference and the two
others are consecutive frames to be used as query frames.
add_prev_frame_label: Boolean, whether to sample one more frame before the
first query frame to obtain a previous frame label. Only has an effect,
if sample_adjacent_and_consistent_query_frames is True and
generate_prev_frame_mask_by_mask_damaging is False.
Returns:
A dictionary of batched Tensors for semantic segmentation.
Raises:
ValueError: dataset_split is None, or Failed to find labels.
"""
if dataset_split is None:
raise ValueError('Unknown dataset split.')
if model_variant is None:
tf.logging.warning('Please specify a model_variant. See '
'feature_extractor.network_map for supported model '
'variants.')
data_provider = dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=num_readers,
num_epochs=None if is_training else 1,
shuffle=is_training)
image, label, object_label, image_name, height, width, video_id = _get_data(
data_provider, dataset_split, video_frames_are_decoded)
sampling_is_valid = tf.constant(True)
if num_frames_per_video is not None:
total_num_frames = tf.shape(image)[0]
if first_frame_finetuning or three_frame_dataset:
if sample_only_first_frame_for_finetuning:
assert not sample_adjacent_and_consistent_query_frames, (
'this option does not make sense for sampling only first frame.')
# Sample the first frame num_frames_per_video times.
sel_indices = tf.tile(tf.constant(0, dtype=tf.int32)[tf.newaxis],
multiples=[num_frames_per_video])
else:
if sample_adjacent_and_consistent_query_frames:
if add_prev_frame_label:
num_frames_per_video += 1
# Since this is first frame fine-tuning, we'll for now assume that
# each sequence has exactly 3 images: the ref frame and 2 adjacent
# query frames.
assert num_frames_per_video == 3
with tf.control_dependencies([tf.assert_equal(total_num_frames, 3)]):
sel_indices = tf.constant([1, 2], dtype=tf.int32)
else:
# Sample num_frames_per_video - 1 query frames which are not the
# first frame.
sel_indices = tf.random_shuffle(
tf.range(1, total_num_frames))[:(num_frames_per_video - 1)]
# Concat first frame as reference frame to the front.
sel_indices = tf.concat([tf.constant(0, dtype=tf.int32)[tf.newaxis],
sel_indices], axis=0)
else:
if sample_adjacent_and_consistent_query_frames:
if add_prev_frame_label:
# Sample one more frame which we can use to provide initial softmax
# feedback.
num_frames_per_video += 1
ref_idx = tf.random_shuffle(tf.range(total_num_frames))[0]
sampling_is_valid = tf.greater_equal(total_num_frames,
num_frames_per_video)
def sample_query_start_idx():
return tf.random_shuffle(
tf.range(total_num_frames - num_frames_per_video + 1))[0]
query_start_idx = tf.cond(sampling_is_valid, sample_query_start_idx,
lambda: tf.constant(0, dtype=tf.int32))
def sample_sel_indices():
return tf.concat(
[ref_idx[tf.newaxis],
tf.range(
query_start_idx,
query_start_idx + (num_frames_per_video - 1))], axis=0)
sel_indices = tf.cond(
sampling_is_valid, sample_sel_indices,
lambda: tf.zeros((num_frames_per_video,), dtype=tf.int32))
else:
# Randomly sample some frames from the video.
sel_indices = tf.random_shuffle(
tf.range(total_num_frames))[:num_frames_per_video]
image = tf.gather(image, sel_indices, axis=0)
if not video_frames_are_decoded:
image = decode_image_sequence(image)
if label is not None:
if num_frames_per_video is not None:
label = tf.gather(label, sel_indices, axis=0)
if not video_frames_are_decoded:
label = decode_image_sequence(label, image_format='png', channels=1)
# Sometimes, label is saved as [num_frames_per_video, height, width] or
# [num_frames_per_video, height, width, 1]. We change it to be
# [num_frames_per_video, height, width, 1].
if label.shape.ndims == 3:
label = tf.expand_dims(label, 3)
elif label.shape.ndims == 4 and label.shape.dims[3] == 1:
pass
else:
raise ValueError('Input label shape must be '
'[num_frames_per_video, height, width],'
' or [num_frames, height, width, 1]. '
'Got {}'.format(label.shape.ndims))
label.set_shape([None, None, None, 1])
# Add size of first dimension since tf can't figure it out automatically.
image.set_shape((num_frames_per_video, None, None, None))
if label is not None:
label.set_shape((num_frames_per_video, None, None, None))
preceding_frame_label = None
if preprocess_image_and_label:
if num_frames_per_video is None:
raise ValueError('num_frame_per_video must be specified for preproc.')
original_images = []
images = []
labels = []
if sample_adjacent_and_consistent_query_frames:
num_frames_individual_preproc = 1
else:
num_frames_individual_preproc = num_frames_per_video
for frame_idx in range(num_frames_individual_preproc):
original_image_t, image_t, label_t = (
input_preprocess.preprocess_image_and_label(
image[frame_idx],
label[frame_idx],
crop_height=crop_size[0] if crop_size is not None else None,
crop_width=crop_size[1] if crop_size is not None else None,
min_resize_value=min_resize_value,
max_resize_value=max_resize_value,
resize_factor=resize_factor,
min_scale_factor=min_scale_factor,
max_scale_factor=max_scale_factor,
scale_factor_step_size=scale_factor_step_size,
ignore_label=dataset.ignore_label,
is_training=is_training,
model_variant=model_variant))
original_images.append(original_image_t)
images.append(image_t)
labels.append(label_t)
if sample_adjacent_and_consistent_query_frames:
imgs_for_preproc = [image[frame_idx] for frame_idx in
range(1, num_frames_per_video)]
labels_for_preproc = [label[frame_idx] for frame_idx in
range(1, num_frames_per_video)]
original_image_rest, image_rest, label_rest = (
input_preprocess.preprocess_images_and_labels_consistently(
imgs_for_preproc,
labels_for_preproc,
crop_height=crop_size[0] if crop_size is not None else None,
crop_width=crop_size[1] if crop_size is not None else None,
min_resize_value=min_resize_value,
max_resize_value=max_resize_value,
resize_factor=resize_factor,
min_scale_factor=min_scale_factor,
max_scale_factor=max_scale_factor,
scale_factor_step_size=scale_factor_step_size,
ignore_label=dataset.ignore_label,
is_training=is_training,
model_variant=model_variant))
original_images.extend(original_image_rest)
images.extend(image_rest)
labels.extend(label_rest)
assert len(original_images) == num_frames_per_video
assert len(images) == num_frames_per_video
assert len(labels) == num_frames_per_video
if remap_labels_to_reference_frame:
# Remap labels to indices into the labels of the (downscaled) reference
# frame, or 0, i.e. background, for labels which are not present
# in the reference.
reference_labels = labels[0][tf.newaxis]
h, w = train_utils.resolve_shape(reference_labels)[1:3]
embedding_height = model.scale_dimension(
h, 1.0 / decoder_output_stride)
embedding_width = model.scale_dimension(
w, 1.0 / decoder_output_stride)
reference_labels_embedding_size = tf.squeeze(
tf.image.resize_nearest_neighbor(
reference_labels, tf.stack([embedding_height, embedding_width]),
align_corners=True),
axis=0)
# Get sorted unique labels in the reference frame.
labels_in_ref_frame, _ = tf.unique(
tf.reshape(reference_labels_embedding_size, [-1]))
labels_in_ref_frame = tf.contrib.framework.sort(labels_in_ref_frame)
for idx in range(1, len(labels)):
ref_label_mask = tf.equal(
labels[idx],
labels_in_ref_frame[tf.newaxis, tf.newaxis, :])
remapped = tf.argmax(tf.cast(ref_label_mask, tf.uint8), axis=-1,
output_type=tf.int32)
# Set to 0 if label is not present
is_in_ref = tf.reduce_any(ref_label_mask, axis=-1)
remapped *= tf.cast(is_in_ref, tf.int32)
labels[idx] = remapped[..., tf.newaxis]
if sample_adjacent_and_consistent_query_frames:
if first_frame_finetuning and generate_prev_frame_mask_by_mask_damaging:
preceding_frame_label = mask_damaging.damage_masks(labels[1])
elif add_prev_frame_label:
# Discard the image of the additional frame and take the label as
# initialization for softmax feedback.
original_images = [original_images[0]] + original_images[2:]
preceding_frame_label = labels[1]
images = [images[0]] + images[2:]
labels = [labels[0]] + labels[2:]
num_frames_per_video -= 1
original_image = tf.stack(original_images, axis=0)
image = tf.stack(images, axis=0)
label = tf.stack(labels, axis=0)
else:
if label is not None:
# Need to set label shape due to batching.
label.set_shape([num_frames_per_video,
None if crop_size is None else crop_size[0],
None if crop_size is None else crop_size[1],
1])
original_image = tf.to_float(tf.zeros_like(label))
if crop_size is None:
height = tf.shape(image)[1]
width = tf.shape(image)[2]
else:
height = crop_size[0]
width = crop_size[1]
sample = {'image': image,
'image_name': image_name,
'height': height,
'width': width,
'video_id': video_id}
if label is not None:
sample['label'] = label
if object_label is not None:
sample['object_label'] = object_label
if preceding_frame_label is not None:
sample['preceding_frame_label'] = preceding_frame_label
if not is_training:
# Original image is only used during visualization.
sample['original_image'] = original_image
if is_training:
if first_frame_finetuning:
keep_input = tf.constant(True)
else:
keep_input = tf.logical_and(sampling_is_valid, tf.logical_and(
_has_enough_pixels_of_each_object_in_first_frame(
label, decoder_output_stride),
_has_foreground_and_background_in_first_frame_2(
label, decoder_output_stride)))
batched = tf.train.maybe_batch(sample,
keep_input=keep_input,
batch_size=batch_size,
num_threads=num_threads,
capacity=batch_capacity_factor * batch_size,
dynamic_pad=True)
else:
batched = tf.train.batch(sample,
batch_size=batch_size,
num_threads=num_threads,
capacity=batch_capacity_factor * batch_size,
dynamic_pad=True)
# Flatten from [batch, num_frames_per_video, ...] to
# [batch * num_frames_per_video, ...].
cropped_height = train_utils.resolve_shape(batched['image'])[2]
cropped_width = train_utils.resolve_shape(batched['image'])[3]
if num_frames_per_video is None:
first_dim = -1
else:
first_dim = batch_size * num_frames_per_video
batched['image'] = tf.reshape(batched['image'],
[first_dim, cropped_height, cropped_width, 3])
if label is not None:
batched['label'] = tf.reshape(batched['label'],
[first_dim, cropped_height, cropped_width, 1])
return batched
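# Hedged call sketch (argument values are illustrative; `dataset` is assumed
# to come from the feelvos/deeplab slim dataset helpers):
#
#   samples = get(dataset,
#                 num_frames_per_video=3,
#                 crop_size=[465, 465],
#                 batch_size=1,
#                 dataset_split='train',
#                 model_variant='xception_65',
#                 decoder_output_stride=4)
#   images, labels = samples['image'], samples['label']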
|
derekjchow/models
|
research/feelvos/utils/video_input_generator.py
|
Python
|
apache-2.0
| 24,003
|
# -*- coding: utf-8 -*-
"""
twython.streaming.types
~~~~~~~~~~~~~~~~~~~~~~~
This module contains classes and methods for :class:`TwythonStreamer` to use.
"""
class TwythonStreamerTypes(object):
"""Class for different stream endpoints
Not all streaming endpoints have nested endpoints.
    User Streams and Site Streams are single streams with no nested endpoints.
    Status Streams include the filter, sample and firehose endpoints.
"""
def __init__(self, streamer):
self.streamer = streamer
self.statuses = TwythonStreamerTypesStatuses(streamer)
def user(self, **params):
"""Stream user
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/user
"""
url = 'https://userstream.twitter.com/%s/user.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def site(self, **params):
"""Stream site
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/site
"""
url = 'https://sitestream.twitter.com/%s/site.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
class TwythonStreamerTypesStatuses(object):
"""Class for different statuses endpoints
    Exists so that TwythonStreamer.statuses.filter() is available.
Just a bit cleaner than TwythonStreamer.statuses_filter(),
statuses_sample(), etc. all being single methods in TwythonStreamer
"""
def __init__(self, streamer):
self.streamer = streamer
def filter(self, **params):
"""Stream statuses/filter
        :param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/post/statuses/filter
"""
url = 'https://stream.twitter.com/%s/statuses/filter.json' \
% self.streamer.api_version
self.streamer._request(url, 'POST', params=params)
def sample(self, **params):
"""Stream statuses/sample
        :param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/sample
"""
url = 'https://stream.twitter.com/%s/statuses/sample.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def firehose(self, **params):
"""Stream statuses/firehose
        :param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/firehose
"""
url = 'https://stream.twitter.com/%s/statuses/firehose.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
|
dakiri/splunk-app-twitter
|
twitter2/bin/twython/streaming/types.py
|
Python
|
apache-2.0
| 2,822
|
"""Support for Hive light devices."""
from datetime import timedelta
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.helpers.entity import DeviceInfo
import homeassistant.util.color as color_util
from . import HiveEntity, refresh_system
from .const import ATTR_MODE, DOMAIN
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Hive thermostat based on a config entry."""
hive = hass.data[DOMAIN][entry.entry_id]
devices = hive.session.deviceList.get("light")
entities = []
if devices:
for dev in devices:
entities.append(HiveDeviceLight(hive, dev))
async_add_entities(entities, True)
class HiveDeviceLight(HiveEntity, LightEntity):
"""Hive Active Light Device."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return device information."""
return DeviceInfo(
identifiers={(DOMAIN, self.device["device_id"])},
manufacturer=self.device["deviceData"]["manufacturer"],
model=self.device["deviceData"]["model"],
name=self.device["device_name"],
sw_version=self.device["deviceData"]["version"],
via_device=(DOMAIN, self.device["parentDevice"]),
)
@property
def name(self):
"""Return the display name of this light."""
return self.device["haName"]
@property
def available(self):
"""Return if the device is available."""
return self.device["deviceData"]["online"]
@property
def extra_state_attributes(self):
"""Show Device Attributes."""
return {
ATTR_MODE: self.attributes.get(ATTR_MODE),
}
@property
def brightness(self):
"""Brightness of the light (an integer in the range 1-255)."""
return self.device["status"]["brightness"]
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self.device.get("min_mireds")
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self.device.get("max_mireds")
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self.device["status"].get("color_temp")
@property
def hs_color(self):
"""Return the hs color value."""
if self.device["status"]["mode"] == "COLOUR":
rgb = self.device["status"].get("hs_color")
return color_util.color_RGB_to_hs(*rgb)
return None
@property
def is_on(self):
"""Return true if light is on."""
return self.device["status"]["state"]
@refresh_system
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
new_brightness = None
new_color_temp = None
new_color = None
if ATTR_BRIGHTNESS in kwargs:
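            # Scale the 0-255 Home Assistant brightness to a percentage and
            # snap it to the nearest 5% step (minimum 5%); the 5% step size
            # appears to be the granularity the Hive API works with.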
tmp_new_brightness = kwargs.get(ATTR_BRIGHTNESS)
percentage_brightness = (tmp_new_brightness / 255) * 100
new_brightness = int(round(percentage_brightness / 5.0) * 5.0)
if new_brightness == 0:
new_brightness = 5
if ATTR_COLOR_TEMP in kwargs:
tmp_new_color_temp = kwargs.get(ATTR_COLOR_TEMP)
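            # Convert mireds (micro reciprocal degrees) to Kelvin:
            # K = 1,000,000 / mireds; the Hive call presumably expects Kelvin.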
new_color_temp = round(1000000 / tmp_new_color_temp)
if ATTR_HS_COLOR in kwargs:
get_new_color = kwargs.get(ATTR_HS_COLOR)
hue = int(get_new_color[0])
saturation = int(get_new_color[1])
new_color = (hue, saturation, 100)
await self.hive.light.turnOn(
self.device, new_brightness, new_color_temp, new_color
)
@refresh_system
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self.hive.light.turnOff(self.device)
@property
def supported_features(self):
"""Flag supported features."""
supported_features = None
if self.device["hiveType"] == "warmwhitelight":
supported_features = SUPPORT_BRIGHTNESS
elif self.device["hiveType"] == "tuneablelight":
supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
elif self.device["hiveType"] == "colourtuneablelight":
supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR
return supported_features
async def async_update(self):
"""Update all Node data from Hive."""
await self.hive.session.updateData(self.device)
self.device = await self.hive.light.getLight(self.device)
self.attributes.update(self.device.get("attributes", {}))
|
jawilson/home-assistant
|
homeassistant/components/hive/light.py
|
Python
|
apache-2.0
| 5,000
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains the class for plotting and customizing
Line/Linear Plots with :mod:`trappy.trace.BareTrace` or derived
classes. This plot only works when run from an IPython notebook
"""
from collections import OrderedDict
import matplotlib.pyplot as plt
from trappy.plotter import AttrConf
from trappy.plotter import Utils
from trappy.plotter.Constraint import ConstraintManager
from trappy.plotter.ILinePlotGen import ILinePlotGen
from trappy.plotter.AbstractDataPlotter import AbstractDataPlotter
from trappy.plotter.ColorMap import ColorMap
from trappy.plotter import IPythonConf
from trappy.utils import handle_duplicate_index
import pandas as pd
if not IPythonConf.check_ipython():
raise ImportError("Ipython Environment not Found")
class ILinePlot(AbstractDataPlotter):
"""
This class uses :mod:`trappy.plotter.Constraint.Constraint` to
represent different permutations of input parameters. These
constraints are generated by creating an instance of
:mod:`trappy.plotter.Constraint.ConstraintManager`.
:param traces: The input data
:type traces: a list of :mod:`trappy.trace.FTrace`,
:mod:`trappy.trace.SysTrace`, :mod:`trappy.trace.BareTrace`
or :mod:`pandas.DataFrame` or a single instance of them.
:param column: specifies the name of the column to
be plotted.
:type column: (str, list(str))
:param templates: TRAPpy events
.. note::
This is not required if a :mod:`pandas.DataFrame` is
used
:type templates: :mod:`trappy.base.Base`
:param filters: Filter the column to be plotted as per the
specified criteria. For Example:
::
filters =
{
"pid": [ 3338 ],
"cpu": [0, 2, 4],
}
:type filters: dict
:param per_line: Used to control the number of graphs
in each graph subplot row
:type per_line: int
:param concat: Draw all the pivots on a single graph
:type concat: bool
:param permute: Draw one plot for each of the traces specified
:type permute: bool
:param fill: Fill the area under the plots
:type fill: bool
:param fill_alpha: Opacity of filled area under the plots.
Implies fill=True.
:type fill_alpha: float
:param xlim: A tuple representing the upper and lower xlimits
:type xlim: tuple
:param ylim: A tuple representing the upper and lower ylimits
:type ylim: tuple
:param drawstyle: Set the drawstyle to a matplotlib compatible
drawing style.
.. note::
Only "steps-post" is supported as a valid value for
the drawstyle. This creates a step plot.
:type drawstyle: str
:param sync_zoom: Synchronize the zoom of a group of plots.
Zooming in one plot of a group (see below) will zoom in every
plot of that group. Defaults to False.
:type sync_zoom: boolean
:param group: Name given to the plots created by this ILinePlot
instance. This name is only used for synchronized zoom. If
you zoom on any plot in a group all plots will zoom at the
same time.
:type group: string
:param signals: A string of the type event_name:column to indicate
the value that needs to be plotted. You can add an additional
        parameter to specify the color of the line in rgb:
"event_name:column:color". The color is specified as a comma
separated list of rgb values, from 0 to 255 or from 0x0 to
0xff. E.g. 0xff,0x0,0x0 is red and 100,40,32 is brown.
.. note::
- Only one of `signals` or both `templates` and
`columns` should be specified
- Signals format won't work for :mod:`pandas.DataFrame`
input
:type signals: str
"""
def __init__(self, traces, templates=None, **kwargs):
# Default keys, each can be overridden in kwargs
self._layout = None
super(ILinePlot, self).__init__(traces=traces,
templates=templates)
self.set_defaults()
for key in kwargs:
self._attr[key] = kwargs[key]
if "signals" in self._attr:
self._describe_signals()
self._check_data()
if "column" not in self._attr:
raise RuntimeError("Value Column not specified")
if self._attr["drawstyle"] and self._attr["drawstyle"].startswith("steps"):
self._attr["step_plot"] = True
zip_constraints = not self._attr["permute"]
window = self._attr["xlim"] if "xlim" in self._attr else None
self.c_mgr = ConstraintManager(traces, self._attr["column"], self.templates,
self._attr["pivot"],
self._attr["filters"],
window=window,
zip_constraints=zip_constraints)
def savefig(self, *args, **kwargs):
raise NotImplementedError("Not Available for ILinePlot")
def view(self, max_datapoints=75000, test=False):
"""Displays the graph
:param max_datapoints: Maximum number of datapoints to plot.
Dygraph can make the browser unresponsive if it tries to plot
too many datapoints. Chrome 50 chokes at around 75000 on an
i7-4770 @ 3.4GHz, Firefox 47 can handle up to 200000 before
becoming too slow in the same machine. You can increase this
number if you know what you're doing and are happy to wait for
            the plot to render.
        :type max_datapoints: int
:param test: For testing purposes. Only set to true if run
from the testsuite.
:type test: boolean
"""
# Defer installation of IPython components
        # to the .view call to avoid any errors
        # when importing the module. This facilitates
        # importing the module from outside
# an IPython notebook
if not test:
IPythonConf.iplot_install("ILinePlot")
self._attr["max_datapoints"] = max_datapoints
if self._attr["concat"]:
self._plot_concat()
else:
self._plot(self._attr["permute"], test)
def set_defaults(self):
"""Sets the default attrs"""
self._attr["per_line"] = AttrConf.PER_LINE
self._attr["concat"] = AttrConf.CONCAT
self._attr["filters"] = {}
self._attr["pivot"] = AttrConf.PIVOT
self._attr["permute"] = False
self._attr["drawstyle"] = None
self._attr["step_plot"] = False
self._attr["fill"] = AttrConf.FILL
self._attr["scatter"] = AttrConf.PLOT_SCATTER
self._attr["point_size"] = AttrConf.POINT_SIZE
self._attr["map_label"] = {}
self._attr["title"] = AttrConf.TITLE
def _plot(self, permute, test):
"""Internal Method called to draw the plot"""
pivot_vals, len_pivots = self.c_mgr.generate_pivots(permute)
self._layout = ILinePlotGen(len_pivots, **self._attr)
plot_index = 0
for p_val in pivot_vals:
data_dict = OrderedDict()
for constraint in self.c_mgr:
if permute:
trace_idx, pivot = p_val
if constraint.trace_index != trace_idx:
continue
legend = constraint._template.name + ":" + constraint.column
else:
pivot = p_val
legend = str(constraint)
result = constraint.result
if pivot in result:
data_dict[legend] = result[pivot]
if permute:
title = self.traces[plot_index].name
elif pivot != AttrConf.PIVOT_VAL:
title = "{0}: {1}".format(self._attr["pivot"], self._attr["map_label"].get(pivot, pivot))
else:
title = ""
if len(data_dict) > 1:
data_frame = self._fix_indexes(data_dict)
else:
data_frame = pd.DataFrame(data_dict)
self._layout.add_plot(plot_index, data_frame, title, test=test)
plot_index += 1
self._layout.finish()
def _plot_concat(self):
"""Plot all lines on a single figure"""
pivot_vals, _ = self.c_mgr.generate_pivots()
plot_index = 0
self._layout = ILinePlotGen(len(self.c_mgr), **self._attr)
for constraint in self.c_mgr:
result = constraint.result
title = str(constraint)
data_dict = OrderedDict()
for pivot in pivot_vals:
if pivot in result:
if pivot == AttrConf.PIVOT_VAL:
key = ",".join(self._attr["column"])
else:
key = "{0}: {1}".format(self._attr["pivot"], self._attr["map_label"].get(pivot, pivot))
data_dict[key] = result[pivot]
if len(data_dict) > 1:
data_frame = self._fix_indexes(data_dict)
else:
data_frame = pd.DataFrame(data_dict)
self._layout.add_plot(plot_index, data_frame, title)
plot_index += 1
self._layout.finish()
def _fix_indexes(self, data_dict):
"""
In case of multiple traces with different indexes (i.e. x-axis values),
create new ones with same indexes
"""
# 1) Check if we are processing multiple traces
if len(data_dict) <= 1:
raise ValueError("Cannot fix indexes for single trace. "\
"Expecting multiple traces!")
# 2) Merge the data frames to obtain common indexes
df_columns = list(data_dict.keys())
dedup_data = [handle_duplicate_index(s) for s in data_dict.values()]
ret = pd.Series(dedup_data, index=df_columns)
merged_df = pd.concat(ret.get_values(), axis=1)
merged_df.columns = df_columns
# 3) Fill NaN values depending on drawstyle
if self._attr["drawstyle"] == "steps-post":
merged_df = merged_df.ffill()
elif self._attr["drawstyle"] == "steps-pre":
merged_df = merged_df.bfill()
elif self._attr["drawstyle"] == "steps-mid":
merged_df = merged_df.ffill()
else:
# default
merged_df = merged_df.interpolate()
return merged_df
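# Example (hedged sketch, not part of the original module): typical use from
# an IPython notebook, assuming a parsed trace exposing a cpu_frequency event:
#
#     import trappy
#     trace = trappy.FTrace("trace.dat")
#     plot = trappy.ILinePlot(trace, signals=["cpu_frequency:frequency"],
#                             pivot="cpu")
#     plot.view()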
|
joelagnel/trappy
|
trappy/plotter/ILinePlot.py
|
Python
|
apache-2.0
| 11,149
|
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, PasswordField
from wtforms.validators import DataRequired
class LoginForm(Form):
email = StringField('email', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
remember_me = BooleanField('remember_me', default=False)
|
russorat/savage-leads
|
settings/models/loginForm.py
|
Python
|
apache-2.0
| 353
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.eager.python import network
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.layers import core
# pylint: disable=not-callable
class MyNetwork(network.Network):
def __init__(self):
super(MyNetwork, self).__init__(name="abcd")
self.l1 = self.add_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.l1(x)
class NetworkTest(test.TestCase):
def testTrainableAttribute(self):
net = network.Network()
self.assertTrue(net.trainable)
with self.assertRaises(AttributeError):
net.trainable = False
self.assertTrue(net.trainable)
def testNetworkCall(self):
net = MyNetwork()
net(constant_op.constant([[2.0]])) # Force variables to be created.
self.assertEqual(1, len(net.trainable_variables))
net.trainable_variables[0].assign([[17.0]])
# TODO(josh11b): Support passing Python values to networks.
result = net(constant_op.constant([[2.0]]))
self.assertEqual(34.0, result.numpy())
def testNetworkAsAGraph(self):
self.skipTest("TODO(ashankar,josh11b): FIX THIS")
# Verify that we're using ResourceVariables
def testNetworkVariablesDoNotInterfere(self):
self.skipTest("TODO: FIX THIS")
net1 = MyNetwork()
net2 = MyNetwork()
one = constant_op.constant([[1.]])
print(type(net1(one)))
net2(one)
net1.trainable_weights[0].assign(constant_op.constant([[1.]]))
net2.trainable_weights[0].assign(constant_op.constant([[2.]]))
print("NET1")
print(net1.name)
print(net1.variables)
print(net1(one))
print("NET2")
print(net2.name)
print(net2.variables)
print(net2(one))
class SequentialTest(test.TestCase):
def testTwoLayers(self):
# Create a sequential network with one layer.
net = network.Sequential([core.Dense(1, use_bias=False)])
# Set that layer's weights so it multiplies by 3
l1 = net.get_layer(index=0)
net(constant_op.constant([[2.0]])) # Create l1's variables
self.assertEqual(1, len(l1.trainable_variables))
l1.trainable_variables[0].assign([[3.0]])
self.assertEqual(21.0, net(constant_op.constant([[7.0]])).numpy())
# Add a second layer to the network.
l2 = core.Dense(1, use_bias=False)
net.add_layer(l2)
# Set the second layer's weights so it multiplies by 11
net(constant_op.constant([[2.0]])) # Create l2's variables
self.assertEqual(1, len(l2.trainable_variables))
l2.trainable_variables[0].assign([[11.0]])
self.assertEqual(231.0, net(constant_op.constant([[7.0]])).numpy())
if __name__ == "__main__":
test.main()
|
mdrumond/tensorflow
|
tensorflow/contrib/eager/python/network_test.py
|
Python
|
apache-2.0
| 3,454
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class BatchCounterCallback(callbacks.Callback):
def __init__(self):
self.batch_begin_count = 0
self.batch_end_count = 0
def on_batch_begin(self, *args, **kwargs):
self.batch_begin_count += 1
def on_batch_end(self, *args, **kwargs):
self.batch_end_count += 1
class TestTrainingWithDataset(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_calling_model_on_same_dataset(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_dataset(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat() # Infinite dataset.
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
# Test with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
# Test with validation split
with self.assertRaises(ValueError):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, r'`sample_weight` argument is not supported .+dataset'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
with self.assertRaisesRegexp(
ValueError, '(you should not specify a target)|'
'(`y` argument is not supported when using dataset as input.)'):
model.fit(dataset, dataset,
epochs=1, steps_per_epoch=2, verbose=0)
# With an infinite dataset, `steps_per_epoch`/`steps` argument is required.
with self.assertRaises(ValueError):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaises(ValueError):
model.evaluate(dataset, verbose=0)
with self.assertRaises(ValueError):
model.predict(dataset, verbose=0)
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_multi_input_output_dataset(self):
input_a = keras.layers.Input(shape=(3,), name='input_1')
input_b = keras.layers.Input(shape=(3,), name='input_2')
dense = keras.layers.Dense(4, name='dense')
dropout = keras.layers.Dropout(0.5, name='dropout')
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
input_a_np = np.random.random((10, 3)).astype(dtype=np.float32)
input_b_np = np.random.random((10, 3)).astype(dtype=np.float32)
output_d_np = np.random.random((10, 4)).astype(dtype=np.float32)
output_e_np = np.random.random((10, 4)).astype(dtype=np.float32)
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset_tuple, steps=2, verbose=1)
# Test with dict
input_dict = {'input_1': input_a_np, 'input_2': input_b_np}
if testing_utils.get_model_type() == 'subclass':
output_dict = {'output_1': output_d_np, 'output_2': output_e_np}
else:
output_dict = {'dense': output_d_np, 'dropout': output_e_np}
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
input_dict, output_dict))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset_dict, steps=2, verbose=1)
predict_dataset_dict = dataset_ops.Dataset.from_tensor_slices(
input_dict)
predict_dataset_dict = predict_dataset_dict.repeat(100)
predict_dataset_dict = predict_dataset_dict.batch(10)
model.predict(predict_dataset_dict, steps=1)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sample_weights(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sample_weights_correctness(self):
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(
1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(x)
model = keras.Model(x, y)
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights)).batch(2)
result = model.evaluate(ds, verbose=1)
    # The per sample loss is multiplied by the corresponding sample weight. The
# average of these weighted losses is the return value of the `evaluate`
# call. For example, in the test above the average weighted loss is
# calculated in the following manner:
# ((2-0)^2) * 0.25 + ((4-1)^2) * 0.5 + ((6-2)^2 * 0.75) + ((8-3)^2 * 1)
# equals 42.5 / 4 = 10.625
self.assertEqual(result, 10.625)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sparse_labels(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
model.compile(
optimizer,
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=10, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
@keras_parameterized.run_all_keras_modes
def test_dataset_fit_correctness(self):
class SumLayer(keras.layers.Layer):
def build(self, _):
self.w = self.add_weight('w', ())
def call(self, inputs):
return keras.backend.sum(inputs, axis=1, keepdims=True) + self.w * 0
model = keras.Sequential([SumLayer(input_shape=(2,))])
model.compile(
'rmsprop',
loss='mae',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((40, 2), dtype=np.float32)
inputs[10:20, :] = 2
inputs[20:30, :] = 1
inputs[30:, :] = 4
targets = np.zeros((40, 1), dtype=np.float32)
# Test correctness with `steps_per_epoch`.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
history = model.fit(train_dataset,
epochs=2, steps_per_epoch=2, verbose=1,
validation_data=val_dataset, validation_steps=2)
self.assertAllClose(history.history['loss'],
[inputs[:20].sum() / 20, inputs[20:].sum() / 20])
# The validation dataset will be reset at the end of each validation run.
self.assertAllClose(history.history['val_loss'],
[inputs[:20].sum() / 20, inputs[:20].sum() / 20])
# Test correctness with dataset reset.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
history = model.fit(train_dataset,
epochs=2, verbose=1, validation_data=val_dataset)
self.assertAllClose(
history.history['loss'],
[inputs.sum() / 40, inputs.sum() / 40])
self.assertAllClose(
history.history['val_loss'],
[inputs.sum() / 40, inputs.sum() / 40])
def test_dataset_input_shape_validation(self):
with ops.get_default_graph().as_default(), self.cached_session():
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile(optimizer='rmsprop', loss='mse')
# User forgets to batch the dataset
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(
ValueError,
r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)'
):
model.train_on_batch(dataset)
# Wrong input shape
inputs = np.zeros((10, 5))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
r'expected (.*?) to have shape \(3,\)'):
model.train_on_batch(dataset)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_known_cardinality_no_steps_arg(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(10)
batch_counter = BatchCounterCallback()
history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_unknown_cardinality_no_steps_arg(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(self):
class CaptureStdout(object):
def __enter__(self):
self._stdout = sys.stdout
string_io = six.StringIO()
sys.stdout = string_io
self._stringio = string_io
return self
def __exit__(self, *args):
self.output = self._stringio.getvalue()
sys.stdout = self._stdout
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
with CaptureStdout() as capture:
history = model.fit(
dataset,
epochs=2,
callbacks=[batch_counter],
validation_data=dataset.take(3))
lines = capture.output.splitlines()
self.assertIn('10/10', lines[-1])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_begin_count, 21)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_unknown_cardinality_out_of_data(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
with test.mock.patch.object(logging, 'warning') as mock_log:
      # steps_per_epoch (200) is greater than the number of batches the
      # dataset can supply (10). As this is unexpected, training will stop
      # and not make it to the second epoch.
history = model.fit(
dataset,
epochs=2,
verbose=1,
callbacks=[batch_counter],
steps_per_epoch=200)
self.assertIn(
'ran out of data; interrupting training.', str(mock_log.call_args))
self.assertIn(
'can generate at least '
'`steps_per_epoch * epochs` batches (in this case, 400 batches). '
'You may need to use the repeat() function when '
'building your dataset.', str(mock_log.call_args))
self.assertLen(history.history['loss'], 1)
self.assertEqual(batch_counter.batch_end_count, 10)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_all_keras_modes
def test_with_external_loss(self):
inp = keras.Input(shape=(4,), name='inp1')
out = keras.layers.Dense(2)(inp)
model = keras.Model(inp, out)
model.add_loss(math_ops.reduce_mean(out))
model.compile('rmsprop')
x = np.ones((10, 4))
# dataset contains only features, no labels.
dataset = dataset_ops.Dataset.from_tensor_slices(x).repeat(10).batch(10)
model.fit(dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_train_eval_with_steps(self):
# See b/142880049 for more details.
inp = keras.Input(shape=(4,), name='inp1')
out = keras.layers.Dense(2)(inp)
model = keras.Model(inp, out)
model.compile(
'rmsprop', loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 4), dtype=np.float32)
targets = np.random.randint(0, 2, size=100, dtype=np.int32)
training_ds = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).repeat().batch(10)
# Create eval dataset with generator, so that dataset won't contain the
# overall size metadata. Without eval_steps, we expect to run through all
# the data in this dataset every epoch.
def gen():
for _ in range(100):
yield (np.zeros(4, dtype=np.float32),
np.random.randint(0, 2, size=1, dtype=np.int32))
eval_ds = dataset_ops.Dataset.from_generator(
generator=gen,
output_types=('float64', 'int32'),
output_shapes=([4], [1])).batch(100)
batch_counter = BatchCounterCallback()
model.fit(
training_ds,
steps_per_epoch=10,
epochs=10,
validation_data=eval_ds,
callbacks=[batch_counter]
)
    # Expect 10 batches from training per epoch.
self.assertEqual(batch_counter.batch_end_count, 100)
class TestMetricsWithDatasets(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_metrics_correctness_with_dataset(self):
layers = [
keras.layers.Dense(8, activation='relu', input_dim=4,
kernel_initializer='ones'),
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(layers, (4,))
model.compile(
loss='binary_crossentropy',
metrics=['accuracy', metrics_module.BinaryAccuracy()],
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
np.random.seed(123)
x = np.random.randint(10, size=(100, 4)).astype(np.float32)
y = np.random.randint(2, size=(100, 1)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(10)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(np.around(outs[1], decimals=1), 0.5)
self.assertEqual(np.around(outs[2], decimals=1), 0.5)
y = np.zeros((100, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
if __name__ == '__main__':
test.main()
|
gunan/tensorflow
|
tensorflow/python/keras/engine/training_dataset_test.py
|
Python
|
apache-2.0
| 21,265
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
"""
Database Module
===============
This is where all the magic happens.
.. versionadded:: 0.2.0
SQLite3 Database:
To hold the information about all the data created
:class:`~oyProjectManager.models.project.Project`\ s,
:class:`~oyProjectManager.models.sequence.Sequence`\ s,
:class:`~oyProjectManager.models.shot.Shot`\ s,
:class:`~oyProjectManager.models.asset.Asset`\ s and
:class:`~oyProjectManager.models.version.VersionType`\ s
, there is a ".metadata.db" file in the repository root. This SQLite3
database has all the information about everything.
With this new extension it is much faster to query any data needed.
Querying data is very simple and fun. To get any kind of data from the
database, just call the ``db.setup()`` and then use ``db.query`` to get the
data.
For a simple example, lets get all the shots for a Sequence called
"TEST_SEQ" in the "TEST_PROJECT"::
from oyProjectManager import db
from oyProjectManager import Project, Sequence, Shot
# setup the database session
db.setup()
all_shots = Shot.query().join(Sequence).\
        filter(Sequence.project.name=="TEST_PROJECT").\
filter(Shot.sequence.name=="TEST_SEQ").all()
that's it.
"""
import os
import logging
import sqlalchemy
import sqlalchemy.orm  # sessionmaker is accessed through sqlalchemy.orm below
import oyProjectManager
from oyProjectManager.db.declarative import Base
# SQLAlchemy database engine
engine = None
# SQLAlchemy session manager
session = None
query = None
# SQLAlchemy metadata
metadata = None
database_url = None
# create a logger
logger = logging.getLogger(__name__)
#logger.setLevel(logging.WARNING)
logger.setLevel(logging.DEBUG)
def setup(database_url_in=None):
"""Utility function that helps to connect the system to the given database.
Returns the created session
:param database_url_in: The database address, default is None. If the
database_url is skipped or given as None, the default database url
from the :mod:`oyProjectManager.config` will be used. This is good,
just call ``db.setup()`` and then use ``db.session`` and ``db.query``
to get the data.
:returns: sqlalchemy.orm.session
"""
global engine
global session
global query
global metadata
global database_url
# create engine
# TODO: create tests for this
if database_url_in is None:
logger.debug("using the default database_url from the config file")
# use the default database
conf = oyProjectManager.conf
database_url_in = conf.database_url
# expand user and env variables if any
    # TODO: because the dialect part and the address part are now coming
    # from one source, it is not possible to expand any variables in the path,
# try to use SQLAlchemy to separate the dialect and the address part and
# expand any data and then merge it again
#database_url_in = os.path.expanduser(
# os.path.expandvars(
# os.path.expandvars(
# database_url_in
# )
# )
#)
while "$" in database_url_in or "~" in database_url_in:
database_url_in = os.path.expanduser(
os.path.expandvars(
database_url_in
)
)
database_url = database_url_in
logger.debug("setting up database in %s" % database_url)
engine = sqlalchemy.create_engine(database_url, echo=False)
# create the tables
metadata = Base.metadata
metadata.create_all(engine)
# create the Session class
Session = sqlalchemy.orm.sessionmaker(bind=engine)
# create and save session object to session
session = Session()
query = session.query
# initialize the db
__init_db__()
# TODO: create a test to check if the returned session is session
return session
def __init_db__():
"""initializes the just setup database
It adds:
- Users
- VersionTypes
to the database.
"""
logger.debug("db is newly created, initializing the db")
global query
global session
# get the users from the config
from oyProjectManager import conf
# ------------------------------------------------------
# create the users
from oyProjectManager.models.auth import User
# get all users from db
users_from_db = query(User).all()
for user_data in conf.users_data:
name = user_data.get("name")
initials = user_data.get("initials")
email = user_data.get("email")
user_from_config = User(name, initials, email)
if user_from_config not in users_from_db:
session.add(user_from_config)
# ------------------------------------------------------
# add the VersionTypes
from oyProjectManager.models.version import VersionType
version_types_from_db = query(VersionType).all()
for version_type in conf.version_types:
version_type_from_conf = VersionType(**version_type)
if version_type_from_conf not in version_types_from_db:
session.add(version_type_from_conf)
session.commit()
logger.debug("finished initialization of the db")
|
dshlai/oyprojectmanager
|
oyProjectManager/db/__init__.py
|
Python
|
bsd-2-clause
| 5,475
|
import os, sys
###########
def verbose_print(msg):
print msg
###########
def linetrim(s):
return s.replace('\r', '').replace('\n','')
###
def sublast(s, s2):
return s[s.find(s2)+len(s2):]
def parse_package(fpath):
#p = fpath[:fpath.rfind('/')]
p = fpath
package = 'konoha'
if p.find('/konoha/') != -1:
return 'konoha'
elif p.find('/class/') != -1:
package = sublast(p, '/class/')
elif p.find('/package/') != -1:
package = '+' + sublast(p, '/package/')
elif p.find('/api/') != -1:
package = sublast(p, '/api/')
elif p.find('/driver/') != -1:
package = '#' + sublast(p, '/driver/')
if package.find('_.') > 0: return 'konoha'
if package.find('/') > 0:
return package.split('/')[0]
return package
# p = fpath.split('/')
# if p[-1].find('.') == -1: return p[-1]
# return p[-2]
###
def fpath_shortname(fpath):
p = fpath.split('/')
return p[-1].replace('.c', '')
###
def safedict(d, key, defv):
if d.has_key(key): return d[key]
d[key] = defv
return defv
###
###
def list_topair(list):
t1 = list[0]
t2 = list[1]
return t1, t2, list[2:]
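# NOTE: parse_funcparams below relies on nz_cparam, which is not defined in
# this module; it is assumed to be provided elsewhere in the build scripts.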
def parse_funcparams(functype):
if not functype.endswith(')'):
debug_print('Invalid functype: %s' % functype)
t = functype.replace('(', ' ').replace(',', ' ').replace(')', '').split()
params = []
while len(t) > 1:
tt, tn, t = list_topair(t)
params.append(nz_cparam(tt, tn))
return params
###########
# ---------------------------------------------------------------------------
LINE = '''
/* ------------------------------------------------------------------------ */
'''
DLINE = '''
/* ------------------------------------------------------------------------ */
'''
# ---------------------------------------------------------------------------
def write_println(f, msg = ''):
f.write(msg+'\n')
def write_line(f):
f.write(LINE)
def write_dline(f):
f.write(DLINE)
def write_comment(f, msg):
f.write('/* %s */\n' % msg)
def write_chapter(f, msg):
f.write(DLINE)
write_comment(f, msg)
def write_section(f, msg):
f.write(LINE)
write_comment(f, msg)
def write_define(f, name, value='', n=40):
s = '#define %s ' % name
while(len(s) < n) : s+=' '
f.write(s)
f.write(value)
f.write('\n')
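# Example (hedged): write_define(f, 'K_VERSION', '"1.0"') writes a line of
# the form '#define K_VERSION    "1.0"', padding the name field out to the
# given column width n before appending the value.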
###
def write_ifndefine(f, name, value='', n=40):
f.write('#ifndef %s\n' % name)
write_define(f, name, value, n)
f.write('#endif\n')
###
def write_ifndef(f, name, value='', n=40):
f.write('#ifndef %s\n' % name)
write_define(f, name, value, n)
f.write('#endif\n')
def write_ifdef(f, n):
f.write('''
#ifdef %s''' % n.upper())
def write_else(f, n):
f.write('''
#else /*%s*/
''' % n.upper())
def write_endif(f, n):
f.write('''
#endif/*%s*/
''' % n.upper())
# ---------------------------------------------------------------------------
def write_BOM(f):
f.write("%c%c%c" % (0xef, 0xbb, 0xbf))
def write_license(f):
f.write('''/****************************************************************************
 * KONOHA2 COPYRIGHT, LICENSE NOTICE, AND DISCLAIMER
*
* Copyright (c) 2006-2012, Kimio Kuramitsu <kimio at ynu.ac.jp>
* (c) 2008- Konoha Team konohaken@googlegroups.com
* All rights reserved.
*
* You may choose one of the following two licenses when you use konoha.
* If you want to use the latter license, please contact us.
*
* (1) GNU General Public License 3.0 (with K_UNDER_GPL)
* (2) Konoha Non-Disclosure License 1.0
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
''')
def write_begin_c(f):
f.write('''
#ifdef __cplusplus
extern "C" {
#endif
''')
def write_end_c(f):
f.write('''
#ifdef __cplusplus
}
#endif
''')
# ---------------------------------------------------------------------------
def getdict(d, n, defv):
if d.has_key(n): return d[n]
return defv
def read_settings(fn):
KNH_DATA = {}
try:
f = open(fn)
exec(f)
f.close()
return KNH_DATA
except OSError, e:
print e
return KNH_DATA
# ---------------------------------------------------------------------------
def nz_fname(fname):
if fname.rfind('/') > 0: return fname[fname.rfind('/')+1:]
return fname
def open_h(fname, lists):
f = open(fname, 'w')
write_license(f)
	d = nz_fname(fname).replace('.', '_').upper()
f.write('''
#ifndef %s
#define %s
''' % (d, d))
for i in lists:
f.write('''
#include%s''' % i)
if len(lists) > 0: f.write('\n\n')
write_begin_c(f)
write_dline(f)
return f
def open_h2(fname, lists):
f = open(fname, 'w')
write_license(f)
	d = nz_fname(fname).replace('.', '_').upper()
f.write('''
#ifndef %s
#define %s
''' % (d, d))
for i in lists:
f.write('''
#include%s''' % i)
if len(lists) > 0: f.write('\n\n')
return f
# ---------------------------------------------------------------------------
def close_h(f, fname):
	d = nz_fname(fname).replace('.', '_').upper()
write_end_c(f)
write_dline(f)
f.write('''
#endif/*%s*/
''' % d)
f.close()
# ---------------------------------------------------------------------------
def open_c(fname, lists, bom = None):
f = open(fname, 'w')
if bom == 'BOM': write_BOM(f)
write_license(f)
for i in lists:
f.write('''
#include%s''' % i)
if len(lists) > 0: f.write('\n\n')
write_begin_c(f)
write_dline(f)
return f
def close_c(f, fname):
write_end_c(f)
f.close()
def get_serial_number():
f = open('SERIAL_NUMBER')
n = int(f.readline())
f.close()
n += 1
f = open('SERIAL_NUMBER', 'w')
f.write('%d\n' % n)
f.close()
return n
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
def parse_options(option):
d = {}
if option is None: return d
for t in option.split():
if t.find('(') > 0:
t = t.replace('(', ' ').replace(')', '')
t = t.split()
d[t[0]] = t[1]
else:
d[t] = 1
return d
# ---------------------------------------------------------------------------
def check_ifdef(d):
ifdef = ''
endif = ''
if d.has_key('@ifdef'):
ifdef = '#ifdef KNH_IMPORT_%s_\n' % d['@ifdef']
endif = '#endif/*KNH_IMPORT_%s_*/\n' %d['@ifdef']
return ifdef, endif
# ---------------------------------------------------------------------------
def alias_lname(cname):
if cname.find('_') > 0:
return cname.split('_')[1]
return cname
def STRUCT_cname(cname):
return 'STRUCT_%s' % cname
def STRUCT_sname(cname):
return 'STRUCT_%s' % cname
def SAFE_cname(t) :
t = t.replace('..', '')
t = t.replace('!', '')
t = t.replace('[]', '')
t = t.replace('::', '__')
t = t.replace(':', '__')
return t
def CLASS_cname(cname) :
prefix = ''
if cname.endswith('[]'): prefix = 'A'
if cname.endswith('..'): prefix = 'I'
return '%sCLASS_%s' % (prefix, SAFE_cname(cname))
def T_cname(t) :
prefix = ''
if t.endswith("[]!"): prefix = 'NNA'
elif t.endswith("!") : prefix = 'NN'
if t.endswith('[]'): prefix = 'A'
if t.endswith('..'): prefix = 'NNI'
return '%sT_%s' % (prefix, SAFE_cname(t))
def DEBUG_cname(cname):
return 'DEBUG_%s' % cname
def FN_name(fn):
return 'FN_%s' % fn
def SAFE_mname(mname):
return mname.replace('::', '__').replace(':', '__').replace('%', '_')
def MN_mname(mname):
return 'MN_%s' % SAFE_mname(mname)
# ---------------------------------------------------------------------------
DEBUG = None
def debug_print(msg):
if not DEBUG: print msg
def nz_dir(dir):
if dir.endswith('/'): return dir[:len(dir)-1]
return dir
#------------------------------------------------------------------------------------
FUNCMAP = {}
def FUNCMAP_found(funcname):
FUNCMAP[funcname] = funcname
def FUNCMAP_exists(funcname):
return FUNCMAP.has_key(funcname)
|
shidasan/konoha2
|
src/vm/pygenlib2.py
|
Python
|
bsd-2-clause
| 8,371
|
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = ['Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval',
'Fisher_Jenks', 'Fisher_Jenks_Sampled', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers']
from pysal.common import *
K = 5 # default number of classes in any map scheme with this as an argument
def quantile(y, k=4):
"""
Calculates the quantiles for an array
Parameters
----------
y : array
(n,1), values to classify
k : int
number of quantiles
Returns
-------
implicit : array
(n,1), quantile values
Examples
--------
>>> x = np.arange(1000)
>>> quantile(x)
array([ 249.75, 499.5 , 749.25, 999. ])
>>> quantile(x, k = 3)
array([ 333., 666., 999.])
>>>
    Note that if there are enough ties that the quantile values repeat, we
    collapse to pseudo quantiles, in which case the number of classes will be
    less than k
>>> x = [1.0] * 100
>>> x.extend([3.0] * 40)
>>> len(x)
140
>>> y = np.array(x)
>>> quantile(y)
array([ 1., 3.])
"""
w = 100. / k
p = np.arange(w, 100 + w, w)
if p[-1] > 100.0:
p[-1] = 100.0
q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
return np.unique(q)
def binC(y, bins):
"""
Bin categorical/qualitative data
Parameters
----------
y : array
(n,q), categorical values
bins : array
(k,1), unique values associated with each bin
    Returns
    -------
b : array
(n,q), bin membership, values between 0 and k-1
Examples
--------
>>> np.random.seed(1)
>>> x = np.random.randint(2, 8, (10, 3))
>>> bins = range(2, 8)
>>> x
array([[7, 5, 6],
[2, 3, 5],
[7, 2, 2],
[3, 6, 7],
[6, 3, 4],
[6, 7, 4],
[6, 5, 6],
[4, 6, 7],
[4, 6, 3],
[3, 2, 7]])
>>> y = binC(x, bins)
>>> y
array([[5, 3, 4],
[0, 1, 3],
[5, 0, 0],
[1, 4, 5],
[4, 1, 2],
[4, 5, 2],
[4, 3, 4],
[2, 4, 5],
[2, 4, 1],
[1, 0, 5]])
>>>
"""
    if np.ndim(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
for i, bin in enumerate(bins):
b[np.nonzero(y == bin)] = i
# check for non-binned items and print a warning if needed
vals = set(y.flatten())
for val in vals:
if val not in bins:
print 'warning: value not in bin: ', val
print 'bins: ', bins
return b
def bin(y, bins):
"""
bin interval/ratio data
Parameters
----------
y : array
(n,q), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
b : array
(n,q), values of values between 0 and k-1
Examples
--------
>>> np.random.seed(1)
>>> x = np.random.randint(2, 20, (10, 3))
>>> bins = [10, 15, 20]
>>> b = bin(x, bins)
>>> x
array([[ 7, 13, 14],
[10, 11, 13],
[ 7, 17, 2],
[18, 3, 14],
[ 9, 15, 8],
[ 7, 13, 12],
[16, 6, 11],
[19, 2, 15],
[11, 11, 9],
[ 3, 2, 19]])
>>> b
array([[0, 1, 1],
[0, 1, 1],
[0, 2, 0],
[2, 0, 1],
[0, 1, 0],
[0, 1, 1],
[2, 0, 1],
[2, 0, 1],
[1, 1, 0],
[0, 0, 2]])
>>>
"""
    if np.ndim(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
i = len(bins)
if type(bins) != list:
bins = bins.tolist()
binsc = copy.copy(bins)
while binsc:
i -= 1
c = binsc.pop(-1)
b[np.nonzero(y <= c)] = i
return b
def bin1d(x, bins):
"""
place values of a 1-d array into bins and determine counts of values in
each bin
Parameters
----------
x : array
(n, 1), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
binIds : array
1-d array of integer bin Ids
counts: int
number of elements of x falling in each bin
Examples
--------
>>> x = np.arange(100, dtype = 'float')
>>> bins = [25, 74, 100]
>>> binIds, counts = bin1d(x, bins)
>>> binIds
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2])
>>> counts
array([26, 49, 25])
"""
left = [-sys.maxint]
left.extend(bins[0:-1])
right = bins
cuts = zip(left, right)
k = len(bins)
binIds = np.zeros(x.shape, dtype='int')
while cuts:
k -= 1
l, r = cuts.pop(-1)
binIds += (x > l) * (x <= r) * k
counts = np.bincount(binIds)
return (binIds, counts)
def load_example():
"""
Helper function for doc tests
"""
import pysal
np.random.seed(10)
dat = pysal.open(pysal.examples.get_path('calempdensity.csv'))
cal = np.array([record[-1] for record in dat])
return cal
def natural_breaks(values, k=5, itmax=100):
"""
    Natural breaks helper: k-medians style iterative clustering of values
    into k classes, stopping when assignments stabilize or itmax is reached
"""
values = np.array(values)
n = len(values)
uv = np.unique(values)
uvk = len(uv)
if uvk < k:
print 'Warning: Not enough unique values in array to form k classes'
print "Warning: setting k to %d" % uvk
k = uvk
sids = np.random.permutation(range(len(uv)))[0:k]
seeds = uv[sids]
seeds.sort()
diffs = abs(np.matrix([values - seed for seed in seeds]))
c0 = diffs.argmin(axis=0)
c0 = np.array(c0)[0]
solving = True
solved = False
rk = range(k)
it = 0
while solving:
# get centroids of clusters
seeds = [np.median(values[c0 == c]) for c in rk]
seeds.sort()
# for each value find closest centroid
diffs = abs(np.matrix([values - seed for seed in seeds]))
# assign value to that centroid
c1 = diffs.argmin(axis=0)
c1 = np.array(c1)[0]
#compare new classids to previous
d = abs(c1 - c0)
if d.sum() == 0:
solving = False
solved = True
else:
c0 = c1
it += 1
if it == itmax:
solving = False
class_ids = c1
cuts = [max(values[c1 == c]) for c in rk]
return sids, seeds, diffs, class_ids, solved, it, cuts
def _fisher_jenks_means(values, classes=5, sort=True):
"""
Jenks Optimal (Natural Breaks) algorithm implemented in Python.
The original Python code comes from here:
http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
and is based on a JAVA and Fortran code available here:
https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html
Returns class breaks such that classes are internally homogeneous while
assuring heterogeneity among classes.
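A minimal usage sketch (illustrative, not a doctest)::

    breaks = _fisher_jenks_means(load_example(), classes=5)
    # breaks[0] is min(values); breaks[1:] are the k class upper bounds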
"""
if sort:
values.sort()
mat1 = []
for i in range(0, len(values) + 1):
temp = []
for j in range(0, classes + 1):
temp.append(0)
mat1.append(temp)
mat2 = []
for i in range(0, len(values) + 1):
temp = []
for j in range(0, classes + 1):
temp.append(0)
mat2.append(temp)
for i in range(1, classes + 1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2, len(values) + 1):
mat2[j][i] = float('inf')
v = 0.0
for l in range(2, len(values) + 1):
s1 = 0.0
s2 = 0.0
w = 0.0
for m in range(1, l + 1):
i3 = l - m + 1
val = float(values[i3 - 1])
s2 += val * val
s1 += val
w += 1
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2, classes + 1):
if mat2[l][j] >= (v + mat2[i4][j - 1]):
mat1[l][j] = i3
mat2[l][j] = v + mat2[i4][j - 1]
mat1[l][1] = 1
mat2[l][1] = v
k = len(values)
kclass = []
for i in range(0, classes + 1):
kclass.append(0)
kclass[classes] = float(values[len(values) - 1])
kclass[0] = float(values[0])
countNum = classes
while countNum >= 2:
pivot = mat1[k][countNum]
id = int(pivot - 2)
kclass[countNum - 1] = values[id]
k = int(pivot - 1)
countNum -= 1
return kclass
class Map_Classifier:
"""
Abstract class for all map classifications
For an array :math:`y` of :math:`n` values, a map classifier places each value
:math:`y_i` into one of :math:`k` mutually exclusive and exhaustive classes.
Each classifer defines the classes based on different criteria, but in all
cases the following hold for the classifiers in PySAL:
.. math::
C_j^l < y_i \le C_j^u \quad \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound :math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`~pysal.esda.mapclassify.Box_Plot`
* :class:`~pysal.esda.mapclassify.Equal_Interval`
* :class:`~pysal.esda.mapclassify.Fisher_Jenks`
* :class:`~pysal.esda.mapclassify.Fisher_Jenks_Sampled`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall_Forced`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall_Sampled`
* :class:`~pysal.esda.mapclassify.Max_P_Classifier`
* :class:`~pysal.esda.mapclassify.Maximum_Breaks`
* :class:`~pysal.esda.mapclassify.Natural_Breaks`
* :class:`~pysal.esda.mapclassify.Quantiles`
* :class:`~pysal.esda.mapclassify.Percentiles`
* :class:`~pysal.esda.mapclassify.Std_Mean`
* :class:`~pysal.esda.mapclassify.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that can be used to evaluate the properties of a specific classifier for different parameter values, or for automatic selection of a classifier and number of classes.
* :func:`~pysal.esda.mapclassify.gadf`
* :class:`~pysal.esda.mapclassify.K_classifiers`
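A typical usage pattern (illustrative, not a doctest; any concrete
classifier above works the same way)::

    cal = load_example()
    q5 = Quantiles(cal, k=5)
    q5.yb      # (n,) array of class ids, one per observation
    q5.bins    # (k,) array of class upper bounds
    q5.counts  # (k,) array of observations per class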
References
----------
Slocum, T.A., R.B. McMaster, F.C. Kessler and H.H. Howard (2009) *Thematic Cartography and Geovisualization*. Pearson Prentice Hall, Upper Saddle River.
"""
def __init__(self, y):
self.name = 'Map Classifier'
if hasattr(y, 'values'):
y = y.values # fix for pandas
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
k1 = self.k - 1
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
rows = []
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
class Equal_Interval(Map_Classifier):
"""
Equal Interval Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
each value is the id of the class the observation belongs to
yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> ei = Equal_Interval(cal, k = 5)
>>> ei.k
5
>>> ei.counts
array([57, 0, 0, 0, 1])
>>> ei.bins
array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])
>>>
Notes
-----
Intervals defined to have equal width:
.. math::
bins_j = min(y)+w*(j+1)
with :math:`w=\\frac{max(y)-min(y)}{k}`
"""
def __init__(self, y, k=K):
"""
see class docstring
"""
self.k = k
Map_Classifier.__init__(self, y)
self.name = 'Equal Interval'
def _set_bins(self):
y = self.y
k = self.k
max_y = max(y)
min_y = min(y)
rg = max_y - min_y
width = rg * 1. / k
cuts = np.arange(min_y + width, max_y + width, width)
if len(cuts) > self.k: # handle overshooting
cuts = cuts[0:k]
cuts[-1] = max_y
bins = cuts.copy()
self.bins = bins
class Percentiles(Map_Classifier):
"""
Percentiles Map Classification
Parameters
----------
y : array
attribute to classify
pct : array
percentiles default=[1,10,50,90,99,100]
Attributes
----------
yb : array
bin ids for observations (numpy array n x 1)
bins : array
the upper bounds of each class (numpy array k x 1)
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> p = Percentiles(cal)
>>> p.bins
array([ 1.35700000e-01, 5.53000000e-01, 9.36500000e+00,
2.13914000e+02, 2.17994800e+03, 4.11145000e+03])
>>> p.counts
array([ 1, 5, 23, 23, 5, 1])
>>> p2 = Percentiles(cal, pct = [50, 100])
>>> p2.bins
array([ 9.365, 4111.45 ])
>>> p2.counts
array([29, 29])
>>> p2.k
2
"""
def __init__(self, y, pct=[1, 10, 50, 90, 99, 100]):
self.pct = pct
Map_Classifier.__init__(self, y)
self.name = 'Percentiles'
def _set_bins(self):
y = self.y
pct = self.pct
self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
self.k = len(self.bins)
class Box_Plot(Map_Classifier):
"""
Box_Plot Map Classification
Parameters
----------
y : array
attribute to classify
hinge : float
multiplier for IQR
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
the upper bounds of each class (monotonic); length is 5 or 6
depending on whether there are high outliers (see Notes)
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
low_outlier_ids : array
indices of observations that are low outliers
high_outlier_ids : array
indices of observations that are high outliers
Notes
-----
The bins are set as follows::
bins[0] = q[0]-hinge*IQR
bins[1] = q[0]
bins[2] = q[1]
bins[3] = q[2]
bins[4] = q[2]+hinge*IQR
bins[5] = inf (see Notes)
where q is an array of the first three quartiles of y and
IQR=q[2]-q[0]
If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high outliers,
otherwise, there will be 6 classes and at least one high outlier.
Examples
--------
>>> cal = load_example()
>>> bp = Box_Plot(cal)
>>> bp.bins
array([ -5.28762500e+01, 2.56750000e+00, 9.36500000e+00,
3.95300000e+01, 9.49737500e+01, 4.11145000e+03])
>>> bp.counts
array([ 0, 15, 14, 14, 6, 9])
>>> bp.high_outlier_ids
array([ 0, 6, 18, 29, 33, 36, 37, 40, 42])
>>> cal[bp.high_outlier_ids]
array([ 329.92, 181.27, 370.5 , 722.85, 192.05, 110.74,
4111.45, 317.11, 264.93])
>>> bx = Box_Plot(np.arange(100))
>>> bx.bins
array([ -49.5 , 24.75, 49.5 , 74.25, 148.5 ])
"""
def __init__(self, y, hinge=1.5):
"""
Parameters
----------
y : array (n,1)
attribute to classify
hinge : float
multiple of inter-quartile range (default=1.5)
"""
self.hinge = hinge
Map_Classifier.__init__(self, y)
self.name = 'Box Plot'
def _set_bins(self):
y = self.y
pct = [25, 50, 75, 100]
bins = [stats.scoreatpercentile(y, p) for p in pct]
iqr = bins[-2] - bins[0]
self.iqr = iqr
pivot = self.hinge * iqr
left_fence = bins[0] - pivot
right_fence = bins[-2] + pivot
if right_fence < bins[-1]:
bins.insert(-1, right_fence)
else:
bins[-1] = right_fence
bins.insert(0, left_fence)
self.bins = np.array(bins)
self.k = len(pct)
def _classify(self):
Map_Classifier._classify(self)
self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
self.high_outlier_ids = np.nonzero(self.yb == 5)[0]
class Quantiles(Map_Classifier):
"""
Quantile Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
each value is the id of the class the observation belongs to
yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> q = Quantiles(cal, k = 5)
>>> q.bins
array([ 1.46400000e+00, 5.79800000e+00, 1.32780000e+01,
5.46160000e+01, 4.11145000e+03])
>>> q.counts
array([12, 11, 12, 11, 12])
>>>
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = 'Quantiles'
def _set_bins(self):
y = self.y
k = self.k
self.bins = quantile(y, k=k)
class Std_Mean(Map_Classifier):
"""
Standard Deviation and Mean Map Classification
Parameters
----------
y : array
(n,1), values to classify
multiples : array
the multiples of the standard deviation to add/subtract from
the sample mean to define the bins, default=[-2,-1,1,2]
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> st = Std_Mean(cal)
>>> st.k
5
>>> st.bins
array([ -967.36235382, -420.71712519, 672.57333208, 1219.21856072,
4111.45 ])
>>> st.counts
array([ 0, 0, 56, 1, 1])
>>>
>>> st3 = Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
>>> st3.bins
array([-1514.00758246, -694.03973951, 945.8959464 , 1765.86378936,
4111.45 ])
>>> st3.counts
array([ 0, 0, 57, 0, 1])
>>>
"""
def __init__(self, y, multiples=[-2, -1, 1, 2]):
self.multiples = multiples
Map_Classifier.__init__(self, y)
self.name = 'Std_Mean'
def _set_bins(self):
y = self.y
s = y.std(ddof=1)
m = y.mean()
cuts = [m + s * w for w in self.multiples]
y_max = y.max()
if cuts[-1] < y_max:
cuts.append(y_max)
self.bins = np.array(cuts)
self.k = len(cuts)
class Maximum_Breaks(Map_Classifier):
"""
Maximum Breaks Map Classification
Parameters
----------
y : array
(n, 1), values to classify
k : int
number of classes required
mindiff : float
The minimum difference between class breaks
Attributes
----------
yb : array
(n, 1), bin ids for observations
bins : array
(k, 1), the upper bounds of each class
k : int
the number of classes
counts : array
(k, 1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> mb = Maximum_Breaks(cal, k = 5)
>>> mb.k
5
>>> mb.bins
array([ 146.005, 228.49 , 546.675, 2417.15 , 4111.45 ])
>>> mb.counts
array([50, 2, 4, 1, 1])
>>>
"""
def __init__(self, y, k=5, mindiff=0):
self.k = k
self.mindiff = mindiff
Map_Classifier.__init__(self, y)
self.name = 'Maximum_Breaks'
def _set_bins(self):
xs = self.y.copy()
y = self.y.copy()
k = self.k
xs.sort()
min_diff = self.mindiff
d = xs[1:] - xs[:-1]
diffs = d[np.nonzero(d > min_diff)]
diffs = sp.unique(diffs)
k1 = k - 1
if len(diffs) > k1:
diffs = diffs[-k1:]
mp = []
self.cids = []
for diff in diffs:
ids = np.nonzero(d == diff)
for id in ids:
self.cids.append(id[0])
cp = ((xs[id] + xs[id + 1]) / 2.)
mp.append(cp[0])
mp.append(xs[-1])
mp.sort()
self.bins = np.array(mp)
class Natural_Breaks(Map_Classifier):
"""
Natural Breaks Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to generate, (default=100)
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import numpy as np
>>> np.random.seed(10)
>>> cal = load_example()
>>> nb = Natural_Breaks(cal, k = 5)
>>> nb.k
5
>>> nb.counts
array([14, 13, 14, 10, 7])
>>> nb.bins
array([ 1.81000000e+00, 7.60000000e+00, 2.98200000e+01,
1.81270000e+02, 4.11145000e+03])
>>> x = np.array([1] * 50)
>>> x[-1] = 20
>>> nb = Natural_Breaks(x, k = 5, initial = 0)
Warning: Not enough unique values in array to form k classes
Warning: setting k to 2
>>> nb.bins
array([ 1, 20])
>>> nb.counts
array([49, 1])
Notes
-----
There is a tradeoff here between speed and consistency of the
classification
If you want more speed, set initial to a smaller value (0
gives the best speed). If you want more consistent classes across
multiple runs of Natural_Breaks on the same data, set initial to a
higher value.
"""
def __init__(self, y, k=K, initial=100):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = 'Natural_Breaks'
def _set_bins(self):
x = self.y.copy()
k = self.k
res0 = natural_breaks(x, k)
fit = res0[2].sum()
for i in xrange(self.initial):
res = natural_breaks(x, k)
fit_i = res[2].sum()
if fit_i < fit:
res0 = res
self.bins = np.array(res0[-1])
self.k = len(self.bins)
self.iterations = res0[-2]
class Fisher_Jenks(Map_Classifier):
"""
Fisher Jenks optimal classifier - mean based
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> fj = Fisher_Jenks(cal)
>>> fj.adcm
799.24000000000001
>>> fj.bins
array([ 75.29, 192.05, 370.5 , 722.85, 4111.45])
>>> fj.counts
array([49, 3, 4, 1, 1])
>>>
"""
def __init__(self, y, k=K):
nu = len(np.unique(y))
if nu < k:
raise ValueError("Fewer unique values than specified classes.")
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Fisher_Jenks"
def _set_bins(self):
x = self.y.copy()
self.bins = np.array(_fisher_jenks_means(x, classes=self.k)[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
"""
Fisher Jenks optimal classifier - mean based using random sample
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
pct : float
The percentage of n that should form the sample
If pct is specified such that n*pct > 1000, then
pct = 1000./n, unless truncate is False
truncate : boolean
truncate pct in cases where pct * n > 1000., (Default True)
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
(Turned off due to timing being different across hardware)
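A usage sketch (illustrative, not a doctest; the sampled bins vary
with the random draw)::

    x = np.random.random(100000)
    fjs = Fisher_Jenks_Sampled(x, k=5)
    fjs.bins  # approximate Fisher-Jenks breaks fit on a ~1000 point sample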
"""
def __init__(self, y, k=K, pct=0.10, truncate=True):
self.k = k
n = y.size
if (pct * n > 1000) and truncate:
pct = 1000. / n
ids = np.random.random_integers(0, n - 1, int(n * pct))
yr = y[ids]
yr[-1] = max(y) # make sure we have the upper bound
yr[0] = min(y) # make sure we have the min
self.original_y = y
self.pct = pct
self.yr = yr
self.yr_n = yr.size
Map_Classifier.__init__(self, yr)
self.yb, self.counts = bin1d(y, self.bins)
self.name = "Fisher_Jenks_Sampled"
self.y = y
self._summary() # have to recalculate summary stats
def _set_bins(self):
fj = Fisher_Jenks(self.y, self.k)
self.bins = fj.bins
class Jenks_Caspall(Map_Classifier):
"""
Jenks Caspall Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> jc = Jenks_Caspall(cal, k = 5)
>>> jc.bins
array([ 1.81000000e+00, 7.60000000e+00, 2.98200000e+01,
1.81270000e+02, 4.11145000e+03])
>>> jc.counts
array([14, 13, 14, 10, 7])
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Jenks_Caspall"
def _set_bins(self):
x = self.y.copy()
k = self.k
# start with quantiles
q = quantile(x, k)
solving = True
xb, cnts = bin1d(x, q)
#class means
if x.ndim == 1:
x.shape = (x.size, 1)
n, k = x.shape
xm = [np.median(x[xb == i]) for i in np.unique(xb)]
xb0 = xb.copy()
q = xm
it = 0
rk = range(self.k)
while solving:
xb = np.zeros(xb0.shape, int)
d = abs(x - q)
xb = d.argmin(axis=1)
if (xb0 == xb).all():
solving = False
else:
xb0 = xb
it += 1
q = np.array([np.median(x[xb == i]) for i in rk])
cuts = np.array([max(x[xb == i]) for i in sp.unique(xb)])
cuts.shape = (len(cuts),)
self.bins = cuts
self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
"""
Jenks Caspall Map Classification using a random sample
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
pct : float
The percentage of n that should form the sample
If pct is specified such that n*pct > 1000, then pct = 1000./n
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> x = np.random.random(100000)
>>> jc = Jenks_Caspall(x)
>>> jcs = Jenks_Caspall_Sampled(x)
>>> jc.bins
array([ 0.19770952, 0.39695769, 0.59588617, 0.79716865, 0.99999425])
>>> jcs.bins
array([ 0.18877882, 0.39341638, 0.6028286 , 0.80070925, 0.99999425])
>>> jc.counts
array([19804, 20005, 19925, 20178, 20088])
>>> jcs.counts
array([18922, 20521, 20980, 19826, 19751])
>>>
# not for testing since we get different times on different hardware
# just included for documentation of likely speed gains
#>>> t1 = time.time(); jc = Jenks_Caspall(x); t2 = time.time()
#>>> t1s = time.time(); jcs = Jenks_Caspall_Sampled(x); t2s = time.time()
#>>> t2 - t1; t2s - t1s
#1.8292930126190186
#0.061631917953491211
Notes
-----
This is intended for large n problems. The logic is to apply
Jenks_Caspall to a random subset of the y space and then bin the
complete vector y on the bins obtained from the subset. This would
trade off some "accuracy" for a gain in speed.
"""
def __init__(self, y, k=K, pct=0.10):
self.k = k
n = y.size
if pct * n > 1000:
pct = 1000. / n
ids = np.random.random_integers(0, n - 1, int(n * pct))
yr = y[ids]
yr[0] = max(y) # make sure we have the upper bound
self.original_y = y
self.pct = pct
self.yr = yr
self.yr_n = yr.size
Map_Classifier.__init__(self, yr)
self.yb, self.counts = bin1d(y, self.bins)
self.name = "Jenks_Caspall_Sampled"
self.y = y
self._summary() # have to recalculate summary stats
def _set_bins(self):
jc = Jenks_Caspall(self.y, self.k)
self.bins = jc.bins
self.iterations = jc.iterations
class Jenks_Caspall_Forced(Map_Classifier):
"""
Jenks Caspall Map Classification with forced movements
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> jcf = Jenks_Caspall_Forced(cal, k = 5)
>>> jcf.k
5
>>> jcf.bins
array([[ 1.34000000e+00],
[ 5.90000000e+00],
[ 1.67000000e+01],
[ 5.06500000e+01],
[ 4.11145000e+03]])
>>> jcf.counts
array([12, 12, 13, 9, 12])
>>> jcf4 = Jenks_Caspall_Forced(cal, k = 4)
>>> jcf4.k
4
>>> jcf4.bins
array([[ 2.51000000e+00],
[ 8.70000000e+00],
[ 3.66800000e+01],
[ 4.11145000e+03]])
>>> jcf4.counts
array([15, 14, 14, 15])
>>>
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Jenks_Caspall_Forced"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
solving = True
xb, cnt = bin1d(x, q)
#class means
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
xm = [x[xb == i].mean() for i in np.unique(xb)]
xb0 = xb.copy()
q = xm
xbar = np.array([xm[xbi] for xbi in xb])
xbar.shape = (n, 1)
ss = x - xbar
ss *= ss
ss = sum(ss)
maxk = k - 1
down_moves = up_moves = 0
solving = True
it = 0
while solving:
# try upward moves first
moving_up = True
while moving_up:
class_ids = sp.unique(xb)
nk = [sum(xb == j) for j in class_ids]
candidates = nk[:-1]
i = 0
up_moves = 0
while candidates:
nki = candidates.pop(0)
if nki > 1:
ids = np.nonzero(xb == class_ids[i])
mover = max(ids[0])
tmp = xb.copy()
tmp[mover] = xb[mover] + 1
tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
txbar = np.array([tm[xbi] for xbi in tmp])
txbar.shape = (n, 1)
tss = x - txbar
tss *= tss
tss = sum(tss)
if tss < ss:
xb = tmp
ss = tss
candidates = []
up_moves += 1
i += 1
if not up_moves:
moving_up = False
moving_down = True
while moving_down:
class_ids = sp.unique(xb)
nk = [sum(xb == j) for j in class_ids]
candidates = nk[1:]
i = 1
down_moves = 0
while candidates:
nki = candidates.pop(0)
if nki > 1:
ids = np.nonzero(xb == class_ids[i])
mover = min(ids[0])
mover_class = xb[mover]
target_class = mover_class - 1
tmp = xb.copy()
tmp[mover] = target_class
tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
txbar = np.array([tm[xbi] for xbi in tmp])
txbar.shape = (n, 1)
tss = x - txbar
tss *= tss
tss = sum(tss)
if tss < ss:
xb = tmp
ss = tss
candidates = []
down_moves += 1
i += 1
if not down_moves:
moving_down = False
if not up_moves and not down_moves:
solving = False
it += 1
cuts = [max(x[xb == i]) for i in sp.unique(xb)]
self.bins = np.array(cuts)
self.iterations = it
class User_Defined(Map_Classifier):
"""
User Specified Binning
Parameters
----------
y : array
(n,1), values to classify
bins : array
(k,1), upper bounds of classes (have to be monotonically increasing)
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> bins = [20, max(cal)]
>>> bins
[20, 4111.4499999999998]
>>> ud = User_Defined(cal, bins)
>>> ud.bins
array([ 20. , 4111.45])
>>> ud.counts
array([37, 21])
>>> bins = [20, 30]
>>> ud = User_Defined(cal, bins)
>>> ud.bins
array([ 20. , 30. , 4111.45])
>>> ud.counts
array([37, 4, 17])
>>>
Notes
-----
If the upper bound of the user-supplied bins does not exceed max(y),
an additional bin is appended.
"""
def __init__(self, y, bins):
if bins[-1] < max(y):
bins.append(max(y))
self.k = len(bins)
self.bins = np.array(bins)
self.y = y
Map_Classifier.__init__(self, y)
self.name = 'User Defined'
def _set_bins(self):
pass
class Max_P_Classifier(Map_Classifier):
"""
Max_P Map Classification
Based on Max_p regionalization algorithm
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to use prior to swapping
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import pysal
>>> cal = pysal.esda.mapclassify.load_example()
>>> mp = pysal.Max_P_Classifier(cal)
>>> mp.bins
array([ 8.7 , 16.7 , 20.47, 66.26, 4111.45])
>>> mp.counts
array([29, 8, 1, 10, 10])
"""
def __init__(self, y, k=K, initial=1000):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = "Max_P"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
x.sort(axis=0)
# find best of initial solutions
solution = 0
best_tss = x.var() * x.shape[0]
tss_all = np.zeros((self.initial, 1))
while solution < self.initial:
remaining = range(n)
seeds = [np.nonzero(di == min(
di))[0][0] for di in [np.abs(x - qi) for qi in q]]
rseeds = np.random.permutation(range(k)).tolist()
tmp = [remaining.remove(seed) for seed in seeds]
self.classes = classes = []
tmp = [classes.append([seed]) for seed in seeds]
while rseeds:
seed_id = rseeds.pop()
current = classes[seed_id]
growing = True
while growing:
current = classes[seed_id]
low = current[0]
high = current[-1]
left = low - 1
right = high + 1
move_made = False
if left in remaining:
current.insert(0, left)
remaining.remove(left)
move_made = True
if right in remaining:
current.append(right)
remaining.remove(right)
move_made = True
if move_made:
classes[seed_id] = current
else:
growing = False
tss = _fit(self.y, classes)
tss_all[solution] = tss
if tss < best_tss:
best_solution = classes
best_it = solution
best_tss = tss
solution += 1
classes = best_solution
self.best_it = best_it
self.tss = best_tss
self.a2c = a2c = {}
self.tss_all = tss_all
for r, cl in enumerate(classes):
for a in cl:
a2c[a] = r
swapping = True
it = 0
while swapping:
rseeds = np.random.permutation(range(k)).tolist()
total_moves = 0
while rseeds:
id = rseeds.pop()
growing = True
total_moves = 0
while growing:
target = classes[id]
left = target[0] - 1
right = target[-1] + 1
n_moves = 0
if left in a2c:
left_class = classes[a2c[left]]
if len(left_class) > 1:
a = left_class[-1]
if self._swap(left_class, target, a):
target.insert(0, a)
left_class.remove(a)
a2c[a] = id
n_moves += 1
if right in a2c:
right_class = classes[a2c[right]]
if len(right_class) > 1:
a = right_class[0]
if self._swap(right_class, target, a):
target.append(a)
right_class.remove(a)
n_moves += 1
a2c[a] = id
if not n_moves:
growing = False
total_moves += n_moves
if not total_moves:
swapping = False
xs = self.y.copy()
xs.sort()
self.bins = np.array([xs[cl][-1] for cl in classes])
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css)
def _swap(self, class1, class2, a):
"""evaluate cost of moving a from class1 to class2"""
ss1 = self._ss(class1)
ss2 = self._ss(class2)
tss1 = ss1 + ss2
class1c = copy.copy(class1)
class2c = copy.copy(class2)
class1c.remove(a)
class2c.append(a)
ss1 = self._ss(class1c)
ss2 = self._ss(class2c)
tss2 = ss1 + ss2
if tss1 < tss2:
return False
else:
return True
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : list
(k,), each element is a list of the indices of the observations
belonging to that class
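A minimal sketch (illustrative, not a doctest)::

    y = np.array([1., 2., 10., 11.])
    _fit(y, [[0, 1], [2, 3]])  # 0.5 + 0.5 = 1.0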
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
"""
Evaluate the Goodness of Absolute Deviation Fit of a Classifier
Finds the minimum value of k for which gadf>pct
Parameters
----------
y : array
(n, 1) values to be classified
method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
maxk : int
maximum value of k to evaluate
pct : float
The percentage of GADF to exceed
Returns
-------
k : int
number of classes
cl : object
instance of the classifier at k
gadf : float
goodness of absolute deviation fit
Examples
--------
>>> cal = load_example()
>>> qgadf = gadf(cal)
>>> qgadf[0]
15
>>> qgadf[-1]
0.37402575909092828
Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to
0.2 we see quintiles as a result
>>> qgadf2 = gadf(cal, pct = 0.2)
>>> qgadf2[0]
5
>>> qgadf2[-1]
0.21710231966462412
>>>
Notes
-----
The GADF is defined as:
.. math::
GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}|
where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
the median for class :math:`c`.
See Also
--------
K_classifiers
"""
y = np.array(y)
adam = (np.abs(y - np.median(y))).sum()
for k in range(2, maxk + 1):
cl = kmethods[method](y, k)
gadf = 1 - cl.adcm / adam
if gadf > pct:
break
return (k, cl, gadf)
class K_classifiers:
"""
Evaluate all k-classifers and pick optimal based on k and GADF
Parameters
----------
y : array
(n,1), values to be classified
pct : float
The percentage of GADF to exceed
Attributes
----------
best : object
instance of the optimal Map_Classifier
results : dictionary
keys are classifier names, values are the Map_Classifier instances with the best pct for each classifer
Examples
--------
>>> cal = load_example()
>>> ks = K_classifiers(cal)
>>> ks.best.name
'Fisher_Jenks'
>>> ks.best.k
4
>>> ks.best.gadf
0.84810327199081048
>>>
Notes
-----
This can be used to suggest a classification scheme.
See Also
--------
gadf
"""
def __init__(self, y, pct=0.8):
results = {}
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        k0 = best[0]  # gadf returns (k, classifier, gadf)
        pct0 = best[-1]
keys = kmethods.keys()
keys.remove("Fisher_Jenks")
results["Fisher_Jenks"] = best
for method in keys:
results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
k1 = results[method][0]
pct1 = results[method][-1]
if (k1 < k0) or (k1 == k0 and pct0 < pct1):
best = results[method]
k0 = k1
pct0 = pct1
self.results = results
self.best = best[1]
def fj(x, k=5):
y = x.copy()
y.sort()
d = {}
initial = opt_part(y)
# d has key = number of groups
# value: list of ids, list of group tss, group size
    split_id = initial[0]
    tss = initial[1:]  # total tss, then left and right within-group tss
    sizes = [split_id, len(y) - split_id]  # left group has split_id members
    d[2] = [[split_id], tss, sizes]
return d
def opt_part(x):
"""
Find optimal bi-partition of x values
Parameters
-----------
x : array
(n,1), Array of attribute values
Returns
-------
opt_i : int
partition index
tss : float
total sum of squares
left_min : float
variance to the left of the break (including the break)
right_min : float
variance to the right of the break
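A minimal sketch (illustrative, not a doctest)::

    x = np.array([0., 0.1, 0.2, 10., 10.1])
    opt_i, tss, left, right = opt_part(x)
    # opt_i == 3: the best split puts the three small values on the left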
"""
n = len(x)
tss = np.inf
opt_i = -999
for i in xrange(1, n):
left = x[:i].var() * i
right = x[i:].var() * (n - i)
tss_i = left + right
if tss_i < tss:
opt_i = i
tss = tss_i
left_min = left
right_min = right
return (opt_i, tss, left_min, right_min)
|
spreg-git/pysal
|
pysal/esda/mapclassify.py
|
Python
|
bsd-3-clause
| 50,683
|
from tests.testing_framework.base_test_cases import BaseTestCase
from flexmock import flexmock
from hamcrest import *
from framework.plugin.plugin_params import PluginParams
import re
from hamcrest.library.text.stringmatches import matches_regexp
class PluginParamsTests(BaseTestCase):
def before(self):
self.core_mock = flexmock()
self.plugin_params = PluginParams(self.core_mock, {'Args': ['arg1=val1', "arg2=val2"]})
def test_ProcessArgs(self):
assert_that(self.plugin_params.ProcessArgs(), is_(True))
assert_that(self.plugin_params.Args["arg1"], is_("val1"))
assert_that(self.plugin_params.Args["arg2"], is_("val2"))
def test_ListArgs_should_print_the_args_to_the_stdout(self):
args = {"arg_name": "arg_value"}
self.init_stdout_recording()
self.plugin_params.ListArgs(args)
output = self.get_recorded_stdout_and_close()
assert_that(output is not None)
def test_ShowParamInfo_should_print_the_params_to_the_stdout(self):
args = {"Description": "plugin description",
"Mandatory": {"arg_name": "arg_value"},
"Optional": {"arg_name": "arg_value"}}
plugin = self._get_plugin_example()
self.core_mock.Error = flexmock()
self.core_mock.Error.should_receive("FrameworkAbort").once()
self.init_stdout_recording()
self.plugin_params.ShowParamInfo(args, plugin)
output = self.get_recorded_stdout_and_close()
assert_that(output is not None)
def test_CheckArgList_should_be_ok(self):
plugin = self._get_plugin_example()
args = {"Mandatory": [], "Optional": [], "Description": ""}
assert_that(self.plugin_params.CheckArgList(args, plugin))
def test_CheckArgList_with_missing_Mandatory_and_Optional_args(self):
self.core_mock.Error = flexmock()
self.core_mock.Error.should_receive("Add").with_args(re.compile(".*Mandatory.*Optional")).once()
plugin = self._get_plugin_example()
self.plugin_params.CheckArgList({}, plugin)
def test_CheckArgList_with_missing_description_arg(self):
self.core_mock.Error = flexmock()
self.core_mock.Error.should_receive("Add").with_args(re.compile(".*requires.*Description")).once()
plugin = self._get_plugin_example()
args = {"Mandatory": [], "Optional": []}
self.plugin_params.CheckArgList(args, plugin)
def test_SetArgsBasic_sets_the_args_to_the_plugin(self):
plugin = self._get_plugin_example()
args = {"arg1": "val1", "arg2": "val2"}
self.plugin_params.Args = args
assert_that(self.plugin_params.SetArgsBasic(args, plugin), equal_to([args]))
assert_that(plugin["Args"], matches_regexp(".*arg1=val1.*"))
assert_that(plugin["Args"], matches_regexp(".*arg2=val2.*"))
def test_SetConfig_is_a_wrapper(self):
self.core_mock.Config = flexmock()
self.core_mock.Config.should_receive("Set").with_args("_arg1", "val1").once()
args = {"arg1": "val1"}
self.plugin_params.SetConfig(args)
def test_GetArgList_returns_the_args_we_ask_for(self):
arg_list = ["arg1", "arg2"]
plugin = self._get_plugin_example()
result = self.plugin_params.GetArgList(arg_list, plugin)
assert_that(result["arg1"], is_("val1"))
assert_that(result["arg2"], is_("val2"))
    def test_GetArgList_registers_an_error_for_not_found_args(self):
self.core_mock.Error = flexmock()
self.core_mock.Error.should_receive("Add").once()
self.core_mock.Config = flexmock()
self.core_mock.Config.should_receive("IsSet").and_return(False)
arg_list = ["non_existent_arg"]
plugin = self._get_plugin_example()
result = self.plugin_params.GetArgList(arg_list, plugin)
assert_that(result, is_({}))
assert_that(plugin["ArgError"], is_(True))
def test_GetArgs(self):
args = {"Mandatory": ["arg1"],
"Optional": ["arg2"],
"Description": "description"}
plugin = self._get_plugin_example()
self.core_mock.Config = flexmock()
self.core_mock.Config.should_receive("IsSet").and_return(False)
result = self.plugin_params.GetArgs(args, plugin)
assert_that(result[0]["arg1"], is_("val1"))
assert_that(result[0]["arg2"], is_("val2"))
def _get_plugin_example(self):
return {'Args': '', 'Code': 'OWASP-IG-005', 'Group': 'web', 'Name': 'Application_Discovery', 'File': 'Application_Discovery@OWASP-IG-005.py', 'Title': 'Application Discovery', 'Descrip': '', 'Type': 'passive'}
|
sharad1126/owtf
|
tests/test_cases/framework/plugin/plugin_params_tests.py
|
Python
|
bsd-3-clause
| 4,663
|
"""
Floater Class: Velocity style controller for floating point values with
a label, entry (validated), and scale
"""
__all__ = ['Floater', 'FloaterWidget', 'FloaterGroup']
from direct.showbase.TkGlobal import *
from Tkinter import *
from Valuator import Valuator, VALUATOR_MINI, VALUATOR_FULL
from direct.task import Task
import math, sys, string, Pmw
FLOATER_WIDTH = 22
FLOATER_HEIGHT = 18
class Floater(Valuator):
def __init__(self, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('style', VALUATOR_MINI, INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialize the superclass
Valuator.__init__(self, parent)
self.initialiseoptions(Floater)
def createValuator(self):
self._valuator = self.createcomponent('valuator',
(('floater', 'valuator'),),
None,
FloaterWidget,
(self.interior(),),
command = self.setEntry,
value = self['value'])
self._valuator._widget.bind('<Double-ButtonPress-1>', self.mouseReset)
def packValuator(self):
# Position components
if self._label:
self._label.grid(row=0, column=0, sticky = EW)
self._entry.grid(row=0, column=1, sticky = EW)
self._valuator.grid(row=0, column=2, padx = 2, pady = 2)
self.interior().columnconfigure(0, weight = 1)
class FloaterWidget(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
#define the megawidget options
INITOPT = Pmw.INITOPT
optiondefs = (
# Appearance
('width', FLOATER_WIDTH, INITOPT),
('height', FLOATER_HEIGHT, INITOPT),
('relief', RAISED, self.setRelief),
('borderwidth', 2, self.setBorderwidth),
('background', 'grey75', self.setBackground),
# Behavior
# Initial value of floater, use self.set to change value
('value', 0.0, INITOPT),
('numDigits', 2, self.setNumDigits),
# Command to execute on floater updates
('command', None, None),
# Extra data to be passed to command function
('commandData', [], None),
# Callback's to execute during mouse interaction
('preCallback', None, None),
('postCallback', None, None),
# Extra data to be passed to callback function, needs to be a list
('callbackData', [], None),
)
self.defineoptions(kw, optiondefs)
# Initialize the superclass
Pmw.MegaWidget.__init__(self, parent)
# Set up some local and instance variables
# Create the components
interior = self.interior()
# Current value
self.value = self['value']
# The canvas
width = self['width']
height = self['height']
self._widget = self.createcomponent('canvas', (), None,
Canvas, (interior,),
width = width,
height = height,
background = self['background'],
highlightthickness = 0,
scrollregion = (-width/2.0,
-height/2.0,
width/2.0,
height/2.0))
self._widget.pack(expand = 1, fill = BOTH)
# The floater icon
self._widget.create_polygon(-width/2.0, 0, -2.0, -height/2.0,
-2.0, height/2.0,
fill = 'grey50',
tags = ('floater',))
self._widget.create_polygon(width/2.0, 0, 2.0, height/2.0,
2.0, -height/2.0,
fill = 'grey50',
tags = ('floater',))
# Add event bindings
self._widget.bind('<ButtonPress-1>', self.mouseDown)
self._widget.bind('<B1-Motion>', self.updateFloaterSF)
self._widget.bind('<ButtonRelease-1>', self.mouseUp)
self._widget.bind('<Enter>', self.highlightWidget)
self._widget.bind('<Leave>', self.restoreWidget)
# Make sure input variables processed
self.initialiseoptions(FloaterWidget)
def set(self, value, fCommand = 1):
"""
self.set(value, fCommand = 1)
Set floater to new value, execute command if fCommand == 1
"""
# Send command if any
if fCommand and (self['command'] != None):
apply(self['command'], [value] + self['commandData'])
# Record value
self.value = value
def updateIndicator(self, value):
# Nothing visible to update on this type of widget
pass
def get(self):
"""
self.get()
Get current floater value
"""
return self.value
## Canvas callback functions
# Floater velocity controller
def mouseDown(self, event):
""" Begin mouse interaction """
# Execute user redefinable callback function (if any)
self['relief'] = SUNKEN
if self['preCallback']:
apply(self['preCallback'], self['callbackData'])
self.velocitySF = 0.0
self.updateTask = taskMgr.add(self.updateFloaterTask,
'updateFloater')
self.updateTask.lastTime = globalClock.getFrameTime()
def updateFloaterTask(self, state):
"""
Update floaterWidget value based on current scaleFactor
Adjust for time to compensate for fluctuating frame rates
"""
currT = globalClock.getFrameTime()
dt = currT - state.lastTime
self.set(self.value + self.velocitySF * dt)
state.lastTime = currT
return Task.cont
def updateFloaterSF(self, event):
"""
Update velocity scale factor based on mouse distance from origin
"""
x = self._widget.canvasx(event.x)
y = self._widget.canvasy(event.y)
offset = max(0, abs(x) - Valuator.deadband)
if offset == 0:
return 0
sf = math.pow(Valuator.sfBase,
self.minExp + offset/Valuator.sfDist)
if x > 0:
self.velocitySF = sf
else:
self.velocitySF = -sf
def mouseUp(self, event):
taskMgr.remove(self.updateTask)
self.velocitySF = 0.0
# Execute user redefinable callback function (if any)
if self['postCallback']:
apply(self['postCallback'], self['callbackData'])
self['relief'] = RAISED
def setNumDigits(self):
"""
Adjust minimum exponent to use in velocity task based
upon the number of digits to be displayed in the result
"""
self.minExp = math.floor(-self['numDigits']/
math.log10(Valuator.sfBase))
# Methods to modify floater characteristics
def setRelief(self):
self.interior()['relief'] = self['relief']
def setBorderwidth(self):
self.interior()['borderwidth'] = self['borderwidth']
def setBackground(self):
self._widget['background'] = self['background']
def highlightWidget(self, event):
self._widget.itemconfigure('floater', fill = 'black')
def restoreWidget(self, event):
self._widget.itemconfigure('floater', fill = 'grey50')
class FloaterGroup(Pmw.MegaToplevel):
def __init__(self, parent = None, **kw):
# Default group size
DEFAULT_DIM = 1
# Default value depends on *actual* group size, test for user input
DEFAULT_VALUE = [0.0] * kw.get('dim', DEFAULT_DIM)
DEFAULT_LABELS = ['v[%d]' % x for x in range(kw.get('dim', DEFAULT_DIM))]
#define the megawidget options
INITOPT = Pmw.INITOPT
optiondefs = (
('dim', DEFAULT_DIM, INITOPT),
('side', TOP, INITOPT),
('title', 'Floater Group', None),
# A tuple of initial values, one for each floater
('value', DEFAULT_VALUE, INITOPT),
# The command to be executed any time one of the floaters is updated
('command', None, None),
# A tuple of labels, one for each floater
('labels', DEFAULT_LABELS, self._updateLabels),
)
self.defineoptions(kw, optiondefs)
# Initialize the toplevel widget
Pmw.MegaToplevel.__init__(self, parent)
# Create the components
interior = self.interior()
# Get a copy of the initial value (making sure it's a list)
self._value = list(self['value'])
# The Menu Bar
self.balloon = Pmw.Balloon()
menubar = self.createcomponent('menubar', (), None,
Pmw.MenuBar, (interior,),
balloon = self.balloon)
menubar.pack(fill=X)
# FloaterGroup Menu
menubar.addmenu('Floater Group', 'Floater Group Operations')
menubar.addmenuitem(
'Floater Group', 'command', 'Reset the Floater Group panel',
label = 'Reset',
command = lambda s = self: s.reset())
menubar.addmenuitem(
'Floater Group', 'command', 'Dismiss Floater Group panel',
label = 'Dismiss', command = self.withdraw)
menubar.addmenu('Help', 'Floater Group Help Operations')
self.toggleBalloonVar = IntVar()
self.toggleBalloonVar.set(0)
menubar.addmenuitem('Help', 'checkbutton',
'Toggle balloon help',
label = 'Balloon Help',
variable = self.toggleBalloonVar,
command = self.toggleBalloon)
self.floaterList = []
for index in range(self['dim']):
# Add a group alias so you can configure the floaters via:
# fg.configure(Valuator_XXX = YYY)
f = self.createcomponent(
'floater%d' % index, (), 'Valuator', Floater,
(interior,), value = self._value[index],
text = self['labels'][index])
# Do this separately so command doesn't get executed during construction
f['command'] = lambda val, s=self, i=index: s._floaterSetAt(i, val)
f.pack(side = self['side'], expand = 1, fill = X)
self.floaterList.append(f)
# Make sure floaters are initialized
self.set(self['value'])
# Make sure input variables processed
self.initialiseoptions(FloaterGroup)
def _updateLabels(self):
if self['labels']:
for index in range(self['dim']):
self.floaterList[index]['text'] = self['labels'][index]
def toggleBalloon(self):
if self.toggleBalloonVar.get():
self.balloon.configure(state = 'balloon')
else:
self.balloon.configure(state = 'none')
def get(self):
return self._value
def getAt(self, index):
return self._value[index]
# This is the command used to set the group's value
def set(self, value, fCommand = 1):
for i in range(self['dim']):
self._value[i] = value[i]
# Update floater, but don't execute its command
self.floaterList[i].set(value[i], 0)
if fCommand and (self['command'] is not None):
self['command'](self._value)
def setAt(self, index, value):
# Update floater and execute its command
self.floaterList[index].set(value)
# This is the command used by the floater
def _floaterSetAt(self, index, value):
self._value[index] = value
if self['command']:
self['command'](self._value)
def reset(self):
self.set(self['value'])
## SAMPLE CODE
if __name__ == '__main__':
# Initialise Tkinter and Pmw.
root = Toplevel()
root.title('Pmw Floater demonstration')
# Dummy command
def printVal(val):
print val
# Create and pack a Floater megawidget.
mega1 = Floater(root, command = printVal)
mega1.pack(side = 'left', expand = 1, fill = 'x')
"""
# These are things you can set/configure
# Starting value for floater
mega1['value'] = 123.456
mega1['text'] = 'Drive delta X'
# To change the color of the label:
mega1.label['foreground'] = 'Red'
# Max change/update, default is 100
# To have really fine control, for example
# mega1['maxVelocity'] = 0.1
# Number of digits to the right of the decimal point, default = 2
# mega1['numDigits'] = 5
"""
# To create a floater group to set an RGBA value:
group1 = FloaterGroup(root, dim = 4,
title = 'Simple RGBA Panel',
labels = ('R', 'G', 'B', 'A'),
Valuator_min = 0.0,
Valuator_max = 255.0,
Valuator_resolution = 1.0,
command = printVal)
# Uncomment this if you aren't running in IDLE
#root.mainloop()
|
toontownfunserver/Panda3D-1.9.0
|
direct/tkwidgets/Floater.py
|
Python
|
bsd-3-clause
| 13,935
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# domains.py: module for domains of model outcomes
##
# © 2017, Chris Ferrie (csferrie@gmail.com) and
# Christopher Granade (cgranade@cgranade.com).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
## IMPORTS ###################################################################
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future.utils import with_metaclass
from functools import reduce
from operator import mul
from scipy.special import binom
from math import factorial
from itertools import combinations_with_replacement, product
import numpy as np
from .utils import join_struct_arrays, separate_struct_array
import abc
import warnings
## EXPORTS ###################################################################
__all__ = [
'Domain',
'ProductDomain',
'RealDomain',
'IntegerDomain',
'MultinomialDomain'
]
## FUNCTIONS #################################################################
## ABSTRACT CLASSES AND MIXINS ###############################################
class Domain(with_metaclass(abc.ABCMeta, object)):
"""
Abstract base class for domains of outcomes of models.
"""
## ABSTRACT PROPERTIES ##
@abc.abstractproperty
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
pass
@abc.abstractproperty
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
pass
@abc.abstractproperty
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
pass
@abc.abstractproperty
def n_members(self):
"""
Returns the number of members in the domain if it
`is_finite`, otherwise, returns `np.inf`.
:type: ``int`` or ``np.inf``
"""
pass
@abc.abstractproperty
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type `dtype`.
:type: ``np.ndarray``
"""
pass
@abc.abstractproperty
def values(self):
"""
Returns an `np.array` of type `dtype` containing
some values from the domain.
For domains where `is_finite` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
pass
## CONCRETE PROPERTIES ##
@property
def is_discrete(self):
"""
Whether or not the domain has a countable number of values.
:type: `bool`
"""
return not self.is_continuous
## ABSTRACT METHODS ##
@abc.abstractmethod
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
pass
class ProductDomain(Domain):
"""
A domain made from the cartesian product of other domains.
:param Domain domains: ``Domain`` instances as separate arguments,
or as a single list of ``Domain`` instances.
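A minimal usage sketch (illustrative, not a doctest)::

    d = ProductDomain(IntegerDomain(min=0, max=1), IntegerDomain(min=0, max=2))
    d.n_members  # 6
    d.is_finite  # True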
"""
    def __init__(self, *domains):
        if len(domains) == 1:
            try:
                # a single argument may be an iterable of domains
                self._domains = list(domains[0])
            except TypeError:
                self._domains = domains
        else:
            self._domains = domains
self._dtypes = [domain.example_point.dtype for domain in self._domains]
self._example_point = join_struct_arrays(
[np.array(domain.example_point) for domain in self._domains]
)
self._dtype = self._example_point.dtype
@property
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
return any([domain.is_continuous for domain in self._domains])
@property
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
return all([domain.is_finite for domain in self._domains])
@property
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
return self._dtype
@property
def n_members(self):
"""
Returns the number of members in the domain if it
`is_finite`, otherwise, returns `np.inf`.
:type: ``int`` or ``np.inf``
"""
if self.is_finite:
return reduce(mul, [domain.n_members for domain in self._domains], 1)
else:
return np.inf
@property
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type `dtype`.
:type: ``np.ndarray``
"""
return self._example_point
@property
def values(self):
"""
Returns an `np.array` of type `dtype` containing
some values from the domain.
For domains where `is_finite` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
separate_values = [domain.values for domain in self._domains]
return np.concatenate([
join_struct_arrays(list(map(np.array, value)))
for value in product(*separate_values)
])
## METHODS ##
def _mytype(self, array):
# astype does weird stuff with struct names, and possibly
# depends on numpy version; hopefully
# the following is a bit more predictable since it passes through
# uint8
return separate_struct_array(array, self.dtype)[0]
def to_regular_arrays(self, array):
"""
Expands from an array of type `self.dtype` into a list of
arrays with dtypes corresponding to the factor domains.
:param np.ndarray array: An `np.array` of type `self.dtype`.
:rtype: ``list``
"""
return separate_struct_array(self._mytype(array), self._dtypes)
def from_regular_arrays(self, arrays):
"""
Merges a list of arrays (of the same shape) of dtypes
corresponding to the factor domains into a single array
with the dtype of the ``ProductDomain``.
:param list array: A list with each element of type ``np.ndarray``
:rtype: `np.ndarray`
"""
return self._mytype(join_struct_arrays([
array.astype(dtype)
for dtype, array in zip(self._dtypes, arrays)
]))
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
return all([
domain.in_domain(array)
for domain, array in
zip(self._domains, separate_struct_array(points, self._dtypes))
])
## CLASSES ###################################################################
class RealDomain(Domain):
"""
A domain specifying a contiguous (and possibly open ended) subset
of the real numbers.
:param float min: A number specifying the lowest possible value of the
domain.
:param float max: A number specifying the largest possible value of the
domain.
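A minimal usage sketch (illustrative, not a doctest)::

    d = RealDomain(min=0.0)
    d.in_domain(np.array([0.5, 2.0]))  # True
    d.in_domain(np.array([-1.0]))      # False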
"""
def __init__(self, min=-np.inf, max=np.inf):
self._min = min
self._max = max
## PROPERTIES ##
@property
def min(self):
"""
Returns the minimum value of the domain.
:rtype: `float`
"""
return self._min
@property
def max(self):
"""
Returns the maximum value of the domain.
:rtype: `float`
"""
return self._max
@property
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
return True
@property
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
return False
@property
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
return np.float
@property
def n_members(self):
"""
Returns the number of members in the domain if it
`is_finite`, otherwise, returns ``np.inf``.
:type: ``np.inf``
"""
return np.inf
@property
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type ``dtype``.
:type: ``np.ndarray``
"""
if not np.isinf(self.min):
return np.array([self.min], dtype=self.dtype)
if not np.isinf(self.max):
return np.array([self.max], dtype=self.dtype)
else:
return np.array([0], dtype=self.dtype)
@property
def values(self):
"""
Returns an `np.array` of type `self.dtype` containing
some values from the domain.
For domains where ``is_finite`` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
return self.example_point
## METHODS ##
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
if np.all(np.isreal(points)):
are_greater = np.all(np.greater_equal(points, self._min))
are_smaller = np.all(np.less_equal(points, self._max))
return are_greater and are_smaller
else:
return False
class IntegerDomain(Domain):
"""
A domain specifying a contiguous (and possibly open ended) subset
of the integers.
Internally, the minimum and maximum are represented as floats so that
infinite bounds can be handled; the integer conversion function is
applied to finite min and max values.
:param int min: A number specifying the lowest possible value of the
domain.
:param int max: A number specifying the largest possible value of the
domain.
Note: Yes, it is slightly unpythonic to specify `max` instead of `max`+1.
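A minimal usage sketch (illustrative, not a doctest)::

    d = IntegerDomain(min=0, max=5)
    d.n_members                    # 6
    d.in_domain(np.array([0, 5]))  # True
    d.in_domain(np.array([2.5]))   # False, not an integer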
"""
def __init__(self, min=0, max=np.inf):
self._min = int(min) if not np.isinf(min) else min
self._max = int(max) if not np.isinf(max) else max
## PROPERTIES ##
@property
def min(self):
"""
Returns the minimum value of the domain.
:rtype: `float` or `np.inf`
"""
return int(self._min) if not np.isinf(self._min) else self._min
@property
def max(self):
"""
Returns the maximum value of the domain.
:rtype: `float` or `np.inf`
"""
return int(self._max) if not np.isinf(self._max) else self._max
@property
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
return False
@property
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
return not np.isinf(self.min) and not np.isinf(self.max)
@property
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
        return int
@property
def n_members(self):
"""
Returns the number of members in the domain if it
`is_finite`, otherwise, returns `np.inf`.
:type: ``int`` or ``np.inf``
"""
if self.is_finite:
return int(self.max - self.min + 1)
else:
return np.inf
@property
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type ``dtype``.
:type: ``np.ndarray``
"""
if not np.isinf(self.min):
return np.array([self._min], dtype=self.dtype)
if not np.isinf(self.max):
return np.array([self._max], dtype=self.dtype)
else:
return np.array([0], dtype=self.dtype)
@property
def values(self):
"""
Returns an `np.array` of type `self.dtype` containing
some values from the domain.
For domains where ``is_finite`` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
if self.is_finite:
            return np.arange(self.min, self.max + 1, dtype=self.dtype)
else:
return self.example_point
## METHODS ##
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
if np.all(np.isreal(points)):
try:
are_integer = np.all(np.mod(points, 1) == 0)
except TypeError:
are_integer = False
are_greater = np.all(np.greater_equal(points, self._min))
are_smaller = np.all(np.less_equal(points, self._max))
return are_integer and are_greater and are_smaller
else:
return False
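# Illustrative usage sketch (not part of the original module; outputs assume
# the implementation above):
#
# >>> d = IntegerDomain(min=0, max=5)
# >>> d.n_members
# 6
# >>> d.values
# array([0, 1, 2, 3, 4, 5])
# >>> d.in_domain(np.array([2.5]))
# False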
class MultinomialDomain(Domain):
"""
A domain specifying k-tuples of non-negative integers which
sum to a specific value.
:param int n_meas: The sum of any tuple in the domain.
:param int n_elements: The number of elements in a tuple.
"""
def __init__(self, n_meas, n_elements=2):
self._n_elements = n_elements
self._n_meas = n_meas
## PROPERTIES ##
@property
def n_meas(self):
"""
Returns the sum of any tuple in the domain.
:rtype: `int`
"""
return self._n_meas
@property
def n_elements(self):
"""
Returns the number of elements of a tuple in the domain.
:rtype: `int`
"""
return self._n_elements
@property
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
return False
@property
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
return True
@property
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
        return np.dtype([('k', int, self.n_elements)])
@property
def n_members(self):
"""
        Returns the number of members in the domain. This domain is
        always finite, so the count is always an ``int``.
:type: ``int``
"""
        return int(binom(self.n_meas + self.n_elements - 1, self.n_elements - 1))
@property
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type ``dtype``.
:type: ``np.ndarray``
"""
return np.array([([self.n_meas] + [0] * (self.n_elements-1),)], dtype=self.dtype)
@property
def values(self):
"""
Returns an `np.array` of type `self.dtype` containing
some values from the domain.
For domains where ``is_finite`` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
# This code comes from Jared Goguen at http://stackoverflow.com/a/37712597/1082565
partition_array = np.empty((self.n_members, self.n_elements), dtype=int)
masks = np.identity(self.n_elements, dtype=int)
for i, c in enumerate(combinations_with_replacement(masks, self.n_meas)):
partition_array[i,:] = sum(c)
# Convert to dtype before returning
return self.from_regular_array(partition_array)
## METHODS ##
def to_regular_array(self, A):
"""
Converts from an array of type `self.dtype` to an array
of type `int` with an additional index labeling the
        tuple indices.
:param np.ndarray A: An `np.array` of type `self.dtype`.
:rtype: `np.ndarray`
"""
# this could be a static method, but we choose to be consistent with
# from_regular_array
return A.view((int, len(A.dtype.names))).reshape(A.shape + (-1,))
def from_regular_array(self, A):
"""
Converts from an array of type `int` where the last index
is assumed to have length `self.n_elements` to an array
        of type `self.dtype` with one fewer index.
:param np.ndarray A: An `np.array` of type `int`.
:rtype: `np.ndarray`
"""
dims = A.shape[:-1]
        return A.reshape((np.prod(dims), -1)).view(dtype=self.dtype).squeeze(-1).reshape(dims)
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
array_view = self.to_regular_array(points)
non_negative = np.all(np.greater_equal(array_view, 0))
correct_sum = np.all(np.sum(array_view, axis=-1) == self.n_meas)
return non_negative and correct_sum
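# Illustrative usage sketch (not part of the original module; outputs assume
# the implementation above). to_regular_array works because a contiguous
# structured array can be viewed as plain ints, turning the k-tuple into a
# trailing axis:
#
# >>> d = MultinomialDomain(n_meas=2, n_elements=2)
# >>> d.n_members
# 3
# >>> d.to_regular_array(d.values)
# array([[2, 0],
#        [1, 1],
#        [0, 2]])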
|
QInfer/python-qinfer
|
src/qinfer/domains.py
|
Python
|
bsd-3-clause
| 19,964
|
#!/usr/bin/env python
'''
Plot degree values for a given set of nodes in a simple circle plot.
'''
import numpy as np
import matplotlib.pyplot as plt
import mne
from jumeg import get_jumeg_path
from jumeg.connectivity import plot_degree_circle
orig_labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
yaml_fname = get_jumeg_path() + '/data/desikan_aparc_cortex_based_grouping.yaml'
con_fname = get_jumeg_path() + '/data/sample,aparc-con.npy'
con = np.load(con_fname)
con_ = con[0, :, :, 2] + con[0, :, :, 2].T
# compute the degree
degrees = mne.connectivity.degree(con_, threshold_prop=0.2)
fig, ax = plot_degree_circle(degrees, yaml_fname, orig_labels_fname)
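# Illustrative addition (not in the original example): with a non-interactive
# matplotlib backend the figure will not appear without an explicit show().
plt.show()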
|
fboers/jumeg
|
examples/connectivity/plot_degree_circle.py
|
Python
|
bsd-3-clause
| 694
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
|
JWDebelius/scikit-bio
|
skbio/parse/sequences/tests/__init__.py
|
Python
|
bsd-3-clause
| 378
|
# -*- coding: utf-8 -*-
"""Unit/Functional tests"""
from __future__ import with_statement, unicode_literals
import datetime
import os
import sys
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.db.models import Q
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
import pytest
from treebeard import numconv
from treebeard.admin import admin_factory
from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant,\
PathOverflow, MissingNodeOrderBy
from treebeard.forms import movenodeform_factory
from treebeard.templatetags.admin_tree import get_static_url
from treebeard.tests import models
BASE_DATA = [
{'data': {'desc': '1'}},
{'data': {'desc': '2'}, 'children': [
{'data': {'desc': '21'}},
{'data': {'desc': '22'}},
{'data': {'desc': '23'}, 'children': [
{'data': {'desc': '231'}},
]},
{'data': {'desc': '24'}},
]},
{'data': {'desc': '3'}},
{'data': {'desc': '4'}, 'children': [
{'data': {'desc': '41'}},
]}]
UNCHANGED = [
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
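# Each UNCHANGED entry is a (desc, depth, children_count) triple: the
# flattened form of BASE_DATA that the got() helpers below produce. Tests
# compare against it to assert the tree was left untouched.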
def _prepare_db_test(request):
case = TestCase(methodName='__init__')
case._pre_setup()
request.addfinalizer(case._post_teardown)
return request.param
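# The fixtures below reuse Django's TestCase setup/teardown machinery so that
# every parametrized model gets a clean database state for each test.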
@pytest.fixture(scope='function',
params=models.BASE_MODELS + models.PROXY_MODELS)
def model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.BASE_MODELS)
def model_without_proxy(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.UNICODE_MODELS)
def model_with_unicode(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.SORTED_MODELS)
def sorted_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.RELATED_MODELS)
def related_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.MP_SHORTPATH_MODELS)
def mpshort_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeShortPath])
def mpshortnotsorted_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeAlphabet])
def mpalphabet_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeSortedAutoNow])
def mpsortedautonow_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeSmallStep])
def mpsmallstep_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestManyToManyWithUser])
def mpm2muser_model(request):
return _prepare_db_test(request)
class TestTreeBase(object):
def got(self, model):
if model in [models.NS_TestNode, models.NS_TestNode_Proxy]:
# this slows down nested sets tests quite a bit, but it has the
# advantage that we'll check the node edges are correct
d = {}
for tree_id, lft, rgt in model.objects.values_list('tree_id',
'lft',
'rgt'):
d.setdefault(tree_id, []).extend([lft, rgt])
for tree_id, got_edges in d.items():
assert len(got_edges) == max(got_edges)
good_edges = list(range(1, len(got_edges) + 1))
assert sorted(got_edges) == good_edges
return [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
def _assert_get_annotated_list(self, model, expected, parent=None):
got = [
(obj[0].desc, obj[1]['open'], obj[1]['close'], obj[1]['level'])
for obj in model.get_annotated_list(parent)
]
assert expected == got
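# Note: get_annotated_list() yields (node, info) pairs where info contains
# 'open' (True when the node starts a new level), 'close' (a list marking
# levels that end after the node) and 'level' (zero-based depth); the helper
# above flattens those into comparable tuples.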
class TestEmptyTree(TestTreeBase):
def test_load_bulk_empty(self, model):
ids = model.load_bulk(BASE_DATA)
got_descs = [obj.desc
for obj in model.objects.filter(id__in=ids)]
expected_descs = [x[0] for x in UNCHANGED]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == UNCHANGED
def test_dump_bulk_empty(self, model):
assert model.dump_bulk() == []
def test_add_root_empty(self, model):
model.add_root(desc='1')
expected = [('1', 1, 0)]
assert self.got(model) == expected
def test_get_root_nodes_empty(self, model):
got = model.get_root_nodes()
expected = []
assert [node.desc for node in got] == expected
def test_get_first_root_node_empty(self, model):
got = model.get_first_root_node()
assert got is None
def test_get_last_root_node_empty(self, model):
got = model.get_last_root_node()
assert got is None
def test_get_tree(self, model):
got = list(model.get_tree())
assert got == []
def test_get_annotated_list(self, model):
expected = []
self._assert_get_annotated_list(model, expected)
class TestNonEmptyTree(TestTreeBase):
@classmethod
def setup_class(cls):
for model in models.BASE_MODELS:
model.load_bulk(BASE_DATA)
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.BASE_MODELS)
class TestClassMethods(TestNonEmptyTree):
def test_load_bulk_existing(self, model):
# inserting on an existing node
node = model.objects.get(desc='231')
ids = model.load_bulk(BASE_DATA, node)
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 4),
('1', 4, 0),
('2', 4, 4),
('21', 5, 0),
('22', 5, 0),
('23', 5, 1),
('231', 6, 0),
('24', 5, 0),
('3', 4, 0),
('4', 4, 1),
('41', 5, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
expected_descs = ['1', '2', '21', '22', '23', '231', '24',
'3', '4', '41']
got_descs = [obj.desc for obj in model.objects.filter(id__in=ids)]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == expected
def test_get_tree_all(self, model):
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
assert got == UNCHANGED
def test_dump_bulk_all(self, model):
assert model.dump_bulk(keep_ids=False) == BASE_DATA
def test_get_tree_node(self, model):
node = model.objects.get(desc='231')
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree(node)]
expected = [('231', 3, 4),
('1', 4, 0),
('2', 4, 4),
('21', 5, 0),
('22', 5, 0),
('23', 5, 1),
('231', 6, 0),
('24', 5, 0),
('3', 4, 0),
('4', 4, 1),
('41', 5, 0)]
assert got == expected
def test_get_tree_leaf(self, model):
node = model.objects.get(desc='1')
assert 0 == node.get_children_count()
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree(node)]
expected = [('1', 1, 0)]
assert got == expected
def test_get_annotated_list_all(self, model):
expected = [('1', True, [], 0), ('2', False, [], 0),
('21', True, [], 1), ('22', False, [], 1),
('23', False, [], 1), ('231', True, [0], 2),
('24', False, [0], 1), ('3', False, [], 0),
('4', False, [], 0), ('41', True, [0, 1], 1)]
self._assert_get_annotated_list(model, expected)
def test_get_annotated_list_node(self, model):
node = model.objects.get(desc='2')
expected = [('2', True, [], 0), ('21', True, [], 1),
('22', False, [], 1), ('23', False, [], 1),
('231', True, [0], 2), ('24', False, [0, 1], 1)]
self._assert_get_annotated_list(model, expected, node)
def test_get_annotated_list_leaf(self, model):
node = model.objects.get(desc='1')
expected = [('1', True, [0], 0)]
self._assert_get_annotated_list(model, expected, node)
def test_dump_bulk_node(self, model):
node = model.objects.get(desc='231')
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
got = model.dump_bulk(node, False)
expected = [{'data': {'desc': '231'}, 'children': BASE_DATA}]
assert got == expected
def test_load_and_dump_bulk_keeping_ids(self, model):
exp = model.dump_bulk(keep_ids=True)
model.objects.all().delete()
model.load_bulk(exp, None, True)
got = model.dump_bulk(keep_ids=True)
assert got == exp
        # do we really have an unchanged tree after the dump/delete/load?
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
assert got == UNCHANGED
def test_load_and_dump_bulk_with_fk(self, related_model):
# https://bitbucket.org/tabo/django-treebeard/issue/48/
related_model.objects.all().delete()
related, created = models.RelatedModel.objects.get_or_create(
desc="Test %s" % related_model.__name__)
related_data = [
{'data': {'desc': '1', 'related': related.pk}},
{'data': {'desc': '2', 'related': related.pk}, 'children': [
{'data': {'desc': '21', 'related': related.pk}},
{'data': {'desc': '22', 'related': related.pk}},
{'data': {'desc': '23', 'related': related.pk}, 'children': [
{'data': {'desc': '231', 'related': related.pk}},
]},
{'data': {'desc': '24', 'related': related.pk}},
]},
{'data': {'desc': '3', 'related': related.pk}},
{'data': {'desc': '4', 'related': related.pk}, 'children': [
{'data': {'desc': '41', 'related': related.pk}},
]}]
related_model.load_bulk(related_data)
got = related_model.dump_bulk(keep_ids=False)
assert got == related_data
def test_get_root_nodes(self, model):
got = model.get_root_nodes()
expected = ['1', '2', '3', '4']
assert [node.desc for node in got] == expected
def test_get_first_root_node(self, model):
got = model.get_first_root_node()
assert got.desc == '1'
def test_get_last_root_node(self, model):
got = model.get_last_root_node()
assert got.desc == '4'
def test_add_root(self, model):
obj = model.add_root(desc='5')
assert obj.get_depth() == 1
assert model.get_last_root_node().desc == '5'
class TestSimpleNodeMethods(TestNonEmptyTree):
def test_is_root(self, model):
data = [
('2', True),
('1', True),
('4', True),
('21', False),
('24', False),
('22', False),
('231', False),
]
for desc, expected in data:
got = model.objects.get(desc=desc).is_root()
assert got == expected
def test_is_leaf(self, model):
data = [
('2', False),
('23', False),
('231', True),
]
for desc, expected in data:
got = model.objects.get(desc=desc).is_leaf()
assert got == expected
def test_get_root(self, model):
data = [
('2', '2'),
('1', '1'),
('4', '4'),
('21', '2'),
('24', '2'),
('22', '2'),
('231', '2'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_root()
assert node.desc == expected
def test_get_parent(self, model):
data = [
('2', None),
('1', None),
('4', None),
('21', '2'),
('24', '2'),
('22', '2'),
('231', '23'),
]
data = dict(data)
objs = {}
for desc, expected in data.items():
node = model.objects.get(desc=desc)
parent = node.get_parent()
if expected:
assert parent.desc == expected
else:
assert parent is None
objs[desc] = node
# corrupt the objects' parent cache
node._parent_obj = 'CORRUPTED!!!'
for desc, expected in data.items():
node = objs[desc]
# asking get_parent to not use the parent cache (since we
# corrupted it in the previous loop)
parent = node.get_parent(True)
if expected:
assert parent.desc == expected
else:
assert parent is None
def test_get_children(self, model):
data = [
('2', ['21', '22', '23', '24']),
('23', ['231']),
('231', []),
]
for desc, expected in data:
children = model.objects.get(desc=desc).get_children()
assert [node.desc for node in children] == expected
def test_get_children_count(self, model):
data = [
('2', 4),
('23', 1),
('231', 0),
]
for desc, expected in data:
got = model.objects.get(desc=desc).get_children_count()
assert got == expected
def test_get_siblings(self, model):
data = [
('2', ['1', '2', '3', '4']),
('21', ['21', '22', '23', '24']),
('231', ['231']),
]
for desc, expected in data:
siblings = model.objects.get(desc=desc).get_siblings()
assert [node.desc for node in siblings] == expected
def test_get_first_sibling(self, model):
data = [
('2', '1'),
('1', '1'),
('4', '1'),
('21', '21'),
('24', '21'),
('22', '21'),
('231', '231'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_first_sibling()
assert node.desc == expected
def test_get_prev_sibling(self, model):
data = [
('2', '1'),
('1', None),
('4', '3'),
('21', None),
('24', '23'),
('22', '21'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_prev_sibling()
if expected is None:
assert node is None
else:
assert node.desc == expected
def test_get_next_sibling(self, model):
data = [
('2', '3'),
('1', '2'),
('4', None),
('21', '22'),
('24', None),
('22', '23'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_next_sibling()
if expected is None:
assert node is None
else:
assert node.desc == expected
def test_get_last_sibling(self, model):
data = [
('2', '4'),
('1', '4'),
('4', '4'),
('21', '24'),
('24', '24'),
('22', '24'),
('231', '231'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_last_sibling()
assert node.desc == expected
def test_get_first_child(self, model):
data = [
('2', '21'),
('21', None),
('23', '231'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_first_child()
if expected is None:
assert node is None
else:
assert node.desc == expected
def test_get_last_child(self, model):
data = [
('2', '24'),
('21', None),
('23', '231'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_last_child()
if expected is None:
assert node is None
else:
assert node.desc == expected
def test_get_ancestors(self, model):
data = [
('2', []),
('21', ['2']),
('231', ['2', '23']),
]
for desc, expected in data:
nodes = model.objects.get(desc=desc).get_ancestors()
assert [node.desc for node in nodes] == expected
def test_get_descendants(self, model):
data = [
('2', ['21', '22', '23', '231', '24']),
('23', ['231']),
('231', []),
('1', []),
('4', ['41']),
]
for desc, expected in data:
nodes = model.objects.get(desc=desc).get_descendants()
assert [node.desc for node in nodes] == expected
def test_get_descendant_count(self, model):
data = [
('2', 5),
('23', 1),
('231', 0),
('1', 0),
('4', 1),
]
for desc, expected in data:
got = model.objects.get(desc=desc).get_descendant_count()
assert got == expected
def test_is_sibling_of(self, model):
data = [
('2', '2', True),
('2', '1', True),
('21', '2', False),
('231', '2', False),
('22', '23', True),
('231', '23', False),
('231', '231', True),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_sibling_of(node2) == expected
def test_is_child_of(self, model):
data = [
('2', '2', False),
('2', '1', False),
('21', '2', True),
('231', '2', False),
('231', '23', True),
('231', '231', False),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_child_of(node2) == expected
def test_is_descendant_of(self, model):
data = [
('2', '2', False),
('2', '1', False),
('21', '2', True),
('231', '2', True),
('231', '23', True),
('231', '231', False),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_descendant_of(node2) == expected
class TestAddChild(TestNonEmptyTree):
def test_add_child_to_leaf(self, model):
model.objects.get(desc='231').add_child(desc='2311')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 1),
('2311', 4, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_child_to_node(self, model):
model.objects.get(desc='2').add_child(desc='25')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('25', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestAddSibling(TestNonEmptyTree):
def test_add_sibling_invalid_pos(self, model):
with pytest.raises(InvalidPosition):
model.objects.get(desc='231').add_sibling('invalid_pos')
def test_add_sibling_missing_nodeorderby(self, model):
node_wchildren = model.objects.get(desc='2')
with pytest.raises(MissingNodeOrderBy):
node_wchildren.add_sibling('sorted-sibling', desc='aaa')
def test_add_sibling_last_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('last-sibling', desc='5')
assert obj.get_depth() == 1
assert node_wchildren.get_last_sibling().desc == '5'
def test_add_sibling_last(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('last-sibling', desc='232')
assert obj.get_depth() == 3
assert node.get_last_sibling().desc == '232'
def test_add_sibling_first_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('first-sibling', desc='new')
assert obj.get_depth() == 1
expected = [('new', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_first(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('first-sibling', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('new', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('left', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('new', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('left', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('new', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_noleft_root(self, model):
node = model.objects.get(desc='1')
obj = node.add_sibling('left', desc='new')
assert obj.get_depth() == 1
expected = [('new', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_noleft(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('left', desc='new')
assert obj.get_depth() == 3
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('new', 3, 0),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('right', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('new', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('right', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('new', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right_noright_root(self, model):
node = model.objects.get(desc='4')
obj = node.add_sibling('right', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0),
('new', 1, 0)]
assert self.got(model) == expected
def test_add_sibling_right_noright(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('right', desc='new')
assert obj.get_depth() == 3
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('231', 3, 0),
('new', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestDelete(TestNonEmptyTree):
@classmethod
def setup_class(cls):
TestNonEmptyTree.setup_class()
for model, dep_model in zip(models.BASE_MODELS, models.DEP_MODELS):
for node in model.objects.all():
dep_model(node=node).save()
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.DEP_MODELS + models.BASE_MODELS)
def test_delete_leaf(self, model):
model.objects.get(desc='231').delete()
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_node(self, model):
model.objects.get(desc='23').delete()
expected = [('1', 1, 0),
('2', 1, 3),
('21', 2, 0),
('22', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_root(self, model):
model.objects.get(desc='2').delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_filter_root_nodes(self, model):
model.objects.filter(desc__in=('2', '3')).delete()
expected = [('1', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_filter_children(self, model):
model.objects.filter(desc__in=('2', '23', '231')).delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
    def test_delete_nonexistent_nodes(self, model):
model.objects.filter(desc__in=('ZZZ', 'XXX')).delete()
assert self.got(model) == UNCHANGED
def test_delete_same_node_twice(self, model):
model.objects.filter(desc__in=('2', '2')).delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_all_root_nodes(self, model):
model.get_root_nodes().delete()
count = model.objects.count()
assert count == 0
def test_delete_all_nodes(self, model):
model.objects.all().delete()
count = model.objects.count()
assert count == 0
class TestMoveErrors(TestNonEmptyTree):
def test_move_invalid_pos(self, model):
node = model.objects.get(desc='231')
with pytest.raises(InvalidPosition):
node.move(node, 'invalid_pos')
def test_move_to_descendant(self, model):
node = model.objects.get(desc='2')
target = model.objects.get(desc='231')
with pytest.raises(InvalidMoveToDescendant):
node.move(target, 'first-sibling')
def test_move_missing_nodeorderby(self, model):
node = model.objects.get(desc='231')
with pytest.raises(MissingNodeOrderBy):
node.move(node, 'sorted-child')
with pytest.raises(MissingNodeOrderBy):
node.move(node, 'sorted-sibling')
class TestMoveSortedErrors(TestTreeBase):
def test_nonsorted_move_in_sorted(self, sorted_model):
node = sorted_model.add_root(val1=3, val2=3, desc='zxy')
with pytest.raises(InvalidPosition):
node.move(node, 'left')
class TestMoveLeafRoot(TestNonEmptyTree):
def test_move_leaf_last_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0),
('231', 1, 0)]
assert self.got(model) == expected
def test_move_leaf_first_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'first-sibling')
expected = [('231', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'left')
expected = [('1', 1, 0),
('231', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_right_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_last_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 5),
('231', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestMoveLeaf(TestNonEmptyTree):
def test_move_leaf_last_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'first-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('231', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('231', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_right_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('231', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling_itself(self, model):
target = model.objects.get(desc='231')
model.objects.get(desc='231').move(target, 'left')
assert self.got(model) == UNCHANGED
def test_move_leaf_last_child(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 1),
('231', 3, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_child(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 1),
('231', 3, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestMoveBranchRoot(TestNonEmptyTree):
def test_move_branch_first_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'first-sibling')
expected = [('4', 1, 1),
('41', 2, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_branch_left_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('4', 1, 1),
('41', 2, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 1, 1),
('41', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_noleft_sibling_root(self, model):
target = model.objects.get(desc='2').get_first_sibling()
model.objects.get(desc='4').move(target, 'left')
expected = [('4', 1, 1),
('41', 2, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_noright_sibling_root(self, model):
target = model.objects.get(desc='2').get_last_sibling()
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_branch_first_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
class TestMoveBranch(TestNonEmptyTree):
def test_move_branch_first_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'first-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('4', 2, 1),
('41', 3, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('4', 2, 1),
('41', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_noleft_sibling(self, model):
target = model.objects.get(desc='23').get_first_sibling()
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_noright_sibling(self, model):
target = model.objects.get(desc='23').get_last_sibling()
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_itself_sibling(self, model):
target = model.objects.get(desc='4')
model.objects.get(desc='4').move(target, 'left')
assert self.got(model) == UNCHANGED
def test_move_branch_first_child(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('4', 3, 1),
('41', 4, 0),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_child(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('231', 3, 0),
('4', 3, 1),
('41', 4, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
class TestTreeSorted(TestTreeBase):
def got(self, sorted_model):
return [(o.val1, o.val2, o.desc, o.get_depth(), o.get_children_count())
for o in sorted_model.get_tree()]
def test_add_root_sorted(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
expected = [(1, 4, 'bcd', 1, 0),
(2, 2, 'qwe', 1, 0),
(2, 5, 'zxy', 1, 0),
(3, 2, 'vcx', 1, 0),
(3, 3, 'abc', 1, 0),
(3, 3, 'abc', 1, 0),
(3, 3, 'zxy', 1, 0),
(4, 1, 'fgh', 1, 0)]
assert self.got(sorted_model) == expected
def test_add_child_root_sorted(self, sorted_model):
root = sorted_model.add_root(val1=0, val2=0, desc='aaa')
root.add_child(val1=3, val2=3, desc='zxy')
root.add_child(val1=1, val2=4, desc='bcd')
root.add_child(val1=2, val2=5, desc='zxy')
root.add_child(val1=3, val2=3, desc='abc')
root.add_child(val1=4, val2=1, desc='fgh')
root.add_child(val1=3, val2=3, desc='abc')
root.add_child(val1=2, val2=2, desc='qwe')
root.add_child(val1=3, val2=2, desc='vcx')
expected = [(0, 0, 'aaa', 1, 8),
(1, 4, 'bcd', 2, 0),
(2, 2, 'qwe', 2, 0),
(2, 5, 'zxy', 2, 0),
(3, 2, 'vcx', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'zxy', 2, 0),
(4, 1, 'fgh', 2, 0)]
assert self.got(sorted_model) == expected
def test_add_child_nonroot_sorted(self, sorted_model):
get_node = lambda node_id: sorted_model.objects.get(pk=node_id)
root_id = sorted_model.add_root(val1=0, val2=0, desc='a').pk
node_id = get_node(root_id).add_child(val1=0, val2=0, desc='ac').pk
get_node(root_id).add_child(val1=0, val2=0, desc='aa')
get_node(root_id).add_child(val1=0, val2=0, desc='av')
get_node(node_id).add_child(val1=0, val2=0, desc='aca')
get_node(node_id).add_child(val1=0, val2=0, desc='acc')
get_node(node_id).add_child(val1=0, val2=0, desc='acb')
expected = [(0, 0, 'a', 1, 3),
(0, 0, 'aa', 2, 0),
(0, 0, 'ac', 2, 3),
(0, 0, 'aca', 3, 0),
(0, 0, 'acb', 3, 0),
(0, 0, 'acc', 3, 0),
(0, 0, 'av', 2, 0)]
assert self.got(sorted_model) == expected
def test_move_sorted(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
root_nodes = sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
node = sorted_model.objects.get(pk=node.pk)
target = sorted_model.objects.get(pk=target.pk)
node.move(target, 'sorted-child')
expected = [(1, 4, 'bcd', 1, 7),
(2, 2, 'qwe', 2, 0),
(2, 5, 'zxy', 2, 0),
(3, 2, 'vcx', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'zxy', 2, 0),
(4, 1, 'fgh', 2, 0)]
assert self.got(sorted_model) == expected
def test_move_sortedsibling(self, sorted_model):
# https://bitbucket.org/tabo/django-treebeard/issue/27
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
root_nodes = sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
node = sorted_model.objects.get(pk=node.pk)
target = sorted_model.objects.get(pk=target.pk)
node.val1 -= 2
node.save()
node.move(target, 'sorted-sibling')
expected = [(0, 2, 'qwe', 1, 0),
(0, 5, 'zxy', 1, 0),
(1, 2, 'vcx', 1, 0),
(1, 3, 'abc', 1, 0),
(1, 3, 'abc', 1, 0),
(1, 3, 'zxy', 1, 0),
(1, 4, 'bcd', 1, 0),
(2, 1, 'fgh', 1, 0)]
assert self.got(sorted_model) == expected
class TestMP_TreeAlphabet(TestTreeBase):
def test_alphabet(self, mpalphabet_model):
if not os.getenv('TREEBEARD_TEST_ALPHABET', False):
            # run this test only if the environment variable is set
return
basealpha = numconv.BASE85
got_err = False
last_good = None
for alphabetlen in range(35, len(basealpha) + 1):
alphabet = basealpha[0:alphabetlen]
expected = [alphabet[0] + char for char in alphabet[1:]]
expected.extend([alphabet[1] + char for char in alphabet])
expected.append(alphabet[2] + alphabet[0])
# remove all nodes
mpalphabet_model.objects.all().delete()
# change the model's alphabet
mpalphabet_model.alphabet = alphabet
# insert root nodes
for pos in range(len(alphabet) * 2):
try:
mpalphabet_model.add_root(numval=pos)
                except Exception:
got_err = True
break
if got_err:
break
got = [obj.path
for obj in mpalphabet_model.objects.all()]
if got != expected:
got_err = True
last_good = alphabet
sys.stdout.write(
'\nThe best BASE85 based alphabet for your setup is: %s\n' % (
last_good, )
)
sys.stdout.flush()
class TestHelpers(TestTreeBase):
@classmethod
def setup_class(cls):
for model in models.BASE_MODELS:
model.load_bulk(BASE_DATA)
for node in model.get_root_nodes():
model.load_bulk(BASE_DATA, node)
model.add_root(desc='5')
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.BASE_MODELS)
def test_descendants_group_count_root(self, model):
expected = [(o.desc, o.get_descendant_count())
for o in model.get_root_nodes()]
got = [(o.desc, o.descendants_count)
for o in model.get_descendants_group_count()]
assert got == expected
def test_descendants_group_count_node(self, model):
parent = model.get_root_nodes().get(desc='2')
expected = [(o.desc, o.get_descendant_count())
for o in parent.get_children()]
got = [(o.desc, o.descendants_count)
for o in model.get_descendants_group_count(parent)]
assert got == expected
class TestMP_TreeSortedAutoNow(TestTreeBase):
"""
The sorting mechanism used by treebeard when adding a node can fail if the
    ordering uses an "auto_now" field
"""
def test_sorted_by_autonow_workaround(self, mpsortedautonow_model):
# workaround
for i in range(1, 5):
mpsortedautonow_model.add_root(desc='node%d' % (i, ),
created=datetime.datetime.now())
def test_sorted_by_autonow_FAIL(self, mpsortedautonow_model):
"""
This test asserts that we have a problem.
        Fix this, somehow.
"""
mpsortedautonow_model.add_root(desc='node1')
with pytest.raises(ValueError):
mpsortedautonow_model.add_root(desc='node2')
class TestMP_TreeStepOverflow(TestTreeBase):
def test_add_root(self, mpsmallstep_model):
method = mpsmallstep_model.add_root
for i in range(1, 10):
method()
with pytest.raises(PathOverflow):
method()
def test_add_child(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
method = root.add_child
for i in range(1, 10):
method()
with pytest.raises(PathOverflow):
method()
def test_add_sibling(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
for i in range(1, 10):
root.add_child()
positions = ('first-sibling', 'left', 'right', 'last-sibling')
for pos in positions:
with pytest.raises(PathOverflow):
root.get_last_child().add_sibling(pos)
def test_move(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
for i in range(1, 10):
root.add_child()
newroot = mpsmallstep_model.add_root()
targets = [(root, ['first-child', 'last-child']),
(root.get_first_child(), ['first-sibling',
'left',
'right',
'last-sibling'])]
for target, positions in targets:
for pos in positions:
with pytest.raises(PathOverflow):
newroot.move(target, pos)
class TestMP_TreeShortPath(TestTreeBase):
"""Test a tree with a very small path field (max_length=4) and a
steplen of 1
"""
def test_short_path(self, mpshortnotsorted_model):
obj = mpshortnotsorted_model.add_root()
obj = obj.add_child().add_child().add_child()
with pytest.raises(PathOverflow):
obj.add_child()
class TestMP_TreeFindProblems(TestTreeBase):
def test_find_problems(self, mpalphabet_model):
mpalphabet_model.alphabet = '01234'
mpalphabet_model(path='01', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='1', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='111', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='abcd', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='qa#$%!', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='0201', depth=2, numchild=0, numval=0).save()
mpalphabet_model(path='020201', depth=3, numchild=0, numval=0).save()
mpalphabet_model(path='03', depth=1, numchild=2, numval=0).save()
mpalphabet_model(path='0301', depth=2, numchild=0, numval=0).save()
mpalphabet_model(path='030102', depth=3, numchild=10, numval=0).save()
mpalphabet_model(path='04', depth=10, numchild=1, numval=0).save()
mpalphabet_model(path='0401', depth=20, numchild=0, numval=0).save()
def got(ids):
return [o.path for o in
mpalphabet_model.objects.filter(id__in=ids)]
(evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild) = (
mpalphabet_model.find_problems())
assert ['abcd', 'qa#$%!'] == got(evil_chars)
assert ['1', '111'] == got(bad_steplen)
assert ['0201', '020201'] == got(orphans)
assert ['03', '0301', '030102'] == got(wrong_numchild)
assert ['04', '0401'] == got(wrong_depth)
class TestMP_TreeFix(TestTreeBase):
expected_no_holes = {
models.MP_TestNodeShortPath: [
('1', 'b', 1, 2),
('11', 'u', 2, 1),
('111', 'i', 3, 1),
('1111', 'e', 4, 0),
('12', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('431', 'i', 3, 1),
('4311', 'e', 4, 0),
('44', 'o', 2, 0)],
models.MP_TestSortedNodeShortPath: [
('1', 'a', 1, 4),
('11', 'a', 2, 0),
('12', 'a', 2, 0),
('13', 'o', 2, 0),
('14', 'u', 2, 1),
('141', 'i', 3, 1),
('1411', 'e', 4, 0),
('2', 'b', 1, 2),
('21', 'o', 2, 0),
('22', 'u', 2, 1),
('221', 'i', 3, 1),
('2211', 'e', 4, 0),
('3', 'd', 1, 0),
('4', 'g', 1, 0)]}
expected_with_holes = {
models.MP_TestNodeShortPath: [
('1', 'b', 1, 2),
('13', 'u', 2, 1),
('134', 'i', 3, 1),
('1343', 'e', 4, 0),
('14', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('434', 'i', 3, 1),
('4343', 'e', 4, 0),
('44', 'o', 2, 0)],
models.MP_TestSortedNodeShortPath: [
('1', 'b', 1, 2),
('13', 'u', 2, 1),
('134', 'i', 3, 1),
('1343', 'e', 4, 0),
('14', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('434', 'i', 3, 1),
('4343', 'e', 4, 0),
('44', 'o', 2, 0)]}
def got(self, model):
return [(o.path, o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
def add_broken_test_data(self, model):
model(path='4', depth=2, numchild=2, desc='a').save()
model(path='13', depth=1000, numchild=0, desc='u').save()
model(path='14', depth=4, numchild=500, desc='o').save()
model(path='134', depth=321, numchild=543, desc='i').save()
model(path='1343', depth=321, numchild=543, desc='e').save()
model(path='42', depth=1, numchild=1, desc='a').save()
model(path='43', depth=1000, numchild=0, desc='u').save()
model(path='44', depth=4, numchild=500, desc='o').save()
model(path='434', depth=321, numchild=543, desc='i').save()
model(path='4343', depth=321, numchild=543, desc='e').save()
model(path='41', depth=1, numchild=1, desc='a').save()
model(path='3', depth=221, numchild=322, desc='g').save()
model(path='1', depth=10, numchild=3, desc='b').save()
model(path='2', depth=10, numchild=3, desc='d').save()
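    # The rows above deliberately store wrong depth/numchild values and leave
    # gaps in the path space, giving fix_tree() something to repair in the
    # tests below.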
def test_fix_tree_non_destructive(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(destructive=False)
got = self.got(mpshort_model)
expected = self.expected_with_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
def test_fix_tree_destructive(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(destructive=True)
got = self.got(mpshort_model)
expected = self.expected_no_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
class TestIssues(TestTreeBase):
# test for http://code.google.com/p/django-treebeard/issues/detail?id=14
def test_many_to_many_django_user_anonymous(self, mpm2muser_model):
# Using AnonymousUser() in the querysets will expose non-treebeard
# related problems in Django 1.0
#
# Postgres:
# ProgrammingError: can't adapt
# SQLite:
# InterfaceError: Error binding parameter 4 - probably unsupported
# type.
# MySQL compared a string to an integer field:
# `treebeard_mp_testissue14_users`.`user_id` = 'AnonymousUser'
#
# Using a None field instead works (will be translated to IS NULL).
#
# anonuserobj = AnonymousUser()
anonuserobj = None
def qs_check(qs, expected):
assert [o.name for o in qs] == expected
def qs_check_first_or_user(expected, root, user):
qs_check(
root.get_children().filter(Q(name="first") | Q(users=user)),
expected)
user = User.objects.create_user('test_user', 'test@example.com',
'testpasswd')
user.save()
root = mpm2muser_model.add_root(name="the root node")
root.add_child(name="first")
second = root.add_child(name="second")
qs_check(root.get_children(), ['first', 'second'])
qs_check(root.get_children().filter(Q(name="first")), ['first'])
qs_check(root.get_children().filter(Q(users=user)), [])
qs_check_first_or_user(['first'], root, user)
qs_check_first_or_user(['first', 'second'], root, anonuserobj)
user = User.objects.get(username="test_user")
second.users.add(user)
qs_check_first_or_user(['first', 'second'], root, user)
qs_check_first_or_user(['first'], root, anonuserobj)
class TestMoveNodeForm(TestNonEmptyTree):
def _get_nodes_list(self, nodes):
return [(pk, '%sNode %d' % (' ' * 4 * (depth - 1), pk))
for pk, depth in nodes]
def _assert_nodes_in_choices(self, form, nodes):
choices = form.fields['_ref_node_id'].choices
assert 0 == choices.pop(0)[0]
assert nodes == [(choice[0], choice[1]) for choice in choices]
def _move_node_helper(self, node, safe_parent_nodes):
form_class = movenodeform_factory(type(node))
form = form_class(instance=node)
assert ['desc', '_position', '_ref_node_id'] == list(
form.base_fields.keys())
got = [choice[0] for choice in form.fields['_position'].choices]
assert ['first-child', 'left', 'right'] == got
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
def _get_node_ids_and_depths(self, nodes):
return [(node.id, node.get_depth()) for node in nodes]
def test_form_root_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop(0)
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
self._move_node_helper(node, safe_parent_nodes)
def test_form_leaf_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop()
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
self._move_node_helper(node, safe_parent_nodes)
def test_form_admin(self, model):
request = None
nodes = list(model.get_tree())
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
for node in model.objects.all():
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
ma = admin_class(model, site)
got = list(ma.get_form(request).base_fields.keys())
desc_pos_refnodeid = ['desc', '_position', '_ref_node_id']
assert desc_pos_refnodeid == got
got = ma.get_fieldsets(request)
expected = [(None, {'fields': desc_pos_refnodeid})]
assert got == expected
got = ma.get_fieldsets(request, node)
assert got == expected
form = ma.get_form(request)()
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
class TestModelAdmin(TestNonEmptyTree):
def test_default_fields(self, model):
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
ma = admin_class(model, site)
assert list(ma.get_form(None).base_fields.keys()) == [
'desc', '_position', '_ref_node_id']
class TestSortedForm(TestTreeSorted):
def test_sorted_form(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
form_class = movenodeform_factory(sorted_model)
form = form_class()
assert list(form.fields.keys()) == ['val1', 'val2', 'desc',
'_position', '_ref_node_id']
form = form_class(instance=sorted_model.objects.get(desc='bcd'))
assert list(form.fields.keys()) == ['val1', 'val2', 'desc',
'_position', '_ref_node_id']
assert 'id__position' in str(form)
assert 'id__ref_node_id' in str(form)
class TestForm(TestNonEmptyTree):
def test_form(self, model):
form_class = movenodeform_factory(model)
form = form_class()
assert list(form.fields.keys()) == ['desc', '_position',
'_ref_node_id']
form = form_class(instance=model.objects.get(desc='1'))
assert list(form.fields.keys()) == ['desc', '_position',
'_ref_node_id']
assert 'id__position' in str(form)
assert 'id__ref_node_id' in str(form)
def test_get_position_ref_node(self, model):
form_class = movenodeform_factory(model)
instance_parent = model.objects.get(desc='1')
form = form_class(instance=instance_parent)
assert form._get_position_ref_node(instance_parent) == {
'_position': 'first-child',
'_ref_node_id': ''
}
instance_child = model.objects.get(desc='21')
form = form_class(instance=instance_child)
assert form._get_position_ref_node(instance_child) == {
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='2').pk
}
instance_grandchild = model.objects.get(desc='22')
form = form_class(instance=instance_grandchild)
assert form._get_position_ref_node(instance_grandchild) == {
'_position': 'right',
'_ref_node_id': model.objects.get(desc='21').pk
}
instance_grandchild = model.objects.get(desc='231')
form = form_class(instance=instance_grandchild)
assert form._get_position_ref_node(instance_grandchild) == {
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='23').pk
}
def test_clean_cleaned_data(self, model):
instance_parent = model.objects.get(desc='1')
_position = 'first-child'
_ref_node_id = ''
form_class = movenodeform_factory(model)
form = form_class(
instance=instance_parent,
data={
'_position': _position,
'_ref_node_id': _ref_node_id,
'desc': instance_parent.desc
}
)
assert form.is_valid()
assert form._clean_cleaned_data() == (_position, _ref_node_id)
def test_save_edit(self, model):
instance_parent = model.objects.get(desc='1')
original_count = len(model.objects.all())
form_class = movenodeform_factory(model)
form = form_class(
instance=instance_parent,
data={
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='2').pk,
'desc': instance_parent.desc
}
)
assert form.is_valid()
saved_instance = form.save()
assert original_count == model.objects.all().count()
assert saved_instance.get_children_count() == 0
assert saved_instance.get_depth() == 2
assert not saved_instance.is_root()
assert saved_instance.is_leaf()
# Return to original state
form_class = movenodeform_factory(model)
form = form_class(
instance=saved_instance,
data={
'_position': 'first-child',
'_ref_node_id': '',
'desc': saved_instance.desc
}
)
assert form.is_valid()
restored_instance = form.save()
assert original_count == model.objects.all().count()
assert restored_instance.get_children_count() == 0
assert restored_instance.get_depth() == 1
assert restored_instance.is_root()
assert restored_instance.is_leaf()
def test_save_new(self, model):
original_count = model.objects.all().count()
assert original_count == 10
_position = 'first-child'
form_class = movenodeform_factory(model)
form = form_class(
data={'_position': _position, 'desc': 'New Form Test'})
assert form.is_valid()
assert form.save() is not None
assert original_count < model.objects.all().count()
class TestAdminTreeTemplateTags(TestCase):
def test_treebeard_css(self):
template = Template("{% load admin_tree %}{% treebeard_css %}")
context = Context()
rendered = template.render(context)
expected = ('<link rel="stylesheet" type="text/css" '
'href="/treebeard/treebeard-admin.css"/>')
assert expected == rendered
def test_treebeard_js(self):
template = Template("{% load admin_tree %}{% treebeard_js %}")
context = Context()
rendered = template.render(context)
expected = ('<script type="text/javascript" src="jsi18n"></script>'
'<script type="text/javascript" '
'src="/treebeard/treebeard-admin.js"></script>'
'<script>(function($){'
'jQuery = $.noConflict(true);'
'})(django.jQuery);</script>'
'<script type="text/javascript" '
'src="/treebeard/jquery-ui-1.8.5.custom.min.js"></script>')
assert expected == rendered
def test_get_static_url(self):
with self.settings(STATIC_URL=None, MEDIA_URL=None):
assert get_static_url() == '/'
with self.settings(STATIC_URL='/static/', MEDIA_URL=None):
assert get_static_url() == '/static/'
with self.settings(STATIC_URL=None, MEDIA_URL='/media/'):
assert get_static_url() == '/media/'
with self.settings(STATIC_URL='/static/', MEDIA_URL='/media/'):
assert get_static_url() == '/static/'
class TestAdminTree(TestNonEmptyTree):
template = Template('{% load admin_tree %}{% spaceless %}'
'{% result_tree cl request %}{% endspaceless %}')
def test_result_tree(self, model_without_proxy):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
        # We have the same number of drag handlers as objects
drag_handler = '<td class="drag-handler"><span> </span></td>'
assert table_output.count(drag_handler) == model.objects.count()
# All nodes are in the result tree
for object in model.objects.all():
url = cl.url_for_result(object)
node = '<a href="%s">Node %i</a>' % (url, object.pk)
assert node in table_output
# Unfiltered
        assert ('<input type="hidden" id="has-filters" value="0"/>'
                in table_output)
def test_unicode_result_tree(self, model_with_unicode):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
model = model_with_unicode
# Add a unicode description
model.add_root(desc='áéîøü')
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
        # We have the same number of drag handlers as objects
drag_handler = '<td class="drag-handler"><span> </span></td>'
assert table_output.count(drag_handler) == model.objects.count()
# All nodes are in the result tree
for object in model.objects.all():
url = cl.url_for_result(object)
node = '<a href="%s">%s</a>' % (url, object.desc)
assert node in table_output
# Unfiltered
        assert ('<input type="hidden" id="has-filters" value="0"/>'
                in table_output)
def test_result_filtered(self, model_without_proxy):
""" Test template changes with filters or pagination.
"""
model = model_without_proxy
# Filtered GET
request = RequestFactory().get('/admin/tree/?desc=1')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Filtered
        assert ('<input type="hidden" id="has-filters" value="1"/>'
                in table_output)
        # A GET with only pagination params should not count as filtered
request = RequestFactory().get('/admin/tree/?p=1')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Not Filtered
        assert ('<input type="hidden" id="has-filters" value="0"/>'
                in table_output)
        # A GET with only the 'all' param should not count as filtered
request = RequestFactory().get('/admin/tree/?all=1')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Not Filtered
        assert ('<input type="hidden" id="has-filters" value="0"/>'
                in table_output)
class TestAdminTreeList(TestNonEmptyTree):
template = Template('{% load admin_tree_list %}{% spaceless %}'
'{% result_tree cl request %}{% endspaceless %}')
def test_result_tree_list(self, model_without_proxy):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
output_template = '<li><a href="%i/" >Node %i</a>'
for object in model.objects.all():
expected_output = output_template % (object.pk, object.pk)
assert expected_output in table_output
def test_result_tree_list_with_action(self, model_without_proxy):
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request,
'action_form': True})
table_output = self.template.render(context)
output_template = ('<input type="checkbox" class="action-select" '
'value="%i" name="_selected_action" />'
'<a href="%i/" >Node %i</a>')
for object in model.objects.all():
expected_output = output_template % (object.pk, object.pk,
object.pk)
assert expected_output in table_output
def test_result_tree_list_with_get(self, model_without_proxy):
model = model_without_proxy
        # Test the 't' GET parameter with value 'id'
request = RequestFactory().get('/admin/tree/?t=id')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
output_template = "opener.dismissRelatedLookupPopup(window, '%i');"
for object in model.objects.all():
expected_output = output_template % object.pk
assert expected_output in table_output
class TestTreeAdmin(TestNonEmptyTree):
site = AdminSite()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request_factory = RequestFactory()
request = request_factory.get(url)
request.user = user
return request
def _mocked_request(self, data):
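        # The admin move_node view reports results through the messages
        # framework, so give the request minimal session/message storage.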
request_factory = RequestFactory()
request = request_factory.post('/', data=data)
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
return request
def _get_admin_obj(self, model_class):
form_class = movenodeform_factory(model_class)
admin_class = admin_factory(form_class)
return admin_class(model_class, self.site)
def test_changelist_view(self):
tmp_user = self._create_superuser('changelist_tmp')
request = self._mocked_authenticated_request('/', tmp_user)
admin_obj = self._get_admin_obj(models.AL_TestNode)
admin_obj.changelist_view(request)
assert admin_obj.change_list_template == 'admin/tree_list.html'
admin_obj = self._get_admin_obj(models.MP_TestNode)
admin_obj.changelist_view(request)
assert admin_obj.change_list_template != 'admin/tree_list.html'
def test_get_node(self, model):
admin_obj = self._get_admin_obj(model)
target = model.objects.get(desc='2')
assert admin_obj.get_node(target.pk) == target
def test_move_node_validate_keyerror(self, model):
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.move_node(request)
assert response.status_code == 400
request = self._mocked_request(data={'node_id': 1})
response = admin_obj.move_node(request)
assert response.status_code == 400
def test_move_node_validate_valueerror(self, model):
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': 1,
'sibling_id': 2,
'as_child': 'invalid'})
response = admin_obj.move_node(request)
assert response.status_code == 400
def test_move_validate_missing_nodeorderby(self, model):
node = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'sorted-child',
request, target=node)
assert response.status_code == 400
response = admin_obj.try_to_move_node(True, node, 'sorted-sibling',
request, target=node)
assert response.status_code == 400
def test_move_validate_invalid_pos(self, model):
node = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'invalid_pos',
request, target=node)
assert response.status_code == 400
def test_move_validate_to_descendant(self, model):
node = model.objects.get(desc='2')
target = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'first-sibling',
request, target)
assert response.status_code == 400
def test_move_left(self, model):
node = model.objects.get(desc='231')
target = model.objects.get(desc='2')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': node.pk,
'sibling_id': target.pk,
'as_child': 0})
response = admin_obj.move_node(request)
assert response.status_code == 200
expected = [('1', 1, 0),
('231', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_last_child(self, model):
node = model.objects.get(desc='231')
target = model.objects.get(desc='2')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': node.pk,
'sibling_id': target.pk,
'as_child': 1})
response = admin_obj.move_node(request)
assert response.status_code == 200
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
|
suziesparkle/wagtail
|
wagtail/vendor/django-treebeard/treebeard/tests/test_treebeard.py
|
Python
|
bsd-3-clause
| 90,877
|
from django.conf import settings
from django.template import (Template, Context, TemplateDoesNotExist,
TemplateSyntaxError)
from django.utils.encoding import smart_unicode
from django.utils.hashcompat import md5_constructor
from django.views.debug import ExceptionReporter
class ImprovedExceptionReporter(ExceptionReporter):
def __init__(self, request, exc_type, exc_value, frames):
ExceptionReporter.__init__(self, request, exc_type, exc_value, None)
self.frames = frames
def get_traceback_frames(self):
return self.frames
def get_traceback_html(self):
"Return HTML code for traceback."
if issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and
isinstance(self.exc_value, TemplateSyntaxError)):
self.get_template_exception_info()
frames = self.get_traceback_frames()
unicode_hint = ''
if issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
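                # Show a few characters of context on either side of the
                # range that failed to encode/decode.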
unicode_str = self.exc_value.args[1]
unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context({
'exception_type': self.exc_type.__name__,
'exception_value': smart_unicode(self.exc_value, errors='replace'),
'unicode_hint': unicode_hint,
'frames': frames,
'lastframe': frames[-1],
'request': self.request,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
})
return t.render(c)
def construct_checksum(error):
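    # Build a stable digest from the log level, exception class name, and
    # traceback (or message) so equivalent errors can be grouped together.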
checksum = md5_constructor(str(error.level))
checksum.update(error.class_name or '')
message = error.traceback or error.message
if isinstance(message, unicode):
message = message.encode('utf-8', 'replace')
checksum.update(message)
return checksum.hexdigest()
TECHNICAL_500_TEMPLATE = """
<div id="summary">
<h1>{{ exception_type }} at {{ request.path_info|escape }}</h1>
<pre class="exception_value">{{ exception_value|escape }}</pre>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|escape }}</pre></td>
</tr>
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|escape }}</strong></p>
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Template error</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
<div id="traceback">
<h2>Traceback <span class="commands"><a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span></h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ line|escape }}</li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ frame.context_line|escape }} <span>...</span></li></ol>
{% if frame.post_context %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ line|escape }}</li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|escape }}</td>
<td class="code"><div>{{ var.1|pprint|escape }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<div id="pastebinTraceback" class="pastebin">
<textarea id="traceback_area" cols="140" rows="25">
Environment:
{% if request.META %}Request Method: {{ request.META.REQUEST_METHOD }}{% endif %}
Request URL: {{ request.build_absolute_uri|escape }}
Python Version: {{ sys_version_info }}
{% if template_does_not_exist %}Template Loader Error: (Unavailable in db-log)
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }} at {{ request.path_info|escape }}
Exception Value: {{ exception_value|escape }}
</textarea>
</div>
</div>
{% if request %}
<div id="requestinfo">
<h2>Request information</h2>
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if request.POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
{% if request.META %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No META data</p>
{% endif %}
</div>
{% endif %}
"""
|
dcramer/django-db-log
|
djangodblog/helpers.py
|
Python
|
bsd-3-clause
| 9,425
|
from collections import OrderedDict
from math import sqrt
import numpy as np
import pyqtgraph as pg
from qtpy import QtWidgets
import skrf
from . import smith_chart, util
class NetworkPlotWidget(QtWidgets.QWidget):
S_VALS = OrderedDict((
("decibels", "db"),
("magnitude", "mag"),
("phase (deg)", "deg"),
("phase unwrapped (deg)", "deg_unwrap"),
("phase (rad)", "rad"),
("phase unwrapped (rad)", "rad_unwrap"),
("real", "re"),
("imaginary", "im"),
("group delay", "group_delay"),
("vswr", "vswr")
))
S_UNITS = list(S_VALS.keys())
def __init__(self, parent=None, **kwargs):
super(NetworkPlotWidget, self).__init__(parent)
self.checkBox_useCorrected = QtWidgets.QCheckBox()
self.checkBox_useCorrected.setText("Plot Corrected")
self.checkBox_useCorrected.setEnabled(False)
self.comboBox_primarySelector = QtWidgets.QComboBox(self)
self.comboBox_primarySelector.addItems(("S", "Z", "Y", "A", "Smith Chart"))
self.comboBox_unitsSelector = QtWidgets.QComboBox(self)
self.comboBox_unitsSelector.addItems(self.S_UNITS)
self.comboBox_traceSelector = QtWidgets.QComboBox(self)
self.set_trace_items()
self.comboBox_traceSelector.setCurrentIndex(0)
self.plot_layout = pg.GraphicsLayoutWidget(self)
self.plot_layout.sceneObj.sigMouseClicked.connect(self.graph_clicked)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.addWidget(self.checkBox_useCorrected)
self.horizontalLayout.addWidget(self.comboBox_primarySelector)
self.horizontalLayout.addWidget(self.comboBox_unitsSelector)
self.horizontalLayout.addWidget(self.comboBox_traceSelector)
self.data_info_label = QtWidgets.QLabel("Click a data point to see info")
self.verticalLayout = QtWidgets.QVBoxLayout(self)
self.verticalLayout.setContentsMargins(3, 3, 3, 3) # normally this will be embedded in another application
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout.addWidget(self.plot_layout)
self.verticalLayout.addWidget(self.data_info_label)
self.checkBox_useCorrected.stateChanged.connect(self.set_use_corrected)
self.comboBox_primarySelector.currentIndexChanged.connect(self.update_plot)
self.comboBox_unitsSelector.currentIndexChanged.connect(self.update_plot)
self.comboBox_traceSelector.currentIndexChanged.connect(self.update_plot)
self.plot = self.plot_layout.addPlot() # type: pg.PlotItem
self._ntwk = None
self._ntwk_corrected = None
self._corrected_data_enabled = True
self._use_corrected = False
self.corrected_data_enabled = kwargs.get('corrected_data_enabled', True)
self.plot.addLegend()
self.plot.showGrid(True, True)
self.plot.setLabel("bottom", "frequency", units="Hz")
self.last_plot = "rectangular"
def get_use_corrected(self):
return self._use_corrected
def set_use_corrected(self, val):
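        # Qt check states: 0 = unchecked, 1 = partially checked, 2 = checked.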
if val in (1, 2):
self._use_corrected = True
else:
self._use_corrected = False
self.update_plot()
use_corrected = property(get_use_corrected, set_use_corrected)
@property
def ntwk(self): return self._ntwk
@ntwk.setter
def ntwk(self, ntwk):
if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
self.set_trace_items(ntwk)
self._ntwk = ntwk
self.update_plot()
else:
raise TypeError("must set to skrf.Network, list of Networks, or None")
@property
def ntwk_corrected(self): return self._ntwk_corrected
@ntwk_corrected.setter
def ntwk_corrected(self, ntwk):
if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
self.set_trace_items(ntwk)
self._ntwk_corrected = ntwk
self.update_plot()
else:
raise TypeError("must set to skrf.Network, list of Networks, or None")
@property
def corrected_data_enabled(self):
return self._corrected_data_enabled
@corrected_data_enabled.setter
def corrected_data_enabled(self, enabled):
if enabled is True:
self._corrected_data_enabled = True
self.checkBox_useCorrected.setEnabled(True)
else:
self._corrected_data_enabled = False
self._use_corrected = False
self.checkBox_useCorrected.setEnabled(False)
def set_networks(self, ntwk, ntwk_corrected=None):
if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
self._ntwk = ntwk
self.set_trace_items(self._ntwk)
if ntwk is None:
self._ntwk_corrected = None
self.set_trace_items(self._ntwk)
return
else:
raise TypeError("must set to skrf.Network, list of Networks, or None")
if ntwk_corrected is None or isinstance(ntwk_corrected, skrf.Network) or type(ntwk_corrected) in (list, tuple):
self._ntwk_corrected = ntwk_corrected
else:
raise TypeError("must set to skrf.Network, list of Networks, or None")
self.update_plot()
def _calc_traces(self):
trace = self.comboBox_traceSelector.currentIndex()
n_ = m_ = 0
if trace > 0:
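            # Index 0 in the selector is "all"; the remaining entries list the
            # S-parameters with the first index varying fastest, so recover
            # (m, n) from the flattened index.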
mn = trace - 1
nports = int(sqrt(self.comboBox_traceSelector.count() - 1))
m_ = mn % nports
n_ = int((mn - mn % nports) / nports)
return m_, n_, trace
def reset_plot(self, smith=False):
self.plot.clear()
if not smith and self.last_plot == "smith":
self.plot.setAspectLocked(False)
self.plot.autoRange()
self.plot.enableAutoRange()
self.plot.setLabel("bottom", "frequency", units="Hz")
if smith and not self.last_plot == "smith":
self.last_plot = "smith"
self.ZGrid = smith_chart.gen_z_grid()
self.s_unity_circle = smith_chart.gen_s_unity_circle()
self.plot_layout.removeItem(self.plot)
self.plot = self.plot_layout.addPlot()
self.plot.setAspectLocked()
self.plot.setXRange(-1, 1)
self.plot.setYRange(-1, 1)
if smith:
self.plot.addItem(self.s_unity_circle)
self.plot.addItem(self.ZGrid)
if not smith:
self.plot.setLabel("left", "")
self.plot.setTitle(None)
legend = self.plot.legend
if legend is not None:
legend.scene().removeItem(legend)
self.plot.legend = None
self.plot.addLegend()
def clear_plot(self):
self._ntwk = None
self._ntwk_corrected = None
self._ntwk_list = None
self.reset_plot()
def set_trace_items(self, ntwk=None):
self.comboBox_traceSelector.blockSignals(True)
current_index = self.comboBox_traceSelector.currentIndex()
nports = 0
if isinstance(ntwk, skrf.Network):
nports = ntwk.nports
elif type(ntwk) in (list, tuple):
for n in ntwk:
if n.nports > nports:
nports = n.nports
self.comboBox_traceSelector.clear()
self.comboBox_traceSelector.addItem("all")
for n in range(nports):
for m in range(nports):
self.comboBox_traceSelector.addItem("S{:d}{:d}".format(m + 1, n + 1))
        if current_index < self.comboBox_traceSelector.count():
self.comboBox_traceSelector.setCurrentIndex(current_index)
else:
self.comboBox_traceSelector.setCurrentIndex(0)
self.comboBox_traceSelector.blockSignals(False)
def graph_clicked(self, ev):
"""
:type ev: pg.GraphicsScene.mouseEvents.MouseClickEvent
:return:
"""
xy = self.plot.vb.mapSceneToView(ev.scenePos())
if not ev.isAccepted():
if "smith" in self.comboBox_primarySelector.currentText().lower():
S11 = xy.x() + 1j * xy.y()
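                # Convert the reflection coefficient to normalized impedance:
                # z = (1 + Gamma) / (1 - Gamma), the standard Smith-chart mapping.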
Z = (1 + S11) / (1 - S11)
self.data_info_label.setText(
"Sre: {:g}, Sim: {:g} - R: {:g}, X: {:g}".format(xy.x(), xy.y(), Z.real, Z.imag))
else:
self.data_info_label.setText("x: {:g}, y: {:g}".format(xy.x(), xy.y()))
elif isinstance(ev.acceptedItem, pg.PlotCurveItem):
curve = ev.acceptedItem # type: pg.PlotCurveItem
spoint = xy.x() + 1j * xy.y()
sdata = curve.xData + 1j * curve.yData
index = np.argmin(np.abs(sdata - spoint))
frequency = curve.ntwk.frequency.f_scaled[index]
S11 = curve.xData[index] + 1j * curve.yData[index]
Z = (1 + S11) / (1 - S11)
self.data_info_label.setText(
"Freq: {:g} ({:s}), S(re): {:g}, S(im): {:g} - R: {:g}, X: {:g}".format(
frequency, curve.ntwk.frequency.unit, S11.real, S11.imag, Z.real, Z.imag))
def _plot_attr(self, ntwk, attr, colors, trace, n_, m_):
for n in range(ntwk.s.shape[2]):
for m in range(ntwk.s.shape[1]):
if trace > 0:
if not n == n_ or not m == m_:
continue
c = next(colors)
label = ntwk.name
param = "S{:d}{:d}".format(m + 1, n + 1)
if ntwk.s.shape[1] > 1:
label += " - " + param
if hasattr(ntwk, attr):
s = getattr(ntwk, attr)
if "db" in attr:
splot = pg.PlotDataItem(pen=pg.mkPen(c), name=label)
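                        # dB of a zero magnitude is -inf; only attach data when
                        # every point is finite, otherwise leave the curve empty.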
if not np.any(s[:, m, n] == -np.inf):
splot.setData(ntwk.f, s[:, m, n])
self.plot.addItem(splot)
else:
self.plot.plot(ntwk.f, s[:, m, n], pen=pg.mkPen(c), name=label)
else:
s = getattr(ntwk, param.lower(), None)
if s is None:
continue
if attr == 's_group_delay':
self.plot.plot(ntwk.f, abs(s.group_delay[:, 0, 0]), pen=pg.mkPen(c), name=label)
else:
attr = self.S_VALS[attr]
self.plot.plot(ntwk.f, getattr(s, attr)[:, 0, 0], pen=pg.mkPen(c), name=label)
def update_plot(self):
if self.corrected_data_enabled:
if self.ntwk_corrected:
self.checkBox_useCorrected.setEnabled(True)
else:
self.checkBox_useCorrected.setEnabled(False)
if "smith" in self.comboBox_primarySelector.currentText().lower():
self.plot_smith()
else:
self.plot_ntwk()
self.last_plot = "rectangular"
def plot_ntwk(self):
if self.use_corrected and self.ntwk_corrected is not None:
ntwk = self.ntwk_corrected
else:
ntwk = self.ntwk
if ntwk is None:
return
elif type(ntwk) in (list, tuple):
self.plot_ntwk_list()
return
self.reset_plot()
self.plot.showGrid(True, True)
self.plot.setLabel("bottom", "frequency", units="Hz")
colors = util.trace_color_cycle(ntwk.s.shape[1] ** 2)
m_, n_, trace = self._calc_traces()
primary = self.comboBox_primarySelector.currentText().lower()
s_units = self.comboBox_unitsSelector.currentText()
attr = primary + "_" + self.S_VALS[s_units]
self._plot_attr(ntwk, attr, colors, trace, n_, m_)
self.plot.setLabel("left", s_units)
self.plot.setTitle(ntwk.name)
def plot_ntwk_list(self):
if self.use_corrected and self.ntwk_corrected is not None:
ntwk_list = self.ntwk_corrected
else:
ntwk_list = self.ntwk
if ntwk_list is None:
return
self.reset_plot()
self.plot.showGrid(True, True)
self.plot.setLabel("bottom", "frequency", units="Hz")
colors = util.trace_color_cycle()
m_, n_, trace = self._calc_traces()
primary = self.comboBox_primarySelector.currentText().lower()
s_units = self.comboBox_unitsSelector.currentText()
attr = primary + "_" + self.S_VALS[s_units]
for ntwk in ntwk_list:
self._plot_attr(ntwk, attr, colors, trace, n_, m_)
self.plot.setLabel("left", s_units)
def _map_smith(self, ntwk, colors, trace, n_, m_):
for n in range(ntwk.s.shape[2]):
for m in range(ntwk.s.shape[1]):
if trace > 0:
if not n == n_ or not m == m_:
continue
c = next(colors)
label = ntwk.name
if ntwk.s.shape[1] > 1:
label += " - S{:d}{:d}".format(m + 1, n + 1)
s = ntwk.s[:, m, n]
curve = self.plot.plot(s.real, s.imag, pen=pg.mkPen(c), name=label)
curve.curve.setClickable(True)
curve.curve.ntwk = ntwk
def plot_smith(self):
if self.use_corrected and self.ntwk_corrected is not None:
ntwk = self.ntwk_corrected
else:
ntwk = self.ntwk
if ntwk is None:
self.reset_plot(smith=True)
return
elif type(ntwk) in (list, tuple):
self.plot_smith_list()
return
self.reset_plot(smith=True)
colors = util.trace_color_cycle(ntwk.s.shape[1] ** 2)
m_, n_, trace = self._calc_traces()
self._map_smith(ntwk, colors, trace, n_, m_)
self.plot.setTitle(ntwk.name)
def plot_smith_list(self):
self.reset_plot(smith=True)
ntwk_list = self.ntwk
if ntwk_list is None:
return
colors = util.trace_color_cycle()
m_, n_, trace = self._calc_traces()
for ntwk in ntwk_list:
self._map_smith(ntwk, colors, trace, n_, m_)
|
temmeand/scikit-rf
|
qtapps/skrf_qtwidgets/networkPlotWidget.py
|
Python
|
bsd-3-clause
| 14,303
|
"""
``nanshe`` package, an image processing toolkit.
===============================================================================
Overview
===============================================================================
The ``nanshe`` package is an image processing package that contains a variety
of different techniques, which are used primarily to assemble the ADINA
algorithm proposed by Diego, et al.
( doi:`10.1109/ISBI.2013.6556660`_ ) to extract active neurons from
an image sequence. This algorithm uses online dictionary learning (a form of
matrix factorization) at its heart as implemented by Mairal, et al.
( doi:`10.1145/1553374.1553463`_ ) to find a set of atoms (or basis
images) that are representative of an image sequence and can be used to
approximately reconstruct the sequence. However, it is designed in a modular
way so that a different matrix factorization could be swapped in and
appropriately parameterized. Other portions of the algorithm include a
preprocessing phase that has a variety of different techniques that can be
applied optionally. For example, removing registration artifacts from
a line-by-line registration algorithm, background subtraction, and a wavelet
transform to filter objects of a particular size.
===============================================================================
Installation
===============================================================================
-------------------------------------------------------------------------------
Dependencies
-------------------------------------------------------------------------------
Implementation of the algorithm has been done here in pure Python. However, a
few dependencies are required to get started. These include NumPy_, SciPy_,
h5py_, scikit-image_, SPAMS_, VIGRA_, and rank_filter_. The first 4 can be
found in standard distributions like Anaconda_. Installing VIGRA and
rank_filter can be done by using CMake_. SPAMS requires an existing BLAS/LAPACK
implementation. On Mac and Linux, this can be anything. Typically ATLAS_ is
used, but OpenBLAS_ or `Intel MKL`_ (if available) can be used, as well. This
will require modifying the setup.py script. On Windows, the setup.py links to
R_, which should be changed if another BLAS is available.
-------------------------------------------------------------------------------
Building
-------------------------------------------------------------------------------
Python
===============================================================================
As this package is pure Python, building follows through the standard method.
Currently, we require setuptools_ for installation; so, make sure it is
installed. Then simply issue the following command to build and install.
.. code-block:: sh
python setup.py install
Alternatively, one can build and then install in two steps if that is
preferable.
.. code-block:: sh
python setup.py build
python setup.py install
Conda
===============================================================================
Current packages can be found on our anaconda_ channel
( https://anaconda.org/nanshe/nanshe ). New ones are released every time a
passing tagged release is pushed to the ``master`` branch on GitHub. It is also
possible to build packages for conda_ for non-release commits as we do in our
continuous integration strategy.
To do this one requires the dependencies be installed or be available from an
anaconda channel. Additionally, one must be using conda's ``root``
environment and have conda-build installed. Once this is done one need
only run the following command with ``setup.py``.
.. code-block:: sh
python setup.py bdist_conda
Assuming this completes successfully, conda will provide the path to the built
package.
-------------------------------------------------------------------------------
Testing
-------------------------------------------------------------------------------
Running the test suite is fairly straightforward. Testing is done using nose_;
so make sure it is installed if you wish to run the tests. Some of the
tests require drmaa_ installed and properly configured. If that is not the
case, those tests will be skipped automatically. To run the test suite, one
must be in the source directory. Then simply run the following command. This
will run all the tests and doctests. Depending on your machine, this will take
a few minutes to complete.
.. code-block:: sh
nosetests
The full test suite includes 3D tests, which are very slow to run and so are
not run by default. As the code has been written to be dimensionally agnostic,
these tests don't cover anything that the 2D tests don't already cover. To run
the 3D tests, simply use ``setup.all.cfg``.
.. code-block:: sh
nosetests -c setup.all.cfg
It is also possible to run this as part of the setup.py process. In which case,
this can be done as shown below. If 3D tests are required for this portion, one
need only replace ``setup.cfg`` with ``setup.all.cfg``.
.. code-block:: sh
python setup.py nosetests
Also, the typical ``test`` subcommand can be used to run ``nosetests``, but no
other arguments are allowed.
.. code-block:: sh
python setup.py test
-------------------------------------------------------------------------------
Documentation
-------------------------------------------------------------------------------
Current documentation can be found on the GitHub page
( http://nanshe-org.github.io/nanshe/ ). A new copy is rebuilt any time a
passing commit is added to the ``master`` branch. Each documentation commit
is added to ``gh-pages`` branch with a reference to the commit in ``master``
that triggered the build as well as the tag (version) if provided.
It is also possible to build the documentation from source. This project uses
Sphinx_ for generating documentation. Please make sure you have it installed.
In particular, a version from 1.3 or later is required. Additionally, the
`Cloud Sphinx Theme`_ is required for generating the documentation and is used
in the HTML layout.
The ``rst`` files (outside of ``index.rst``) are not distributed with the source
code. This is because it is trivial to generate them and it is too easy for the
code to become out of sync with the documentation if they are distributed.
building ``rst`` files has been made a dependency of all other documentation
build steps so one does not have to think about this. The preferred method for
building documentation is to use the ``setup.py`` hook as shown below. This
will build the RST files and place them in ``docs/``. It will also build the
HTML files by default and put them in the directory ``build/sphinx/html/``.
Simply open the ``index.html`` file to take a look.
.. code-block:: sh
python setup.py build_sphinx
More build options can be determined by running the help command.
.. code-block:: sh
python setup.py build_sphinx --help
-------------------------------------------------------------------------------
Cleaning
-------------------------------------------------------------------------------
After any building operation a number of undesirable intermediate files are
created and left behind that one may wish to remove. To do this one merely
needs to run the clean command.
.. code-block:: sh
python setup.py clean
This has been modified to also remove RST files generated when building
documentation. However, it will leave any final build products like HTML files.
If one wishes to remove everything built (including final build products), the
clean all command will do this.
.. code-block:: sh
python setup.py clean --all
.. _`10.1109/ISBI.2013.6556660`: http://dx.doi.org/10.1109/ISBI.2013.6556660
.. _`10.1145/1553374.1553463`: http://dx.doi.org/10.1145/1553374.1553463
.. _NumPy: http://www.numpy.org/
.. _SciPy: http://www.scipy.org/
.. _h5py: http://www.h5py.org/
.. _scikit-image: http://scikit-image.org/
.. _SPAMS: http://spams-devel.gforge.inria.fr/
.. _VIGRA: http://ukoethe.github.io/vigra/
.. _rank_filter: http://github.com/nanshe-org/rank_filter/
.. _Anaconda: http://store.continuum.io/cshop/anaconda/
.. _CMake: http://www.cmake.org/
.. _ATLAS: http://math-atlas.sourceforge.net/
.. _OpenBLAS: http://www.openblas.net/
.. _`Intel MKL`: http://software.intel.com/en-us/intel-mkl
.. _R: http://www.r-project.org/
.. _setuptools: http://pythonhosted.org/setuptools/
.. _anaconda: https://anaconda.org/
.. _conda: http://conda.pydata.org/
.. _nose: http://nose.readthedocs.org/en/latest/
.. _drmaa: http://github.com/pygridtools/drmaa-python
.. _Sphinx: http://sphinx-doc.org/
.. _`Cloud Sphinx Theme`: https://pythonhosted.org/cloud_sptheme/
"""
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Dec 22, 2014 08:46:12 EST$"
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = [
"box", "converter", "io", "imp", "learner", "registerer", "syn", "util"
]
from nanshe import box
from nanshe import converter
from nanshe import io
from nanshe import imp
from nanshe import learner
from nanshe import registerer
from nanshe import syn
from nanshe import util
|
DudLab/nanshe
|
nanshe/__init__.py
|
Python
|
bsd-3-clause
| 9,198
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
from telemetry.core import util
from telemetry.core import memory_cache_http_server
from telemetry.testing import tab_test_case
class RequestHandler(
memory_cache_http_server.MemoryCacheDynamicHTTPRequestHandler):
def ResponseFromHandler(self, path):
content = "Hello from handler"
return self.MakeResponse(content, "text/html", False)
class MemoryCacheHTTPServerTest(tab_test_case.TabTestCase):
def setUp(self):
super(MemoryCacheHTTPServerTest, self).setUp()
self._test_filename = 'bear.webm'
test_file = os.path.join(util.GetUnittestDataDir(), 'bear.webm')
self._test_file_size = os.stat(test_file).st_size
def testBasicHostingAndRangeRequests(self):
self.Navigate('blank.html')
x = self._tab.EvaluateJavaScript('document.body.innerHTML')
x = x.strip()
# Test basic html hosting.
self.assertEqual(x, 'Hello world')
file_size = self._test_file_size
last_byte = file_size - 1
# Test byte range request: no end byte.
self.CheckContentHeaders('0-', '0-%d' % last_byte, file_size)
# Test byte range request: greater than zero start byte.
self.CheckContentHeaders('100-', '100-%d' % last_byte, file_size - 100)
# Test byte range request: explicit byte range.
self.CheckContentHeaders('2-500', '2-500', '499')
# Test byte range request: no start byte.
self.CheckContentHeaders('-228', '%d-%d' % (file_size - 228, last_byte),
'228')
# Test byte range request: end byte less than start byte.
self.CheckContentHeaders('100-5', '100-%d' % last_byte, file_size - 100)
def CheckContentHeaders(self, content_range_request, content_range_response,
content_length_response):
self._tab.ExecuteJavaScript(
"""
var loaded = false;
var xmlhttp = new XMLHttpRequest();
xmlhttp.onload = function(e) {
loaded = true;
};
// Avoid cached content by appending unique URL param.
xmlhttp.open('GET', {{ url }} + "?t=" + Date.now(), true);
xmlhttp.setRequestHeader('Range', {{ range }});
xmlhttp.send();
""",
url=self.UrlOfUnittestFile(self._test_filename),
range='bytes=%s' % content_range_request)
self._tab.WaitForJavaScriptCondition('loaded', timeout=5)
content_range = self._tab.EvaluateJavaScript(
'xmlhttp.getResponseHeader("Content-Range");')
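    # A Content-Range response header has the form "bytes <start>-<end>/<total>".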
content_range_response = 'bytes %s/%d' % (content_range_response,
self._test_file_size)
self.assertEqual(content_range, content_range_response)
content_length = self._tab.EvaluateJavaScript(
'xmlhttp.getResponseHeader("Content-Length");')
self.assertEqual(content_length, str(content_length_response))
def testAbsoluteAndRelativePathsYieldSameURL(self):
test_file_rel_path = 'green_rect.html'
test_file_abs_path = os.path.abspath(
os.path.join(util.GetUnittestDataDir(), test_file_rel_path))
# It's necessary to bypass self.UrlOfUnittestFile since that
# concatenates the unittest directory on to the incoming path,
# causing the same code path to be taken in both cases.
self._platform.SetHTTPServerDirectories(util.GetUnittestDataDir())
self.assertEqual(self._platform.http_server.UrlOf(test_file_rel_path),
self._platform.http_server.UrlOf(test_file_abs_path))
def testDynamicHTTPServer(self):
self.Navigate('test.html', handler_class=RequestHandler)
x = self._tab.EvaluateJavaScript('document.body.innerHTML')
self.assertEqual(x, 'Hello from handler')
|
catapult-project/catapult
|
telemetry/telemetry/core/memory_cache_http_server_unittest.py
|
Python
|
bsd-3-clause
| 3,837
|
"""
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
        # `from __future__ import division` makes `/` true division, so use
        # floor division to get an integer size.
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
       Performs Ward's linkage on the condensed distance matrix ``y``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
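    Examples
    --------
    An illustrative sketch (made-up data); ``ward`` accepts either a
    condensed distance matrix or an observation matrix:

    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0., 1.], [4., 4.], [4., 5.]])
    >>> Z = ward(X)  # equivalent to linkage(X, method='ward')
    >>> Z.shape
    (3, 4)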
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
       cluster :math:`v`. This is also known as the Farthest Point
       Algorithm or Voorhees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
    * method='median' assigns :math:`d(s,t)` like the ``centroid``
      method. When two clusters :math:`s` and :math:`t` are combined
      into a new cluster :math:`u`, the average of centroids s and t
      gives the new centroid :math:`u`. This is also known as the
      WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
    implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
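    Examples
    --------
    A minimal illustrative sketch (made-up data): two well-separated
    pairs of points merge first, and the last row joins the pairs.

    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
    >>> Z = linkage(X, method='single', metric='euclidean')
    >>> Z.shape  # (n - 1) merges, 4 columns each
    (3, 4)
    >>> Z[-1, 3]  # the final cluster contains all n observations
    4.0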
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
        # The C code cannot handle strided (non-contiguous) arrays, so
        # copy the array if it is a view on another.
        [y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
    else:
        raise ValueError("The input 'y' must be a one-dimensional condensed "
                         "distance matrix or a two-dimensional observation "
                         "matrix.")
    return Z
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    r : ClusterNode
        The root node of the tree. When ``rd`` is True, a tuple
        ``(r, d)`` is returned instead (see the ``rd`` parameter).
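    Examples
    --------
    An illustrative sketch (made-up data):

    >>> import numpy as np
    >>> Z = linkage(np.random.rand(10, 2), method='single')
    >>> root = to_tree(Z)
    >>> root.get_count()
    10
    >>> leaf_ids = root.pre_order()  # leaf ids from left to right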
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # The number of original objects is equal to the number of rows plus 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
    c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
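    Examples
    --------
    An illustrative sketch (made-up data):

    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
    >>> Y = distance.pdist(X)
    >>> Z = linkage(Y, method='single')
    >>> c, d = cophenet(Z, Y)  # coefficient and condensed cophenetic distances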
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
    # The C code cannot handle strided (non-contiguous) arrays;
    # _convert_to_double copies the array if needed.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
        .. math:: \\frac{\\mathtt{Z[i,2]} - \\mathtt{R[i,0]}}{\\mathtt{R[i,1]}}
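    Examples
    --------
    An illustrative sketch (made-up data):

    >>> import numpy as np
    >>> Z = linkage(np.random.rand(10, 2), method='single')
    >>> R = inconsistent(Z, d=2)
    >>> R.shape  # one row of statistics per non-singleton cluster
    (9, 4)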
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
    # The C code cannot handle strided (non-contiguous) arrays, so
    # copy the array if it is a view on another.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
    * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
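    Examples
    --------
    An illustrative sketch: a tiny MATLAB(TM)-style linkage over three
    observations (1-based indices, no count column). Exact array
    formatting depends on the numpy version, hence the skip:

    >>> import numpy as np
    >>> ZM = np.array([[1., 2., 0.5],
    ...                [3., 4., 1.2]])
    >>> from_mlab_linkage(ZM)  # doctest: +SKIP
    array([[ 0. ,  1. ,  0.5,  2. ],
           [ 2. ,  3. ,  1.2,  3. ]])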
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
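    Examples
    --------
    Single, complete, average, weighted and Ward linkage always yield
    monotonic linkages; centroid and median linkage may not. An
    illustrative sketch (made-up data):

    >>> import numpy as np
    >>> Z = linkage(np.random.rand(10, 2), method='single')
    >>> is_monotonic(Z)
    True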
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # Each merge distance should be no less than the one before it.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
try:
if type(R) != np.ndarray:
if name:
raise TypeError(('Variable \'%s\' passed as inconsistency '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable passed as inconsistency matrix '
'is not a numpy array.')
if R.dtype != np.double:
if name:
raise TypeError(('Inconsistency matrix \'%s\' must contain '
'doubles (double).') % name)
else:
raise TypeError('Inconsistency matrix must contain doubles '
'(double).')
if len(R.shape) != 2:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have '
'shape=2 (i.e. be two-dimensional).') % name)
else:
raise ValueError('Inconsistency matrix must have shape=2 '
'(i.e. be two-dimensional).')
if R.shape[1] != 4:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have 4 '
'columns.') % name)
else:
raise ValueError('Inconsistency matrix must have 4 columns.')
if R.shape[0] < 1:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have at '
'least one row.') % name)
else:
raise ValueError('Inconsistency matrix must have at least '
'one row.')
if (R[:, 0] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height means.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height means.')
if (R[:, 1] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height standard '
'deviations.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height standard deviations.')
if (R[:, 2] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link counts.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link counts.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
    A linkage matrix is valid if it is a two dimensional
    ndarray (type double) with :math:`n-1`
    rows and 4 columns, where :math:`n` is the number of original
    observations. The first two columns must contain cluster indices
    between 0 and :math:`2n-2`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
try:
if type(Z) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a linkage is not a valid '
'array.') % name)
else:
raise TypeError('Variable is not a valid array.')
if Z.dtype != np.double:
if name:
raise TypeError('Linkage matrix \'%s\' must contain doubles.'
% name)
else:
raise TypeError('Linkage matrix must contain doubles.')
if len(Z.shape) != 2:
if name:
raise ValueError(('Linkage matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Linkage matrix must have shape=2 '
'(i.e. be two-dimensional).')
if Z.shape[1] != 4:
if name:
raise ValueError('Linkage matrix \'%s\' must have 4 columns.'
% name)
else:
raise ValueError('Linkage matrix must have 4 columns.')
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'indices.') % name)
else:
raise ValueError('Linkage contains negative indices.')
if (Z[:, 2] < 0).any():
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'distances.') % name)
else:
raise ValueError('Linkage contains negative distances.')
if (Z[:, 3] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative counts.'
% name)
else:
raise ValueError('Linkage contains negative counts.')
if _check_hierarchy_uses_cluster_before_formed(Z):
if name:
raise ValueError(('Linkage \'%s\' uses non-singleton cluster '
                                  'before it\'s formed.') % name)
else:
raise ValueError("Linkage uses non-singleton cluster before "
"it's formed.")
if _check_hierarchy_uses_cluster_more_than_once(Z):
if name:
raise ValueError(('Linkage \'%s\' uses the same cluster more '
'than once.') % name)
else:
raise ValueError('Linkage uses the same cluster more than '
'once.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
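    Examples
    --------
    An illustrative sketch (made-up data):

    >>> import numpy as np
    >>> Z = linkage(np.random.rand(6, 2), method='single')
    >>> num_obs_linkage(Z)
    6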
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
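    Examples
    --------
    An illustrative sketch (made-up data):

    >>> import numpy as np
    >>> Y = distance.pdist(np.random.rand(10, 3))
    >>> Z = linkage(Y, method='single')
    >>> correspond(Z, Y)
    True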
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
              fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
              fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistics upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
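    Examples
    --------
    An illustrative sketch (made-up data; the numbering of the two
    flat clusters may differ between runs, hence the skip):

    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
    >>> Z = linkage(X, method='single')
    >>> fcluster(Z, t=2, criterion='maxclust')  # doctest: +SKIP
    array([1, 1, 2, 2], dtype=int32)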
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
    # The C code cannot handle strided (non-contiguous) arrays, so
    # copy the array if it is a view on another.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
            # The C code cannot handle strided (non-contiguous) arrays,
            # so copy the array if it is a view on another.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
    Clusters the original observations in the n-by-m data
    matrix X (n observations in m dimensions): pairwise distances are
    computed between the observations (Euclidean by default),
    hierarchical clustering is performed (single linkage by default),
    and flat clusters are formed using the inconsistency method with
    `t` as the cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
        The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
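    Examples
    --------
    An illustrative sketch (made-up data; cluster numbering may
    differ, hence the skip):

    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0.1, 0.1], [5., 5.], [5.1, 5.1]])
    >>> fclusterdata(X, t=2, criterion='maxclust')  # doctest: +SKIP
    array([1, 1, 2, 2], dtype=int32)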
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
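    Examples
    --------
    An illustrative sketch (made-up data):

    >>> import numpy as np
    >>> Z = linkage(np.random.rand(5, 2), method='single')
    >>> sorted(leaves_list(Z)) == [0, 1, 2, 3, 4]  # a permutation of 0..n-1
    True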
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to leaf label text size.
#
#          p <= 20, size="12"
#     20 < p <= 30, size="10"
#     30 < p <= 50, size="8"
#     50 < p <= 85, size="6"
#     85 < p,       size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
        raise ImportError("You must install the matplotlib library to plot "
                          "the dendrogram. Use no_plot=True to calculate the "
                          "dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
            # map() is lazy in Python 3, so use explicit loops to apply
            # the rotation and font size to the tick labels.
            lbls = ax.get_xticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            else:
                leaf_rot = float(_get_tick_rotation(len(ivl)))
                for lbl in lbls:
                    lbl.set_rotation(leaf_rot)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
            else:
                leaf_fs = float(_get_tick_text_size(len(ivl)))
                for lbl in lbls:
                    lbl.set_size(leaf_fs)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
            lbls = ax.get_xticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            else:
                leaf_rot = float(_get_tick_rotation(p))
                for lbl in lbls:
                    lbl.set_rotation(leaf_rot)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
            else:
                leaf_fs = float(_get_tick_text_size(p))
                for lbl in lbls:
                    lbl.set_size(leaf_fs)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
            lbls = ax.get_yticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
            lbls = ax.get_yticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
    # Use collections so that there is a separate legend item for each
    # tree grouping, rather than one item per line segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
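    Examples
    --------
    An illustrative sketch; subsequent ``dendrogram`` calls cycle
    through the new palette for links below the color threshold:

    >>> set_link_color_palette(['c', 'm', 'y', 'k'])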
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
            The last ``p`` non-singleton clusters formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plot descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plot descendent links going
upwards.
``'left'``
Plots the root at the left, and plot descendent links going right.
``'right'``
Plots the root at the right, and plot descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
        ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observation are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called with
        each leaf's cluster index :math:`k < 2n-1` and is expected to
        return a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do:
        >>> # First define the leaf label function.
        >>> def llf(id):
        ...     if id < n:
        ...         return str(id)
        ...     else:
        ...         return '[%d %d %1.2f]' % (id, count, R[n-id, 3])
        >>>
        >>> # The text for the leaf nodes is going to be big so force
        >>> # a rotation of 90 degrees.
        >>> dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
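    Examples
    --------
    An illustrative sketch that computes the rendering data without
    plotting (made-up data):

    >>> import numpy as np
    >>> Z = linkage(np.random.rand(15, 2), method='single')
    >>> R = dendrogram(Z, no_plot=True)
    >>> sorted(R.keys())
    ['color_list', 'dcoord', 'icoord', 'ivl', 'leaves']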
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of
        # original observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
    if aa >= n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
    if ab >= n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
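# A minimal usage sketch for is_isomorphic (illustrative only; the vectors
# below are made up): T1 and T2 induce the same partition under a relabeling
# of cluster ids, while T3 groups the observations differently.
def _example_is_isomorphic():
    T1 = [1, 1, 2, 2, 3]
    T2 = [9, 9, 4, 4, 7]   # same grouping, different labels
    T3 = [1, 2, 2, 2, 3]   # a different grouping
    assert is_isomorphic(T1, T2)
    assert not is_isomorphic(T1, T3)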
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
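# A short sketch of maxdists on a made-up condensed distance matrix, using
# the linkage() defined earlier in this module. For a monotonic linkage
# (e.g. the default single linkage), MD[i] equals Z[i, 2], so the last
# entry is the root's merge height.
def _example_maxdists():
    y = [1.0, 2.0, 4.0, 2.5, 3.5, 1.5]  # condensed distances for 4 points
    Z = linkage(y)
    MD = maxdists(Z)
    assert MD[-1] == Z[-1, 2]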
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
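# A small sketch relating maxRstat to maxinconsts, using linkage() and
# inconsistent() from this module on made-up data: column 3 of the
# inconsistency matrix holds the inconsistency coefficient, so
# maxRstat(Z, R, 3) agrees with maxinconsts(Z, R).
def _example_maxRstat():
    y = [1.0, 2.0, 4.0, 2.5, 3.5, 1.5]
    Z = linkage(y)
    R = inconsistent(Z)
    assert (maxRstat(Z, R, 3) == maxinconsts(Z, R)).all()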
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
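# A brief sketch of leaders on made-up data, using linkage() and fcluster()
# from this module: cut the tree into two flat clusters, then ask which
# linkage node heads each one. L[j] leads the flat cluster with id M[j],
# so there is exactly one leader per flat cluster.
def _example_leaders():
    y = [1.0, 2.0, 4.0, 2.5, 3.5, 1.5]
    Z = linkage(y)
    T = fcluster(Z, 2, criterion='maxclust')
    L, M = leaders(Z, T)
    assert len(L) == len(np.unique(T))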
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
|
witcxc/scipy
|
scipy/cluster/hierarchy.py
|
Python
|
bsd-3-clause
| 94,372
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
from types import MappingProxyType
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy.cosmology import parameters, realizations
def test_realizations_in_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`."""
d = dir(parameters)
assert set(d) == set(parameters.__all__)
for n in parameters.available:
assert n in d
@pytest.mark.parametrize("name", parameters.available)
def test_getting_parameters(name):
"""
Test getting 'parameters' and that it is derived from the corresponding
realization.
"""
params = getattr(parameters, name)
assert isinstance(params, MappingProxyType)
assert params["name"] == name
# Check parameters have the right keys and values
cosmo = getattr(realizations, name)
assert params["name"] == cosmo.name
assert params["cosmology"] == cosmo.__class__.__qualname__
# All the cosmology parameters are equal
for n in cosmo.__parameters__:
assert np.array_equal(params[n], getattr(cosmo, n))
# All the metadata is included. Parameter values take precedence, so only
# checking the keys.
assert set(cosmo.meta.keys()).issubset(params.keys())
# Lastly, check the generation process.
m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True)
assert params == m
|
pllim/astropy
|
astropy/cosmology/tests/test_parameters.py
|
Python
|
bsd-3-clause
| 1,438
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ResourceFinder is a helper class for finding resources given their name."""
import codecs
import os
from py_vulcanize import module
from py_vulcanize import style_sheet as style_sheet_module
from py_vulcanize import resource as resource_module
from py_vulcanize import html_module
from py_vulcanize import strip_js_comments
class ResourceLoader(object):
"""Manges loading modules and their dependencies from files.
Modules handle parsing and the construction of their individual dependency
pointers. The loader deals with bookkeeping of what has been loaded, and
mapping names to file resources.
"""
def __init__(self, project):
self.project = project
self.stripped_js_by_filename = {}
self.loaded_modules = {}
self.loaded_raw_scripts = {}
self.loaded_style_sheets = {}
self.loaded_images = {}
@property
def source_paths(self):
"""A list of base directories to search for modules under."""
return self.project.source_paths
def FindResource(self, some_path, binary=False):
"""Finds a Resource for the given path.
Args:
some_path: A relative or absolute path to a file.
Returns:
A Resource or None.
"""
if os.path.isabs(some_path):
return self.FindResourceGivenAbsolutePath(some_path, binary)
else:
return self.FindResourceGivenRelativePath(some_path, binary)
def FindResourceGivenAbsolutePath(self, absolute_path, binary=False):
"""Returns a Resource for the given absolute path."""
candidate_paths = []
for source_path in self.source_paths:
if absolute_path.startswith(source_path):
candidate_paths.append(source_path)
if len(candidate_paths) == 0:
return None
# Sort by length. Longest match wins.
    candidate_paths.sort(key=len)
longest_candidate = candidate_paths[-1]
return resource_module.Resource(longest_candidate, absolute_path, binary)
def FindResourceGivenRelativePath(self, relative_path, binary=False):
"""Returns a Resource for the given relative path."""
absolute_path = None
for script_path in self.source_paths:
absolute_path = os.path.join(script_path, relative_path)
if os.path.exists(absolute_path):
return resource_module.Resource(script_path, absolute_path, binary)
return None
def _FindResourceGivenNameAndSuffix(
self, requested_name, extension, return_resource=False):
"""Searches for a file and reads its contents.
Args:
requested_name: The name of the resource that was requested.
extension: The extension for this requested resource.
Returns:
A (path, contents) pair.
"""
pathy_name = requested_name.replace('.', os.sep)
filename = pathy_name + extension
resource = self.FindResourceGivenRelativePath(filename)
if return_resource:
return resource
if not resource:
return None, None
return _read_file(resource.absolute_path)
def FindModuleResource(self, requested_module_name):
"""Finds a module javascript file and returns a Resource, or none."""
js_resource = self._FindResourceGivenNameAndSuffix(
requested_module_name, '.js', return_resource=True)
html_resource = self._FindResourceGivenNameAndSuffix(
requested_module_name, '.html', return_resource=True)
if js_resource and html_resource:
if html_module.IsHTMLResourceTheModuleGivenConflictingResourceNames(
js_resource, html_resource):
return html_resource
return js_resource
elif js_resource:
return js_resource
return html_resource
def LoadModule(self, module_name=None, module_filename=None,
excluded_scripts=None):
assert bool(module_name) ^ bool(module_filename), (
'Must provide either module_name or module_filename.')
if module_filename:
resource = self.FindResource(module_filename)
if not resource:
raise Exception('Could not find %s in %s' % (
module_filename, repr(self.source_paths)))
module_name = resource.name
else:
resource = None # Will be set if we end up needing to load.
if module_name in self.loaded_modules:
assert self.loaded_modules[module_name].contents
return self.loaded_modules[module_name]
if not resource: # happens when module_name was given
resource = self.FindModuleResource(module_name)
if not resource:
raise module.DepsException('No resource for module "%s"' % module_name)
m = html_module.HTMLModule(self, module_name, resource)
self.loaded_modules[module_name] = m
# Fake it, this is probably either polymer.min.js or platform.js which are
# actually .js files....
if resource.absolute_path.endswith('.js'):
return m
m.Parse(excluded_scripts)
m.Load(excluded_scripts)
return m
def LoadRawScript(self, relative_raw_script_path):
resource = None
for source_path in self.source_paths:
possible_absolute_path = os.path.join(
source_path, os.path.normpath(relative_raw_script_path))
if os.path.exists(possible_absolute_path):
resource = resource_module.Resource(
source_path, possible_absolute_path)
break
if not resource:
raise module.DepsException(
'Could not find a file for raw script %s in %s' %
(relative_raw_script_path, self.source_paths))
assert relative_raw_script_path == resource.unix_style_relative_path, (
'Expected %s == %s' % (relative_raw_script_path,
resource.unix_style_relative_path))
if resource.absolute_path in self.loaded_raw_scripts:
return self.loaded_raw_scripts[resource.absolute_path]
raw_script = module.RawScript(resource)
self.loaded_raw_scripts[resource.absolute_path] = raw_script
return raw_script
def LoadStyleSheet(self, name):
if name in self.loaded_style_sheets:
return self.loaded_style_sheets[name]
resource = self._FindResourceGivenNameAndSuffix(
name, '.css', return_resource=True)
if not resource:
raise module.DepsException(
'Could not find a file for stylesheet %s' % name)
style_sheet = style_sheet_module.StyleSheet(self, name, resource)
style_sheet.load()
self.loaded_style_sheets[name] = style_sheet
return style_sheet
def LoadImage(self, abs_path):
if abs_path in self.loaded_images:
return self.loaded_images[abs_path]
if not os.path.exists(abs_path):
raise module.DepsException("url('%s') did not exist" % abs_path)
res = self.FindResourceGivenAbsolutePath(abs_path, binary=True)
if res is None:
raise module.DepsException("url('%s') was not in search path" % abs_path)
image = style_sheet_module.Image(res)
self.loaded_images[abs_path] = image
return image
def GetStrippedJSForFilename(self, filename, early_out_if_no_py_vulcanize):
if filename in self.stripped_js_by_filename:
return self.stripped_js_by_filename[filename]
with open(filename, 'r') as f:
contents = f.read(4096)
if early_out_if_no_py_vulcanize and ('py_vulcanize' not in contents):
return None
s = strip_js_comments.StripJSComments(contents)
self.stripped_js_by_filename[filename] = s
return s
def _read_file(absolute_path):
"""Reads a file and returns a (path, contents) pair.
Args:
absolute_path: Absolute path to a file.
Raises:
Exception: The given file doesn't exist.
IOError: There was a problem opening or reading the file.
"""
if not os.path.exists(absolute_path):
raise Exception('%s not found.' % absolute_path)
f = codecs.open(absolute_path, mode='r', encoding='utf-8')
contents = f.read()
f.close()
return absolute_path, contents
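# A standalone sketch (not used by the loader itself) of the longest-prefix
# rule FindResourceGivenAbsolutePath applies: among the source paths that
# prefix the absolute path, the longest one wins. The paths are made up.
def _example_longest_prefix():
  source_paths = ['/src', '/src/third_party']
  absolute_path = '/src/third_party/lib/module.js'
  candidates = [p for p in source_paths if absolute_path.startswith(p)]
  candidates.sort(key=len)
  assert candidates[-1] == '/src/third_party'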
|
endlessm/chromium-browser
|
third_party/catapult/common/py_vulcanize/py_vulcanize/resource_loader.py
|
Python
|
bsd-3-clause
| 7,961
|
# Copyright (C) 2009-2013 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
from ctypes import *
import ctypes.util
import os
import usb.backend
import usb.util
import sys
from usb.core import USBError
from usb._debug import methodtrace
import usb._interop as _interop
import logging
__author__ = 'Wander Lairson Costa'
__all__ = ['get_backend']
_logger = logging.getLogger('usb.backend.libusb0')
# usb.h
_PC_PATH_MAX = 4
if sys.platform.find('bsd') != -1 or sys.platform.find('mac') != -1 or \
sys.platform.find('darwin') != -1:
_PATH_MAX = 1024
elif sys.platform == 'win32' or sys.platform == 'cygwin':
_PATH_MAX = 511
else:
_PATH_MAX = os.pathconf('.', _PC_PATH_MAX)
# libusb-win32 makes all structures packed, while
# default libusb only does for some structures
# _PackPolicy defines the structure packing according
# to the platform.
class _PackPolicy(object):
pass
if sys.platform == 'win32' or sys.platform == 'cygwin':
_PackPolicy._pack_ = 1
# Data structures
class _usb_descriptor_header(Structure):
_pack_ = 1
_fields_ = [('blength', c_uint8),
('bDescriptorType', c_uint8)]
class _usb_string_descriptor(Structure):
_pack_ = 1
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('wData', c_uint16)]
class _usb_endpoint_descriptor(Structure, _PackPolicy):
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bEndpointAddress', c_uint8),
('bmAttributes', c_uint8),
('wMaxPacketSize', c_uint16),
('bInterval', c_uint8),
('bRefresh', c_uint8),
('bSynchAddress', c_uint8),
('extra', POINTER(c_uint8)),
('extralen', c_int)]
class _usb_interface_descriptor(Structure, _PackPolicy):
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bInterfaceNumber', c_uint8),
('bAlternateSetting', c_uint8),
('bNumEndpoints', c_uint8),
('bInterfaceClass', c_uint8),
('bInterfaceSubClass', c_uint8),
('bInterfaceProtocol', c_uint8),
('iInterface', c_uint8),
('endpoint', POINTER(_usb_endpoint_descriptor)),
('extra', POINTER(c_uint8)),
('extralen', c_int)]
class _usb_interface(Structure, _PackPolicy):
_fields_ = [('altsetting', POINTER(_usb_interface_descriptor)),
('num_altsetting', c_int)]
class _usb_config_descriptor(Structure, _PackPolicy):
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('wTotalLength', c_uint16),
('bNumInterfaces', c_uint8),
('bConfigurationValue', c_uint8),
('iConfiguration', c_uint8),
('bmAttributes', c_uint8),
('bMaxPower', c_uint8),
('interface', POINTER(_usb_interface)),
('extra', POINTER(c_uint8)),
('extralen', c_int)]
class _usb_device_descriptor(Structure, _PackPolicy):
_pack_ = 1
_fields_ = [('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bcdUSB', c_uint16),
('bDeviceClass', c_uint8),
('bDeviceSubClass', c_uint8),
('bDeviceProtocol', c_uint8),
('bMaxPacketSize0', c_uint8),
('idVendor', c_uint16),
('idProduct', c_uint16),
('bcdDevice', c_uint16),
('iManufacturer', c_uint8),
('iProduct', c_uint8),
('iSerialNumber', c_uint8),
('bNumConfigurations', c_uint8)]
class _usb_device(Structure, _PackPolicy):
pass
class _usb_bus(Structure, _PackPolicy):
pass
_usb_device._fields_ = [('next', POINTER(_usb_device)),
('prev', POINTER(_usb_device)),
('filename', c_int8 * (_PATH_MAX + 1)),
('bus', POINTER(_usb_bus)),
('descriptor', _usb_device_descriptor),
('config', POINTER(_usb_config_descriptor)),
('dev', c_void_p),
('devnum', c_uint8),
('num_children', c_ubyte),
('children', POINTER(POINTER(_usb_device)))]
_usb_bus._fields_ = [('next', POINTER(_usb_bus)),
('prev', POINTER(_usb_bus)),
('dirname', c_char * (_PATH_MAX + 1)),
('devices', POINTER(_usb_device)),
('location', c_uint32),
('root_dev', POINTER(_usb_device))]
_usb_dev_handle = c_void_p
class _DeviceDescriptor:
def __init__(self, dev):
desc = dev.descriptor
self.bLength = desc.bLength
self.bDescriptorType = desc.bDescriptorType
self.bcdUSB = desc.bcdUSB
self.bDeviceClass = desc.bDeviceClass
self.bDeviceSubClass = desc.bDeviceSubClass
self.bDeviceProtocol = desc.bDeviceProtocol
self.bMaxPacketSize0 = desc.bMaxPacketSize0
self.idVendor = desc.idVendor
self.idProduct = desc.idProduct
self.bcdDevice = desc.bcdDevice
self.iManufacturer = desc.iManufacturer
self.iProduct = desc.iProduct
self.iSerialNumber = desc.iSerialNumber
self.bNumConfigurations = desc.bNumConfigurations
self.address = dev.devnum
self.bus = dev.bus[0].location
self.port_number = None
_lib = None
def _load_library():
if sys.platform != 'cygwin':
candidates = ('usb-0.1', 'usb', 'libusb0')
for candidate in candidates:
# Workaround for CPython 3.3 issue#16283 / pyusb #14
if sys.platform == 'win32':
candidate = candidate + '.dll'
libname = ctypes.util.find_library(candidate)
if libname is not None: break
else:
# corner cases
# cygwin predefines library names with 'cyg' instead of 'lib'
try:
return CDLL('cygusb0.dll')
except:
_logger.error('Libusb 0 could not be loaded in cygwin', exc_info=True)
raise OSError('USB library could not be found')
return CDLL(libname)
def _setup_prototypes(lib):
# usb_dev_handle *usb_open(struct usb_device *dev);
lib.usb_open.argtypes = [POINTER(_usb_device)]
lib.usb_open.restype = _usb_dev_handle
# int usb_close(usb_dev_handle *dev);
lib.usb_close.argtypes = [_usb_dev_handle]
# int usb_get_string(usb_dev_handle *dev,
# int index,
# int langid,
# char *buf,
# size_t buflen);
lib.usb_get_string.argtypes = [
_usb_dev_handle,
c_int,
c_int,
c_char_p,
c_size_t
]
# int usb_get_string_simple(usb_dev_handle *dev,
# int index,
# char *buf,
# size_t buflen);
lib.usb_get_string_simple.argtypes = [
_usb_dev_handle,
c_int,
c_char_p,
c_size_t
]
# int usb_get_descriptor_by_endpoint(usb_dev_handle *udev,
# int ep,
# unsigned char type,
# unsigned char index,
# void *buf,
# int size);
lib.usb_get_descriptor_by_endpoint.argtypes = [
_usb_dev_handle,
c_int,
c_ubyte,
c_ubyte,
c_void_p,
c_int
]
# int usb_get_descriptor(usb_dev_handle *udev,
# unsigned char type,
# unsigned char index,
# void *buf,
# int size);
lib.usb_get_descriptor.argtypes = [
_usb_dev_handle,
c_ubyte,
c_ubyte,
c_void_p,
c_int
]
# int usb_bulk_write(usb_dev_handle *dev,
# int ep,
# const char *bytes,
# int size,
# int timeout);
lib.usb_bulk_write.argtypes = [
_usb_dev_handle,
c_int,
c_char_p,
c_int,
c_int
]
# int usb_bulk_read(usb_dev_handle *dev,
# int ep,
# char *bytes,
# int size,
# int timeout);
lib.usb_bulk_read.argtypes = [
_usb_dev_handle,
c_int,
c_char_p,
c_int,
c_int
]
# int usb_interrupt_write(usb_dev_handle *dev,
# int ep,
# const char *bytes,
# int size,
# int timeout);
lib.usb_interrupt_write.argtypes = [
_usb_dev_handle,
c_int,
c_char_p,
c_int,
c_int
]
# int usb_interrupt_read(usb_dev_handle *dev,
# int ep,
# char *bytes,
# int size,
# int timeout);
lib.usb_interrupt_read.argtypes = [
_usb_dev_handle,
c_int,
c_char_p,
c_int,
c_int
]
# int usb_control_msg(usb_dev_handle *dev,
# int requesttype,
# int request,
# int value,
# int index,
# char *bytes,
# int size,
# int timeout);
lib.usb_control_msg.argtypes = [
_usb_dev_handle,
c_int,
c_int,
c_int,
c_int,
c_char_p,
c_int,
c_int
]
# int usb_set_configuration(usb_dev_handle *dev, int configuration);
lib.usb_set_configuration.argtypes = [_usb_dev_handle, c_int]
# int usb_claim_interface(usb_dev_handle *dev, int interface);
lib.usb_claim_interface.argtypes = [_usb_dev_handle, c_int]
# int usb_release_interface(usb_dev_handle *dev, int interface);
lib.usb_release_interface.argtypes = [_usb_dev_handle, c_int]
# int usb_set_altinterface(usb_dev_handle *dev, int alternate);
lib.usb_set_altinterface.argtypes = [_usb_dev_handle, c_int]
# int usb_resetep(usb_dev_handle *dev, unsigned int ep);
lib.usb_resetep.argtypes = [_usb_dev_handle, c_int]
# int usb_clear_halt(usb_dev_handle *dev, unsigned int ep);
lib.usb_clear_halt.argtypes = [_usb_dev_handle, c_int]
# int usb_reset(usb_dev_handle *dev);
lib.usb_reset.argtypes = [_usb_dev_handle]
# char *usb_strerror(void);
lib.usb_strerror.argtypes = []
lib.usb_strerror.restype = c_char_p
# void usb_set_debug(int level);
lib.usb_set_debug.argtypes = [c_int]
# struct usb_device *usb_device(usb_dev_handle *dev);
lib.usb_device.argtypes = [_usb_dev_handle]
lib.usb_device.restype = POINTER(_usb_device)
# struct usb_bus *usb_get_busses(void);
lib.usb_get_busses.restype = POINTER(_usb_bus)
def _check(retval):
if retval is None:
errmsg = _lib.usb_strerror()
else:
ret = int(retval)
if ret < 0:
errmsg = _lib.usb_strerror()
# No error means that we need to get the error
# message from the return code
            # Thanks to Nicholas Wheeler for pointing out the problem...
# Also see issue #2860940
if errmsg.lower() == 'no error':
errmsg = os.strerror(-ret)
else:
return ret
raise USBError(errmsg, ret)
# implementation of libusb 0.1.x backend
class _LibUSB(usb.backend.IBackend):
@methodtrace(_logger)
def enumerate_devices(self):
_check(_lib.usb_find_busses())
_check(_lib.usb_find_devices())
bus = _lib.usb_get_busses()
while bool(bus):
dev = bus[0].devices
while bool(dev):
yield dev[0]
dev = dev[0].next
bus = bus[0].next
@methodtrace(_logger)
def get_device_descriptor(self, dev):
return _DeviceDescriptor(dev)
@methodtrace(_logger)
def get_configuration_descriptor(self, dev, config):
if config >= dev.descriptor.bNumConfigurations:
raise IndexError('Invalid configuration index ' + str(config))
return dev.config[config]
@methodtrace(_logger)
def get_interface_descriptor(self, dev, intf, alt, config):
cfgdesc = self.get_configuration_descriptor(dev, config)
if intf >= cfgdesc.bNumInterfaces:
            raise IndexError('Invalid interface index ' + str(intf))
interface = cfgdesc.interface[intf]
if alt >= interface.num_altsetting:
raise IndexError('Invalid alternate setting index ' + str(alt))
return interface.altsetting[alt]
@methodtrace(_logger)
def get_endpoint_descriptor(self, dev, ep, intf, alt, config):
interface = self.get_interface_descriptor(dev, intf, alt, config)
if ep >= interface.bNumEndpoints:
raise IndexError('Invalid endpoint index ' + str(ep))
return interface.endpoint[ep]
@methodtrace(_logger)
def open_device(self, dev):
return _check(_lib.usb_open(dev))
@methodtrace(_logger)
def close_device(self, dev_handle):
_check(_lib.usb_close(dev_handle))
@methodtrace(_logger)
def set_configuration(self, dev_handle, config_value):
_check(_lib.usb_set_configuration(dev_handle, config_value))
@methodtrace(_logger)
def set_interface_altsetting(self, dev_handle, intf, altsetting):
_check(_lib.usb_set_altinterface(dev_handle, altsetting))
@methodtrace(_logger)
def get_configuration(self, dev_handle):
bmRequestType = usb.util.build_request_type(
usb.util.CTRL_IN,
usb.util.CTRL_TYPE_STANDARD,
usb.util.CTRL_RECIPIENT_DEVICE
)
return self.ctrl_transfer(dev_handle,
bmRequestType,
0x08,
0,
0,
1,
100
)[0]
@methodtrace(_logger)
def claim_interface(self, dev_handle, intf):
_check(_lib.usb_claim_interface(dev_handle, intf))
@methodtrace(_logger)
def release_interface(self, dev_handle, intf):
_check(_lib.usb_release_interface(dev_handle, intf))
@methodtrace(_logger)
def bulk_write(self, dev_handle, ep, intf, data, timeout):
return self.__write(_lib.usb_bulk_write,
dev_handle,
ep,
intf,
data, timeout)
@methodtrace(_logger)
def bulk_read(self, dev_handle, ep, intf, size, timeout):
return self.__read(_lib.usb_bulk_read,
dev_handle,
ep,
intf,
size,
timeout)
@methodtrace(_logger)
def intr_write(self, dev_handle, ep, intf, data, timeout):
return self.__write(_lib.usb_interrupt_write,
dev_handle,
ep,
intf,
data,
timeout)
@methodtrace(_logger)
def intr_read(self, dev_handle, ep, intf, size, timeout):
return self.__read(_lib.usb_interrupt_read,
dev_handle,
ep,
intf,
size,
timeout)
@methodtrace(_logger)
def ctrl_transfer(self,
dev_handle,
bmRequestType,
bRequest,
wValue,
wIndex,
data_or_wLength,
timeout):
if usb.util.ctrl_direction(bmRequestType) == usb.util.CTRL_OUT:
address, length = data_or_wLength.buffer_info()
length *= data_or_wLength.itemsize
return _check(_lib.usb_control_msg(
dev_handle,
bmRequestType,
bRequest,
wValue,
wIndex,
cast(address, c_char_p),
length,
timeout
))
else:
data = _interop.as_array((0,) * data_or_wLength)
read = int(_check(_lib.usb_control_msg(
dev_handle,
bmRequestType,
bRequest,
wValue,
wIndex,
cast(data.buffer_info()[0],
c_char_p),
data_or_wLength,
timeout
)))
return data[:read]
@methodtrace(_logger)
def reset_device(self, dev_handle):
_check(_lib.usb_reset(dev_handle))
@methodtrace(_logger)
def detach_kernel_driver(self, dev_handle, intf):
_check(_lib.usb_detach_kernel_driver_np(dev_handle, intf))
def __write(self, fn, dev_handle, ep, intf, data, timeout):
address, length = data.buffer_info()
length *= data.itemsize
return int(_check(fn(
dev_handle,
ep,
cast(address, c_char_p),
length,
timeout
)))
def __read(self, fn, dev_handle, ep, intf, size, timeout):
data = _interop.as_array('\x00' * size)
address, length = data.buffer_info()
length *= data.itemsize
ret = int(_check(fn(
dev_handle,
ep,
cast(address, c_char_p),
length,
timeout
)))
return data[:ret]
def get_backend():
global _lib
try:
if _lib is None:
_lib = _load_library()
_setup_prototypes(_lib)
_lib.usb_init()
return _LibUSB()
except Exception:
_logger.error('Error loading libusb 0.1 backend', exc_info=True)
return None
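# A hedged usage sketch, not part of the backend itself: applications
# normally obtain the backend here and pass it to usb.core.find() rather
# than calling _LibUSB methods directly. The vendor/product ids below are
# placeholders.
def _example_find_with_backend(idVendor=0x1234, idProduct=0x5678):
    import usb.core
    backend = get_backend()
    if backend is None:
        return None
    return usb.core.find(idVendor=idVendor, idProduct=idProduct,
                         backend=backend)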
|
drmelectronic/Despacho
|
usb/backend/libusb0.py
|
Python
|
bsd-3-clause
| 20,780
|
"""
Provide basic components for groupby. These definitions
hold the allowlist of methods that are exposed on the
SeriesGroupBy and the DataFrameGroupBy objects.
"""
from __future__ import annotations
import dataclasses
from typing import Hashable
@dataclasses.dataclass(order=True, frozen=True)
class OutputKey:
label: Hashable
position: int
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
plotting_methods = frozenset(["plot", "hist"])
common_apply_allowlist = (
frozenset(
[
"quantile",
"fillna",
"mad",
"take",
"idxmax",
"idxmin",
"tshift",
"skew",
"corr",
"cov",
"diff",
]
)
| plotting_methods
)
series_apply_allowlist: frozenset[str] = (
common_apply_allowlist
| frozenset(
{"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"}
)
) | frozenset(["dtype", "unique"])
dataframe_apply_allowlist: frozenset[str] = common_apply_allowlist | frozenset(
["dtypes", "corrwith"]
)
# cythonized transformations or canned "agg+broadcast", which do not
# require postprocessing of the result by transform.
cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
# List of aggregation/reduction functions.
# These map each group to a single numeric value
reduction_kernels = frozenset(
[
"all",
"any",
"corrwith",
"count",
"first",
"idxmax",
"idxmin",
"last",
"mad",
"max",
"mean",
"median",
"min",
"ngroup",
"nth",
"nunique",
"prod",
# as long as `quantile`'s signature accepts only
# a single quantile value, it's a reduction.
# GH#27526 might change that.
"quantile",
"sem",
"size",
"skew",
"std",
"sum",
"var",
]
)
# List of transformation functions.
# a transformation is a function that, for each group,
# produces a result that has the same shape as the group.
transformation_kernels = frozenset(
[
"backfill",
"bfill",
"cumcount",
"cummax",
"cummin",
"cumprod",
"cumsum",
"diff",
"ffill",
"fillna",
"pad",
"pct_change",
"rank",
"shift",
"tshift",
]
)
# these are all the public methods on Grouper which don't belong
# in either of the above lists
groupby_other_methods = frozenset(
[
"agg",
"aggregate",
"apply",
"boxplot",
# corr and cov return ngroups*ncolumns rows, so they
# are neither a transformation nor a reduction
"corr",
"cov",
"describe",
"dtypes",
"expanding",
"ewm",
"filter",
"get_group",
"groups",
"head",
"hist",
"indices",
"ndim",
"ngroups",
"ohlc",
"pipe",
"plot",
"resample",
"rolling",
"tail",
"take",
"transform",
"sample",
]
)
# Valid values of `name` for `groupby.transform(name)`
# NOTE: do NOT edit this directly. New additions should be inserted
# into the appropriate list above.
transform_kernel_allowlist = reduction_kernels | transformation_kernels
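# A short illustrative sketch (not used by pandas itself): any name in
# transform_kernel_allowlist may be passed to groupby(...).transform as a
# string. The frame below is made up.
def _example_transform_kernel():
    import pandas as pd
    assert "cumsum" in transform_kernel_allowlist
    df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
    return df.groupby("key")["val"].transform("cumsum")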
|
rs2/pandas
|
pandas/core/groupby/base.py
|
Python
|
bsd-3-clause
| 3,488
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from geopy.distance import great_circle
from s2sphere import Cell, CellId, LatLng
from pokemongo_bot import inventory
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.item_list import Item
from pokemongo_bot.walkers.polyline_walker import PolylineWalker
from pokemongo_bot.walkers.step_walker import StepWalker
from pokemongo_bot.worker_result import WorkerResult
class PokemonHunter(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def __init__(self, bot, config):
super(PokemonHunter, self).__init__(bot, config)
def initialize(self):
self.destination = None
self.walker = None
self.search_cell_id = None
self.search_points = []
self.lost_counter = 0
self.no_log_until = 0
self.config_max_distance = self.config.get("max_distance", 2000)
self.config_hunt_all = self.config.get("hunt_all", False)
self.config_hunt_vip = self.config.get("hunt_vip", True)
self.config_hunt_pokedex = self.config.get("hunt_pokedex", True)
def work(self):
if not self.enabled:
return WorkerResult.SUCCESS
if self.get_pokeball_count() <= 0:
self.destination = None
self.last_cell_id = None
return WorkerResult.SUCCESS
now = time.time()
pokemons = self.get_nearby_pokemons()
if self.destination is None:
worth_pokemons = self.get_worth_pokemons(pokemons)
if len(worth_pokemons) > 0:
self.destination = worth_pokemons[0]
self.lost_counter = 0
self.logger.info("New destination at %(distance).2f meters: %(name)s", self.destination)
self.no_log_until = now + 60
if self.destination["s2_cell_id"] != self.search_cell_id:
self.search_points = self.get_search_points(self.destination["s2_cell_id"])
self.walker = PolylineWalker(self.bot, self.search_points[0][0], self.search_points[0][1])
self.search_cell_id = self.destination["s2_cell_id"]
self.search_points = self.search_points[1:] + self.search_points[:1]
else:
if self.no_log_until < now:
self.logger.info("There is no nearby pokemon worth hunting down [%s]", ", ".join(p["name"] for p in pokemons))
self.no_log_until = now + 120
self.last_cell_id = None
return WorkerResult.SUCCESS
if any(self.destination["encounter_id"] == p["encounter_id"] for p in self.bot.cell["catchable_pokemons"] + self.bot.cell["wild_pokemons"]):
self.destination = None
elif self.walker.step():
if not any(self.destination["encounter_id"] == p["encounter_id"] for p in pokemons):
self.lost_counter += 1
else:
self.lost_counter = 0
if self.lost_counter >= 3:
self.destination = None
else:
self.logger.info("Now searching for %(name)s", self.destination)
self.walker = StepWalker(self.bot, self.search_points[0][0], self.search_points[0][1])
self.search_points = self.search_points[1:] + self.search_points[:1]
elif self.no_log_until < now:
distance = great_circle(self.bot.position, (self.walker.dest_lat, self.walker.dest_lng)).meters
self.logger.info("Moving to destination at %s meters: %s", round(distance, 2), self.destination["name"])
self.no_log_until = now + 30
return WorkerResult.RUNNING
def get_pokeball_count(self):
return sum([inventory.items().get(ball.value).count for ball in [Item.ITEM_POKE_BALL, Item.ITEM_GREAT_BALL, Item.ITEM_ULTRA_BALL]])
def get_nearby_pokemons(self):
radius = self.config_max_distance
pokemons = [p for p in self.bot.cell["nearby_pokemons"] if self.get_distance(self.bot.start_position, p) <= radius]
for pokemon in pokemons:
pokemon["distance"] = self.get_distance(self.bot.position, p)
pokemon["name"] = inventory.pokemons().name_for(pokemon["pokemon_id"])
pokemons.sort(key=lambda p: p["distance"])
return pokemons
def get_worth_pokemons(self, pokemons):
if self.config_hunt_all:
worth_pokemons = pokemons
else:
worth_pokemons = []
if self.config_hunt_vip:
worth_pokemons += [p for p in pokemons if p["name"] in self.bot.config.vips]
if self.config_hunt_pokedex:
worth_pokemons += [p for p in pokemons if (p not in worth_pokemons) and any(not inventory.pokedex().seen(fid) for fid in self.get_family_ids(p))]
worth_pokemons.sort(key=lambda p: inventory.candies().get(p["pokemon_id"]).quantity)
return worth_pokemons
def get_family_ids(self, pokemon):
family_id = inventory.pokemons().data_for(pokemon["pokemon_id"]).first_evolution_id
ids = [family_id]
ids += inventory.pokemons().data_for(family_id).next_evolutions_all[:]
return ids
def get_distance(self, location, pokemon):
return great_circle(location, (pokemon["latitude"], pokemon["longitude"])).meters
def get_search_points(self, cell_id):
points = []
        # Subdivide the level-15 cell twice, collecting the 16 level-17 sub-cell centers
for c in Cell(CellId(cell_id)).subdivide():
for cc in c.subdivide():
latlng = LatLng.from_point(cc.get_center())
point = (latlng.lat().degrees, latlng.lng().degrees)
points.append(point)
points[0], points[1] = points[1], points[0]
points[14], points[15] = points[15], points[14]
point = points.pop(2)
points.insert(7, point)
point = points.pop(13)
points.insert(8, point)
closest = min(points, key=lambda p: great_circle(self.bot.position, p).meters)
index = points.index(closest)
return points[index:] + points[:index]
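# An illustrative sketch, separate from the task above: subdividing an S2
# cell twice yields 4 x 4 = 16 sub-cell centers, which is the grid of
# search points get_search_points walks. The coordinates are arbitrary.
def _example_search_point_count():
    cell_id = CellId.from_lat_lng(LatLng.from_degrees(40.0, -74.0)).parent(15)
    points = []
    for c in Cell(cell_id).subdivide():
        for cc in c.subdivide():
            latlng = LatLng.from_point(cc.get_center())
            points.append((latlng.lat().degrees, latlng.lng().degrees))
    assert len(points) == 16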
|
cmezh/PokemonGo-Bot
|
pokemongo_bot/cell_workers/pokemon_hunter.py
|
Python
|
mit
| 6,128
|
from __future__ import print_function
from .Console import *
|
pbmanis/acq4
|
acq4/modules/Console/__init__.py
|
Python
|
mit
| 60
|
import hashlib
import json
from PIL import Image
from django.http import HttpResponse
from django.shortcuts import render
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from crits.core.class_mapper import class_from_id
from crits.core.crits_mongoengine import json_handler, create_embedded_source
from crits.core.crits_mongoengine import EmbeddedSource
from crits.core.handlers import build_jtable, jtable_ajax_list,jtable_ajax_delete
from crits.core.user_tools import user_sources
from crits.screenshots.screenshot import Screenshot
def get_screenshots_for_id(type_, _id, analyst, buckets=False):
"""
Get screenshots for a top-level object.
:param type_: The class type.
:type type_: str
:param _id: The ObjectId to lookup.
:type _id: str
:param analyst: The user looking up the screenshots.
:type analyst: str
:param buckets: Use buckets as tag lookups for screenshots.
:type buckets: boolean
    :returns: dict with keys "success" (boolean), "message" (str) on
        failure, and "screenshots" (list) on success.
"""
result = {'success': False}
sources = user_sources(analyst)
obj = class_from_id(type_, _id)
if not obj:
result['message'] = "No valid top-level object found."
return result
screenshots = Screenshot.objects(id__in=obj.screenshots,
source__name__in=sources)
bucket_shots = Screenshot.objects(tags__in=obj.bucket_list,
source__name__in=sources)
final_shots = []
for s in screenshots:
if s.screenshot and s.thumb and s not in final_shots:
final_shots.append(s)
for b in bucket_shots:
if b not in final_shots:
# since .bucket isn't supported, this will show up in the template
# under unsupported_attrs, which is ok.
b.bucket = True
final_shots.append(b)
result['success'] = True
result['screenshots'] = final_shots
return result
def get_screenshot(_id=None, tag=None, analyst=None, thumb=False):
"""
Get a screenshot.
:param _id: The ObjectId to lookup.
:type _id: str
:param tag: The tag to look for.
:type tag: str
:param analyst: The user looking up the screenshots.
:type analyst: str
    :param thumb: If True, return the thumbnail instead of the full image.
    :type thumb: boolean
    :returns: :class:`django.http.HttpResponse` with the PNG image, or None
    """
if not analyst:
return None
sources = user_sources(analyst)
if _id:
screenshot = Screenshot.objects(id=_id,
source__name__in=sources).first()
if tag:
screenshot = Screenshot.objects(tags=tag,
source__name__in=sources).first()
if not screenshot:
return None
if thumb:
im = Image.open(screenshot.thumb)
else:
im = Image.open(screenshot.screenshot)
response = HttpResponse(content_type="image/png")
im.save(response, "PNG")
return response
def add_screenshot(description, tags, source, method, reference, tlp, analyst,
screenshot, screenshot_ids, oid, otype):
"""
Add a screenshot or screenshots to a top-level object.
:param description: The description of the screenshot.
:type description: str
:param tags: Tags associated with this screenshot.
:type tags: str, list
:param source: The source who provided the screenshot.
    :type source: str,
:class:`crits.core.crits_mongoengine.EmbeddedSource`,
list of :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param method: The method of acquiring this screenshot.
:type method: str
:param reference: A reference to the source of this screenshot.
:type reference: str
:param tlp: The TLP Sharing of this screenshot.
:type tlp: str
:param analyst: The user adding the screenshot.
:type analyst: str
:param screenshot: The screenshot to add.
:type screenshot: file handle
:param screenshot_ids: A list of ObjectIds of existing screenshots to add.
:type screenshot_ids: str, list
:param oid: The ObjectId of the top-level object to add to.
:type oid: str
:param otype: The top-level object type.
:type otype: str
:returns: dict with keys:
'success' (boolean),
'message' (str),
'id' (str) if successful,
'html' (str) if successful,
"""
result = {'success': False}
if not source:
result['message'] = "Must provide a source"
return result
obj = class_from_id(otype, oid)
if not obj:
result['message'] = "Could not find the top-level object."
return result
final_screenshots = []
if screenshot_ids:
if not isinstance(screenshot_ids, list):
screenshot_list = screenshot_ids.split(',')
else:
screenshot_list = screenshot_ids
for screenshot_id in screenshot_list:
screenshot_id = screenshot_id.strip().lower()
s = Screenshot.objects(id=screenshot_id).first()
if s:
if isinstance(source, basestring) and len(source) > 0:
s_embed = create_embedded_source(source, method=method,
reference=reference,
analyst=analyst,
tlp=tlp)
s.add_source(s_embed)
elif isinstance(source, EmbeddedSource):
s.add_source(source=source, method=method,
reference=reference, analyst=analyst, tlp=tlp)
elif isinstance(source, list) and len(source) > 0:
for x in source:
if isinstance(x, EmbeddedSource):
s.add_source(x, method=method, reference=reference,
analyst=analyst, tlp=tlp)
s.add_tags(tags)
s.save()
obj.screenshots.append(screenshot_id)
obj.save()
final_screenshots.append(s)
else:
md5 = hashlib.md5(screenshot.read()).hexdigest()
check = Screenshot.objects(md5=md5).first()
if check:
s = check
s.add_tags(tags)
else:
s = Screenshot()
s.analyst = analyst
s.description = description
s.md5 = md5
screenshot.seek(0)
s.add_screenshot(screenshot, tags)
if isinstance(source, basestring) and len(source) > 0:
s_embed = create_embedded_source(source, method=method,
reference=reference,
analyst=analyst,
tlp=tlp)
s.add_source(s_embed)
elif isinstance(source, EmbeddedSource):
s.add_source(source, method=method, reference=reference,
analyst=analyst, tlp=tlp)
elif isinstance(source, list) and len(source) > 0:
for x in source:
if isinstance(x, EmbeddedSource):
s.add_source(x, method=method, reference=reference,
analyst=analyst, tlp=tlp)
if not s.screenshot and not s.thumb:
result['message'] = "Problem adding screenshot to GridFS. No screenshot uploaded."
return result
try:
s.save(username=analyst)
final_screenshots.append(s)
except Exception, e:
result['message'] = str(e)
return result
obj.screenshots.append(str(s.id))
obj.save(username=analyst)
result['message'] = "Screenshot(s) successfully uploaded!"
result['id'] = str(s.id)
final_html = ""
for f in final_screenshots:
final_html += create_screenshot_html(f, oid, otype)
result['html'] = final_html
result['success'] = True
return result
def create_screenshot_html(s, oid, otype):
"""
Create HTML for a thumbnail view for the screenshot.
:param s: The screenshot.
:type s: :class:`crits.screenshots.screenshot.Screenshot`
:param oid: The ObjectId of the top-level object it's associating with.
:type oid: str
:param otype: The type of top-level object it's associating with.
:returns: str
"""
if s.tags and s.description:
description = s.description + ": " + ','.join(s.tags)
else:
description = s.md5
description += " (submitted by %s)" % s.analyst
html = '<a href="%s" title="%s" data-id="%s" data-dialog><img class="ss_no_bucket" src="%s">' % \
(reverse('crits-screenshots-views-render_screenshot',
args=[s.id]),
description,
str(s.id),
reverse('crits-screenshots-views-render_screenshot',
args=[s.id, 'thumb']))
html += '<span class="remove_screenshot ui-icon ui-icon-trash" data-id="'
html += '%s" data-obj="%s" data-type="%s" title="Remove from %s">' % (str(s.id),
oid,
otype,
otype)
html += '</span><span class="copy_ss_id ui-icon ui-icon-radio-on" '
html += 'data-id="%s" title="Copy ID to clipboard"></span>' % str(s.id)
return html
def delete_screenshot_from_object(obj, oid, sid, analyst):
"""
Remove a screenshot from a top-level object.
:param obj: The type of top-level object to work with.
:type obj: str
:param oid: The ObjectId of the top-level object to work with.
:type oid: str
:param sid: The ObjectId of the screenshot to remove.
:type sid: str
:param analyst: The user removing the screenshot.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str).
"""
result = {'success': False}
klass = class_from_id(obj, oid)
if not klass:
result['message'] = "Could not find Object to delete screenshot from."
return result
clean = [s for s in klass.screenshots if s != sid]
klass.screenshots = clean
try:
klass.save(username=analyst)
result['success'] = True
return result
except Exception, e:
result['message'] = str(e)
return result
def generate_screenshot_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Screenshot
type_ = "screenshot"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type,request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Screenshots",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits-%ss-views-%ss_listing' % (type_,
type_),
args=('jtlist',)),
'deleteurl': reverse('crits-%ss-views-%ss_listing' % (type_,
type_),
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = []
if option == "inline":
return render(request, "jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
)
else:
return render(request, "%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
)
|
Magicked/crits
|
crits/screenshots/handlers.py
|
Python
|
mit
| 13,241
|
import sublime
if int(sublime.version()) < 3000:
import ghci
import ghcmod
import haskell_docs
import hdevtools
import sublime_haskell_common as common
import symbols
else:
import SublimeHaskell.ghci as ghci
import SublimeHaskell.ghcmod as ghcmod
import SublimeHaskell.haskell_docs as haskell_docs
import SublimeHaskell.hdevtools as hdevtools
import SublimeHaskell.sublime_haskell_common as common
import SublimeHaskell.symbols as symbols
def symbol_info(filename, module_name, symbol_name, cabal = None, no_ghci = False):
result = None
if hdevtools.hdevtools_enabled():
result = hdevtools.hdevtools_info(filename, symbol_name, cabal = cabal)
if not result and ghcmod.ghcmod_enabled():
result = ghcmod.ghcmod_info(filename, module_name, symbol_name, cabal = cabal)
if not result and not filename and not no_ghci:
result = ghci.ghci_info(module_name, symbol_name, cabal = cabal)
return result
def load_docs(decl):
"""
Tries to load docs for decl
"""
if decl.docs is None:
decl.docs = haskell_docs.haskell_docs(decl.module.name, decl.name)
def refine_type(decl, no_ghci = True):
"""
Refine type for sources decl
"""
if decl.location:
if decl.what == 'function' and not decl.type:
info = symbol_info(decl.location.filename, decl.module.name, decl.name, None, no_ghci = no_ghci)
if info:
decl.type = info.type
def refine_decl(decl):
"""
Refine decl information.
"""
# Symbol from cabal, try to load detailed info with ghci
if not decl.location:
load_docs(decl)
if decl.what == 'declaration':
decl_detailed = ghci.ghci_info(decl.module.name, decl.name)
if decl_detailed:
decl.__dict__.update(decl_detailed.__dict__)
# Symbol from sources, concrete type if it's not specified
else:
refine_type(decl, False)
def browse_module(module_name, cabal = None):
"""
Returns symbols.Module with all declarations
"""
return ghcmod.ghcmod_browse_module(module_name, cabal = cabal)
|
hcarvalhoalves/SublimeHaskell
|
util.py
|
Python
|
mit
| 2,159
|
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
apply(Exception.__init__, (self,)+args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# for backwards compatibility
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
self.file = self.sock.makefile('rb')
self.debugging = 0
self.welcome = self.getresp()
# 'mode reader' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'mode reader' and 'authinfo' need to
# arrive differs between some NNTP servers. Try to send
# 'mode reader', and if it fails with an authorization failed
# error, try again after sending authinfo.
readermode_afterauth = 0
if readermode:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
except NNTPTemporaryError, e:
if user and e.response[:3] == '480':
# Need authorization before 'mode reader'
readermode_afterauth = 1
else:
raise
if user:
resp = self.shortcmd('authinfo user '+user)
if resp[:3] == '381':
if not password:
raise NNTPReplyError(resp)
else:
resp = self.shortcmd(
'authinfo pass '+password)
if resp[:3] != '281':
raise NNTPPermanentError(resp)
if readermode_afterauth:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
# Get the welcome message from the server
# (this is read and squirreled away by __init__()).
# If the response code is 200, posting is allowed;
    # if it is 201, posting is not allowed
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
        if it is 201, posting is not allowed."""
if self.debugging: print '*welcome*', `self.welcome`
return self.welcome
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def putline(self, line):
"""Internal: send one line to the server, appending CRLF."""
line = line + CRLF
if self.debugging > 1: print '*put*', `line`
self.sock.send(line)
def putcmd(self, line):
"""Internal: send one command to the server (through putline())."""
if self.debugging: print '*cmd*', `line`
self.putline(line)
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
line = self.file.readline()
if self.debugging > 1:
print '*get*', `line`
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
def getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error."""
resp = self.getline()
if self.debugging: print '*resp*', `resp`
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def getlongresp(self):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error."""
resp = self.getresp()
if resp[:3] not in LONGRESP:
raise NNTPReplyError(resp)
list = []
while 1:
line = self.getline()
if line == '.':
break
if line[:2] == '..':
line = line[1:]
list.append(line)
return resp, list
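    # Illustrative note (added comment; not part of the original module):
    # a multi-line response ends with a line holding a single '.', and a
    # leading '..' is dot-unstuffed back to '.'; e.g. the wire lines
    # 'first', '..literal dot', '.' yield the list ['first', '.literal dot'].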
def shortcmd(self, line):
"""Internal: send a command and get the response."""
self.putcmd(line)
return self.getresp()
def longcmd(self, line):
"""Internal: send a command and get the response plus following text."""
self.putcmd(line)
return self.getlongresp()
def newgroups(self, date, time):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd('NEWGROUPS ' + date + ' ' + time)
def newnews(self, group, date, time):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of article ids"""
cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
return self.longcmd(cmd)
def list(self):
"""Process a LIST command. Return:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)"""
resp, list = self.longcmd('LIST')
for i in range(len(list)):
# Parse lines into "group last first flag"
list[i] = tuple(list[i].split())
return resp, list
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles (string)
- first: first article number (string)
- last: last article number (string)
- name: the group name"""
resp = self.shortcmd('GROUP ' + name)
if resp[:3] != '211':
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, count, first, last, name
def help(self):
"""Process a HELP command. Returns:
- resp: server response if successful
- list: list of strings"""
return self.longcmd('HELP')
def statparse(self, resp):
"""Internal: parse the response of a STAT, NEXT or LAST command."""
if resp[:2] != '22':
raise NNTPReplyError(resp)
words = resp.split()
nr = 0
id = ''
n = len(words)
if n > 1:
nr = words[1]
if n > 2:
id = words[2]
return resp, nr, id
def statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self.shortcmd(line)
return self.statparse(resp)
def stat(self, id):
"""Process a STAT command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: the article number
- id: the article id"""
return self.statcmd('STAT ' + id)
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self.statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self.statcmd('LAST')
def artcmd(self, line):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, list = self.longcmd(line)
resp, nr, id = self.statparse(resp)
return resp, nr, id, list
def head(self, id):
"""Process a HEAD command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's header"""
return self.artcmd('HEAD ' + id)
def body(self, id):
"""Process a BODY command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's body"""
return self.artcmd('BODY ' + id)
def article(self, id):
"""Process an ARTICLE command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article"""
return self.artcmd('ARTICLE ' + id)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful"""
return self.shortcmd('SLAVE')
def xhdr(self, hdr, str):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
Returns:
- resp: server response if successful
- list: list of (nr, value) strings"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str)
for i in range(len(lines)):
line = lines[i]
m = pat.match(line)
if m:
lines[i] = m.group(1, 2)
return resp, lines
def xover(self,start,end):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
Returns:
- resp: server response if successful
- list: list of (art-nr, subject, poster, date,
id, references, size, lines)"""
resp, lines = self.longcmd('XOVER ' + start + '-' + end)
xover_lines = []
for line in lines:
elem = line.split("\t")
try:
xover_lines.append((elem[0],
elem[1],
elem[2],
elem[3],
elem[4],
elem[5].split(),
elem[6],
elem[7]))
except IndexError:
raise NNTPDataError(line)
return resp,xover_lines
def xgtitle(self, group):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
resp, raw_lines = self.longcmd('XGTITLE ' + group)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self,id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article"""
resp = self.shortcmd("XPATH " + id)
if resp[:3] != '223':
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date (self):
"""Process the DATE command. Arguments:
None
Returns:
resp: server response if successful
date: Date suitable for newnews/newgroups commands etc.
time: Time suitable for newnews/newgroups commands etc."""
resp = self.shortcmd("DATE")
if resp[:3] != '111':
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1][2:8]
time = elem[1][-6:]
if len(date) != 6 or len(time) != 6:
raise NNTPDataError(resp)
return resp, date, time
def post(self, f):
"""Process a POST command. Arguments:
- f: file containing the article
Returns:
- resp: server response if successful"""
resp = self.shortcmd('POST')
# Raises error_??? if posting is not allowed
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def ihave(self, id, f):
"""Process an IHAVE command. Arguments:
- id: message-id of the article
- f: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
resp = self.shortcmd('IHAVE ' + id)
# Raises error_??? if the server already has it
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd('QUIT')
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
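def _example_xover():
    """Hedged usage sketch (added for illustration; not part of the
    original module).  Fetches overview data for the last ten articles
    in comp.lang.python, assuming a reachable server named 'news' that
    permits reader mode."""
    s = NNTP('news', readermode=1)
    resp, count, first, last, name = s.group('comp.lang.python')
    # Article numbers come back as strings; compute the start of the range.
    start = str(max(int(first), int(last) - 9))
    resp, overviews = s.xover(start, last)
    for (nr, subject, poster, date, id, refs, size, lines) in overviews:
        print nr, subject
    s.quit()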
def _test():
"""Minimal test function."""
s = NNTP('news', readermode='reader')
resp, count, first, last, name = s.group('comp.lang.python')
print resp
print 'Group', name, 'has', count, 'articles, range', first, 'to', last
resp, subs = s.xhdr('subject', first + '-' + last)
print resp
for item in subs:
print "%7s %s" % item
resp = s.quit()
print resp
# Run the test when run as a script
if __name__ == '__main__':
_test()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.1/Lib/nntplib.py
|
Python
|
mit
| 18,078
|
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
# guaranteed to exist even on RHEL 5 because we now require python-hashlib
import hashlib
import re
import shutil
import pwd
import urlparse
import inspect
from config_common.rhn_log import log_debug
hashlib_has_usedforsecurity = False
if 'usedforsecurity' in inspect.getargspec(hashlib.new)[0]:
hashlib_has_usedforsecurity = True
_normpath_re = re.compile("^(%s)+" % os.sep)
def normalize_path(path):
"""
os.path.normpath does not remove path separator duplicates at the
beginning of the path
"""
return _normpath_re.sub(os.sep, os.path.normpath(path))
def join_path(*args):
return normalize_path(os.sep.join(args))
def path_full_split(path):
"""
    Given a path, it fully splits it into constituent path
    components (as opposed to os.path.split, which splits it into
    the trailing component and the preceding path)
"""
path = normalize_path(path)
splitpath = []
while 1:
path, current = os.path.split(path)
if current == '':
if path:
# Absolute path
splitpath.append(os.sep)
break
splitpath.append(current)
splitpath.reverse()
return splitpath
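def _demo_path_full_split():
    # Hedged example (added for illustration; not part of the original
    # module): shows how path_full_split() decomposes an absolute path.
    # Assumes a POSIX os.sep of '/'; the leading separator is kept as its
    # own component.
    parts = path_full_split('/usr//local/bin')
    assert parts == ['/', 'usr', 'local', 'bin']
    return parts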
def copyfile_p(src, dst):
"""
Simple util function, copies src path to dst path, making
directories as necessary. File permissions are not preserved.
"""
directory = os.path.split(dst)[0]
try:
os.makedirs(directory)
except OSError, e:
if e.errno != 17:
            # anything other than errno 17 (EEXIST, "File exists") is fatal
raise
if os.path.isdir(src):
if not os.path.exists(dst):
os.mkdir(dst)
elif os.path.islink(src):
exists = hasattr(os.path, "lexists") and os.path.lexists or os.path.exists
if exists(dst):
os.remove(dst)
os.symlink(os.readlink(src), dst)
else:
shutil.copyfile(src, dst)
def mkdir_p(path, mode=None, symlinks=None, allfiles=None):
"""
Similar to 'mkdir -p' -- makes all directories necessary to ensure
the 'path' is a directory, and return the list of directories that were
made as a result
"""
if mode is None:
mode = 0700
dirs_created = []
components = path_full_split(path)
for i in range(1,len(components)):
d = os.path.join(*components[:i+1])
if symlinks:
for symlink in symlinks:
if symlink['path'] == d:
# create symlink and remove it from symlink list
os.symlink(symlink['symlink'], symlink['path'])
symlinks.remove(symlink)
allfiles.remove(symlink)
dirs_created.append(symlink)
continue
log_debug(8, "testing",d)
try:
os.mkdir(d, mode)
except OSError, e:
if e.errno != 17:
raise
else:
log_debug(8, "created",d)
dirs_created.append(d)
log_debug(6, "dirs_created:",dirs_created)
return dirs_created
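# Hedged usage note (added comment; not part of the original module):
# mkdir_p(join_path('/tmp/work', 'a', 'b')) creates '/tmp/work/a' and then
# '/tmp/work/a/b' as needed (mode 0700 by default) and returns only the
# directories it actually created, so pre-existing components are omitted.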
def rmdir_p(path, stoppath):
"""
    If rmdir had a -p option, this would be it: remove the directory and
    its parents upward until a non-empty directory is hit or stoppath
    is reached. path and stoppath have to be absolute paths.
"""
# First normalize both paths
stoppath = normalize_path(os.sep + stoppath)
path = normalize_path(os.sep + path)
# stoppath has to be a prefix of path
if path[:len(stoppath)] != stoppath:
raise OSError, "Could not remove %s: %s is not a prefix" % (
path, stoppath)
while 1:
if stoppath == path:
# We're done
break
# Try to remove the directory
try:
os.rmdir(path)
except OSError:
            # Either the directory is not empty, or we don't have permission; stop
break
path, current = os.path.split(path)
if current == '':
# We're done - reached the root
break
# Returns slashstring with any trailing slash removed.
def rm_trailing_slash(slashstring):
    # Guard against the empty string, which would otherwise raise IndexError.
    if slashstring and slashstring[-1] == "/":
        slashstring = slashstring[0:-1]
    return slashstring
def getContentChecksum(checksum_type, contents):
if hashlib_has_usedforsecurity:
engine = hashlib.new(checksum_type, usedforsecurity=False)
else:
engine = hashlib.new(checksum_type)
engine.update(contents)
return engine.hexdigest()
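def _demo_content_checksum():
    # Hedged example (added for illustration; not part of the original
    # module): getContentChecksum() returns the hex digest of the given
    # contents, shown here with the well-known SHA-256 of the empty string.
    digest = getContentChecksum('sha256', '')
    assert digest == ('e3b0c44298fc1c149afbf4c8996fb924'
                      '27ae41e4649b934ca495991b7852b855')
    return digest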
def sha256_file(filename):
    engine = hashlib.new('sha256')
    # Read in binary mode so the digest is not affected by newline
    # translation, and close the handle when done.
    fh = open(filename, "rb")
    while 1:
        buf = fh.read(4096)
        if not buf:
            break
        engine.update(buf)
    fh.close()
    return engine.hexdigest()
def parse_url(server_url, scheme="https"):
return urlparse.urlparse(server_url, scheme=scheme)
def unparse_url(url_tuple):
return urlparse.urlunparse(url_tuple)
def get_home_dir():
uid = os.getuid()
ent = pwd.getpwuid(uid)
return ent[5]
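def _demo_parse_url():
    # Hedged example (added for illustration; not part of the original
    # module): a fully qualified URL round-trips through parse_url() and
    # unparse_url() unchanged.
    parts = parse_url('https://example.com/XMLRPC')
    assert unparse_url(parts) == 'https://example.com/XMLRPC'
    return parts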
|
aronparsons/spacewalk
|
client/tools/rhncfg/config_common/utils.py
|
Python
|
gpl-2.0
| 5,530
|
from django.test import TestCase
from common.templatetags.verbose_name import verbose_name
from users.models import SystersUser
class TemplateTagsTestCase(TestCase):
def test_verbose_names(self):
"""Test verbose_name template tag"""
self.assertEqual(verbose_name(SystersUser, "homepage_url"), "Homepage")
|
willingc/portal
|
systers_portal/common/tests/test_templatetags.py
|
Python
|
gpl-2.0
| 328
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
  # positional arguments. Doubling ensures the % is passed through literally
  # and collapses back to a single %. Otherwise, trying to pass a literal
  # that merely looks like an environment variable reference to the shell
  # (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
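def _DemoGenericRetrieve():
  # Hedged example (added for illustration; not part of the original
  # module): walks a nested dict by a key path, falling back to the
  # default when any step along the path is missing.
  tree = {'VCLinkerTool': {'SubSystem': '1'}}
  assert _GenericRetrieve(tree, None, ['VCLinkerTool', 'SubSystem']) == '1'
  assert _GenericRetrieve(tree, 'dflt', ['VCLinkerTool', 'Missing']) == 'dflt'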
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
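def _DemoLanguageMatchesForPch():
  # Hedged example (added for illustration; not part of the original
  # module): a precompiled header applies only within one language, so C
  # and C++ extensions never match each other.
  assert _LanguageMatchesForPch('.cpp', '.cc')
  assert not _LanguageMatchesForPch('.c', '.cc')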
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
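def _DemoExpandMacros():
  # Hedged example (added for illustration; not part of the original
  # module): VS-style macros are expanded by plain string replacement, so
  # the order of the substitutions does not matter here.
  env = {'$(OutDir)\\': 'out\\Release\\', '$(TargetName)': 'app'}
  assert ExpandMacros('$(OutDir)\\$(TargetName).exe', env) == \
      'out\\Release\\app.exe'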
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
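def _DemoFormatAsEnvironmentBlock():
  # Hedged example (added for illustration; not part of the original
  # module): CreateProcess expects NUL-separated KEY=value pairs with a
  # trailing extra NUL.
  block = _FormatAsEnvironmentBlock({'PATH': 'C:\\bin'})
  assert block == 'PATH=C:\\bin\x00\x00'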
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
  When this procedure for generating environment files does not meet your
  requirements (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to gyp to suppress file
  generation and use custom environment files that you prepare yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
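
# A standalone sketch (illustrative only, mirroring the check above) of the
# WOW64-aware word-size detection: PROCESSOR_ARCHITEW6432 is only set for
# 32-bit processes running on 64-bit Windows, so both variables must be
# consulted.
def _WindowsOSBits(environ=os.environ):
  if ('64' in environ.get('PROCESSOR_ARCHITECTURE', '') or
      '64' in environ.get('PROCESSOR_ARCHITEW6432', '')):
    return 64
  return 32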
|
mikemcdaid/getonupband
|
sites/all/themes/getonupband/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py
|
Python
|
gpl-2.0
| 48,784
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shlex,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
build_dir_override=None
no_climb_commands=['configure']
default_cmd="build"
def waf_entry_point(current_directory,version,wafdir):
Logs.init_log()
if Context.WAFVERSION!=version:
Logs.error('Waf script %r and library %r do not match (directory %r)'%(version,Context.WAFVERSION,wafdir))
sys.exit(1)
if'--version'in sys.argv:
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Context.waf_dir=wafdir
Context.launch_dir=current_directory
no_climb=os.environ.get('NOCLIMB',None)
if not no_climb:
for k in no_climb_commands:
if k in sys.argv:
no_climb=True
break
cur=current_directory
while cur:
lst=os.listdir(cur)
if Options.lockfile in lst:
env=ConfigSet.ConfigSet()
try:
env.load(os.path.join(cur,Options.lockfile))
ino=os.stat(cur)[stat.ST_INO]
except Exception:
pass
else:
for x in[env.run_dir,env.top_dir,env.out_dir]:
if Utils.is_win32:
if cur==x:
load=True
break
else:
try:
ino2=os.stat(x)[stat.ST_INO]
except OSError:
pass
else:
if ino==ino2:
load=True
break
else:
Logs.warn('invalid lock file in %s'%cur)
load=False
if load:
Context.run_dir=env.run_dir
Context.top_dir=env.top_dir
Context.out_dir=env.out_dir
break
if not Context.run_dir:
if Context.WSCRIPT_FILE in lst:
Context.run_dir=cur
next=os.path.dirname(cur)
if next==cur:
break
cur=next
if no_climb:
break
if not Context.run_dir:
if'-h'in sys.argv or'--help'in sys.argv:
Logs.warn('No wscript file found: the help message may be incomplete')
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Logs.error('Waf: Run from a directory containing a file named %r'%Context.WSCRIPT_FILE)
sys.exit(1)
try:
os.chdir(Context.run_dir)
except OSError:
Logs.error('Waf: The folder %r is unreadable'%Context.run_dir)
sys.exit(1)
try:
set_main_module(Context.run_dir+os.sep+Context.WSCRIPT_FILE)
except Errors.WafError ,e:
Logs.pprint('RED',e.verbose_msg)
Logs.error(str(e))
sys.exit(1)
except Exception ,e:
Logs.error('Waf: The wscript in %r is unreadable'%Context.run_dir,e)
traceback.print_exc(file=sys.stdout)
sys.exit(2)
try:
run_commands()
except Errors.WafError ,e:
if Logs.verbose>1:
Logs.pprint('RED',e.verbose_msg)
Logs.error(e.msg)
sys.exit(1)
except SystemExit:
raise
except Exception ,e:
traceback.print_exc(file=sys.stdout)
sys.exit(2)
except KeyboardInterrupt:
Logs.pprint('RED','Interrupted')
sys.exit(68)
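# A minimal standalone sketch (illustrative, not part of waf) of the upward
# directory climb performed by waf_entry_point above: walk towards the
# filesystem root until a directory containing the wscript marker is found.
def find_project_root(start,marker=Context.WSCRIPT_FILE):
	cur=start
	while cur:
		if marker in os.listdir(cur):
			return cur
		nxt=os.path.dirname(cur)
		if nxt==cur:
			return None
		cur=nxt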
def set_main_module(file_path):
Context.g_module=Context.load_module(file_path)
Context.g_module.root_path=file_path
def set_def(obj):
name=obj.__name__
if not name in Context.g_module.__dict__:
setattr(Context.g_module,name,obj)
for k in[update,dist,distclean,distcheck]:
set_def(k)
if not'init'in Context.g_module.__dict__:
Context.g_module.init=Utils.nada
if not'shutdown'in Context.g_module.__dict__:
Context.g_module.shutdown=Utils.nada
if not'options'in Context.g_module.__dict__:
Context.g_module.options=Utils.nada
def parse_options():
Context.create_context('options').execute()
if not Options.commands:
Options.commands=[default_cmd]
Options.commands=[x for x in Options.commands if x!='options']
Logs.verbose=Options.options.verbose
Logs.init_log()
if Options.options.zones:
Logs.zones=Options.options.zones.split(',')
if not Logs.verbose:
Logs.verbose=1
elif Logs.verbose>0:
Logs.zones=['runner']
if Logs.verbose>2:
Logs.zones=['*']
def run_command(cmd_name):
ctx=Context.create_context(cmd_name)
ctx.log_timer=Utils.Timer()
ctx.options=Options.options
ctx.cmd=cmd_name
ctx.execute()
return ctx
def run_commands():
parse_options()
run_command('init')
while Options.commands:
cmd_name=Options.commands.pop(0)
ctx=run_command(cmd_name)
Logs.info('%r finished successfully (%s)'%(cmd_name,str(ctx.log_timer)))
run_command('shutdown')
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
for(root,dirs,files)in os.walk(dirname):
for f in files:
if _can_distclean(f):
fname=root+os.sep+f
try:
os.unlink(fname)
except OSError:
Logs.warn('Could not remove %r'%fname)
for x in[Context.DBFILE,'config.log']:
try:
os.unlink(x)
except OSError:
pass
try:
shutil.rmtree('c4che')
except OSError:
pass
def distclean(ctx):
'''removes the build directory'''
lst=os.listdir('.')
for f in lst:
if f==Options.lockfile:
try:
proj=ConfigSet.ConfigSet(f)
except IOError:
Logs.warn('Could not read %r'%f)
continue
if proj['out_dir']!=proj['top_dir']:
try:
shutil.rmtree(proj['out_dir'])
except IOError:
pass
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('project %r cannot be removed'%proj[Context.OUT])
else:
distclean_dir(proj['out_dir'])
for k in(proj['out_dir'],proj['top_dir'],proj['run_dir']):
try:
os.remove(os.path.join(k,Options.lockfile))
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('file %r cannot be removed'%f)
if f.startswith('.waf')and not Options.commands:
shutil.rmtree(f,ignore_errors=True)
class Dist(Context.Context):
'''creates an archive containing the project source code'''
cmd='dist'
fun='dist'
algo='tar.bz2'
ext_algo={}
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
def archive(self):
import tarfile
arch_name=self.get_arch_name()
try:
self.base_path
except AttributeError:
self.base_path=self.path
node=self.base_path.make_node(arch_name)
try:
node.delete()
except Exception:
pass
files=self.get_files()
if self.algo.startswith('tar.'):
tar=tarfile.open(arch_name,'w:'+self.algo.replace('tar.',''))
for x in files:
self.add_tar_file(x,tar)
tar.close()
elif self.algo=='zip':
import zipfile
zip=zipfile.ZipFile(arch_name,'w',compression=zipfile.ZIP_DEFLATED)
for x in files:
archive_name=self.get_base_name()+'/'+x.path_from(self.base_path)
zip.write(x.abspath(),archive_name,zipfile.ZIP_DEFLATED)
zip.close()
else:
self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
try:
digest=" (sha=%r)"%sha(node.read()).hexdigest()
except Exception:
digest=''
Logs.info('New archive created: %s%s'%(self.arch_name,digest))
def get_tar_path(self,node):
return node.abspath()
def add_tar_file(self,x,tar):
p=self.get_tar_path(x)
tinfo=tar.gettarinfo(name=p,arcname=self.get_tar_prefix()+'/'+x.path_from(self.base_path))
tinfo.uid=0
tinfo.gid=0
tinfo.uname='root'
tinfo.gname='root'
fu=None
try:
fu=open(p,'rb')
tar.addfile(tinfo,fileobj=fu)
finally:
if fu:
fu.close()
def get_tar_prefix(self):
try:
return self.tar_prefix
except AttributeError:
return self.get_base_name()
def get_arch_name(self):
try:
self.arch_name
except AttributeError:
self.arch_name=self.get_base_name()+'.'+self.ext_algo.get(self.algo,self.algo)
return self.arch_name
def get_base_name(self):
try:
self.base_name
except AttributeError:
appname=getattr(Context.g_module,Context.APPNAME,'noname')
version=getattr(Context.g_module,Context.VERSION,'1.0')
self.base_name=appname+'-'+version
return self.base_name
def get_excl(self):
try:
return self.excl
except AttributeError:
self.excl=Node.exclude_regs+' **/waf-1.7.* **/.waf-1.7* **/waf3-1.7.* **/.waf3-1.7* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
nd=self.root.find_node(Context.out_dir)
if nd:
self.excl+=' '+nd.path_from(self.base_path)
return self.excl
def get_files(self):
try:
files=self.files
except AttributeError:
files=self.base_path.ant_glob('**/*',excl=self.get_excl())
return files
def dist(ctx):
'''makes a tarball for redistributing the sources'''
pass
class DistCheck(Dist):
fun='distcheck'
cmd='distcheck'
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
self.check()
def check(self):
import tempfile,tarfile
t=None
try:
t=tarfile.open(self.get_arch_name())
for x in t:
t.extract(x)
finally:
if t:
t.close()
cfg=[]
if Options.options.distcheck_args:
cfg=shlex.split(Options.options.distcheck_args)
else:
cfg=[x for x in sys.argv if x.startswith('-')]
instdir=tempfile.mkdtemp('.inst',self.get_base_name())
ret=Utils.subprocess.Popen([sys.argv[0],'configure','install','uninstall','--destdir='+instdir]+cfg,cwd=self.get_base_name()).wait()
if ret:
raise Errors.WafError('distcheck failed with code %i'%ret)
if os.path.exists(instdir):
raise Errors.WafError('distcheck succeeded, but files were left in %s'%instdir)
shutil.rmtree(self.get_base_name())
def distcheck(ctx):
'''checks if the project compiles (tarball from 'dist')'''
pass
def update(ctx):
'''updates the plugins from the *waflib/extras* directory'''
lst=Options.options.files.split(',')
if not lst:
lst=[x for x in Utils.listdir(Context.waf_dir+'/waflib/extras')if x.endswith('.py')]
for x in lst:
tool=x.replace('.py','')
try:
Configure.download_tool(tool,force=True,ctx=ctx)
except Errors.WafError:
Logs.error('Could not find the tool %s in the remote repository'%x)
def autoconfigure(execute_method):
def execute(self):
if not Configure.autoconfig:
return execute_method(self)
env=ConfigSet.ConfigSet()
do_config=False
try:
env.load(os.path.join(Context.top_dir,Options.lockfile))
except Exception:
Logs.warn('Configuring the project')
do_config=True
else:
if env.run_dir!=Context.run_dir:
do_config=True
else:
h=0
for f in env['files']:
h=hash((h,Utils.readf(f,'rb')))
do_config=h!=env.hash
if do_config:
Options.commands.insert(0,self.cmd)
Options.commands.insert(0,'configure')
return
return execute_method(self)
return execute
Build.BuildContext.execute=autoconfigure(Build.BuildContext.execute)
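# A minimal sketch (illustrative, not part of waf) of the stale-configuration
# check used by autoconfigure above: configuration is redone when the combined
# hash of the tracked files no longer matches the env.hash value stored at
# configure time.
def files_hash(paths):
	h=0
	for f in paths:
		h=hash((h,Utils.readf(f,'rb')))
	return h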
|
bit-trade-one/SoundModuleAP
|
lib-src/lv2/sratom/waflib/Scripting.py
|
Python
|
gpl-2.0
| 10,970
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'urssus/ui/configdialog.ui'
#
# Created: Fri Feb 27 23:57:10 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(600, 319)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/urssus.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.tabs = QtGui.QTabWidget(Dialog)
self.tabs.setObjectName("tabs")
self.tab1 = QtGui.QWidget()
self.tab1.setObjectName("tab1")
self.tabs.addTab(self.tab1, "")
self.verticalLayout.addWidget(self.tabs)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.tabs.setTabText(self.tabs.indexOf(self.tab1), QtGui.QApplication.translate("Dialog", "tab1", None, QtGui.QApplication.UnicodeUTF8))
import icons_rc
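
# A minimal usage sketch (assumptions: PyQt4 installed and a display
# available); this is the conventional way to show a dialog built from a
# generated Ui_* class like the one above.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())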
|
ralsina/urssus
|
urssus/ui/Ui_configdialog.py
|
Python
|
gpl-2.0
| 1,833
|
#!/usr/bin/env python
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import os
import gviz_api
import webrtc.data_helper
def main():
"""
  This Python script displays a web page with test data created with the
  video_quality_measurement program, which is a tool in WebRTC.
  The script requires two external files and one Python library:
  - An HTML template file with layout and references to the json variables
    defined in this script
  - A data file in Python format, containing the following:
    - test_configuration - a list of dictionaries, each with 'name' and
      'value' keys, describing the test configuration.
    - frame_data_types - a dictionary that maps the different metrics to their
      data types.
    - frame_data - a list of dictionaries where each dictionary maps a metric
      to its value.
- The gviz_api.py of the Google Visualization Python API, available at
http://code.google.com/p/google-visualization-python/
The HTML file is shipped with the script, while the data file must be
generated by running video_quality_measurement with the --python flag
specified.
"""
print 'Content-type: text/html\n' # the newline is required!
page_template_filename = '../templates/chart_page_template.html'
  # The data files must be located in the project tree for App Engine to be
  # able to access them.
data_filenames = ['../data/vp8_sw.py', '../data/vp8_hw.py']
# Will contain info/error messages to be displayed on the resulting page.
messages = []
# Load the page HTML template.
try:
f = open(page_template_filename)
page_template = f.read()
f.close()
except IOError as e:
ShowErrorPage('Cannot open page template file: %s<br>Details: %s' %
(page_template_filename, e))
return
# Read data from external Python script files. First check that they exist.
  for filename in data_filenames[:]:  # Iterate over a copy; items may be removed.
    if not os.path.exists(filename):
      messages.append('Cannot open data file: %s' % filename)
      data_filenames.remove(filename)
# Read data from all existing input files.
data_list = []
test_configurations = []
names = []
for filename in data_filenames:
read_vars = {} # empty dictionary to load the data into.
execfile(filename, read_vars, read_vars)
test_configuration = read_vars['test_configuration']
table_description = read_vars['frame_data_types']
table_data = read_vars['frame_data']
# Verify the data in the file loaded properly.
if not table_description or not table_data:
messages.append('Invalid input file: %s. Missing description list or '
'data dictionary variables.' % filename)
continue
# Frame numbers appear as number type in the data, but Chart API requires
# values of the X-axis to be of string type.
# Change the frame_number column data type:
table_description['frame_number'] = ('string', 'Frame number')
# Convert all the values to string types:
for row in table_data:
row['frame_number'] = str(row['frame_number'])
# Store the unique data from this file in the high level lists.
test_configurations.append(test_configuration)
data_list.append(table_data)
# Name of the test run must be present.
test_name = FindConfiguration(test_configuration, 'name')
if not test_name:
      messages.append('Invalid input file: %s. Missing configuration key '
                      '"name".' % filename)
continue
names.append(test_name)
# Create data helper and build data tables for each graph.
helper = webrtc.data_helper.DataHelper(data_list, table_description,
names, messages)
  # Load the data into gviz_api.DataTable objects and create JSON strings.
description, data = helper.CreateConfigurationTable(test_configurations)
configurations = gviz_api.DataTable(description, data)
json_configurations = configurations.ToJSon() # pylint: disable=W0612
description, data = helper.CreateData('ssim')
ssim = gviz_api.DataTable(description, data)
# pylint: disable=W0612
json_ssim_data = ssim.ToJSon(helper.GetOrdering(description))
description, data = helper.CreateData('psnr')
psnr = gviz_api.DataTable(description, data)
# pylint: disable=W0612
json_psnr_data = psnr.ToJSon(helper.GetOrdering(description))
description, data = helper.CreateData('packets_dropped')
packet_loss = gviz_api.DataTable(description, data)
# pylint: disable=W0612
json_packet_loss_data = packet_loss.ToJSon(helper.GetOrdering(description))
description, data = helper.CreateData('bit_rate')
# Add a column of data points for the desired bit rate to be plotted.
# (uses test configuration from the last data set, assuming it is the same
# for all of them)
desired_bit_rate = FindConfiguration(test_configuration, 'bit_rate_in_kbps')
if not desired_bit_rate:
    ShowErrorPage('Cannot find configuration field named "bit_rate_in_kbps"')
return
desired_bit_rate = int(desired_bit_rate)
# Add new column data type description.
description['desired_bit_rate'] = ('number', 'Desired bit rate (kbps)')
for row in data:
row['desired_bit_rate'] = desired_bit_rate
bit_rate = gviz_api.DataTable(description, data)
# pylint: disable=W0612
json_bit_rate_data = bit_rate.ToJSon(helper.GetOrdering(description))
# Format the messages list with newlines.
messages = '\n'.join(messages)
# Put the variables as JSon strings into the template.
print page_template % vars()
def FindConfiguration(configuration, name):
""" Finds a configuration value using it's name.
Returns the first configuration with a matching name. Returns None if no
matching configuration is found. """
return_value = None
for row in configuration:
if row['name'] == name:
return_value = row['value']
break
return return_value
def ShowErrorPage(error_message):
print '<html><body>%s</body></html>' % error_message
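
# A minimal sketch (hypothetical values) of the data file layout consumed by
# execfile() and FindConfiguration above: test_configuration is a list of
# {'name': ..., 'value': ...} rows, frame_data_types maps each metric to its
# Chart API column type, and frame_data holds one dictionary per frame.
def _ExampleDataFileContents():
  test_configuration = [
      {'name': 'name', 'value': 'VP8 software'},
      {'name': 'bit_rate_in_kbps', 'value': '500'},
  ]
  frame_data_types = {'frame_number': ('number', 'Frame number'),
                      'psnr': ('number', 'PSNR (dB)')}
  frame_data = [{'frame_number': 1, 'psnr': 38.4},
                {'frame_number': 2, 'psnr': 37.9}]
  return test_configuration, frame_data_types, frame_data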
if __name__ == '__main__':
main()
|
golden1232004/webrtc_new
|
tools/python_charts/webrtc/main.py
|
Python
|
gpl-3.0
| 6,301
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
This decoder stacks on top of the 'jtag' PD and decodes JTAG data specific to
the STM32 microcontroller series.
Details:
https://en.wikipedia.org/wiki/STM32
http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/REFERENCE_MANUAL/CD00171190.pdf (e.g. chapter 31.7: "JTAG debug port")
'''
from .pd import *
|
JenSte/libsigrokdecode
|
decoders/jtag_stm32/__init__.py
|
Python
|
gpl-3.0
| 1,163
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = [
'Neema Kotonya (neemak@google.com)',
'Gun Pinyo (gunpinyo@google.com)'
]
import os
from xml.etree import cElementTree
import appengine_config
from common import schema_fields
from common import tags
from controllers import sites
from models import custom_modules
from models import services
from modules.math import messages
MATH_MODULE_URI = '/modules/math'
RESOURCES_URI = MATH_MODULE_URI + '/resources'
MATHJAX_URI = MATH_MODULE_URI + '/MathJax'
class MathTag(tags.ContextAwareTag):
"""Custom tag for mathematical notation using MathJax."""
binding_name = 'gcb-math'
@classmethod
def name(cls):
return 'Mathematical Formula'
@classmethod
def vendor(cls):
return 'gcb'
def render(self, node, context):
math_script = cElementTree.XML('<script/>')
# The formula is "text" type in the schema and so is presented in the
# tag's body.
math_script.text = node.text
input_type = node.attrib.get('input_type')
if input_type == 'MML':
math_script.set('type', 'math/mml')
else:
math_script.set('type', 'math/tex')
return math_script
def rollup_header_footer(self, context):
"""Include MathJax library only when a math tag is present."""
header = tags.html_string_to_element_tree("""
<script src="%s/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
</script>""" % MATHJAX_URI)
footer = tags.html_string_to_element_tree('')
return (header, footer)
def get_icon_url(self):
return RESOURCES_URI + '/math.png'
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(MathTag.name())
reg.add_property(
schema_fields.SchemaField(
'input_type', 'Type', 'string', i18n=False,
optional=True,
select_data=[('TeX', 'TeX'), ('MML', 'MathML')],
extra_schema_dict_values={'value': 'TeX'},
description=services.help_urls.make_learn_more_message(
messages.RTE_MATH_TYPE, 'math:math:input_type')))
reg.add_property(
schema_fields.SchemaField(
'formula', 'Mathematical Formula', 'text',
optional=True,
description=messages.RTE_MATH_MATHEMATICAL_FORMULA))
return reg
custom_module = None
def register_module():
"""Registers this module for use."""
def on_module_disable():
tags.Registry.remove_tag_binding(MathTag.binding_name)
def on_module_enable():
tags.Registry.add_tag_binding(MathTag.binding_name, MathTag)
global_routes = [
(RESOURCES_URI + '/.*', tags.ResourcesHandler),
(MATHJAX_URI + '/(fonts/.*)', sites.make_zip_handler(os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-fonts-2.3.0.zip'))),
(MATHJAX_URI + '/(.*)', sites.make_zip_handler(os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-2.3.0.zip')))]
namespaced_routes = []
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Mathematical Formula Display',
      'Provides a custom tag to embed mathematical formulas using TeX or MML.',
      global_routes, namespaced_routes,
notify_module_disabled=on_module_disable,
notify_module_enabled=on_module_enable)
return custom_module
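
# A standalone sketch (illustrative only, runs outside App Engine) of the
# transformation MathTag.render performs: the tag body becomes the text of a
# <script> element whose type attribute selects TeX or MathML rendering.
def _example_render():
  node = cElementTree.XML(
      '<gcb-math input_type="TeX">E = mc^2</gcb-math>')
  math_script = cElementTree.XML('<script/>')
  math_script.text = node.text
  if node.attrib.get('input_type') == 'MML':
    math_script.set('type', 'math/mml')
  else:
    math_script.set('type', 'math/tex')
  return cElementTree.tostring(math_script)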
|
GirlsCodePy/girlscode-coursebuilder
|
modules/math/math.py
|
Python
|
gpl-3.0
| 4,047
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Character ranges of letters
letters = 'a-zA-Z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u0103\u0106\u0107\
\u010c-\u010f\u0112-\u0115\u011a-\u012d\u0131\u0141\u0142\u0147\u0148\
\u0150-\u0153\u0158-\u0161\u0164\u0165\u016e-\u0171\u017d\u017e\
\u0391-\u03a1\u03a3-\u03a9\u03b1-\u03c9\u03d1\u03d2\u03d5\u03d6\
\u03da-\u03e1\u03f0\u03f1\u03f5\u210a-\u210c\u2110-\u2113\u211b\u211c\
\u2128\u212c\u212d\u212f-\u2131\u2133-\u2138\uf6b2-\uf6b5\uf6b7\uf6b9\
\uf6ba-\uf6bc\uf6be\uf6bf\uf6c1-\uf700\uf730\uf731\uf770\uf772\uf773\
\uf776\uf779\uf77a\uf77d-\uf780\uf782-\uf78b\uf78d-\uf78f\uf790\
\uf793-\uf79a\uf79c-\uf7a2\uf7a4-\uf7bd\uf800-\uf833\ufb01\ufb02'
# Character ranges of letterlikes
letterlikes = '\u0024\u00A1\u00A2\u00A3\u00A5\u00A7\u00A9\u00AB\u00AE\
\u00B0\u00B5\u00B6\u00B8\u00BB\u00BF\u02C7\u02D8\u2013\u2014\u2020\u2021\
\u2022\u2026\u2032\u2033\u2035\u2036\u2060\u20AC\u210F\u2122\u2127\u212B\
\u21B5\u2205\u221E\u221F\u2220\u2221\u2222\u22EE\u22EF\u22F0\u22F1\u2300\
\u2318\u231A\u23B4\u23B5\u2500\u2502\u25A0\u25A1\u25AA\u25AE\u25AF\u25B2\
\u25B3\u25BC\u25BD\u25C0\u25C6\u25C7\u25CB\u25CF\u25E6\u25FB\u25FC\u2605\
\u2639\u263A\u2660\u2661\u2662\u2663\u266D\u266E\u266F\u2736\uF3A0\uF3B8\
\uF3B9\uF527\uF528\uF720\uF721\uF722\uF723\uF725\uF749\uF74A\uF74D\uF74E\
\uF74F\uF750\uF751\uF752\uF753\uF754\uF755\uF756\uF757\uF760\uF763\uF766\
\uF768\uF769\uF76A\uF76B\uF76C\uF7D4\uF800\uF801\uF802\uF803\uF804\uF805\
\uF806\uF807\uF808\uF809\uF80A\uF80B\uF80C\uF80D\uF80E\uF80F\uF810\uF811\
\uF812\uF813\uF814\uF815\uF816\uF817\uF818\uF819\uF81A\uF81B\uF81C\uF81D\
\uF81E\uF81F\uF820\uF821\uF822\uF823\uF824\uF825\uF826\uF827\uF828\uF829\
\uF82A\uF82B\uF82C\uF82D\uF82E\uF82F\uF830\uF831\uF832\uF833\uFE35\uFE36\
\uFE37\uFE38'
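
# A minimal sketch (illustrative only) of how the ranges above can be used:
# both strings are written as regular-expression character-class contents, so
# a matcher for Mathematica-style symbol names can be built by concatenation.
import re
_symbol_re = re.compile('[' + letters + '][' + letters + '0-9]*')
# _symbol_re.match('Integrate') and _symbol_re.match('\u03b1') both succeed.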
# All supported longname characters
named_characters = {
'AAcute': '\u00E1',
'ABar': '\u0101',
'ACup': '\u0103',
'ADoubleDot': '\u00E4',
'AE': '\u00E6',
'AGrave': '\u00E0',
'AHat': '\u00E2',
'Aleph': '\u2135',
'AliasDelimiter': '\uF764',
'AliasIndicator': '\uF768',
'AlignmentMarker': '\uF760',
'Alpha': '\u03B1',
'AltKey': '\uF7D1',
'And': '\u2227',
'Angle': '\u2220',
'Angstrom': '\u212B',
'ARing': '\u00E5',
'AscendingEllipsis': '\u22F0',
'ATilde': '\u00E3',
'AutoLeftMatch': '\uF3A8',
'AutoOperand': '\uF3AE',
'AutoPlaceholder': '\uF3A4',
'AutoRightMatch': '\uF3A9',
'AutoSpace': '\uF3AD',
'Backslash': '\u2216',
'BeamedEighthNote': '\u266B',
'BeamedSixteenthNote': '\u266C',
'Because': '\u2235',
'Bet': '\u2136',
'Beta': '\u03B2',
'BlackBishop': '\u265D',
'BlackKing': '\u265A',
'BlackKnight': '\u265E',
'BlackPawn': '\u265F',
'BlackQueen': '\u265B',
'BlackRook': '\u265C',
'Breve': '\u02D8',
'Bullet': '\u2022',
'CAcute': '\u0107',
'CapitalAAcute': '\u00C1',
'CapitalABar': '\u0100',
'CapitalACup': '\u0102',
'CapitalADoubleDot': '\u00C4',
'CapitalAE': '\u00C6',
'CapitalAGrave': '\u00C0',
'CapitalAHat': '\u00C2',
'CapitalAlpha': '\u0391',
'CapitalARing': '\u00C5',
'CapitalATilde': '\u00C3',
'CapitalBeta': '\u0392',
'CapitalCAcute': '\u0106',
'CapitalCCedilla': '\u00C7',
'CapitalCHacek': '\u010C',
'CapitalChi': '\u03A7',
'CapitalDelta': '\u0394',
'CapitalDHacek': '\u010E',
'CapitalDifferentialD': '\uF74B',
'CapitalDigamma': '\u03DC',
'CapitalEAcute': '\u00C9',
'CapitalEBar': '\u0112',
'CapitalECup': '\u0114',
'CapitalEDoubleDot': '\u00CB',
'CapitalEGrave': '\u00C8',
'CapitalEHacek': '\u011A',
'CapitalEHat': '\u00CA',
'CapitalEpsilon': '\u0395',
'CapitalEta': '\u0397',
'CapitalEth': '\u00D0',
'CapitalGamma': '\u0393',
'CapitalIAcute': '\u00CD',
'CapitalICup': '\u012C',
'CapitalIDoubleDot': '\u00CF',
'CapitalIGrave': '\u00CC',
'CapitalIHat': '\u00CE',
'CapitalIota': '\u0399',
'CapitalKappa': '\u039A',
'CapitalKoppa': '\u03DE',
'CapitalLambda': '\u039B',
'CapitalLSlash': '\u0141',
'CapitalMu': '\u039C',
'CapitalNHacek': '\u0147',
'CapitalNTilde': '\u00D1',
'CapitalNu': '\u039D',
'CapitalOAcute': '\u00D3',
'CapitalODoubleAcute': '\u0150',
'CapitalODoubleDot': '\u00D6',
'CapitalOE': '\u0152',
'CapitalOGrave': '\u00D2',
'CapitalOHat': '\u00D4',
'CapitalOmega': '\u03A9',
'CapitalOmicron': '\u039F',
'CapitalOSlash': '\u00D8',
'CapitalOTilde': '\u00D5',
'CapitalPhi': '\u03A6',
'CapitalPi': '\u03A0',
'CapitalPsi': '\u03A8',
'CapitalRHacek': '\u0158',
'CapitalRho': '\u03A1',
'CapitalSampi': '\u03E0',
'CapitalSHacek': '\u0160',
'CapitalSigma': '\u03A3',
'CapitalStigma': '\u03DA',
'CapitalTau': '\u03A4',
'CapitalTHacek': '\u0164',
'CapitalTheta': '\u0398',
'CapitalThorn': '\u00DE',
'CapitalUAcute': '\u00DA',
'CapitalUDoubleAcute': '\u0170',
'CapitalUDoubleDot': '\u00DC',
'CapitalUGrave': '\u00D9',
'CapitalUHat': '\u00DB',
'CapitalUpsilon': '\u03A5',
'CapitalURing': '\u016E',
'CapitalXi': '\u039E',
'CapitalYAcute': '\u00DD',
'CapitalZeta': '\u0396',
'CapitalZHacek': '\u017D',
'Cap': '\u2322',
'CCedilla': '\u00E7',
'Cedilla': '\u00B8',
'CenterDot': '\u00B7',
'CenterEllipsis': '\u22EF',
'Cent': '\u00A2',
'CHacek': '\u010D',
'Checkmark': '\u2713',
'Chi': '\u03C7',
'CircleDot': '\u2299',
'CircleMinus': '\u2296',
'CirclePlus': '\u2295',
'CircleTimes': '\u2297',
'ClockwiseContourIntegral': '\u2232',
'CloseCurlyDoubleQuote': '\u201D',
'CloseCurlyQuote': '\u2019',
'CloverLeaf': '\u2318',
'ClubSuit': '\u2663',
'Colon': '\u2236',
'CommandKey': '\uF76A',
'Congruent': '\u2261',
'Conjugate': '\uF3C8',
'ConjugateTranspose': '\uF3C9',
'ConstantC': '\uF7DA',
'Continuation': '\uF3B1',
'ContourIntegral': '\u222E',
'ControlKey': '\uF763',
'Coproduct': '\u2210',
'Copyright': '\u00A9',
'CounterClockwiseContourIntegral': '\u2233',
'Cross': '\uF4A0',
'CupCap': '\u224D',
'Cup': '\u2323',
'CurlyCapitalUpsilon': '\u03D2',
'CurlyEpsilon': '\u03B5',
'CurlyKappa': '\u03F0',
'CurlyPhi': '\u03C6',
'CurlyPi': '\u03D6',
'CurlyRho': '\u03F1',
'CurlyTheta': '\u03D1',
'Currency': '\u00A4',
'Dagger': '\u2020',
'Dalet': '\u2138',
'Dash': '\u2013',
'Degree': '\u00B0',
'DeleteKey': '\uF7D0',
'Del': '\u2207',
'Delta': '\u03B4',
'DescendingEllipsis': '\u22F1',
'DHacek': '\u010F',
'Diameter': '\u2300',
'Diamond': '\u22C4',
'DiamondSuit': '\u2662',
'DifferenceDelta': '\u2206',
'DifferentialD': '\uF74C',
'Digamma': '\u03DD',
'DiscreteRatio': '\uF4A4',
'DiscreteShift': '\uF4A3',
'DiscretionaryHyphen': '\u00AD',
'DiscretionaryLineSeparator': '\uF76E',
'DiscretionaryParagraphSeparator': '\uF76F',
'Divide': '\u00F7',
'DotEqual': '\u2250',
'DotlessI': '\u0131',
'DotlessJ': '\uF700',
'DottedSquare': '\uF751',
'DoubleContourIntegral': '\u222F',
'DoubleDagger': '\u2021',
'DoubledGamma': '\uF74A',
'DoubleDownArrow': '\u21D3',
'DoubledPi': '\uF749',
'DoubleLeftArrow': '\u21D0',
'DoubleLeftRightArrow': '\u21D4',
'DoubleLeftTee': '\u2AE4',
'DoubleLongLeftArrow': '\u27F8',
'DoubleLongLeftRightArrow': '\u27FA',
'DoubleLongRightArrow': '\u27F9',
'DoublePrime': '\u2033',
'DoubleRightArrow': '\u21D2',
'DoubleRightTee': '\u22A8',
'DoubleStruckA': '\uF6E6',
'DoubleStruckB': '\uF6E7',
'DoubleStruckC': '\uF6E8',
'DoubleStruckCapitalA': '\uF7A4',
'DoubleStruckCapitalB': '\uF7A5',
'DoubleStruckCapitalC': '\uF7A6',
'DoubleStruckCapitalD': '\uF7A7',
'DoubleStruckCapitalE': '\uF7A8',
'DoubleStruckCapitalF': '\uF7A9',
'DoubleStruckCapitalG': '\uF7AA',
'DoubleStruckCapitalH': '\uF7AB',
'DoubleStruckCapitalI': '\uF7AC',
'DoubleStruckCapitalJ': '\uF7AD',
'DoubleStruckCapitalK': '\uF7AE',
'DoubleStruckCapitalL': '\uF7AF',
'DoubleStruckCapitalM': '\uF7B0',
'DoubleStruckCapitalN': '\uF7B1',
'DoubleStruckCapitalO': '\uF7B2',
'DoubleStruckCapitalP': '\uF7B3',
'DoubleStruckCapitalQ': '\uF7B4',
'DoubleStruckCapitalR': '\uF7B5',
'DoubleStruckCapitalS': '\uF7B6',
'DoubleStruckCapitalT': '\uF7B7',
'DoubleStruckCapitalU': '\uF7B8',
'DoubleStruckCapitalV': '\uF7B9',
'DoubleStruckCapitalW': '\uF7BA',
'DoubleStruckCapitalX': '\uF7BB',
'DoubleStruckCapitalY': '\uF7BC',
'DoubleStruckCapitalZ': '\uF7BD',
'DoubleStruckD': '\uF6E9',
'DoubleStruckE': '\uF6EA',
'DoubleStruckEight': '\uF7E3',
'DoubleStruckF': '\uF6EB',
'DoubleStruckFive': '\uF7E0',
'DoubleStruckFour': '\uF7DF',
'DoubleStruckG': '\uF6EC',
'DoubleStruckH': '\uF6ED',
'DoubleStruckI': '\uF6EE',
'DoubleStruckJ': '\uF6EF',
'DoubleStruckK': '\uF6F0',
'DoubleStruckL': '\uF6F1',
'DoubleStruckM': '\uF6F2',
'DoubleStruckN': '\uF6F3',
'DoubleStruckNine': '\uF7E4',
'DoubleStruckO': '\uF6F4',
'DoubleStruckOne': '\uF7DC',
'DoubleStruckP': '\uF6F5',
'DoubleStruckQ': '\uF6F6',
'DoubleStruckR': '\uF6F7',
'DoubleStruckS': '\uF6F8',
'DoubleStruckSeven': '\uF7E2',
'DoubleStruckSix': '\uF7E1',
'DoubleStruckT': '\uF6F9',
'DoubleStruckThree': '\uF7DE',
'DoubleStruckTwo': '\uF7DD',
'DoubleStruckU': '\uF6FA',
'DoubleStruckV': '\uF6FB',
'DoubleStruckW': '\uF6FC',
'DoubleStruckX': '\uF6FD',
'DoubleStruckY': '\uF6FE',
'DoubleStruckZ': '\uF6FF',
'DoubleStruckZero': '\uF7DB',
'DoubleUpArrow': '\u21D1',
'DoubleUpDownArrow': '\u21D5',
'DoubleVerticalBar': '\u2225',
'DownArrowBar': '\u2913',
'DownArrow': '\u2193',
'DownArrowUpArrow': '\u21F5',
'DownBreve': '\uF755',
'DownExclamation': '\u00A1',
'DownLeftRightVector': '\u2950',
'DownLeftTeeVector': '\u295E',
'DownLeftVector': '\u21BD',
'DownLeftVectorBar': '\u2956',
'DownPointer': '\u25BE',
'DownQuestion': '\u00BF',
'DownRightTeeVector': '\u295F',
'DownRightVector': '\u21C1',
'DownRightVectorBar': '\u2957',
'DownTeeArrow': '\u21A7',
'DownTee': '\u22A4',
'EAcute': '\u00E9',
'Earth': '\u2641',
'EBar': '\u0113',
'ECup': '\u0115',
'EDoubleDot': '\u00EB',
'EGrave': '\u00E8',
'EHacek': '\u011B',
'EHat': '\u00EA',
'EighthNote': '\u266A',
'Element': '\u2208',
'Ellipsis': '\u2026',
'EmptyCircle': '\u25CB',
'EmptyDiamond': '\u25C7',
'EmptyDownTriangle': '\u25BD',
'EmptyRectangle': '\u25AF',
'EmptySet': '\u2205',
'EmptySmallCircle': '\u25E6',
'EmptySmallSquare': '\u25FB',
'EmptySquare': '\u25A1',
'EmptyUpTriangle': '\u25B3',
'EmptyVerySmallSquare': '\u25AB',
'EnterKey': '\uF7D4',
'EntityEnd': '\uF3B9',
'EntityStart': '\uF3B8',
'Epsilon': '\u03F5',
'Equal': '\uF431',
'EqualTilde': '\u2242',
'Equilibrium': '\u21CC',
'Equivalent': '\u29E6',
'ErrorIndicator': '\uF767',
'EscapeKey': '\uF769',
'Eta': '\u03B7',
'Eth': '\u00F0',
'Euro': '\u20AC',
'Exists': '\u2203',
'ExponentialE': '\uF74D',
'FiLigature': '\uFB01',
'FilledCircle': '\u25CF',
'FilledDiamond': '\u25C6',
'FilledDownTriangle': '\u25BC',
'FilledLeftTriangle': '\u25C0',
'FilledRectangle': '\u25AE',
'FilledRightTriangle': '\u25B6',
'FilledSmallCircle': '\uF750',
'FilledSmallSquare': '\u25FC',
'FilledSquare': '\u25A0',
'FilledUpTriangle': '\u25B2',
'FilledVerySmallSquare': '\u25AA',
'FinalSigma': '\u03C2',
'FirstPage': '\uF7FA',
'FivePointedStar': '\u2605',
'Flat': '\u266D',
'FlLigature': '\uFB02',
'Florin': '\u0192',
'ForAll': '\u2200',
'FormalA': '\uF800',
'FormalB': '\uF801',
'FormalC': '\uF802',
'FormalCapitalA': '\uF81A',
'FormalCapitalB': '\uF81B',
'FormalCapitalC': '\uF81C',
'FormalCapitalD': '\uF81D',
'FormalCapitalE': '\uF81E',
'FormalCapitalF': '\uF81F',
'FormalCapitalG': '\uF820',
'FormalCapitalH': '\uF821',
'FormalCapitalI': '\uF822',
'FormalCapitalJ': '\uF823',
'FormalCapitalK': '\uF824',
'FormalCapitalL': '\uF825',
'FormalCapitalM': '\uF826',
'FormalCapitalN': '\uF827',
'FormalCapitalO': '\uF828',
'FormalCapitalP': '\uF829',
'FormalCapitalQ': '\uF82A',
'FormalCapitalR': '\uF82B',
'FormalCapitalS': '\uF82C',
'FormalCapitalT': '\uF82D',
'FormalCapitalU': '\uF82E',
'FormalCapitalV': '\uF82F',
'FormalCapitalW': '\uF830',
'FormalCapitalX': '\uF831',
'FormalCapitalY': '\uF832',
'FormalCapitalZ': '\uF833',
'FormalD': '\uF803',
'FormalE': '\uF804',
'FormalF': '\uF805',
'FormalG': '\uF806',
'FormalH': '\uF807',
'FormalI': '\uF808',
'FormalJ': '\uF809',
'FormalK': '\uF80A',
'FormalL': '\uF80B',
'FormalM': '\uF80C',
'FormalN': '\uF80D',
'FormalO': '\uF80E',
'FormalP': '\uF80F',
'FormalQ': '\uF810',
'FormalR': '\uF811',
'FormalS': '\uF812',
'FormalT': '\uF813',
'FormalU': '\uF814',
'FormalV': '\uF815',
'FormalW': '\uF816',
'FormalX': '\uF817',
'FormalY': '\uF818',
'FormalZ': '\uF819',
'FreakedSmiley': '\uF721',
'Function': '\uF4A1',
'Gamma': '\u03B3',
'Gimel': '\u2137',
'GothicA': '\uF6CC',
'GothicB': '\uF6CD',
'GothicC': '\uF6CE',
'GothicCapitalA': '\uF78A',
'GothicCapitalB': '\uF78B',
'GothicCapitalC': '\u212D',
'GothicCapitalD': '\uF78D',
'GothicCapitalE': '\uF78E',
'GothicCapitalF': '\uF78F',
'GothicCapitalG': '\uF790',
'GothicCapitalH': '\u210C',
'GothicCapitalI': '\u2111',
'GothicCapitalJ': '\uF793',
'GothicCapitalK': '\uF794',
'GothicCapitalL': '\uF795',
'GothicCapitalM': '\uF796',
'GothicCapitalN': '\uF797',
'GothicCapitalO': '\uF798',
'GothicCapitalP': '\uF799',
'GothicCapitalQ': '\uF79A',
'GothicCapitalR': '\u211C',
'GothicCapitalS': '\uF79C',
'GothicCapitalT': '\uF79D',
'GothicCapitalU': '\uF79E',
'GothicCapitalV': '\uF79F',
'GothicCapitalW': '\uF7A0',
'GothicCapitalX': '\uF7A1',
'GothicCapitalY': '\uF7A2',
'GothicCapitalZ': '\u2128',
'GothicD': '\uF6CF',
'GothicE': '\uF6D0',
'GothicEight': '\uF7ED',
'GothicF': '\uF6D1',
'GothicFive': '\uF7EA',
'GothicFour': '\uF7E9',
'GothicG': '\uF6D2',
'GothicH': '\uF6D3',
'GothicI': '\uF6D4',
'GothicJ': '\uF6D5',
'GothicK': '\uF6D6',
'GothicL': '\uF6D7',
'GothicM': '\uF6D8',
'GothicN': '\uF6D9',
'GothicNine': '\uF7EF',
'GothicO': '\uF6DA',
'GothicOne': '\uF7E6',
'GothicP': '\uF6DB',
'GothicQ': '\uF6DC',
'GothicR': '\uF6DD',
'GothicS': '\uF6DE',
'GothicSeven': '\uF7EC',
'GothicSix': '\uF7EB',
'GothicT': '\uF6DF',
'GothicThree': '\uF7E8',
'GothicTwo': '\uF7E7',
'GothicU': '\uF6E0',
'GothicV': '\uF6E1',
'GothicW': '\uF6E2',
'GothicX': '\uF6E3',
'GothicY': '\uF6E4',
'GothicZ': '\uF6E5',
'GothicZero': '\uF7E5',
'GrayCircle': '\uF753',
'GraySquare': '\uF752',
'GreaterEqualLess': '\u22DB',
'GreaterEqual': '\u2265',
'GreaterFullEqual': '\u2267',
'GreaterGreater': '\u226B',
'GreaterLess': '\u2277',
'GreaterSlantEqual': '\u2A7E',
'GreaterTilde': '\u2273',
'Hacek': '\u02C7',
'HappySmiley': '\u263A',
'HBar': '\u210F',
'HeartSuit': '\u2661',
'HermitianConjugate': '\uF3CE',
'HorizontalLine': '\u2500',
'HumpDownHump': '\u224E',
'HumpEqual': '\u224F',
'Hyphen': '\u2010',
'IAcute': '\u00ED',
'ICup': '\u012D',
'IDoubleDot': '\u00EF',
'IGrave': '\u00EC',
'IHat': '\u00EE',
'ImaginaryI': '\uF74E',
'ImaginaryJ': '\uF74F',
'ImplicitPlus': '\uF39E',
'Implies': '\uF523',
'Infinity': '\u221E',
'Integral': '\u222B',
'Intersection': '\u22C2',
'InvisibleApplication': '\uF76D',
'InvisibleComma': '\uF765',
'InvisiblePostfixScriptBase': '\uF3B4',
'InvisiblePrefixScriptBase': '\uF3B3',
'InvisibleSpace': '\uF360',
'InvisibleTimes': '\u2062',
'Iota': '\u03B9',
'Jupiter': '\u2643',
'Kappa': '\u03BA',
'KernelIcon': '\uF756',
'Koppa': '\u03DF',
'Lambda': '\u03BB',
'LastPage': '\uF7FB',
'LeftAngleBracket': '\u2329',
'LeftArrowBar': '\u21E4',
'LeftArrow': '\u2190',
'LeftArrowRightArrow': '\u21C6',
'LeftBracketingBar': '\uF603',
'LeftCeiling': '\u2308',
'LeftDoubleBracket': '\u301A',
'LeftDoubleBracketingBar': '\uF605',
'LeftDownTeeVector': '\u2961',
'LeftDownVectorBar': '\u2959',
'LeftDownVector': '\u21C3',
'LeftFloor': '\u230A',
'LeftGuillemet': '\u00AB',
'LeftModified': '\uF76B',
'LeftPointer': '\u25C2',
'LeftRightArrow': '\u2194',
'LeftRightVector': '\u294E',
'LeftSkeleton': '\uF761',
'LeftTee': '\u22A3',
'LeftTeeArrow': '\u21A4',
'LeftTeeVector': '\u295A',
'LeftTriangle': '\u22B2',
'LeftTriangleBar': '\u29CF',
'LeftTriangleEqual': '\u22B4',
'LeftUpDownVector': '\u2951',
'LeftUpTeeVector': '\u2960',
'LeftUpVector': '\u21BF',
'LeftUpVectorBar': '\u2958',
'LeftVector': '\u21BC',
'LeftVectorBar': '\u2952',
'LessEqual': '\u2264',
'LessEqualGreater': '\u22DA',
'LessFullEqual': '\u2266',
'LessGreater': '\u2276',
'LessLess': '\u226A',
'LessSlantEqual': '\u2A7D',
'LessTilde': '\u2272',
'LetterSpace': '\uF754',
'LightBulb': '\uF723',
'LongDash': '\u2014',
'LongEqual': '\uF7D9',
'LongLeftArrow': '\u27F5',
'LongLeftRightArrow': '\u27F7',
'LongRightArrow': '\u27F6',
'LowerLeftArrow': '\u2199',
'LowerRightArrow': '\u2198',
'LSlash': '\u0142',
'Mars': '\u2642',
'MathematicaIcon': '\uF757',
'MeasuredAngle': '\u2221',
'MediumSpace': '\u205F',
'Mercury': '\u263F',
'Mho': '\u2127',
'Micro': '\u00B5',
'Minus': '\u2212',
'MinusPlus': '\u2213',
'Mu': '\u03BC',
'Nand': '\u22BC',
'Natural': '\u266E',
'NegativeMediumSpace': '\uF383',
'NegativeThickSpace': '\uF384',
'NegativeThinSpace': '\uF382',
'NegativeVeryThinSpace': '\uF380',
'Neptune': '\u2646',
'NestedGreaterGreater': '\u2AA2',
'NestedLessLess': '\u2AA1',
'NeutralSmiley': '\uF722',
'NHacek': '\u0148',
'NoBreak': '\u2060',
'NonBreakingSpace': '\u00A0',
'Nor': '\u22BD',
'NotCongruent': '\u2262',
'NotCupCap': '\u226D',
'NotDoubleVerticalBar': '\u2226',
'NotElement': '\u2209',
'NotEqual': '\u2260',
'NotEqualTilde': '\uF400',
'NotExists': '\u2204',
'NotGreater': '\u226F',
'NotGreaterEqual': '\u2271',
'NotGreaterFullEqual': '\u2269',
'NotGreaterGreater': '\uF427',
'NotGreaterLess': '\u2279',
'NotGreaterSlantEqual': '\uF429',
'NotGreaterTilde': '\u2275',
'NotHumpDownHump': '\uF402',
'NotHumpEqual': '\uF401',
'NotLeftTriangle': '\u22EA',
'NotLeftTriangleBar': '\uF412',
'NotLeftTriangleEqual': '\u22EC',
'NotLessEqual': '\u2270',
'NotLessFullEqual': '\u2268',
'NotLessGreater': '\u2278',
'NotLess': '\u226E',
'NotLessLess': '\uF422',
'NotLessSlantEqual': '\uF424',
'NotLessTilde': '\u2274',
'Not': '\u00AC',
'NotNestedGreaterGreater': '\uF428',
'NotNestedLessLess': '\uF423',
'NotPrecedes': '\u2280',
'NotPrecedesEqual': '\uF42B',
'NotPrecedesSlantEqual': '\u22E0',
'NotPrecedesTilde': '\u22E8',
'NotReverseElement': '\u220C',
'NotRightTriangle': '\u22EB',
'NotRightTriangleBar': '\uF413',
'NotRightTriangleEqual': '\u22ED',
'NotSquareSubset': '\uF42E',
'NotSquareSubsetEqual': '\u22E2',
'NotSquareSuperset': '\uF42F',
'NotSquareSupersetEqual': '\u22E3',
'NotSubset': '\u2284',
'NotSubsetEqual': '\u2288',
'NotSucceeds': '\u2281',
'NotSucceedsEqual': '\uF42D',
'NotSucceedsSlantEqual': '\u22E1',
'NotSucceedsTilde': '\u22E9',
'NotSuperset': '\u2285',
'NotSupersetEqual': '\u2289',
'NotTilde': '\u2241',
'NotTildeEqual': '\u2244',
'NotTildeFullEqual': '\u2247',
'NotTildeTilde': '\u2249',
'NotVerticalBar': '\u2224',
'NTilde': '\u00F1',
'Nu': '\u03BD',
'Null': '\uF3A0',
'NumberSign': '\uF724',
'OAcute': '\u00F3',
'ODoubleAcute': '\u0151',
'ODoubleDot': '\u00F6',
'OE': '\u0153',
'OGrave': '\u00F2',
'OHat': '\u00F4',
'Omega': '\u03C9',
'Omicron': '\u03BF',
'OpenCurlyDoubleQuote': '\u201C',
'OpenCurlyQuote': '\u2018',
'OptionKey': '\uF7D2',
'Or': '\u2228',
'OSlash': '\u00F8',
'OTilde': '\u00F5',
'OverBrace': '\uFE37',
'OverBracket': '\u23B4',
'OverParenthesis': '\uFE35',
'Paragraph': '\u00B6',
'PartialD': '\u2202',
'Phi': '\u03D5',
'Pi': '\u03C0',
'Piecewise': '\uF361',
'Placeholder': '\uF528',
'PlusMinus': '\u00B1',
'Pluto': '\u2647',
'Precedes': '\u227A',
'PrecedesEqual': '\u2AAF',
'PrecedesSlantEqual': '\u227C',
'PrecedesTilde': '\u227E',
'Prime': '\u2032',
'Product': '\u220F',
'Proportion': '\u2237',
'Proportional': '\u221D',
'Psi': '\u03C8',
'QuarterNote': '\u2669',
'RawAmpersand': '\u0026',
'RawAt': '\u0040',
'RawBackquote': '\u0060',
'RawBackslash': '\u005C',
'RawColon': '\u003A',
'RawComma': '\u002C',
'RawDash': '\u002D',
'RawDollar': '\u0024',
'RawDot': '\u002E',
'RawDoubleQuote': '\u0022',
'RawEqual': '\u003D',
'RawEscape': '\u001B',
'RawExclamation': '\u0021',
'RawGreater': '\u003E',
'RawLeftBrace': '\u007B',
'RawLeftBracket': '\u005B',
'RawLeftParenthesis': '\u0028',
'RawLess': '\u003C',
'RawNumberSign': '\u0023',
'RawPercent': '\u0025',
'RawPlus': '\u002B',
'RawQuestion': '\u003F',
'RawQuote': '\u0027',
'RawRightBrace': '\u007D',
'RawRightBracket': '\u005D',
'RawRightParenthesis': '\u0029',
'RawSemicolon': '\u003B',
'RawSlash': '\u002F',
'RawSpace': '\u0020',
'RawStar': '\u002A',
'RawTab': '\u0009',
'RawTilde': '\u007E',
'RawUnderscore': '\u005F',
'RawVerticalBar': '\u007C',
'RawWedge': '\u005E',
'RegisteredTrademark': '\u00AE',
'ReturnIndicator': '\u21B5',
'ReturnKey': '\uF766',
'ReverseDoublePrime': '\u2036',
'ReverseElement': '\u220B',
'ReverseEquilibrium': '\u21CB',
'ReversePrime': '\u2035',
'ReverseUpEquilibrium': '\u296F',
'RHacek': '\u0159',
'Rho': '\u03C1',
'RightAngle': '\u221F',
'RightAngleBracket': '\u232A',
'RightArrow': '\u2192',
'RightArrowBar': '\u21E5',
'RightArrowLeftArrow': '\u21C4',
'RightBracketingBar': '\uF604',
'RightCeiling': '\u2309',
'RightDoubleBracket': '\u301B',
'RightDoubleBracketingBar': '\uF606',
'RightDownTeeVector': '\u295D',
'RightDownVector': '\u21C2',
'RightDownVectorBar': '\u2955',
'RightFloor': '\u230B',
'RightGuillemet': '\u00BB',
'RightModified': '\uF76C',
'RightPointer': '\u25B8',
'RightSkeleton': '\uF762',
'RightTee': '\u22A2',
'RightTeeArrow': '\u21A6',
'RightTeeVector': '\u295B',
'RightTriangle': '\u22B3',
'RightTriangleBar': '\u29D0',
'RightTriangleEqual': '\u22B5',
'RightUpDownVector': '\u294F',
'RightUpTeeVector': '\u295C',
'RightUpVector': '\u21BE',
'RightUpVectorBar': '\u2954',
'RightVector': '\u21C0',
'RightVectorBar': '\u2953',
'RoundImplies': '\u2970',
'RoundSpaceIndicator': '\uF3B2',
'Rule': '\uF522',
'RuleDelayed': '\uF51F',
'SadSmiley': '\u2639',
'Sampi': '\u03E0',
'Saturn': '\u2644',
'ScriptA': '\uF6B2',
'ScriptB': '\uF6B3',
'ScriptC': '\uF6B4',
'ScriptCapitalA': '\uF770',
'ScriptCapitalB': '\u212C',
'ScriptCapitalC': '\uF772',
'ScriptCapitalD': '\uF773',
'ScriptCapitalE': '\u2130',
'ScriptCapitalF': '\u2131',
'ScriptCapitalG': '\uF776',
'ScriptCapitalH': '\u210B',
'ScriptCapitalI': '\u2110',
'ScriptCapitalJ': '\uF779',
'ScriptCapitalK': '\uF77A',
'ScriptCapitalL': '\u2112',
'ScriptCapitalM': '\u2133',
'ScriptCapitalN': '\uF77D',
'ScriptCapitalO': '\uF77E',
'ScriptCapitalP': '\u2118',
'ScriptCapitalQ': '\uF780',
'ScriptCapitalR': '\u211B',
'ScriptCapitalS': '\uF782',
'ScriptCapitalT': '\uF783',
'ScriptCapitalU': '\uF784',
'ScriptCapitalV': '\uF785',
'ScriptCapitalW': '\uF786',
'ScriptCapitalX': '\uF787',
'ScriptCapitalY': '\uF788',
'ScriptCapitalZ': '\uF789',
'ScriptD': '\uF6B5',
'ScriptDotlessI': '\uF730',
'ScriptDotlessJ': '\uF731',
'ScriptE': '\u212F',
'ScriptEight': '\uF7F8',
'ScriptF': '\uF6B7',
'ScriptFive': '\uF7F5',
'ScriptFour': '\uF7F4',
'ScriptG': '\u210A',
'ScriptH': '\uF6B9',
'ScriptI': '\uF6BA',
'ScriptJ': '\uF6BB',
'ScriptK': '\uF6BC',
'ScriptL': '\u2113',
'ScriptM': '\uF6BE',
'ScriptN': '\uF6BF',
'ScriptNine': '\uF7F9',
'ScriptO': '\u2134',
'ScriptOne': '\uF7F1',
'ScriptP': '\uF6C1',
'ScriptQ': '\uF6C2',
'ScriptR': '\uF6C3',
'ScriptS': '\uF6C4',
'ScriptSeven': '\uF7F7',
'ScriptSix': '\uF7F6',
'ScriptT': '\uF6C5',
'ScriptThree': '\uF7F3',
'ScriptTwo': '\uF7F2',
'ScriptU': '\uF6C6',
'ScriptV': '\uF6C7',
'ScriptW': '\uF6C8',
'ScriptX': '\uF6C9',
'ScriptY': '\uF6CA',
'ScriptZ': '\uF6CB',
'ScriptZero': '\uF7F0',
'Section': '\u00A7',
'SelectionPlaceholder': '\uF527',
'SHacek': '\u0161',
'Sharp': '\u266F',
'ShortLeftArrow': '\uF526',
'ShortRightArrow': '\uF525',
'Sigma': '\u03C3',
'SixPointedStar': '\u2736',
'SkeletonIndicator': '\u2043',
'SmallCircle': '\u2218',
'SpaceIndicator': '\u2423',
'SpaceKey': '\uF7BF',
'SpadeSuit': '\u2660',
'SpanFromAbove': '\uF3BB',
'SpanFromBoth': '\uF3BC',
'SpanFromLeft': '\uF3BA',
'SphericalAngle': '\u2222',
'Sqrt': '\u221A',
'Square': '\uF520',
'SquareIntersection': '\u2293',
'SquareSubset': '\u228F',
'SquareSubsetEqual': '\u2291',
'SquareSuperset': '\u2290',
'SquareSupersetEqual': '\u2292',
'SquareUnion': '\u2294',
'Star': '\u22C6',
'Sterling': '\u00A3',
'Stigma': '\u03DB',
'Subset': '\u2282',
'SubsetEqual': '\u2286',
'Succeeds': '\u227B',
'SucceedsEqual': '\u2AB0',
'SucceedsSlantEqual': '\u227D',
'SucceedsTilde': '\u227F',
'SuchThat': '\u220D',
'Sum': '\u2211',
'Superset': '\u2283',
'SupersetEqual': '\u2287',
'SystemEnterKey': '\uF75F',
'SZ': '\u00DF',
'TabKey': '\uF7BE',
'Tau': '\u03C4',
'THacek': '\u0165',
'Therefore': '\u2234',
'Theta': '\u03B8',
'ThickSpace': '\u2005',
'ThinSpace': '\u2009',
'Thorn': '\u00FE',
'Tilde': '\u223C',
'TildeEqual': '\u2243',
'TildeFullEqual': '\u2245',
'TildeTilde': '\u2248',
'Times': '\u00D7',
'Trademark': '\u2122',
'Transpose': '\uF3C7',
'UAcute': '\u00FA',
'UDoubleAcute': '\u0171',
'UDoubleDot': '\u00FC',
'UGrave': '\u00F9',
'UHat': '\u00FB',
'UnderBrace': '\uFE38',
'UnderBracket': '\u23B5',
'UnderParenthesis': '\uFE36',
'Union': '\u22C3',
'UnionPlus': '\u228E',
'UpArrow': '\u2191',
'UpArrowBar': '\u2912',
'UpArrowDownArrow': '\u21C5',
'UpDownArrow': '\u2195',
'UpEquilibrium': '\u296E',
'UpperLeftArrow': '\u2196',
'UpperRightArrow': '\u2197',
'UpPointer': '\u25B4',
'Upsilon': '\u03C5',
'UpTee': '\u22A5',
'UpTeeArrow': '\u21A5',
'Uranus': '\u2645',
'URing': '\u016F',
'Vee': '\u22C1',
'Venus': '\u2640',
'VerticalBar': '\u2223',
'VerticalEllipsis': '\u22EE',
'VerticalLine': '\u2502',
'VerticalSeparator': '\uF432',
'VerticalTilde': '\u2240',
'VeryThinSpace': '\u200A',
'WarningSign': '\uF725',
'WatchIcon': '\u231A',
'Wedge': '\u22C0',
'WeierstrassP': '\u2118',
'WhiteBishop': '\u2657',
'WhiteKing': '\u2654',
'WhiteKnight': '\u2658',
'WhitePawn': '\u2659',
'WhiteQueen': '\u2655',
'WhiteRook': '\u2656',
'Wolf': '\uF720',
'Xi': '\u03BE',
'Xnor': '\uF4A2',
'Xor': '\u22BB',
'YAcute': '\u00FD',
'YDoubleDot': '\u00FF',
'Yen': '\u00A5',
'Zeta': '\u03B6',
'ZHacek': '\u017E',
}
aliased_characters = {
"a'": '\u00E1',
'a-': '\u0101',
'au': '\u0103',
'a"': '\u00E4',
'ae': '\u00E6',
'a`': '\u00E0',
'a^': '\u00E2',
'al': '\u2135',
'esc': '\uF768',
'am': '\uF760',
'a': '\u03B1',
'alpha': '\u03B1',
'alt': '\uF7D1',
'&&': '\u2227',
'and': '\u2227',
'Ang': '\u212B',
'ao': '\u00E5',
'a~': '\u00E3',
'\\': '\u2216',
'be': '\u2136',
'b': '\u03B2',
'beta': '\u03B2',
'bv': '\u02D8',
'bu': '\u2022',
"c'": '\u0107',
"A'": '\u00C1',
'A-': '\u0100',
'Au': '\u0102',
'A"': '\u00C4',
'AE': '\u00C6',
'A`': '\u00C0',
'A^': '\u00C2',
'A': '\u0391',
'Alpha': '\u0391',
'Ao': '\u00C5',
'A~': '\u00C3',
'B': '\u0392',
'Beta': '\u0392',
"C'": '\u0106',
'C,': '\u00C7',
'Cv': '\u010C',
'Ch': '\u03A7',
'Chi': '\u03A7',
'C': '\u03A7',
'D': '\u0394',
'Delta': '\u0394',
'Dv': '\u010E',
'DD': '\uF74B',
'Di': '\u03DC',
'Digamma': '\u03DC',
"E'": '\u00C9',
'E-': '\u0112',
'Eu': '\u0114',
'E"': '\u00CB',
'E`': '\u00C8',
'Ev': '\u011A',
'E^': '\u00CA',
'E': '\u0395',
'Epsilon': '\u0395',
'Et': '\u0397',
'Eta': '\u0397',
'H': '\u0397',
'D-': '\u00D0',
'G': '\u0393',
'Gamma': '\u0393',
"I'": '\u00CD',
'Iu': '\u012C',
'I"': '\u00CF',
'I`': '\u00CC',
'I^': '\u00CE',
'I': '\u0399',
'Iota': '\u0399',
'K': '\u039A',
'Kappa': '\u039A',
'Ko': '\u03DE',
'Koppa': '\u03DE',
'L': '\u039B',
'Lambda': '\u039B',
'L/': '\u0141',
'M': '\u039C',
'Mu': '\u039C',
'Nv': '\u0147',
'N~': '\u00D1',
'N': '\u039D',
'Nu': '\u039D',
"O'": '\u00D3',
"O''": '\u0150',
'O"': '\u00D6',
'OE': '\u0152',
'O`': '\u00D2',
'O^': '\u00D4',
'O': '\u03A9',
'Omega': '\u03A9',
'W': '\u03A9',
'Om': '\u039F',
'Omicron': '\u039F',
'O/': '\u00D8',
'O~': '\u00D5',
'Ph': '\u03A6',
'Phi': '\u03A6',
'F': '\u03A6',
'P': '\u03A0',
'Pi': '\u03A0',
'Ps': '\u03A8',
'Psi': '\u03A8',
'Y': '\u03A8',
'Rv': '\u0158',
'R': '\u03A1',
'Rho': '\u03A1',
'Sa': '\u03E0',
'Sampi': '\u03E0',
'Sv': '\u0160',
'S': '\u03A3',
'Sigma': '\u03A3',
'T': '\u03A4',
'Tau': '\u03A4',
'Tv': '\u0164',
'Th': '\u0398',
'Theta': '\u0398',
'Q': '\u0398',
'Thn': '\u00DE',
"U'": '\u00DA',
"U''": '\u0170',
'U"': '\u00DC',
'U`': '\u00D9',
'U^': '\u00DB',
'U': '\u03A5',
'Upsilon': '\u03A5',
'Uo': '\u016E',
'X': '\u039E',
'Xi': '\u039E',
"Y'": '\u00DD',
'Z': '\u0396',
'Zeta': '\u0396',
'Zv': '\u017D',
'c,': '\u00E7',
'cd': '\u00B8',
'.': '\u00B7',
'cent': '\u00A2',
'cv': '\u010D',
'ch': '\u03C7',
'chi': '\u03C7',
'c': '\u03C7',
'c.': '\u2299',
'c-': '\u2296',
'c+': '\u2295',
'c*': '\u2297',
'ccint': '\u2232',
'cl': '\u2318',
':': '\u2236',
'cmd': '\uF76A',
'===': '\u2261',
'co': '\uF3C8',
'conj': '\uF3C8',
'ct': '\uF3C9',
'cont': '\uF3B1',
'cint': '\u222E',
'ctrl': '\uF763',
'coprod': '\u2210',
'cccint': '\u2233',
'cross': '\uF4A0',
'cU': '\u03D2',
'cUpsilon': '\u03D2',
'ce': '\u03B5',
'cepsilon': '\u03B5',
'ck': '\u03F0',
'ckappa': '\u03F0',
'j': '\u03C6',
'cph': '\u03C6',
'cphi': '\u03C6',
'cp': '\u03D6',
'cpi': '\u03D6',
'cr': '\u03F1',
'crho': '\u03F1',
'cq': '\u03D1',
'cth': '\u03D1',
'ctheta': '\u03D1',
'dg': '\u2020',
'da': '\u2138',
'-': '\u2013',
'deg': '\u00B0',
' del': '\uF7D0',
'del': '\u2207',
'd': '\u03B4',
'delta': '\u03B4',
'dv': '\u010F',
'dia': '\u22C4',
'diffd': '\u2206',
'dd': '\uF74C',
'di': '\u03DD',
'digamma': '\u03DD',
'dratio': '\uF4A4',
'shift': '\uF4A3',
'dhy': '\u00AD',
'dlsep': '\uF76E',
'dpsep': '\uF76F',
'div': '\u00F7',
'.=': '\u2250',
'ddg': '\u2021',
'gg': '\uF74A',
'pp': '\uF749',
' <=': '\u21D0',
'<=>': '\u21D4',
'<==': '\u27F8',
'<==>': '\u27FA',
'==>': '\u27F9',
"''": '\u2033',
' =>': '\u21D2',
'dsa': '\uF6E6',
'dsb': '\uF6E7',
'dsc': '\uF6E8',
'dsA': '\uF7A4',
'dsB': '\uF7A5',
'dsC': '\uF7A6',
'dsD': '\uF7A7',
'dsE': '\uF7A8',
'dsF': '\uF7A9',
'dsG': '\uF7AA',
'dsH': '\uF7AB',
'dsI': '\uF7AC',
'dsJ': '\uF7AD',
'dsK': '\uF7AE',
'dsL': '\uF7AF',
'dsM': '\uF7B0',
'dsN': '\uF7B1',
'dsO': '\uF7B2',
'dsP': '\uF7B3',
'dsQ': '\uF7B4',
'dsR': '\uF7B5',
'dsS': '\uF7B6',
'dsT': '\uF7B7',
'dsU': '\uF7B8',
'dsV': '\uF7B9',
'dsW': '\uF7BA',
'dsX': '\uF7BB',
'dsY': '\uF7BC',
'dsZ': '\uF7BD',
'dsd': '\uF6E9',
'dse': '\uF6EA',
'ds8': '\uF7E3',
'dsf': '\uF6EB',
'ds5': '\uF7E0',
'ds4': '\uF7DF',
'dsg': '\uF6EC',
'dsh': '\uF6ED',
'dsi': '\uF6EE',
'dsj': '\uF6EF',
'dsk': '\uF6F0',
'dsl': '\uF6F1',
'dsm': '\uF6F2',
'dsn': '\uF6F3',
'ds9': '\uF7E4',
'dso': '\uF6F4',
'ds1': '\uF7DC',
'dsp': '\uF6F5',
'dsq': '\uF6F6',
'dsr': '\uF6F7',
'dss': '\uF6F8',
'ds7': '\uF7E2',
'ds6': '\uF7E1',
'dst': '\uF6F9',
'ds3': '\uF7DE',
'ds2': '\uF7DD',
'dsu': '\uF6FA',
'dsv': '\uF6FB',
'dsw': '\uF6FC',
'dsx': '\uF6FD',
'dsy': '\uF6FE',
'dsz': '\uF6FF',
'ds0': '\uF7DB',
' ||': '\u2225',
'dbv': '\uF755',
'd!': '\u00A1',
'd?': '\u00BF',
'dT': '\u22A4',
"e'": '\u00E9',
'e-': '\u0113',
'eu': '\u0115',
'e"': '\u00EB',
'e`': '\u00E8',
'ev': '\u011B',
'e^': '\u00EA',
'el': '\u2208',
'elem': '\u2208',
'...': '\u2026',
'eci': '\u25CB',
'es': '\u2205',
'esci': '\u25E6',
'essq': '\u25FB',
'esq': '\u25A1',
'ent': '\uF7D4',
'e': '\u03F5',
'epsilon': '\u03F5',
'==': '\uF431',
'=~': '\u2242',
'equi': '\u21CC',
'equiv': '\u29E6',
' esc': '\uF769',
'et': '\u03B7',
'eta': '\u03B7',
'h': '\u03B7',
'd-': '\u00F0',
'ex': '\u2203',
'ee': '\uF74D',
'fci': '\u25CF',
'fsci': '\uF750',
'fssq': '\u25FC',
'fsq': '\u25A0',
'fvssq': '\u25AA',
'fs': '\u03C2',
'*5': '\u2605',
'fa': '\u2200',
'$a': '\uF800',
'$b': '\uF801',
'$c': '\uF802',
'$A': '\uF81A',
'$B': '\uF81B',
'$C': '\uF81C',
'$D': '\uF81D',
'$E': '\uF81E',
'$F': '\uF81F',
'$G': '\uF820',
'$H': '\uF821',
'$I': '\uF822',
'$J': '\uF823',
'$K': '\uF824',
'$L': '\uF825',
'$M': '\uF826',
'$N': '\uF827',
'$O': '\uF828',
'$P': '\uF829',
'$Q': '\uF82A',
'$R': '\uF82B',
'$S': '\uF82C',
'$T': '\uF82D',
'$U': '\uF82E',
'$V': '\uF82F',
'$W': '\uF830',
'$X': '\uF831',
'$Y': '\uF832',
'$Z': '\uF833',
'$d': '\uF803',
'$e': '\uF804',
'$f': '\uF805',
'$g': '\uF806',
'$h': '\uF807',
'$i': '\uF808',
'$j': '\uF809',
'$k': '\uF80A',
'$l': '\uF80B',
'$m': '\uF80C',
'$n': '\uF80D',
'$o': '\uF80E',
'$p': '\uF80F',
'$q': '\uF810',
'$r': '\uF811',
'$s': '\uF812',
'$t': '\uF813',
'$u': '\uF814',
'$v': '\uF815',
'$w': '\uF816',
'$x': '\uF817',
'$y': '\uF818',
'$z': '\uF819',
':-@': '\uF721',
'fn': '\uF4A1',
'g': '\u03B3',
'gamma': '\u03B3',
'gi': '\u2137',
'goa': '\uF6CC',
'gob': '\uF6CD',
'goc': '\uF6CE',
'goA': '\uF78A',
'goB': '\uF78B',
'goC': '\u212D',
'goD': '\uF78D',
'goE': '\uF78E',
'goF': '\uF78F',
'goG': '\uF790',
'goH': '\u210C',
'goI': '\u2111',
'goJ': '\uF793',
'goK': '\uF794',
'goL': '\uF795',
'goM': '\uF796',
'goN': '\uF797',
'goO': '\uF798',
'goP': '\uF799',
'goQ': '\uF79A',
'goR': '\u211C',
'goS': '\uF79C',
'goT': '\uF79D',
'goU': '\uF79E',
'goV': '\uF79F',
'goW': '\uF7A0',
'goX': '\uF7A1',
'goY': '\uF7A2',
'goZ': '\u2128',
'god': '\uF6CF',
'goe': '\uF6D0',
'go8': '\uF7ED',
'gof': '\uF6D1',
'go5': '\uF7EA',
'go4': '\uF7E9',
'gog': '\uF6D2',
'goh': '\uF6D3',
'goi': '\uF6D4',
'goj': '\uF6D5',
'gok': '\uF6D6',
'gol': '\uF6D7',
'gom': '\uF6D8',
'gon': '\uF6D9',
'go9': '\uF7EF',
'goo': '\uF6DA',
'go1': '\uF7E6',
'gop': '\uF6DB',
'goq': '\uF6DC',
'gor': '\uF6DD',
'gos': '\uF6DE',
'go7': '\uF7EC',
'go6': '\uF7EB',
'got': '\uF6DF',
'go3': '\uF7E8',
'go2': '\uF7E7',
'gou': '\uF6E0',
'gov': '\uF6E1',
'gow': '\uF6E2',
'gox': '\uF6E3',
'goy': '\uF6E4',
'goz': '\uF6E5',
'go0': '\uF7E5',
'gci': '\uF753',
'gsq': '\uF752',
'>=': '\u2265',
'>/': '\u2A7E',
'>~': '\u2273',
'hck': '\u02C7',
':)': '\u263A',
':-)': '\u263A',
'hb': '\u210F',
'hc': '\uF3CE',
'hline': '\u2500',
'h=': '\u224F',
"i'": '\u00ED',
'iu': '\u012D',
'i"': '\u00EF',
'i`': '\u00EC',
'i^': '\u00EE',
'ii': '\uF74E',
'jj': '\uF74F',
'+': '\uF39E',
'=>': '\uF523',
'inf': '\u221E',
'int': '\u222B',
'inter': '\u22C2',
'@': '\uF76D',
',': '\uF765',
'is': '\uF360',
'i': '\u03B9',
'iota': '\u03B9',
'k': '\u03BA',
'kappa': '\u03BA',
'ko': '\u03DF',
'koppa': '\u03DF',
'l': '\u03BB',
'lambda': '\u03BB',
'<': '\u2329',
'<-': '\u2190',
'l|': '\uF603',
'lc': '\u2308',
'[[': '\u301A',
'l||': '\uF605',
'lf': '\u230A',
'g<<': '\u00AB',
'[': '\uF76B',
'<->': '\u2194',
'lT': '\u22A3',
'<=': '\u2264',
'</': '\u2A7D',
'<~': '\u2272',
'_': '\uF754',
'ls': '\uF754',
'--': '\u2014',
'<--': '\u27F5',
'<-->': '\u27F7',
'-->': '\u27F6',
'l/': '\u0142',
'math': '\uF757',
' ': '\u205F',
'mho': '\u2127',
'mi': '\u00B5',
'-+': '\u2213',
'm': '\u03BC',
'mu': '\u03BC',
'nand': '\u22BC',
'- ': '\uF383',
'- ': '\uF384',
'- ': '\uF382',
'- ': '\uF380',
':-|': '\uF722',
'nv': '\u0148',
'nb': '\u2060',
'nbs': '\u00A0',
'nor': '\u22BD',
'!===': '\u2262',
'!||': '\u2226',
'!el': '\u2209',
'!elem': '\u2209',
'!=': '\u2260',
'!=~': '\uF400',
'!ex': '\u2204',
'!>': '\u226F',
'!>=': '\u2271',
'!>/': '\uF429',
'!>~': '\u2275',
'!h=': '\uF401',
'!<=': '\u2270',
'!<': '\u226E',
'!</': '\uF424',
'!<~': '\u2274',
'!': '\u00AC',
'not': '\u00AC',
'!mem': '\u220C',
'!sub': '\u2284',
'!sub=': '\u2288',
'!sup': '\u2285',
'!sup=': '\u2289',
'!~': '\u2241',
'!~=': '\u2244',
'!~==': '\u2247',
'!~~': '\u2249',
'!|': '\u2224',
'n~': '\u00F1',
'n': '\u03BD',
'nu': '\u03BD',
'null': '\uF3A0',
"o'": '\u00F3',
"o''": '\u0151',
'o"': '\u00F6',
'oe': '\u0153',
'o`': '\u00F2',
'o^': '\u00F4',
'o': '\u03C9',
'omega': '\u03C9',
'w': '\u03C9',
'om': '\u03BF',
'omicron': '\u03BF',
'opt': '\uF7D2',
'||': '\u2228',
'or': '\u2228',
'o/': '\u00F8',
'o~': '\u00F5',
'o{': '\uFE37',
'o[': '\u23B4',
'o(': '\uFE35',
'pd': '\u2202',
'ph': '\u03D5',
'phi': '\u03D5',
'f': '\u03D5',
'p': '\u03C0',
'pi': '\u03C0',
'pw': '\uF361',
'pl': '\uF528',
'+-': '\u00B1',
"'": '\u2032',
'prod': '\u220F',
'prop': '\u221D',
'ps': '\u03C8',
'psi': '\u03C8',
'y': '\u03C8',
'rtm': '\u00AE',
'ret': '\u21B5',
' ret': '\uF766',
'``': '\u2036',
'mem': '\u220B',
'`': '\u2035',
'rv': '\u0159',
'r': '\u03C1',
'rho': '\u03C1',
'>': '\u232A',
' ->': '\u2192',
'r|': '\uF604',
'rc': '\u2309',
']]': '\u301B',
'r||': '\uF606',
'rf': '\u230B',
'g>>': '\u00BB',
']': '\uF76C',
'rT': '\u22A2',
'vec': '\u21C0',
'->': '\uF522',
':>': '\uF51F',
':-(': '\u2639',
'sa': '\u03E0',
'sampi': '\u03E0',
'sca': '\uF6B2',
'scb': '\uF6B3',
'scc': '\uF6B4',
'scA': '\uF770',
'scB': '\u212C',
'scC': '\uF772',
'scD': '\uF773',
'scE': '\u2130',
'scF': '\u2131',
'scG': '\uF776',
'scH': '\u210B',
'scI': '\u2110',
'scJ': '\uF779',
'scK': '\uF77A',
'scL': '\u2112',
'scM': '\u2133',
'scN': '\uF77D',
'scO': '\uF77E',
'scP': '\u2118',
'scQ': '\uF780',
'scR': '\u211B',
'scS': '\uF782',
'scT': '\uF783',
'scU': '\uF784',
'scV': '\uF785',
'scW': '\uF786',
'scX': '\uF787',
'scY': '\uF788',
'scZ': '\uF789',
'scd': '\uF6B5',
'sce': '\u212F',
'sc8': '\uF7F8',
'scf': '\uF6B7',
'sc5': '\uF7F5',
'sc4': '\uF7F4',
'scg': '\u210A',
'sch': '\uF6B9',
'sci': '\uF6BA',
'scj': '\uF6BB',
'sck': '\uF6BC',
'scl': '\u2113',
'scm': '\uF6BE',
'scn': '\uF6BF',
'sc9': '\uF7F9',
'sco': '\u2134',
'sc1': '\uF7F1',
'scp': '\uF6C1',
'scq': '\uF6C2',
'scr': '\uF6C3',
'scs': '\uF6C4',
'sc7': '\uF7F7',
'sc6': '\uF7F6',
'sct': '\uF6C5',
'sc3': '\uF7F3',
'sc2': '\uF7F2',
'scu': '\uF6C6',
'scv': '\uF6C7',
'scw': '\uF6C8',
'scx': '\uF6C9',
'scy': '\uF6CA',
'scz': '\uF6CB',
'sc0': '\uF7F0',
'spl': '\uF527',
'sv': '\u0161',
's': '\u03C3',
'sigma': '\u03C3',
'*6': '\u2736',
'sc': '\u2218',
'space': '\u2423',
'spc': '\uF7BF',
'sqrt': '\u221A',
'sq': '\uF520',
'star': '\u22C6',
'sti': '\u03DB',
'stigma': '\u03DB',
'sub': '\u2282',
'sub=': '\u2286',
'st': '\u220D',
'sum': '\u2211',
'sup': '\u2283',
'sup=': '\u2287',
'sz': '\u00DF',
'ss': '\u00DF',
'tab': '\uF7BE',
't': '\u03C4',
'tau': '\u03C4',
'tv': '\u0165',
'tf': '\u2234',
'th': '\u03B8',
'theta': '\u03B8',
'q': '\u03B8',
' ': '\u2005',
' ': '\u2009',
'thn': '\u00FE',
'~': '\u223C',
'~=': '\u2243',
'~==': '\u2245',
'~~': '\u2248',
'*': '\u00D7',
'tm': '\u2122',
'tr': '\uF3C7',
"u'": '\u00FA',
"u''": '\u0171',
'u"': '\u00FC',
'u`': '\u00F9',
'u^': '\u00FB',
'u{': '\uFE38',
'u[': '\u23B5',
'u(': '\uFE36',
'un': '\u22C3',
'u': '\u03C5',
'upsilon': '\u03C5',
'uT': '\u22A5',
'uo': '\u016F',
'v': '\u22C1',
' |': '\u2223',
'vline': '\u2502',
'|': '\uF432',
' ': '\u200A',
'^': '\u22C0',
'wp': '\u2118',
'wf': '\uF720',
'wolf': '\uF720',
'x': '\u03BE',
'xi': '\u03BE',
'xnor': '\uF4A2',
'xor': '\u22BB',
"y'": '\u00FD',
'z': '\u03B6',
'zeta': '\u03B6',
'zv': '\u017E',
}
|
bnjones/Mathics
|
mathics/core/characters.py
|
Python
|
gpl-3.0
| 43,648
|
#----------------------------------------------------------------------
# I-SIMPA (http://i-simpa.ifsttar.fr). This file is part of I-SIMPA.
#
# I-SIMPA is a GUI for 3D numerical sound propagation modelling dedicated
# to scientific acoustic simulations.
# Copyright (C) 2007-2014 - IFSTTAR - Judicael Picaut, Nicolas Fortin
#
# I-SIMPA is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# I-SIMPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or
# see <http://www.gnu.org/licenses/>
#
# For more information, please consult: <http://i-simpa.ifsttar.fr> or
# send an email to i-simpa@ifsttar.fr
#
# To contact Ifsttar, write to Ifsttar, 14-20 Boulevard Newton
# Cite Descartes, Champs sur Marne F-77447 Marne la Vallee Cedex 2 FRANCE
# or write to scientific.computing@ifsttar.fr
# ----------------------------------------------------------------------
# -*- coding: cp1252 -*-
# Title: CSV file conversion script
# Description: Converts the output files created by SPPS into CSV files
# Date: April 2009
# Authors: N. Fortin and J. Picaut
# Contact: Judicael.Picaut@lcpc.fr
# Load libraries
import os
import libsimpa as ls
def GabeToCsv(filepath, csvpath):
    """
    Converts a GABE (Generic Array Binary Exchange) file to CSV (Comma Separated Values) format
    """
    # Instantiate the reader
    reader = ls.Gabe_rw()
    # Read the gabe file
    if reader.Load(filepath):
        # Convert to a list
        data = reader.ToList()
        # Transpose the data (columns become rows)
        data = zip(*data)
        # Write the data
        fich = open(csvpath, 'w')
        for line in data:
            firstcol = True
            for col in line:
                if not firstcol:
                    fich.write(",")
                else:
                    firstcol = False
                fich.write(str(col))  # Write the cell value
            fich.write("\n")  # Newline
        fich.close()
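# Illustrative usage (not part of the original script; the file paths are
# hypothetical and assume a GABE file produced by SPPS):
if __name__ == "__main__":
    GabeToCsv("resultat.gabe", "resultat.csv")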
|
Ifsttar/I-Simpa
|
src/python_bindings/samples/spps_conversion_resultats.py
|
Python
|
gpl-3.0
| 2,657
|
import sys
import subprocess
from .exceptions import PyperclipException
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit https://pyperclip.readthedocs.org """
PY2 = sys.version_info[0] == 2
text_type = unicode if PY2 else str  # "unicode" is only evaluated on Python 2
def init_osx_clipboard():
def copy_osx(text):
p = subprocess.Popen(['pbcopy', 'w'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text)
def paste_osx():
p = subprocess.Popen(['pbpaste', 'r'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout
return copy_osx, paste_osx
def init_gtk_clipboard():
import gtk
def copy_gtk(text):
global cb
cb = gtk.Clipboard()
cb.set_text(text)
cb.store()
def paste_gtk():
clipboardContents = gtk.Clipboard().wait_for_text()
# for python 2, returns None if the clipboard is blank.
if clipboardContents is None:
return ''
else:
return clipboardContents
return copy_gtk, paste_gtk
def init_qt_clipboard():
# $DISPLAY should exist
from PyQt4.QtGui import QApplication
app = QApplication([])
def copy_qt(text):
cb = app.clipboard()
cb.setText(text)
def paste_qt():
cb = app.clipboard()
return text_type(cb.text())
return copy_qt, paste_qt
def init_xclip_clipboard():
def copy_xclip(text):
p = subprocess.Popen(['xclip', '-selection', 'c'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text)
def paste_xclip():
p = subprocess.Popen(['xclip', '-selection', 'c', '-o'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout
return copy_xclip, paste_xclip
def init_xsel_clipboard():
def copy_xsel(text):
p = subprocess.Popen(['xsel', '-b', '-i'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text)
def paste_xsel():
p = subprocess.Popen(['xsel', '-b', '-o'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout
return copy_xsel, paste_xsel
def init_klipper_clipboard():
def copy_klipper(text):
p = subprocess.Popen(
['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents',
text],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=None)
def paste_klipper():
p = subprocess.Popen(
['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
# Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
# TODO: https://github.com/asweigart/pyperclip/issues/43
clipboardContents = stdout
# even if blank, Klipper will append a newline at the end
assert len(clipboardContents) > 0
# make sure that newline is there
assert clipboardContents.endswith('\n')
if clipboardContents.endswith('\n'):
clipboardContents = clipboardContents[:-1]
return clipboardContents
return copy_klipper, paste_klipper
def init_no_clipboard():
class ClipboardUnavailable(object):
def __call__(self, *args, **kwargs):
raise PyperclipException(EXCEPT_MSG)
if PY2:
def __nonzero__(self):
return False
else:
def __bool__(self):
return False
return ClipboardUnavailable(), ClipboardUnavailable()
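# --- Illustrative only (not part of the original module) ---
# A minimal sketch of how a caller might choose among the initializers
# above. The real pyperclip performs this probing in a separate module,
# so the exact checks here are an assumption.
def determine_clipboard():
    import platform
    import shutil
    if platform.system() == 'Darwin':
        return init_osx_clipboard()
    if shutil.which('xclip'):
        return init_xclip_clipboard()
    if shutil.which('xsel'):
        return init_xsel_clipboard()
    return init_no_clipboard()
# Usage sketch: copy, paste = determine_clipboard(); copy('hello'); print(paste())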
|
tvaddonsco/tva-release-repo
|
matrix/plugin.video.realizerx/resources/lib/modules/pyperclip/clipboards.py
|
Python
|
gpl-3.0
| 3,970
|
# -*- coding: utf-8 -*-
{
'name': "BestJa: Application Moderation",
'summary': "Two stage recruitment process",
'description': """
BestJa Application Moderation
=============================
Split recruitment process into two separate stages.
The first ("preliminary") stage is handled by offer moderators.
The second stage is handled by the recruiting organization itself.""",
'author': "Laboratorium EE",
'website': "http://www.laboratorium.ee",
'version': '0.1',
'depends': [
'base',
'bestja_offers',
'bestja_offers_moderation',
],
'data': [
'views/offer.xml',
'menu.xml',
'messages.xml',
'security/security.xml',
],
}
|
KrzysiekJ/bestja
|
addons/bestja_application_moderation/__openerp__.py
|
Python
|
agpl-3.0
| 720
|
# -*- coding: utf-8 -*-
"""
Tests for the Shopping Cart Models
"""
import datetime
from textwrap import dedent
import pytest
import pytz
from django.conf import settings
from mock import patch
import six
from six import StringIO
from six import text_type
from course_modes.models import CourseMode
from shoppingcart.models import (
CertificateItem,
CourseRegCodeItemAnnotation,
Order,
PaidCourseRegistration,
PaidCourseRegistrationAnnotation
)
from shoppingcart.views import initialize_report
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class ReportTypeTests(ModuleStoreTestCase):
"""
Tests for the models used to generate certificate status reports
"""
FIVE_MINS = datetime.timedelta(minutes=5)
@patch('student.models.CourseEnrollment.refund_cutoff_date')
def setUp(self, cutoff_date):
super(ReportTypeTests, self).setUp()
cutoff_date.return_value = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=1)
# Need to make a *lot* of users for this one
self.first_verified_user = UserFactory.create(profile__name="John Doe")
self.second_verified_user = UserFactory.create(profile__name="Jane Deer")
self.first_audit_user = UserFactory.create(profile__name="Joe Miller")
self.second_audit_user = UserFactory.create(profile__name="Simon Blackquill")
self.third_audit_user = UserFactory.create(profile__name="Super Mario")
self.honor_user = UserFactory.create(profile__name="Princess Peach")
self.first_refund_user = UserFactory.create(profile__name="King Bowsér")
self.second_refund_user = UserFactory.create(profile__name="Súsan Smith")
# Two are verified, three are audit, one honor
self.cost = 40
self.course = CourseFactory.create(org='MITx', number='999', display_name=u'Robot Super Course')
self.course_key = self.course.id
course_mode = CourseMode(course_id=self.course_key,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
course_mode.save()
course_mode2 = CourseMode(course_id=self.course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.cost)
course_mode2.save()
# User 1 & 2 will be verified
self.cart1 = Order.get_cart_for_user(self.first_verified_user)
CertificateItem.add_to_order(self.cart1, self.course_key, self.cost, 'verified')
self.cart1.purchase()
self.cart2 = Order.get_cart_for_user(self.second_verified_user)
CertificateItem.add_to_order(self.cart2, self.course_key, self.cost, 'verified')
self.cart2.purchase()
# Users 3, 4, and 5 are audit
CourseEnrollment.enroll(self.first_audit_user, self.course_key, "audit")
CourseEnrollment.enroll(self.second_audit_user, self.course_key, "audit")
CourseEnrollment.enroll(self.third_audit_user, self.course_key, "audit")
# User 6 is honor
CourseEnrollment.enroll(self.honor_user, self.course_key, "honor")
self.now = datetime.datetime.now(pytz.UTC)
# Users 7 & 8 are refunds
self.cart = Order.get_cart_for_user(self.first_refund_user)
CertificateItem.add_to_order(self.cart, self.course_key, self.cost, 'verified')
self.cart.purchase()
CourseEnrollment.unenroll(self.first_refund_user, self.course_key)
self.cart = Order.get_cart_for_user(self.second_refund_user)
CertificateItem.add_to_order(self.cart, self.course_key, self.cost, 'verified')
self.cart.purchase(self.second_refund_user.username, self.course_key)
CourseEnrollment.unenroll(self.second_refund_user, self.course_key)
self.test_time = datetime.datetime.now(pytz.UTC)
first_refund = CertificateItem.objects.get(id=3)
first_refund.fulfilled_time = self.test_time
first_refund.refund_requested_time = self.test_time
first_refund.save()
second_refund = CertificateItem.objects.get(id=4)
second_refund.fulfilled_time = self.test_time
second_refund.refund_requested_time = self.test_time
second_refund.save()
self.CORRECT_REFUND_REPORT_CSV = dedent(u"""
Order Number,Customer Name,Date of Original Transaction,Date of Refund,Amount of Refund,Service Fees (if any)
3,King Bowsér,{time_str},{time_str},40.00,0.00
4,Súsan Smith,{time_str},{time_str},40.00,0.00
""".format(time_str=str(self.test_time)))
self.CORRECT_CERT_STATUS_CSV = dedent("""
University,Course,Course Announce Date,Course Start Date,Course Registration Close Date,Course Registration Period,Total Enrolled,Audit Enrollment,Honor Code Enrollment,Verified Enrollment,Gross Revenue,Gross Revenue over the Minimum,Number of Verified Students Contributing More than the Minimum,Number of Refunds,Dollars Refunded
MITx,999 Robot Super Course,,,,,6,3,1,2,80.00,0.00,0,2,80.00
""".format(time_str=str(self.test_time)))
self.CORRECT_UNI_REVENUE_SHARE_CSV = dedent("""
University,Course,Number of Transactions,Total Payments Collected,Service Fees (if any),Number of Successful Refunds,Total Amount of Refunds
MITx,999 Robot Super Course,6,80.00,0.00,2,80.00
""".format(time_str=str(self.test_time)))
def test_refund_report_rows(self):
report = initialize_report("refund_report", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS)
refunded_certs = report.rows()
# check that we have the right number
self.assertEqual(len(list(refunded_certs)), 2)
self.assertTrue(CertificateItem.objects.get(user=self.first_refund_user, course_id=self.course_key))
self.assertTrue(CertificateItem.objects.get(user=self.second_refund_user, course_id=self.course_key))
def test_refund_report_purchased_csv(self):
"""
Tests that a generated purchase report CSV is as we expect
"""
report = initialize_report("refund_report", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS)
csv_file = StringIO()
report.write_csv(csv_file)
csv = csv_file.getvalue()
csv_file.close()
# Using excel mode csv, which automatically ends lines with \r\n, so need to convert to \n
self.assertEqual(
csv.replace('\r\n', '\n').strip() if six.PY3 else csv.replace('\r\n', '\n').strip().decode('utf-8'),
self.CORRECT_REFUND_REPORT_CSV.strip()
)
@pytest.mark.skip(reason="Fails in django 2.1 and above and the app is deprecated, hence skipping it")
def test_basic_cert_status_csv(self):
report = initialize_report("certificate_status", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS, 'A', 'Z')
csv_file = StringIO()
report.write_csv(csv_file)
csv = csv_file.getvalue()
self.assertEqual(csv.replace('\r\n', '\n').strip(), self.CORRECT_CERT_STATUS_CSV.strip())
@pytest.mark.skip(reason="Fails in django 2.1 and above and the app is deprecated, hence skipping it")
def test_basic_uni_revenue_share_csv(self):
report = initialize_report("university_revenue_share", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS, 'A', 'Z')
csv_file = StringIO()
report.write_csv(csv_file)
csv = csv_file.getvalue()
self.assertEqual(csv.replace('\r\n', '\n').strip(), self.CORRECT_UNI_REVENUE_SHARE_CSV.strip())
class ItemizedPurchaseReportTest(ModuleStoreTestCase):
"""
Tests for the models used to generate itemized purchase reports
"""
FIVE_MINS = datetime.timedelta(minutes=5)
TEST_ANNOTATION = u'Ba\xfc\u5305'
def setUp(self):
super(ItemizedPurchaseReportTest, self).setUp()
self.user = UserFactory.create()
self.cost = 40
self.course = CourseFactory.create(org='MITx', number='999', display_name=u'Robot Super Course')
self.course_key = self.course.id
course_mode = CourseMode(course_id=self.course_key,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
course_mode.save()
course_mode2 = CourseMode(course_id=self.course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.cost)
course_mode2.save()
self.annotation = PaidCourseRegistrationAnnotation(course_id=self.course_key, annotation=self.TEST_ANNOTATION)
self.annotation.save()
self.course_reg_code_annotation = CourseRegCodeItemAnnotation(course_id=self.course_key, annotation=self.TEST_ANNOTATION)
self.course_reg_code_annotation.save()
self.cart = Order.get_cart_for_user(self.user)
self.reg = PaidCourseRegistration.add_to_order(self.cart, self.course_key, mode_slug=course_mode.mode_slug)
self.cert_item = CertificateItem.add_to_order(self.cart, self.course_key, self.cost, 'verified')
self.cart.purchase()
self.now = datetime.datetime.now(pytz.UTC)
paid_reg = PaidCourseRegistration.objects.get(course_id=self.course_key, user=self.user)
paid_reg.fulfilled_time = self.now
paid_reg.refund_requested_time = self.now
paid_reg.save()
cert = CertificateItem.objects.get(course_id=self.course_key, user=self.user)
cert.fulfilled_time = self.now
cert.refund_requested_time = self.now
cert.save()
self.CORRECT_CSV = dedent((b"""
Purchase Time,Order ID,Status,Quantity,Unit Cost,Total Cost,Currency,Description,Comments
%s,1,purchased,1,40.00,40.00,usd,Registration for Course: Robot Super Course,Ba\xc3\xbc\xe5\x8c\x85
%s,1,purchased,1,40.00,40.00,usd,verified cert for course Robot Super Course,
""" % (six.b(str(self.now)), six.b(str(self.now)))).decode('utf-8'))
def test_purchased_items_btw_dates(self):
report = initialize_report("itemized_purchase_report", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS)
purchases = report.rows()
        # since there aren't many purchases, just run through the generator to make sure we've got the right number
self.assertEqual(len(list(purchases)), 2)
report = initialize_report("itemized_purchase_report", self.now + self.FIVE_MINS, self.now + self.FIVE_MINS + self.FIVE_MINS)
no_purchases = report.rows()
self.assertEqual(len(list(no_purchases)), 0)
def test_purchased_csv(self):
"""
Tests that a generated purchase report CSV is as we expect
"""
report = initialize_report("itemized_purchase_report", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS)
        # Note: we use six.StringIO as an in-memory buffer to read/write csv for testing.
        # In the py2 case that will be a BytesIO, so we need to decode the value before comparison.
csv_file = StringIO()
report.write_csv(csv_file)
csv = csv_file.getvalue() if six.PY3 else csv_file.getvalue().decode('utf-8')
csv_file.close()
# Using excel mode csv, which automatically ends lines with \r\n, so need to convert to \n
self.assertEqual(csv.replace('\r\n', '\n').strip(), self.CORRECT_CSV.strip())
def test_csv_report_no_annotation(self):
"""
Fill in gap in test coverage. csv_report_comments for PaidCourseRegistration instance with no
matching annotation
"""
# delete the matching annotation
self.annotation.delete()
self.assertEqual("", self.reg.csv_report_comments)
def test_paidcourseregistrationannotation_unicode(self):
"""
Fill in gap in test coverage. __str__ method of PaidCourseRegistrationAnnotation
"""
self.assertEqual(text_type(self.annotation), u'{} : {}'.format(text_type(self.course_key), self.TEST_ANNOTATION))
def test_courseregcodeitemannotationannotation_unicode(self):
"""
Fill in gap in test coverage. __str__ method of CourseRegCodeItemAnnotation
"""
self.assertEqual(text_type(self.course_reg_code_annotation), u'{} : {}'.format(text_type(self.course_key), self.TEST_ANNOTATION))
|
msegado/edx-platform
|
lms/djangoapps/shoppingcart/tests/test_reports.py
|
Python
|
agpl-3.0
| 12,763
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import FieldDoesNotExist
from django.core.exceptions import ImproperlyConfigured
from django.utils.timezone import now
from model_utils.managers import QueryManager
from model_utils.fields import AutoCreatedField, AutoLastModifiedField, \
StatusField, MonitorField
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields.
"""
created = AutoCreatedField(_('created'))
modified = AutoLastModifiedField(_('modified'))
class Meta:
abstract = True
class TimeFramedModel(models.Model):
"""
An abstract base class model that provides ``start``
and ``end`` fields to record a timeframe.
"""
start = models.DateTimeField(_('start'), null=True, blank=True)
end = models.DateTimeField(_('end'), null=True, blank=True)
class Meta:
abstract = True
class StatusModel(models.Model):
"""
An abstract base class model with a ``status`` field that
automatically uses a ``STATUS`` class attribute of choices, a
``status_changed`` date-time field that records when ``status``
was last modified, and an automatically-added manager for each
status that returns objects with that status only.
"""
status = StatusField(_('status'))
status_changed = MonitorField(_('status changed'), monitor='status')
class Meta:
abstract = True
def add_status_query_managers(sender, **kwargs):
"""
Add a Querymanager for each status item dynamically.
"""
if not issubclass(sender, StatusModel):
return
for value, display in getattr(sender, 'STATUS', ()):
if _field_exists(sender, value):
raise ImproperlyConfigured(
"StatusModel: Model '%s' has a field named '%s' which "
"conflicts with a status of the same name."
% (sender.__name__, value)
)
sender.add_to_class(value, QueryManager(status=value))
def add_timeframed_query_manager(sender, **kwargs):
"""
Add a QueryManager for a specific timeframe.
"""
if not issubclass(sender, TimeFramedModel):
return
if _field_exists(sender, 'timeframed'):
raise ImproperlyConfigured(
"Model '%s' has a field named 'timeframed' "
"which conflicts with the TimeFramedModel manager."
% sender.__name__
)
sender.add_to_class('timeframed', QueryManager(
(models.Q(start__lte=now) | models.Q(start__isnull=True)) &
(models.Q(end__gte=now) | models.Q(end__isnull=True))
))
models.signals.class_prepared.connect(add_status_query_managers)
models.signals.class_prepared.connect(add_timeframed_query_manager)
def _field_exists(model_class, field_name):
return field_name in [f.attname for f in model_class._meta.local_fields]
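# Illustrative only (not part of model_utils): a model combining the mixins
# above. `Choices` comes from model_utils and a configured Django project is
# assumed, so this is shown as a sketch rather than executable code.
#
#     from model_utils import Choices
#
#     class Article(StatusModel, TimeStampedModel):
#         STATUS = Choices('draft', 'published')
#
# add_status_query_managers() then attaches Article.draft and
# Article.published managers, so Article.published.all() returns only rows
# whose status is 'published', and status_changed updates automatically via
# the MonitorField whenever status is modified.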
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/model_utils/models.py
|
Python
|
agpl-3.0
| 3,021
|
"""Poll module is ungraded xmodule used by students to
to do set of polls.
On the client side we show:
If student does not yet anwered - Question with set of choices.
If student have answered - Question with statistics for each answers.
"""
import cgi
import json
import logging
from copy import deepcopy
from collections import OrderedDict
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.stringify import stringify_children
from xmodule.mako_module import MakoModuleDescriptor
from xmodule.xml_module import XmlDescriptor
from xblock.core import Scope, String, Dict, Boolean, List
log = logging.getLogger(__name__)
class PollFields(object):
# Name of poll to use in links to this poll
display_name = String(help="Display name for this module", scope=Scope.settings)
voted = Boolean(help="Whether this student has voted on the poll", scope=Scope.user_state, default=False)
poll_answer = String(help="Student answer", scope=Scope.user_state, default='')
    poll_answers = Dict(help="All possible answers for the poll from other students", scope=Scope.content)
answers = List(help="Poll answers from xml", scope=Scope.content, default=[])
question = String(help="Poll question", scope=Scope.content, default='')
class PollModule(PollFields, XModule):
"""Poll Module"""
js = {
'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
'js': [resource_string(__name__, 'js/src/poll/logme.js'),
resource_string(__name__, 'js/src/poll/poll.js'),
resource_string(__name__, 'js/src/poll/poll_main.js')]
}
css = {'scss': [resource_string(__name__, 'css/poll/display.scss')]}
js_module_name = "Poll"
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request data parameters
Returns:
json string
"""
if dispatch in self.poll_answers and not self.voted:
            # FIXME: remove this workaround once XBlock supports
            # mutable types.
temp_poll_answers = self.poll_answers
temp_poll_answers[dispatch] += 1
self.poll_answers = temp_poll_answers
self.voted = True
self.poll_answer = dispatch
return json.dumps({'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values()),
'callback': {'objectName': 'Conditional'}
})
elif dispatch == 'get_state':
return json.dumps({'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values())
})
elif dispatch == 'reset_poll' and self.voted and \
self.descriptor.xml_attributes.get('reset', 'True').lower() != 'false':
self.voted = False
            # FIXME: remove this workaround once XBlock supports
            # mutable types.
temp_poll_answers = self.poll_answers
temp_poll_answers[self.poll_answer] -= 1
self.poll_answers = temp_poll_answers
self.poll_answer = ''
return json.dumps({'status': 'success'})
else: # return error message
return json.dumps({'error': 'Unknown Command!'})
def get_html(self):
"""Renders parameters to template."""
params = {
'element_id': self.location.html_id(),
'element_class': self.location.category,
'ajax_url': self.system.ajax_url,
'configuration_json': self.dump_poll(),
}
self.content = self.system.render_template('poll.html', params)
return self.content
def dump_poll(self):
"""Dump poll information.
Returns:
string - Serialize json.
"""
        # FIXME: workaround for the caching of `default={}` in the
        # definition of the poll_answers field
if self.poll_answers is None:
self.poll_answers = {}
answers_to_json = OrderedDict()
        # FIXME: remove this workaround once XBlock supports
        # mutable types.
temp_poll_answers = self.poll_answers
# Fill self.poll_answers, prepare data for template context.
for answer in self.answers:
# Set default count for answer = 0.
if answer['id'] not in temp_poll_answers:
temp_poll_answers[answer['id']] = 0
answers_to_json[answer['id']] = cgi.escape(answer['text'])
self.poll_answers = temp_poll_answers
return json.dumps({'answers': answers_to_json,
'question': cgi.escape(self.question),
# to show answered poll after reload:
'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers if self.voted else {},
'total': sum(self.poll_answers.values()) if self.voted else 0,
'reset': str(self.descriptor.xml_attributes.get('reset', 'true')).lower()})
class PollDescriptor(PollFields, MakoModuleDescriptor, XmlDescriptor):
_tag_name = 'poll_question'
_child_tag_name = 'answer'
module_class = PollModule
@classmethod
def definition_from_xml(cls, xml_object, system):
"""Pull out the data into dictionary.
Args:
xml_object: xml from file.
system: `system` object.
Returns:
(definition, children) - tuple
definition - dict:
{
'answers': <List of answers>,
'question': <Question string>
}
"""
        # Check for presence of required tags in xml.
if len(xml_object.xpath(cls._child_tag_name)) == 0:
raise ValueError("Poll_question definition must include \
at least one 'answer' tag")
xml_object_copy = deepcopy(xml_object)
answers = []
for element_answer in xml_object_copy.findall(cls._child_tag_name):
answer_id = element_answer.get('id', None)
if answer_id:
answers.append({
'id': answer_id,
'text': stringify_children(element_answer)
})
xml_object_copy.remove(element_answer)
definition = {
'answers': answers,
'question': stringify_children(xml_object_copy)
}
children = []
return (definition, children)
def definition_to_xml(self, resource_fs):
"""Return an xml element representing to this definition."""
poll_str = '<{tag_name}>{text}</{tag_name}>'.format(
tag_name=self._tag_name, text=self.question)
xml_object = etree.fromstring(poll_str)
xml_object.set('display_name', self.display_name)
def add_child(xml_obj, answer):
child_str = '<{tag_name} id="{id}">{text}</{tag_name}>'.format(
tag_name=self._child_tag_name, id=answer['id'],
text=answer['text'])
child_node = etree.fromstring(child_str)
xml_object.append(child_node)
for answer in self.answers:
add_child(xml_object, answer)
return xml_object
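# Example of the XML this descriptor round-trips (structure inferred from
# definition_from_xml/definition_to_xml above; attribute values are
# illustrative, not taken from the original source):
#
#     <poll_question display_name="Quick poll">
#         Did this section make sense?
#         <answer id="yes">Yes</answer>
#         <answer id="no">No</answer>
#     </poll_question>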
|
rationalAgent/edx-platform-custom
|
common/lib/xmodule/xmodule/poll_module.py
|
Python
|
agpl-3.0
| 7,481
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 NUMA Extreme Systems (www.numaes.com) for Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import time
from openerp.report import report_sxw
class sunat_1_1_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
super(sunat_1_1_report, self).__init__(cr, uid, name, context)
self.localcontext.update( {
'time': time,
})
self.context = context
report_sxw.report_sxw('report.l10n_pe.sunat_1_1', 'l10n_pe.ple_1_1',
'addons/l10n_pe_ple01/report/sunat_1_1.rml', parser=sunat_1_1_report, header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Jgarcia-IAS/SAT
|
openerp/addons-extra/odoo-pruebas/odoo-server/addons-extra/l10n_pe_ple01/report/sunat_1_1_print.py
|
Python
|
agpl-3.0
| 1,980
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyKiwisolver(PythonPackage):
"""A fast implementation of the Cassowary constraint solver"""
homepage = "https://github.com/nucleic/kiwi"
pypi = "kiwisolver/kiwisolver-1.1.0.tar.gz"
version('1.3.2', sha256='fc4453705b81d03568d5b808ad8f09c77c47534f6ac2e72e733f9ca4714aa75c')
version('1.3.1', sha256='950a199911a8d94683a6b10321f9345d5a3a8433ec58b217ace979e18f16e248')
version('1.3.0', sha256='14f81644e1f3bf01fbc8b9c990a7889e9bb4400c4d0ff9155aa0bdd19cce24a9')
version('1.2.0', sha256='247800260cd38160c362d211dcaf4ed0f7816afb5efe56544748b21d6ad6d17f')
version('1.1.0', sha256='53eaed412477c836e1b9522c19858a8557d6e595077830146182225613b11a75')
version('1.0.1', sha256='ce3be5d520b4d2c3e5eeb4cd2ef62b9b9ab8ac6b6fedbaa0e39cdb6f50644278')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
depends_on('python@3.5:', type=('build', 'run'), when='@1.2.0:')
depends_on('python@3.6:', type=('build', 'run'), when='@1.3.0:')
depends_on('python@3.7:', type=('build', 'run'), when='@1.3.2:')
depends_on('py-setuptools', type='build')
depends_on('py-cppy@1.1.0:', type='build', when='@1.2.0:')
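    # Typical command-line use of this package recipe (assumes a working
    # Spack installation; the version pin is illustrative):
    #
    #     spack install py-kiwisolver@1.3.2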
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-kiwisolver/package.py
|
Python
|
lgpl-2.1
| 1,376
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
'FloatOperation',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext',
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
# C version: compile time choice that enables the thread local context
'HAVE_THREADS'
]
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
import copy as _copy
import math as _math
import numbers as _numbers
import sys
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
MIN_EMIN = -999999999999999999
else:
MAX_PREC = 425000000
MAX_EMAX = 425000000
MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
called if the others are present. This isn't actually used for
anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
or operation (or sequence of operations) yielded a subnormal result.
"""
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
class FloatOperation(DecimalException, TypeError):
"""Enable stricter semantics for mixing floats and Decimals.
If the signal is not trapped (default), mixing floats and Decimals is
permitted in the Decimal() constructor, context.create_decimal() and
all comparison operators. Both conversion and comparisons are exact.
Any occurrence of a mixed operation is silently recorded by setting
FloatOperation in the context flags. Explicit conversions with
Decimal.from_float() or context.create_decimal_from_float() do not
set the flag.
Otherwise (the signal is trapped), only equality comparisons and explicit
conversions are silent. All other mixed operations raise FloatOperation.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() function manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.current_thread(), '__decimal_context__'):
del threading.current_thread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.current_thread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.current_thread().__decimal_context__
except AttributeError:
context = Context()
threading.current_thread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print(getcontext().prec)
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print(ctx.prec)
...
30
>>> with localcontext(ExtendedContext):
... print(getcontext().prec)
...
9
>>> print(getcontext().prec)
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
m = _parser(value.strip())
if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
if m.group('sign') == "-":
self._sign = 1
else:
self._sign = 0
intpart = m.group('int')
if intpart is not None:
# finite number
fracpart = m.group('frac') or ''
exp = int(m.group('exp') or '0')
self._int = str(int(intpart+fracpart))
self._exp = exp - len(fracpart)
self._is_special = False
else:
diag = m.group('diag')
if diag is not None:
# NaN
self._int = str(int(diag or '0')).lstrip('0')
if m.group('signal'):
self._exp = 'N'
else:
self._exp = 'n'
else:
# infinity
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
        # If the signs differ, the negative one is smaller
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# In order to make sure that the hash of a Decimal instance
# agrees with the hash of a numerically equal integer, float
# or Fraction, we follow the rules for numeric hashes outlined
# in the documentation. (See library docs, 'Built-in Types').
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
return _PyHASH_NAN
else:
if self._sign:
return -_PyHASH_INF
else:
return _PyHASH_INF
if self._exp >= 0:
exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
else:
exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
ans = hash_ if self >= 0 else -hash_
return -2 if ans == -1 else ans
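    # Illustrative check (editorial sketch): numerically equal values
    # hash equal across numeric types, as the rules above require:
    #   hash(Decimal('1.5')) == hash(1.5)  -> True
    #   hash(Decimal(7))     == hash(7)    -> True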
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
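    # Example (editorial sketch):
    #   Decimal('-3.14').as_tuple()
    #       -> DecimalTuple(sign=1, digits=(3, 1, 4), exponent=-2)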
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=True, context=context)
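    # Examples (editorial sketch; default context, capitals=1):
    #   Decimal('1.23E-7').to_eng_string()  -> '123E-9'
    #   str(Decimal('1.23E-7'))             -> '1.23E-7'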
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more then precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
            # If the answer is 0, the sign should be negative in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
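    # Example (editorial sketch; default context): decimal addition is
    # exact whenever the result fits in the context precision:
    #   Decimal('0.1') + Decimal('0.2')  -> Decimal('0.3')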
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
            # OK, so neither operand is 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
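    # Examples (editorial sketch; default context, prec=28): an inexact
    # quotient is rounded to context precision, while an exact one keeps
    # the ideal exponent:
    #   Decimal(1) / Decimal(7)  -> Decimal('0.1428571428571428571428571429')
    #   Decimal(5) / Decimal(2)  -> Decimal('2.5')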
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
Remainder nearest to 0- abs(remainder-near) <= other/2
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
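    # Examples (editorial sketch): the result satisfies
    # abs(result) <= abs(other)/2, so it can be negative even for
    # positive operands:
    #   Decimal(18).remainder_near(Decimal(10))  -> Decimal('-2')
    #   Decimal(10).remainder_near(Decimal(6))   -> Decimal('-2')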
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if clamp==0, and between Etiny and Etop if clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context.clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if clamp == 1 and self has too few digits
if context.clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
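    # Illustrative effect of the dispatch table above (editorial sketch):
    # a context's rounding string selects one of these helpers, e.g.
    #   Decimal('2.5').to_integral_value(rounding=ROUND_HALF_EVEN) -> Decimal('2')
    #   Decimal('2.5').to_integral_value(rounding=ROUND_HALF_UP)   -> Decimal('3')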
def __round__(self, n=None):
"""Round self to the nearest integer, or to a given precision.
If only one argument is supplied, round a finite Decimal
instance self to the nearest integer. If self is infinite or
a NaN then a Python exception is raised. If self is finite
and lies exactly halfway between two integers then it is
rounded to the integer with even last digit.
>>> round(Decimal('123.456'))
123
>>> round(Decimal('-456.789'))
-457
>>> round(Decimal('-3.0'))
-3
>>> round(Decimal('2.5'))
2
>>> round(Decimal('3.5'))
4
>>> round(Decimal('Inf'))
Traceback (most recent call last):
...
OverflowError: cannot round an infinity
>>> round(Decimal('NaN'))
Traceback (most recent call last):
...
ValueError: cannot round a NaN
If a second argument n is supplied, self is rounded to n
decimal places using the rounding mode for the current
context.
For an integer n, round(self, -n) is exactly equivalent to
self.quantize(Decimal('1En')).
>>> round(Decimal('123.456'), 0)
Decimal('123')
>>> round(Decimal('123.456'), 2)
Decimal('123.46')
>>> round(Decimal('123.456'), -2)
Decimal('1E+2')
>>> round(Decimal('-Infinity'), 37)
Decimal('NaN')
>>> round(Decimal('sNaN123'), 0)
Decimal('NaN123')
"""
if n is not None:
# two-argument form: use the equivalent quantize call
if not isinstance(n, int):
raise TypeError('Second argument to round should be integral')
exp = _dec_from_triple(0, '1', -n)
return self.quantize(exp)
# one-argument form
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
third = _convert_other(third, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
return product.__add__(third, context)
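    # Example (editorial sketch; default context):
    #   Decimal(2).fma(3, 5)  -> Decimal('11')
    # The intermediate product 2*3 is formed exactly, so only the final
    # addition can round.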
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
other = _convert_other(other)
if other is NotImplemented:
return other
modulo = _convert_other(modulo)
if modulo is NotImplemented:
return modulo
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
                                        'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in range(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1 << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
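    # Examples (editorial sketch; default context, prec=28):
    #   Decimal(2) ** 8                   -> Decimal('256')
    #   Decimal(2) ** Decimal('0.5')      -> Decimal('1.414213562373095048801688724')
    #   pow(Decimal(2), Decimal(10), 16)  -> Decimal('0')   (three-argument form)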
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context.clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
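    # Examples (editorial sketch): numerically equal values normalize to
    # a single canonical representative:
    #   Decimal('32.100').normalize()       -> Decimal('32.1')
    #   Decimal('0.321000e+2').normalize()  -> Decimal('32.1')
    #   Decimal('-0E2').normalize()         -> Decimal('-0')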
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
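    # Examples (editorial sketch; default context): quantize is the
    # idiomatic way to round to a fixed number of decimal places:
    #   Decimal('1.41421356').quantize(Decimal('1.000'))  -> Decimal('1.414')
    #   Decimal('1.5').quantize(Decimal('1'), rounding=ROUND_HALF_UP)
    #       -> Decimal('2')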
def same_quantum(self, other):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
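    # Examples (editorial sketch): only the exponents are compared,
    # not the values:
    #   Decimal('2.17').same_quantum(Decimal('0.01'))   -> True
    #   Decimal('2.17').same_quantum(Decimal('0.001'))  -> False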
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
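    # Examples (editorial sketch; default context, ROUND_HALF_EVEN):
    #   Decimal('10.5').to_integral_value()  -> Decimal('10')
    #   Decimal('10.5').to_integral_exact()  -> Decimal('10'), and the
    #       Inexact and Rounded flags are set in the context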
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
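    # Examples (editorial sketch; default context, prec=28):
    #   Decimal(2).sqrt()     -> Decimal('1.414213562373095048801688724')
    #   Decimal('-0').sqrt()  -> Decimal('-0')   (the sign of -0 is kept)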
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
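    # Examples (editorial sketch): a single quiet NaN operand is ignored,
    # as described in the comments above:
    #   Decimal('NaN').max(Decimal('2'))  -> Decimal('2')
    #   Decimal('3').min(Decimal('NaN'))  -> Decimal('3')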
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self, context=None):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other):
"""Compares self to other using the abstract representations.
This is not like the standard compare, which use their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
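    # Sketch of the total ordering: exponents break numerical ties, and
    # NaNs sort above all finite values.
    #
    #     Decimal('12.30').compare_total(Decimal('12.3'))   # -> Decimal('-1')
    #     Decimal('12.3').compare_total(Decimal('NaN'))     # -> Decimal('-1')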
def compare_total_mag(self, other):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
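    # A hedged example of the correctly rounded result, assuming the
    # default 28-digit context:
    #
    #     Decimal(1).exp()   # -> Decimal('2.718281828459045235360287471')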
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
        In other words, compute r such that abs(self.ln()) >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
        ans = _dec_from_triple(int(coeff < 0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
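    # Example, assuming the default 28-digit context:
    #
    #     Decimal(10).ln()   # -> Decimal('2.302585092994045684017991455')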
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
        In other words, find r such that abs(self.log10()) >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
            places = p - self._log10_exp_bound() + 2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
            ans = _dec_from_triple(int(coeff < 0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
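    # Examples, assuming the default 28-digit context; exact powers of ten
    # take the shortcut above and need no rounding loop:
    #
    #     Decimal('1000').log10()   # -> Decimal('3')
    #     Decimal(2).log10()        # -> Decimal('0.3010299956639811952137388947')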
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
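    # logb just exposes the adjusted exponent as a Decimal:
    #
    #     Decimal('250').logb()    # -> Decimal('2')
    #     Decimal('0.03').logb()   # -> Decimal('-2')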
def _islogical(self):
"""Return True if self is a logical operand.
        To be logical, it must be a finite number with a sign of 0, an
        exponent of 0, and a coefficient whose digits are all either
        0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
        # perform the operation digit by digit, then strip leading zeros
        result = "".join([str(int(a) & int(b)) for a, b in zip(opa, opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
        # perform the operation digit by digit, then strip leading zeros
        result = "".join([str(int(a) | int(b)) for a, b in zip(opa, opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
        # perform the operation digit by digit, then strip leading zeros
        result = "".join([str(int(a) ^ int(b)) for a, b in zip(opa, opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
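    # The logical operations act digit by digit on 0/1 coefficients, e.g.:
    #
    #     Decimal('1100').logical_and(Decimal('1010'))   # -> Decimal('1000')
    #     Decimal('1100').logical_or(Decimal('1010'))    # -> Decimal('1110')
    #     Decimal('1100').logical_xor(Decimal('1010'))   # -> Decimal('110')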
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is a number, then
            # the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is a number, then
            # the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
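    # A small sketch, assuming a 9-digit context:
    #
    #     Decimal('1').next_toward(Decimal('2'))   # -> Decimal('1.00000001')
    #     Decimal('1').next_toward(Decimal('0'))   # -> Decimal('0.999999999')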
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
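    # Rotation acts on the coefficient padded (or truncated) to exactly
    # context.prec digits; with prec = 9, for example:
    #
    #     Decimal('123456789').rotate(2)    # -> Decimal('345678912')
    #     Decimal('123456789').rotate(-2)   # -> Decimal('891234567')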
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
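    # scaleb simply adds the second operand to the exponent, e.g.:
    #
    #     Decimal('7.50').scaleb(3)   # -> Decimal('7.50E+3')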
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
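    # Shifting moves digits towards either end, filling with zeros; digits
    # pushed past context.prec are lost.  With prec = 9:
    #
    #     Decimal('123456789').shift(2)    # -> Decimal('345678900')
    #     Decimal('123456789').shift(-2)   # -> Decimal('1234567')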
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
    # PEP 3101 support. The _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
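    # A few illustrative results under the default context:
    #
    #     format(Decimal('3.14159'), '.2f')    # -> '3.14'
    #     format(Decimal('1234567'), ',.0f')   # -> '1,234,567'
    #     format(Decimal('0.25'), '.1%')       # -> '25.0%'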
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
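# Typical use is through the module-level localcontext() helper, e.g.:
#
#     with localcontext() as ctx:
#         ctx.prec = 50          # temporary precision
#         ...                    # previous context is restored on exit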
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None, traps=None,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self.clamp = clamp if clamp is not None else dc.clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals + traps)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals + flags)
else:
self.flags = flags
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
def _set_signal_dict(self, name, d):
        if not isinstance(d, dict):
            raise TypeError("%s must be a signal dict" % name)
        for key in d:
            if key not in _signals:
                raise KeyError("%s is not a valid signal" % key)
        for key in _signals:
            if key not in d:
                raise KeyError("signal dict is missing %s" % key)
return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
if name == 'prec':
return self._set_integer_check(name, value, 1, 'inf')
elif name == 'Emin':
return self._set_integer_check(name, value, '-inf', 0)
elif name == 'Emax':
return self._set_integer_check(name, value, 0, 'inf')
elif name == 'capitals':
return self._set_integer_check(name, value, 0, 1)
elif name == 'clamp':
return self._set_integer_check(name, value, 0, 1)
elif name == 'rounding':
            if value not in _rounding_modes:
# raise TypeError even for strings to have consistency
# among various implementations.
raise TypeError("%s: invalid rounding mode" % value)
return object.__setattr__(self, name, value)
elif name == 'flags' or name == 'traps':
return self._set_signal_dict(name, value)
elif name == '_ignored_flags':
return object.__setattr__(self, name, value)
else:
raise AttributeError(
"'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
        Otherwise, it sets the flag, then, if the corresponding
        trap_enabler is set, it raises the exception.  Otherwise, it returns
        the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
        # Do not mutate; this way, copies of a context leave the original
        # alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
        self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
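    # Unlike the Decimal constructor, this applies the context's precision
    # and rounding, e.g.:
    #
    #     Context(prec=3).create_decimal('3.14159')   # -> Decimal('3.14')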
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical(context=self)
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
        This is not like the standard compare, which uses the numerical
        values of the operands.  Note that a total ordering is defined for
        all possible abstract representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
        a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
          sNaN
          NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
    def scaleb(self, a, b):
        """Returns the first operand with the second value added to its exponent.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
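# Editor's sketch of _normalize in action (hypothetical values, not from
# the original source):
#
#   >>> op1 = _WorkRep(Decimal("123"))   # int=123, exp=0
#   >>> op2 = _WorkRep(Decimal("4.5"))   # int=45,  exp=-1
#   >>> _normalize(op1, op2, prec=5)
#   ((0, 1230, -1), (0, 45, -1))
#
# Both operands now share exp == -1; op1.int was scaled to 1230 so that
# 1230E-1 still equals the original 123.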
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
        b, a = a, (a - (-n//a)) >> 1    # Newton step: (a + ceil(n/a)) // 2
return a
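# Editor's sketch of the Newton iteration above (hypothetical inputs):
#
#   >>> _sqrt_nearest(50, 7)   # sqrt(50) ~= 7.07
#   7
#   >>> _sqrt_nearest(2, 1)    # sqrt(2) ~= 1.41
#   1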
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
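# Editor's sketch of the round-half-even behaviour (hypothetical inputs):
#
#   >>> _div_nearest(5, 2)       # 2.5 ties to the even neighbour
#   2
#   >>> _div_nearest(7, 2)       # 3.5 ties to the even neighbour
#   4
#   >>> _rshift_nearest(5, 1)    # 5/2 = 2.5, again ties to even
#   2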
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and abs(y) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest((M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in range(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
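# Editor's note (illustrative): _ilog(2000, 1000) approximates
# 1000 * log(2000/1000), so the result should lie close to
# 1000 * ln(2) ~= 693, within the error bound of 22 quoted above
# (and usually much nearer).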
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
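# Editor's note (illustrative): _dlog10(2, 0, 3) approximates
# 10**3 * log10(2) ~= 301.03 with absolute error at most 1, so it
# should return 301 or 302.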
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
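# Editor's note (illustrative): _dlog(2, 0, 3) approximates
# 10**3 * ln(2) ~= 693.15 with absolute error at most 1, i.e. 693 or 694.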
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((x<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = M<<R
for i in range(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in range(R-1, -1, -1):
Mshift = M<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
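# Editor's note (illustrative): _iexp(10000, 10000) approximates
# 10000 * exp(1) ~= 27182.8; with the error bound of 60 quoted above,
# the result should land within a few dozen of 27183.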
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
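# Editor's sketch (hypothetical input): _dexp(1, 0, 4) approximates
# exp(1) to 4 digits, returning roughly (2718, -3), i.e.
# 2718 * 10**-3 ~= 2.718.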
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
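# Editor's sketch (hypothetical input): _dpower(2, 0, 3, 0, 5) computes
# 2**3 to 5 digits, returning roughly (80000, -4), i.e.
# 80000 * 10**-4 == 8.0 (the coefficient may be off by at most 1).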
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
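# Editor's sketch: _log10_lb(3) == 100*1 - 53 == 47, a valid lower bound
# for 100 * log10(3) ~= 47.7; _log10_lb(456) == 100*3 - 40 == 260,
# below 100 * log10(456) ~= 265.9.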
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, int):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
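# Editor's sketch of the conversion rules (hypothetical inputs):
#
#   >>> _convert_other(5)
#   Decimal('5')
#   >>> _convert_other(2.5)                  # floats need allow_float=True
#   NotImplemented
#   >>> _convert_other("5", raiseit=True)    # strings never convert here
#   Traceback (most recent call last):
#     ...
#   TypeError: Unable to convert 5 to Decimal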
def _convert_for_comparison(self, other, equality_op=False):
"""Given a Decimal instance self and a Python object other, return
a pair (s, o) of Decimal instances such that "s op o" is
equivalent to "self op other" for any of the 6 comparison
operators "op".
"""
if isinstance(other, Decimal):
return self, other
# Comparison with a Rational instance (also includes integers):
# self op n/d <=> self*d op n (for n and d integers, d positive).
# A NaN or infinity can be left unchanged without affecting the
# comparison result.
if isinstance(other, _numbers.Rational):
if not self._is_special:
self = _dec_from_triple(self._sign,
str(int(self._int) * other.denominator),
self._exp)
return self, Decimal(other.numerator)
# Comparisons with float and complex types. == and != comparisons
# with complex numbers should succeed, returning either True or False
# as appropriate. Other comparisons return NotImplemented.
if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
other = other.real
if isinstance(other, float):
context = getcontext()
if equality_op:
context.flags[FloatOperation] = 1
else:
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are enabled")
return self, Decimal.from_float(other)
return NotImplemented, NotImplemented
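# Editor's sketch of the Rational rescaling above (hypothetical inputs):
# comparing Decimal("0.4") with fractions.Fraction(1, 3) rescales the
# Decimal by the denominator, returning (Decimal('1.2'), Decimal('1')),
# so "0.4 op 1/3" becomes "1.2 op 1".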
##### Setup Specific Contexts ############################################
# The default context prototype used by Context().
# It is mutable, so that new contexts can have different default values.
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=999999,
Emin=-999999,
capitals=1,
clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
import re
_parser = re.compile(r""" # A numeric string consists of:
# \s*
(?P<sign>[-+])? # an optional sign, followed by either...
(
(?=\d|\.\d) # ...a number (with at least one digit)
(?P<int>\d*) # having a (possibly empty) integer part
(\.(?P<frac>\d*))? # followed by an optional fractional part
(E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
|
Inf(inity)? # ...an infinity, or...
|
(?P<signal>s)? # ...an (optionally signaling)
NaN # NaN
(?P<diag>\d*) # with (possibly empty) diagnostic info.
)
# \s*
\Z
""", re.VERBOSE | re.IGNORECASE).match
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
_parse_format_specifier_regex = re.compile(r"""\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<minimumwidth>(?!0)\d+)?
(?P<thousands_sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""", re.VERBOSE)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gGn':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
return format_dict
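# Editor's sketch (hypothetical input, locale not involved since the
# type is not 'n'):
#
#   >>> spec = _parse_format_specifier(">10.2f")
#   >>> spec["align"], spec["minimumwidth"], spec["precision"], spec["type"]
#   ('>', 10, 2, 'f')
#
# with fill ' ', sign '-', thousands_sep '', grouping [3, 0] and
# decimal_point '.' filled in as defaults.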
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
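# Editor's sketch (hypothetical spec): with
# spec == {'minimumwidth': 8, 'fill': '*', 'align': '='},
# _format_align('-', '1.23', spec) puts the padding between the sign and
# the digits: '-***1.23'.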
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
    # (2) a nonempty list of positive integers + [0], or
    # (3) a list of positive integers + [locale.CHAR_MAX]
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart or spec['alt']:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
del sys
try:
import _decimal
except ImportError:
pass
else:
s1 = set(dir())
s2 = set(dir(_decimal))
for name in s1 - s2:
del globals()[name]
del s1, s2, name
from _decimal import *
if __name__ == '__main__':
import doctest, decimal
doctest.testmod(decimal)
|
lfcnassif/MultiContentViewer
|
release/modules/ext/libreoffice/program/python-core-3.3.0/lib/decimal.py
|
Python
|
lgpl-3.0
| 228,485
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test the intersection of Coords
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests # isort:skip
import numpy as np
import iris
import iris.coord_systems
import iris.coords
import iris.cube
import iris.tests.stock
class TestCubeIntersectTheoretical(tests.IrisTest):
def test_simple_intersect(self):
cube = iris.cube.Cube(
np.array(
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8],
[5, 6, 7, 8, 9],
],
dtype=np.int32,
)
)
lonlat_cs = iris.coord_systems.RotatedGeogCS(10, 20)
cube.add_dim_coord(
iris.coords.DimCoord(
np.arange(5, dtype=np.float32) * 90 - 180,
"longitude",
units="degrees",
coord_system=lonlat_cs,
),
1,
)
cube.add_dim_coord(
iris.coords.DimCoord(
np.arange(5, dtype=np.float32) * 45 - 90,
"latitude",
units="degrees",
coord_system=lonlat_cs,
),
0,
)
cube.add_aux_coord(
iris.coords.DimCoord(
points=np.int32(11), long_name="pressure", units="Pa"
)
)
cube.rename("temperature")
cube.units = "K"
cube2 = iris.cube.Cube(
np.array(
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8],
[5, 6, 7, 8, 50],
],
dtype=np.int32,
)
)
lonlat_cs = iris.coord_systems.RotatedGeogCS(10, 20)
cube2.add_dim_coord(
iris.coords.DimCoord(
np.arange(5, dtype=np.float32) * 90,
"longitude",
units="degrees",
coord_system=lonlat_cs,
),
1,
)
cube2.add_dim_coord(
iris.coords.DimCoord(
np.arange(5, dtype=np.float32) * 45 - 90,
"latitude",
units="degrees",
coord_system=lonlat_cs,
),
0,
)
cube2.add_aux_coord(
iris.coords.DimCoord(
points=np.int32(11), long_name="pressure", units="Pa"
)
)
cube2.rename("")
r = iris.analysis.maths.intersection_of_cubes(cube, cube2)
self.assertCML(r, ("cdm", "test_simple_cube_intersection.cml"))
class TestCoordIntersect(tests.IrisTest):
def test_commutative(self):
step = 4.0
c1 = iris.coords.DimCoord(np.arange(100) * step)
offset_points = c1.points.copy()
offset_points -= step * 30
c2 = c1.copy(points=offset_points)
i1 = c1.intersect(c2)
i2 = c2.intersect(c1)
self.assertEqual(i1, i2)
if __name__ == "__main__":
tests.main()
|
SciTools/iris
|
lib/iris/tests/test_intersect.py
|
Python
|
lgpl-3.0
| 3,364
|
"""
Support for LIFX Cloud scenes.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/scene.lifx_cloud/
"""
import asyncio
import logging
import voluptuous as vol
import aiohttp
import async_timeout
from homeassistant.components.scene import Scene
from homeassistant.const import (CONF_PLATFORM, CONF_TOKEN, CONF_TIMEOUT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import (async_get_clientsession)
_LOGGER = logging.getLogger(__name__)
LIFX_API_URL = 'https://api.lifx.com/v1/{0}'
DEFAULT_TIMEOUT = 10
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'lifx_cloud',
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
})
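# Editor's sketch of a matching configuration.yaml entry (the token value
# is a placeholder):
#
#   scene:
#     - platform: lifx_cloud
#       token: YOUR_LIFX_TOKEN
#       timeout: 10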
# pylint: disable=unused-argument
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the scenes stored in the LIFX Cloud."""
token = config.get(CONF_TOKEN)
timeout = config.get(CONF_TIMEOUT)
headers = {
"Authorization": "Bearer %s" % token,
}
url = LIFX_API_URL.format('scenes')
try:
httpsession = async_get_clientsession(hass)
with async_timeout.timeout(timeout, loop=hass.loop):
scenes_resp = yield from httpsession.get(url, headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.exception("Error on %s", url)
return False
status = scenes_resp.status
if status == 200:
data = yield from scenes_resp.json()
devices = []
for scene in data:
devices.append(LifxCloudScene(hass, headers, timeout, scene))
async_add_devices(devices)
return True
elif status == 401:
_LOGGER.error("Unauthorized (bad token?) on %s", url)
return False
_LOGGER.error("HTTP error %d on %s", scenes_resp.status, url)
return False
class LifxCloudScene(Scene):
"""Representation of a LIFX Cloud scene."""
def __init__(self, hass, headers, timeout, scene_data):
"""Initialize the scene."""
self.hass = hass
self._headers = headers
self._timeout = timeout
self._name = scene_data["name"]
self._uuid = scene_data["uuid"]
@property
def name(self):
"""Return the name of the scene."""
return self._name
@asyncio.coroutine
def async_activate(self):
"""Activate the scene."""
url = LIFX_API_URL.format('scenes/scene_id:%s/activate' % self._uuid)
try:
httpsession = async_get_clientsession(self.hass)
with async_timeout.timeout(self._timeout, loop=self.hass.loop):
yield from httpsession.put(url, headers=self._headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.exception("Error on %s", url)
|
MungoRae/home-assistant
|
homeassistant/components/scene/lifx_cloud.py
|
Python
|
apache-2.0
| 2,931
|
# -*- coding: utf-8 -*-
"""
openload.io urlresolver plugin
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib2
from HTMLParser import HTMLParser
from urlresolver9 import common
from urlresolver9.resolver import ResolverError
net = common.Net()
def get_media_url(url):
try:
HTTP_HEADER = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Referer': url} # 'Connection': 'keep-alive'
html = net.http_GET(url, headers=HTTP_HEADER).content
        hiddenurl = HTMLParser().unescape(re.search(r'hiddenurl">(.+?)</span>', html, re.IGNORECASE).group(1))
        s = []
        for i in hiddenurl:
            j = ord(i)
            # ROT47-style decode: rotate printable ASCII (codes 33-126)
            # by 47 places; everything else passes through unchanged.
            if 33 <= j <= 126:
                s.append(chr(33 + ((j + 14) % 94)))
            else:
                s.append(chr(j))
res = ''.join(s)
videoUrl = 'https://openload.co/stream/{0}?mime=true'.format(res)
dtext = videoUrl.replace('https', 'http')
headers = {'User-Agent': HTTP_HEADER['User-Agent']}
req = urllib2.Request(dtext, None, headers)
res = urllib2.urlopen(req)
videourl = res.geturl()
res.close()
return videourl
except Exception as e:
common.log_utils.log_debug('Exception during openload resolve parse: %s' % e)
raise
raise ResolverError('Unable to resolve openload.io link. Filelink not found.')
|
mrknow/filmkodi
|
script.mrknow.urlresolver/lib/urlresolver9/plugins/ol_gmu.py
|
Python
|
apache-2.0
| 2,291
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from nose.plugins.attrib import attr
from nose.tools import nottest
import numpy as np
from neon.util.testing import assert_tensor_equal
@attr('cuda')
class TestGPUTensor(object):
def setup(self):
from neon.backends.cc2 import GPUTensor
self.gpt = GPUTensor
def test_empty_creation(self):
tns = self.gpt([])
expected_shape = (0, )
while len(expected_shape) < tns._min_dims:
expected_shape += (1, )
assert tns.shape == expected_shape
def test_1d_creation(self):
tns = self.gpt([1, 2, 3, 4])
expected_shape = (4, )
while len(expected_shape) < tns._min_dims:
expected_shape += (1, )
assert tns.shape == expected_shape
def test_2d_creation(self):
tns = self.gpt([[1, 2], [3, 4]])
expected_shape = (2, 2)
while len(expected_shape) < tns._min_dims:
expected_shape += (1, )
assert tns.shape == expected_shape
def test_2d_ndarray_creation(self):
tns = self.gpt(np.array([[1.5, 2.5], [3.3, 9.2],
[0.111111, 5]]))
assert tns.shape == (3, 2)
@nottest # TODO: add >2 dimension support to cudanet
def test_higher_dim_creation(self):
shapes = ((1, 1, 1), (1, 2, 3, 4), (1, 2, 3, 4, 5, 6, 7))
for shape in shapes:
tns = self.gpt(np.empty(shape))
assert tns.shape == shape
def test_str(self):
tns = self.gpt([[1, 2], [3, 4]])
assert str(tns) == "[[ 1. 2.]\n [ 3. 4.]]"
def test_scalar_slicing(self):
tns = self.gpt([[1, 2], [3, 4]])
res = tns[1, 0]
assert res.shape == (1, 1)
assert_tensor_equal(res, self.gpt([[3]]))
def test_range_slicing(self):
tns = self.gpt([[1, 2], [3, 4]])
res = tns[0:2, 0]
assert res.shape == (2, 1)
assert_tensor_equal(res, self.gpt([1, 3]))
@nottest # TODO: add scalar assignment to self.gpt class
def test_scalar_slice_assignment(self):
tns = self.gpt([[1, 2], [3, 4]])
tns[1, 0] = 9
assert_tensor_equal(tns, self.gpt([[1, 2], [9, 4]]))
def test_asnumpyarray(self):
tns = self.gpt([[1, 2], [3, 4]])
res = tns.asnumpyarray()
assert isinstance(res, np.ndarray)
assert_tensor_equal(res, np.array([[1, 2], [3, 4]]))
@nottest # TODO: fix this for self.gpt
def test_transpose(self):
tns = self.gpt([[1, 2], [3, 4]])
res = tns.transpose()
assert_tensor_equal(res, self.gpt([[1, 3], [2, 4]]))
def test_fill(self):
tns = self.gpt([[1, 2], [3, 4]])
tns.fill(-9.5)
assert_tensor_equal(tns, self.gpt([[-9.5, -9.5], [-9.5, -9.5]]))
|
kfoss/neon
|
neon/backends/tests/test_cc2_tensor.py
|
Python
|
apache-2.0
| 3,502
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class ExecDateAfterStartDateDep(BaseTIDep):
NAME = "Execution Date"
IGNOREABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.start_date and ti.execution_date < ti.task.start_date:
yield self._failing_status(
reason="The execution date is {0} but this is before the task's start "
"date {1}.".format(
ti.execution_date.isoformat(),
ti.task.start_date.isoformat()))
if (ti.task.dag and ti.task.dag.start_date and
ti.execution_date < ti.task.dag.start_date):
yield self._failing_status(
reason="The execution date is {0} but this is before the task's "
"DAG's start date {1}.".format(
ti.execution_date.isoformat(),
ti.task.dag.start_date.isoformat()))
|
spektom/incubator-airflow
|
airflow/ti_deps/deps/exec_date_after_start_date_dep.py
|
Python
|
apache-2.0
| 1,807
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Hashing analyzer."""
import unittest
from plaso.containers import analyzer_result
from plaso.analyzers import hashing_analyzer
from plaso.analyzers.hashers import manager
from tests import test_lib as shared_test_lib
from tests.analyzers.hashers import manager as manager_test
class HashingAnalyzerTest(shared_test_lib.BaseTestCase):
"""Test the Hashing analyzer."""
# pylint: disable=protected-access
@classmethod
def setUpClass(cls):
"""Makes preparations before running any of the tests."""
manager.HashersManager.RegisterHasher(manager_test.TestHasher)
@classmethod
def tearDownClass(cls):
"""Cleans up after running all tests."""
manager.HashersManager.DeregisterHasher(manager_test.TestHasher)
def testHasherInitialization(self):
"""Test the creation of the analyzer, and the enabling of hashers."""
analyzer = hashing_analyzer.HashingAnalyzer()
analyzer.SetHasherNames('testhash')
self.assertEqual(len(analyzer._hashers), 1)
def testHashFile(self):
"""Tests that results are produced correctly."""
analyzer = hashing_analyzer.HashingAnalyzer()
analyzer.SetHasherNames('testhash')
analyzer.Analyze('test data')
results = analyzer.GetResults()
first_result = results[0]
self.assertIsInstance(first_result, analyzer_result.AnalyzerResult)
self.assertEqual(first_result.analyzer_name, 'hashing')
self.assertEqual(first_result.attribute_name, 'testhash_hash')
self.assertEqual(first_result.attribute_value, '4')
self.assertEqual(len(results), 1)
if __name__ == '__main__':
unittest.main()
|
joachimmetz/plaso
|
tests/analyzers/hashing_analyzer.py
|
Python
|
apache-2.0
| 1,661
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from nose.tools import * # noqa; PEP8 asserts
from webtest_plus import TestApp
import mock
import httplib as http
from flask import Flask
from modularodm import Q
from werkzeug.wrappers import BaseResponse
from framework import auth
from framework.auth import cas
from framework.sessions import Session
from framework.exceptions import HTTPError
from tests.base import OsfTestCase, assert_is_redirect
from tests.factories import (
UserFactory, UnregUserFactory, AuthFactory,
ProjectFactory, NodeFactory, AuthUserFactory, PrivateLinkFactory
)
from framework.auth import User, Auth
from framework.auth.decorators import must_be_logged_in
from website import mails
from website import settings
from website.project.decorators import (
must_have_permission, must_be_contributor,
must_have_addon, must_be_addon_authorizer,
)
class TestAuthUtils(OsfTestCase):
def test_unreg_user_can_register(self):
user = UnregUserFactory()
auth.register_unconfirmed(
username=user.username,
password='gattaca',
fullname='Rosie',
)
assert_true(user.get_confirmation_token(user.username))
@mock.patch('framework.auth.views.mails.send_mail')
def test_confirm_email(self, mock_mail):
user = UnregUserFactory()
auth.register_unconfirmed(
username=user.username,
password='gattaca',
fullname='Rosie',
)
token = user.get_confirmation_token(user.username)
res = self.app.get('/confirm/{}/{}'.format(user._id, token), allow_redirects=False)
res = res.follow()
assert_equal(res.status_code, 302)
assert_in('login?service=', res.location)
user.reload()
assert_equal(len(mock_mail.call_args_list), 1)
empty, kwargs = mock_mail.call_args
kwargs['user'].reload()
assert_equal(empty, ())
assert_equal(kwargs, {
'user': user,
'mimetype': 'html',
'mail': mails.WELCOME,
'to_addr': user.username,
})
self.app.set_cookie(settings.COOKIE_NAME, user.get_or_create_cookie())
res = self.app.get('/confirm/{}/{}'.format(user._id, token))
res = res.follow()
assert_equal(res.status_code, 302)
assert_in('dashboard', res.location)
assert_equal(len(mock_mail.call_args_list), 1)
session = Session.find(
Q('data.auth_user_id', 'eq', user._id)
).sort(
'-date_modified'
).limit(1)[0]
assert_equal(len(session.data['status']), 1)
def test_get_user_by_id(self):
user = UserFactory()
assert_equal(User.load(user._id), user)
def test_get_user_by_email(self):
user = UserFactory()
assert_equal(auth.get_user(email=user.username), user)
def test_get_user_with_wrong_password_returns_false(self):
user = UserFactory.build()
user.set_password('killerqueen')
assert_false(
auth.get_user(email=user.username, password='wrong')
)
class TestAuthObject(OsfTestCase):
def test_repr(self):
auth = AuthFactory()
rep = repr(auth)
assert_in(str(auth.user), rep)
def test_factory(self):
auth_obj = AuthFactory()
assert_true(isinstance(auth_obj.user, auth.User))
def test_from_kwargs(self):
user = UserFactory()
request_args = {'view_only': 'mykey'}
kwargs = {'user': user}
auth_obj = Auth.from_kwargs(request_args, kwargs)
assert_equal(auth_obj.user, user)
assert_equal(auth_obj.private_key, request_args['view_only'])
def test_logged_in(self):
user = UserFactory()
auth_obj = Auth(user=user)
assert_true(auth_obj.logged_in)
auth2 = Auth(user=None)
assert_false(auth2.logged_in)
class TestPrivateLink(OsfTestCase):
def setUp(self):
super(TestPrivateLink, self).setUp()
self.flaskapp = Flask('testing_private_links')
@self.flaskapp.route('/project/<pid>/')
@must_be_contributor
def project_get(**kwargs):
return 'success', 200
self.app = TestApp(self.flaskapp)
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory()
self.link.nodes.append(self.project)
self.link.save()
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_has_private_link_key(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=None)
res = self.app.get('/project/{0}'.format(self.project._primary_key),
{'view_only': self.link.key})
res = res.follow()
assert_equal(res.status_code, 200)
assert_equal(res.body, 'success')
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_does_not_have_key(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=None)
res = self.app.get('/project/{0}'.format(self.project._primary_key),
{'key': None})
assert_is_redirect(res)
# Flask app for testing view decorators
decoratorapp = Flask('decorators')
@must_be_contributor
def view_that_needs_contributor(**kwargs):
return kwargs.get('node') or kwargs.get('parent')
class AuthAppTestCase(OsfTestCase):
def setUp(self):
self.ctx = decoratorapp.test_request_context()
self.ctx.push()
def tearDown(self):
self.ctx.pop()
class TestMustBeContributorDecorator(AuthAppTestCase):
def setUp(self):
super(TestMustBeContributorDecorator, self).setUp()
self.contrib = AuthUserFactory()
self.project = ProjectFactory()
self.project.add_contributor(self.contrib, auth=Auth(self.project.creator))
self.project.save()
def test_must_be_contributor_when_user_is_contributor(self):
result = view_that_needs_contributor(
pid=self.project._primary_key,
user=self.contrib)
assert_equal(result, self.project)
def test_must_be_contributor_when_user_is_not_contributor_raises_error(self):
non_contributor = AuthUserFactory()
with assert_raises(HTTPError):
view_that_needs_contributor(
pid=self.project._primary_key,
user=non_contributor
)
def test_must_be_contributor_no_user(self):
res = view_that_needs_contributor(
pid=self.project._primary_key,
user=None,
)
assert_is_redirect(res)
# redirects to login url
redirect_url = res.headers['Location']
login_url = cas.get_login_url(service_url='http://localhost/')
assert_equal(redirect_url, login_url)
def test_must_be_contributor_parent_admin(self):
user = UserFactory()
node = NodeFactory(parent=self.project, creator=user)
res = view_that_needs_contributor(
pid=self.project._id,
nid=node._id,
user=self.project.creator,
)
assert_equal(res, node)
def test_must_be_contributor_parent_write(self):
user = UserFactory()
node = NodeFactory(parent=self.project, creator=user)
self.project.set_permissions(self.project.creator, ['read', 'write'])
self.project.save()
with assert_raises(HTTPError) as exc_info:
view_that_needs_contributor(
pid=self.project._id,
nid=node._id,
user=self.project.creator,
)
assert_equal(exc_info.exception.code, 403)
@must_be_logged_in
def protected(**kwargs):
return 'open sesame'
@must_have_permission('dance')
def thriller(**kwargs):
return 'chiller'
class TestPermissionDecorators(AuthAppTestCase):
@mock.patch('framework.auth.decorators.Auth.from_kwargs')
def test_must_be_logged_in_decorator_with_user(self, mock_from_kwargs):
user = UserFactory()
mock_from_kwargs.return_value = Auth(user=user)
protected()
@mock.patch('framework.auth.decorators.Auth.from_kwargs')
def test_must_be_logged_in_decorator_with_no_user(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth()
resp = protected()
assert_true(isinstance(resp, BaseResponse))
login_url = cas.get_login_url(service_url='http://localhost/')
assert_in(login_url, resp.headers.get('location'))
@mock.patch('website.project.decorators._kwargs_to_nodes')
@mock.patch('framework.auth.decorators.Auth.from_kwargs')
def test_must_have_permission_true(self, mock_from_kwargs, mock_to_nodes):
project = ProjectFactory()
project.add_permission(project.creator, 'dance')
mock_from_kwargs.return_value = Auth(user=project.creator)
mock_to_nodes.return_value = (None, project)
thriller(node=project)
@mock.patch('website.project.decorators._kwargs_to_nodes')
@mock.patch('framework.auth.decorators.Auth.from_kwargs')
def test_must_have_permission_false(self, mock_from_kwargs, mock_to_nodes):
project = ProjectFactory()
mock_from_kwargs.return_value = Auth(user=project.creator)
mock_to_nodes.return_value = (None, project)
with assert_raises(HTTPError) as ctx:
thriller(node=project)
assert_equal(ctx.exception.code, http.FORBIDDEN)
@mock.patch('website.project.decorators._kwargs_to_nodes')
@mock.patch('framework.auth.decorators.Auth.from_kwargs')
def test_must_have_permission_not_logged_in(self, mock_from_kwargs, mock_to_nodes):
project = ProjectFactory()
mock_from_kwargs.return_value = Auth()
mock_to_nodes.return_value = (None, project)
with assert_raises(HTTPError) as ctx:
thriller(node=project)
assert_equal(ctx.exception.code, http.UNAUTHORIZED)
def needs_addon_view(**kwargs):
return 'openaddon'
class TestMustHaveAddonDecorator(AuthAppTestCase):
def setUp(self):
super(TestMustHaveAddonDecorator, self).setUp()
self.project = ProjectFactory()
@mock.patch('website.project.decorators._kwargs_to_nodes')
def test_must_have_addon_node_true(self, mock_kwargs_to_nodes):
mock_kwargs_to_nodes.return_value = (None, self.project)
self.project.add_addon('github', auth=None)
decorated = must_have_addon('github', 'node')(needs_addon_view)
res = decorated()
assert_equal(res, 'openaddon')
@mock.patch('website.project.decorators._kwargs_to_nodes')
def test_must_have_addon_node_false(self, mock_kwargs_to_nodes):
mock_kwargs_to_nodes.return_value = (None, self.project)
self.project.delete_addon('github', auth=None)
decorated = must_have_addon('github', 'node')(needs_addon_view)
with assert_raises(HTTPError):
decorated()
@mock.patch('framework.auth.decorators.Auth.from_kwargs')
def test_must_have_addon_user_true(self, mock_current_user):
mock_current_user.return_value = Auth(self.project.creator)
self.project.creator.add_addon('github')
decorated = must_have_addon('github', 'user')(needs_addon_view)
res = decorated()
assert_equal(res, 'openaddon')
@mock.patch('framework.auth.decorators.Auth.from_kwargs')
def test_must_have_addon_user_false(self, mock_current_user):
mock_current_user.return_value = Auth(self.project.creator)
self.project.creator.delete_addon('github')
decorated = must_have_addon('github', 'user')(needs_addon_view)
with assert_raises(HTTPError):
decorated()
class TestMustBeAddonAuthorizerDecorator(AuthAppTestCase):
def setUp(self):
super(TestMustBeAddonAuthorizerDecorator, self).setUp()
self.project = ProjectFactory()
self.decorated = must_be_addon_authorizer('github')(needs_addon_view)
@mock.patch('website.project.decorators._kwargs_to_nodes')
@mock.patch('framework.auth.decorators.Auth.from_kwargs')
def test_must_be_authorizer_true(self, mock_get_current_user, mock_kwargs_to_nodes):
# Mock
mock_get_current_user.return_value = Auth(self.project.creator)
mock_kwargs_to_nodes.return_value = (None, self.project)
# Setup
self.project.add_addon('github', auth=None)
node_settings = self.project.get_addon('github')
self.project.creator.add_addon('github')
user_settings = self.project.creator.get_addon('github')
node_settings.user_settings = user_settings
# Test
res = self.decorated()
assert_equal(res, 'openaddon')
def test_must_be_authorizer_false(self):
# Setup
self.project.add_addon('github', auth=None)
node_settings = self.project.get_addon('github')
user2 = UserFactory()
user2.add_addon('github')
user_settings = user2.get_addon('github')
node_settings.user_settings = user_settings
# Test
with assert_raises(HTTPError):
self.decorated()
def test_must_be_authorizer_no_user_settings(self):
self.project.add_addon('github', auth=None)
with assert_raises(HTTPError):
self.decorated()
def test_must_be_authorizer_no_node_settings(self):
with assert_raises(HTTPError):
self.decorated()
if __name__ == '__main__':
unittest.main()
|
brandonPurvis/osf.io
|
tests/test_auth.py
|
Python
|
apache-2.0
| 13,540
|
from __future__ import absolute_import
from django.contrib.auth.models import UserManager
from django.utils.timezone import now as timezone_now
from zerver.models import UserProfile, Recipient, Subscription, Realm, Stream
import base64
import ujson
import os
import string
from six.moves import range
from typing import Optional, Text
def random_api_key():
# type: () -> Text
choices = string.ascii_letters + string.digits
altchars = ''.join([choices[ord(os.urandom(1)) % 62] for _ in range(2)]).encode("utf-8")
return base64.b64encode(os.urandom(24), altchars=altchars).decode("utf-8")
# create_user_profile is based on Django's User.objects.create_user,
# except that we don't save to the database, so it can be used in
# bulk_create operations (an illustrative sketch follows create_user below)
#
# Only use this for bulk_create -- for normal usage one should use
# create_user (below) which will also make the Subscription and
# Recipient objects
def create_user_profile(realm, email, password, active, bot_type, full_name,
short_name, bot_owner, is_mirror_dummy, tos_version,
timezone, tutorial_status=UserProfile.TUTORIAL_WAITING,
enter_sends=False):
# type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Text, Optional[Text], Optional[Text], bool) -> UserProfile
now = timezone_now()
email = UserManager.normalize_email(email)
user_profile = UserProfile(email=email, is_staff=False, is_active=active,
full_name=full_name, short_name=short_name,
last_login=now, date_joined=now, realm=realm,
pointer=-1, is_bot=bool(bot_type), bot_type=bot_type,
bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy,
tos_version=tos_version, timezone=timezone,
tutorial_status=tutorial_status,
enter_sends=enter_sends,
onboarding_steps=ujson.dumps([]),
default_language=realm.default_language)
if bot_type or not active:
password = None
user_profile.set_password(password)
user_profile.api_key = random_api_key()
return user_profile
def create_user(email, password, realm, full_name, short_name,
active=True, bot_type=None, bot_owner=None, tos_version=None,
timezone=u"", avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
is_mirror_dummy=False, default_sending_stream=None,
default_events_register_stream=None,
default_all_public_streams=None, user_profile_id=None):
# type: (Text, Optional[Text], Realm, Text, Text, bool, Optional[int], Optional[UserProfile], Optional[Text], Text, Text, bool, Optional[Stream], Optional[Stream], Optional[bool], Optional[int]) -> UserProfile
user_profile = create_user_profile(realm, email, password, active, bot_type,
full_name, short_name, bot_owner,
is_mirror_dummy, tos_version, timezone)
user_profile.avatar_source = avatar_source
user_profile.timezone = timezone
user_profile.default_sending_stream = default_sending_stream
user_profile.default_events_register_stream = default_events_register_stream
# Allow the ORM default to be used if not provided
if default_all_public_streams is not None:
user_profile.default_all_public_streams = default_all_public_streams
if user_profile_id is not None:
user_profile.id = user_profile_id
user_profile.save()
recipient = Recipient.objects.create(type_id=user_profile.id,
type=Recipient.PERSONAL)
Subscription.objects.create(user_profile=user_profile, recipient=recipient)
return user_profile
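# Illustrative sketch (not part of the original module): the comment above
# create_user_profile notes that it skips save() so profiles can be
# bulk-created. A hypothetical caller, with made-up names, might look like:
def bulk_create_example_users(realm, emails):
    # Build unsaved profiles in memory...
    profiles = [
        create_user_profile(realm, email, password=None, active=True,
                            bot_type=None, full_name=email, short_name=email,
                            bot_owner=None, is_mirror_dummy=False,
                            tos_version=None, timezone=u"")
        for email in emails
    ]
    # ...then issue a single INSERT. Note the Recipient and Subscription
    # rows created by create_user() would still be missing, which is why
    # normal (non-bulk) callers should use create_user() instead.
    UserProfile.objects.bulk_create(profiles)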
|
vabs22/zulip
|
zerver/lib/create_user.py
|
Python
|
apache-2.0
| 3,927
|
from common_fixtures import * # NOQA
def test_link_instance_stop_start(super_client, client, context):
target1 = context.create_container(ports=['180', '122/udp'])
target2 = context.create_container(ports=['280', '222/udp'])
c = context.create_container(instanceLinks={
'target1_link': target1.id,
'target2_link': target2.id})
assert c.state == 'running'
ports = set()
for link in c.instanceLinks():
for port in super_client.reload(link).data.fields.ports:
ports.add('{}:{}'.format(port.publicPort, port.privatePort))
assert len(ports) > 0
new_ports = set()
c = client.wait_success(c.stop())
assert c.state == 'stopped'
for link in super_client.reload(c).instanceLinks():
assert len(link.data.fields.ports) == 2
for port in link.data.fields.ports:
new_ports.add('{}:{}'.format(port.publicPort, port.privatePort))
assert ports == new_ports
new_ports = set()
c = client.wait_success(c.start())
assert c.state == 'running'
for link in super_client.reload(c).instanceLinks():
assert len(link.data.fields.ports) == 2
for port in link.data.fields.ports:
new_ports.add('{}:{}'.format(port.publicPort, port.privatePort))
assert ports == new_ports
def _find_agent_instance_ip(nsp, source):
assert source is not None
vnet_id = source.nics()[0].vnetId
assert vnet_id is not None
for agent_instance in nsp.instances():
if agent_instance.nics()[0].vnetId == vnet_id:
assert agent_instance.primaryIpAddress is not None
return agent_instance.primaryIpAddress
assert False, 'Failed to find agent instance for ' + source.id
def test_link_create(client, super_client, context):
target1 = context.create_container(ports=['180', '122/udp'])
target2 = context.create_container(ports=['280', '222/udp'])
c = context.create_container(instanceLinks={
'target1_link': target1.id,
'target2_link': target2.id})
assert c.state == 'running'
assert len(c.instanceLinks()) == 2
assert len(target1.targetInstanceLinks()) == 1
assert len(target2.targetInstanceLinks()) == 1
links = c.instanceLinks()
names = set([x.linkName for x in links])
assert names == set(['target1_link', 'target2_link'])
for link in links:
link = super_client.reload(link)
assert link.state == 'active'
assert link.instanceId == c.id
ip_address = _find_agent_instance_ip(context.nsp,
super_client.reload(c))
if link.linkName == 'target1_link':
assert link.targetInstanceId == target1.id
assert len(link.data.fields.ports) == 2
for port in link.data.fields.ports:
assert port.ipAddress == ip_address
assert port.publicPort is not None
if port.privatePort == 180:
assert port.protocol == 'tcp'
elif port.privatePort == 122:
assert port.protocol == 'udp'
else:
assert False
if link.linkName == 'target2_link':
assert link.targetInstanceId == target2.id
assert len(link.data.fields.ports) == 2
for port in link.data.fields.ports:
assert port.ipAddress == ip_address
assert port.publicPort is not None
if port.privatePort == 280:
assert port.protocol == 'tcp'
elif port.privatePort == 222:
assert port.protocol == 'udp'
else:
assert False
def test_link_update(client, context):
target1 = context.create_container()
target2 = context.create_container()
c = context.create_container(instanceLinks={
'target1_link': target1.id,
})
link = c.instanceLinks()[0]
assert link.targetInstanceId == target1.id
link.targetInstanceId = target2.id
link = client.update(link, link)
assert link.state == 'updating-active'
link = client.wait_success(link)
assert link.targetInstanceId == target2.id
assert link.state == 'active'
def test_link_remove_restore(client, context):
target1 = context.create_container()
c = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False,
instanceLinks={
'target1_link': target1.id})
c = client.wait_success(c)
links = c.instanceLinks()
assert len(links) == 1
link = links[0]
assert link.state == 'inactive'
c = client.wait_success(c.start())
link = client.reload(link)
assert c.state == 'running'
assert link.state == 'active'
c = client.wait_success(c.stop())
link = client.reload(link)
assert c.state == 'stopped'
assert link.state == 'inactive'
c = client.wait_success(client.delete(c))
link = client.reload(link)
assert c.state == 'removed'
assert link.state == 'inactive'
c = client.wait_success(c.restore())
link = client.reload(link)
assert c.state == 'stopped'
assert link.state == 'inactive'
c = client.wait_success(client.delete(c))
link = client.reload(link)
assert c.state == 'removed'
assert link.state == 'inactive'
c = client.wait_success(c.purge())
link = client.reload(link)
assert c.state == 'purged'
assert link.state == 'removed'
def test_null_links(context):
c = context.create_container(instanceLinks={
'null_link': None
})
links = c.instanceLinks()
assert len(links) == 1
assert links[0].state == 'active'
assert links[0].linkName == 'null_link'
assert links[0].targetInstanceId is None
def test_link_timeout(super_client, client, context):
t = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False)
c = super_client.create_container(accountId=context.project.id,
imageUuid=context.image_uuid,
instanceLinks={'t': t.id},
data={'linkWaitTime': 100})
c = client.wait_transitioning(c)
assert c.state == 'running'
def test_link_remove_instance_restart(client, super_client, context):
target1 = context.create_container()
c = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False,
instanceLinks={
'target1_link': target1.id})
c = client.wait_success(c)
links = c.instanceLinks()
assert len(links) == 1
link = links[0]
assert link.state == 'inactive'
c = client.wait_success(c.start())
link = client.reload(link)
assert c.state == 'running'
assert link.state == 'active'
c = client.wait_success(c.stop())
assert c.state == 'stopped'
link = client.reload(link)
link = super_client.wait_success(link.remove())
assert link.state == 'removed'
c = client.wait_success(c.start())
assert c.state == 'running'
|
jimengliu/cattle
|
tests/integration/cattletest/core/test_link.py
|
Python
|
apache-2.0
| 7,262
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from tempfile import mkdtemp
from urllib import quote
import shutil
from swift.common.storage_policy import StoragePolicy
from swift.common.swob import Request
from swift.common.utils import mkdirs, split_path
from swift.common.wsgi import monkey_patch_mimetools, WSGIContext
from swift.obj import server as object_server
from swift.proxy import server as proxy
import swift.proxy.controllers
from test.unit import FakeMemcache, debug_logger, FakeRing, \
fake_http_connect, patch_policies
class FakeServerConnection(WSGIContext):
'''Fakes an HTTPConnection to a server instance.'''
def __init__(self, app):
super(FakeServerConnection, self).__init__(app)
self.data = ''
def getheaders(self):
return self._response_headers
def read(self, amt=None):
try:
result = self.resp_iter.next()
return result
except StopIteration:
return ''
def getheader(self, name, default=None):
result = self._response_header_value(name)
return result if result else default
def getresponse(self):
environ = {'REQUEST_METHOD': self.method}
req = Request.blank(self.path, environ, headers=self.req_headers,
body=self.data)
self.data = ''
self.resp = self._app_call(req.environ)
self.resp_iter = iter(self.resp)
if self._response_headers is None:
self._response_headers = []
status_parts = self._response_status.split(' ', 1)
self.status = int(status_parts[0])
self.reason = status_parts[1] if len(status_parts) == 2 else ''
return self
def getexpect(self):
class ContinueResponse(object):
status = 100
return ContinueResponse()
def send(self, data):
self.data += data
def __call__(self, ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
self.path = quote('/' + device + '/' + str(partition) + path)
self.method = method
self.req_headers = headers
return self
def get_http_connect(account_func, container_func, object_func):
    '''Returns an http_connect function that delegates to
    entity-specific http_connect methods based on the request path.
    '''
def http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
a, c, o = split_path(path, 1, 3, True)
if o:
func = object_func
elif c:
func = container_func
else:
func = account_func
resp = func(ipaddr, port, device, partition, method, path,
headers=headers, query_string=query_string)
return resp
return http_connect
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(replicas=1))])
class TestObjectSysmeta(unittest.TestCase):
    '''Tests that object sysmeta is correctly handled by the combination
    of the proxy server and object server.
    '''
def _assertStatus(self, resp, expected):
self.assertEqual(resp.status_int, expected,
'Expected %d, got %s'
% (expected, resp.status))
def _assertInHeaders(self, resp, expected):
for key, val in expected.iteritems():
self.assertTrue(key in resp.headers,
'Header %s missing from %s' % (key, resp.headers))
self.assertEqual(val, resp.headers[key],
'Expected header %s:%s, got %s:%s'
% (key, val, key, resp.headers[key]))
def _assertNotInHeaders(self, resp, unexpected):
for key, val in unexpected.iteritems():
self.assertFalse(key in resp.headers,
'Header %s not expected in %s'
% (key, resp.headers))
def setUp(self):
self.app = proxy.Application(None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(replicas=1),
container_ring=FakeRing(replicas=1))
monkey_patch_mimetools()
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_object_server_ObjectController')
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.obj_ctlr = object_server.ObjectController(
conf, logger=debug_logger('obj-ut'))
http_connect = get_http_connect(fake_http_connect(200),
fake_http_connect(200),
FakeServerConnection(self.obj_ctlr))
swift.proxy.controllers.base.http_connect = http_connect
swift.proxy.controllers.obj.http_connect = http_connect
def tearDown(self):
shutil.rmtree(self.tmpdir)
original_sysmeta_headers_1 = {'x-object-sysmeta-test0': 'val0',
'x-object-sysmeta-test1': 'val1'}
original_sysmeta_headers_2 = {'x-object-sysmeta-test2': 'val2'}
changed_sysmeta_headers = {'x-object-sysmeta-test0': '',
'x-object-sysmeta-test1': 'val1 changed'}
new_sysmeta_headers = {'x-object-sysmeta-test3': 'val3'}
original_meta_headers_1 = {'x-object-meta-test0': 'meta0',
'x-object-meta-test1': 'meta1'}
original_meta_headers_2 = {'x-object-meta-test2': 'meta2'}
changed_meta_headers = {'x-object-meta-test0': '',
'x-object-meta-test1': 'meta1 changed'}
new_meta_headers = {'x-object-meta-test3': 'meta3'}
bad_headers = {'x-account-sysmeta-test1': 'bad1'}
def test_PUT_sysmeta_then_GET(self):
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_sysmeta_headers_1)
self._assertInHeaders(resp, self.original_meta_headers_1)
self._assertNotInHeaders(resp, self.bad_headers)
def test_PUT_sysmeta_then_HEAD(self):
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank(path, environ=env)
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_sysmeta_headers_1)
self._assertInHeaders(resp, self.original_meta_headers_1)
self._assertNotInHeaders(resp, self.bad_headers)
def test_sysmeta_replaced_by_PUT(self):
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_sysmeta_headers_2)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.original_meta_headers_2)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.changed_meta_headers)
hdrs.update(self.new_meta_headers)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertNotInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertNotInHeaders(resp, self.original_meta_headers_2)
def _test_sysmeta_not_updated_by_POST(self):
# check sysmeta is not changed by a POST but user meta is replaced
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'POST'}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.changed_meta_headers)
hdrs.update(self.new_meta_headers)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs)
resp = req.get_response(self.app)
self._assertStatus(resp, 202)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_sysmeta_headers_1)
self._assertNotInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertNotInHeaders(resp, self.bad_headers)
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertNotInHeaders(resp, self.original_sysmeta_headers_2)
def test_sysmeta_not_updated_by_POST(self):
self.app.object_post_as_copy = False
self._test_sysmeta_not_updated_by_POST()
def test_sysmeta_not_updated_by_POST_as_copy(self):
self.app.object_post_as_copy = True
self._test_sysmeta_not_updated_by_POST()
def test_sysmeta_updated_by_COPY(self):
        # check sysmeta is updated by a COPY in the same way as user meta
path = '/v1/a/c/o'
dest = '/c/o2'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_sysmeta_headers_2)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.original_meta_headers_2)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'COPY'}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.changed_meta_headers)
hdrs.update(self.new_meta_headers)
hdrs.update(self.bad_headers)
hdrs.update({'Destination': dest})
req = Request.blank(path, environ=env, headers=hdrs)
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertNotInHeaders(resp, self.bad_headers)
req = Request.blank('/v1/a/c/o2', environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertNotInHeaders(resp, self.bad_headers)
def test_sysmeta_updated_by_COPY_from(self):
        # check sysmeta is updated by a COPY in the same way as user meta
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_sysmeta_headers_2)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.original_meta_headers_2)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.changed_meta_headers)
hdrs.update(self.new_meta_headers)
hdrs.update(self.bad_headers)
hdrs.update({'X-Copy-From': '/c/o'})
req = Request.blank('/v1/a/c/o2', environ=env, headers=hdrs, body='')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertNotInHeaders(resp, self.bad_headers)
req = Request.blank('/v1/a/c/o2', environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertNotInHeaders(resp, self.bad_headers)
|
eatbyte/Swift
|
test/unit/proxy/test_sysmeta.py
|
Python
|
apache-2.0
| 15,680
|
# some modules use the old-style import: explicitly include
# the new module when the old one is referenced
hiddenimports = ["email.mime.text", "email.mime.multipart"]
|
supercheetah/diceroller
|
pyinstaller/PyInstaller/hooks/hook-paste.exceptions.reporter.py
|
Python
|
artistic-2.0
| 169
|
"""
#;+
#; NAME:
#; galaxy.core
#; Version 1.0
#;
#; PURPOSE:
#; Core routines for galaxy analysis
#; 29-Nov-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import os, copy, sys
import numpy as np
from astropy import units as u
from astropy.io import ascii
from astropy.coordinates import SkyCoord
from xastropy.xutils import xdebug as xdb
# Class for galaxies
class Galaxy(object):
"""A Galaxy Class
Attributes:
name: string
Name(s)
z: float
Adopted redshift
coord: Coordinates
mstar: float
            Stellar mass (solar masses)
"""
# Initialize with a .dat file
def __init__(self, ra=None, dec=None, z=0.):
self.z = z
# Coord
if ra is None:
ras = '00 00 00'
else:
ras = str(ra)
if dec is None:
decs = '+00 00 00'
else:
decs = str(dec)
self.coord = SkyCoord(ras, decs, 'icrs', unit=(u.hour, u.deg))
# Name
self.name = ('J'+
self.coord.ra.to_string(unit=u.hour,sep='',pad=True)+
self.coord.dec.to_string(sep='',pad=True,alwayssign=True))
# #############
def __repr__(self):
return ('[Galaxy: {:s} {:s} {:s}, z={:g}]'.format(
self.name,
self.coord.ra.to_string(unit=u.hour,sep=':',pad=True),
self.coord.dec.to_string(sep=':',pad=True),
self.z) )
## #################################
## #################################
## TESTING
## #################################
if __name__ == '__main__':
# Instantiate
gal = Galaxy()
print(gal)
|
astronomeara/xastropy-old
|
xastropy/galaxy/core.py
|
Python
|
bsd-3-clause
| 1,859
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-12 13:25
from __future__ import unicode_literals
from django.db import migrations
from django.utils.text import slugify
def create_slugs(apps, schema_editor):
Value = apps.get_model("product", "AttributeChoiceValue")
for value in Value.objects.all():
value.slug = slugify(value.display)
value.save()
class Migration(migrations.Migration):
dependencies = [("product", "0017_attributechoicevalue_slug")]
operations = [migrations.RunPython(create_slugs, migrations.RunPython.noop)]
|
maferelo/saleor
|
saleor/product/migrations/0018_auto_20161212_0725.py
|
Python
|
bsd-3-clause
| 582
|
"""
sphinxit.core.constants
~~~~~~~~~~~~~~~~~~~~~~~
Defines some Sphinx-specific constants.
:copyright: (c) 2013 by Roman Semirook.
:license: BSD, see LICENSE for more details.
"""
from collections import namedtuple
RESERVED_KEYWORDS = (
'AND',
'AS',
'ASC',
'AVG',
'BEGIN',
'BETWEEN',
'BY',
'CALL',
'COLLATION',
'COMMIT',
'COUNT',
'DELETE',
'DESC',
'DESCRIBE',
'DISTINCT',
'FALSE',
'FROM',
'GLOBAL',
'GROUP',
'IN',
'INSERT',
'INTO',
'LIMIT',
'MATCH',
'MAX',
'META',
'MIN',
'NOT',
'NULL',
'OPTION',
'OR',
'ORDER',
'REPLACE',
'ROLLBACK',
'SELECT',
'SET',
'SHOW',
'START',
'STATUS',
'SUM',
'TABLES',
'TRANSACTION',
'TRUE',
'UPDATE',
'VALUES',
'VARIABLES',
'WARNINGS',
'WEIGHT',
'WHERE',
'WITHIN'
)
ESCAPED_CHARS = namedtuple('EscapedChars', ['single_escape', 'double_escape'])(
single_escape=("'", '+', '[', ']', '=', '*'),
double_escape=('@', '!', '^', '(', ')', '~', '-', '|', '/', '<<', '$', '"')
)
NODES_ORDER = namedtuple('NodesOrder', ['select', 'update'])(
select=(
'SelectFrom',
'Where',
'GroupBy',
'OrderBy',
'WithinGroupOrderBy',
'Limit',
'Options'
),
update=(
'UpdateSet',
'Where',
'Options'
)
)
|
abhijo89/sphinxit
|
sphinxit/core/constants.py
|
Python
|
bsd-3-clause
| 1,436
|
# -*- coding: utf-8 -*-
"""
Provides textual descriptions for :mod:`behave.model` elements.
"""
from behave.textutil import indent
# -----------------------------------------------------------------------------
# FUNCTIONS:
# -----------------------------------------------------------------------------
def escape_cell(cell):
"""
Escape table cell contents.
:param cell: Table cell (as unicode string).
:return: Escaped cell (as unicode string).
"""
cell = cell.replace(u'\\', u'\\\\')
cell = cell.replace(u'\n', u'\\n')
cell = cell.replace(u'|', u'\\|')
return cell
def escape_triple_quotes(text):
"""
Escape triple-quotes, used for multi-line text/doc-strings.
"""
return text.replace(u'"""', u'\\"\\"\\"')
# -----------------------------------------------------------------------------
# CLASS:
# -----------------------------------------------------------------------------
class ModelDescriptor(object):
@staticmethod
def describe_table(table, indentation=None):
"""
Provide a textual description of the table (as used w/ Gherkin).
:param table: Table to use (as :class:`behave.model.Table`)
:param indentation: Line prefix to use (as string, if any).
:return: Textual table description (as unicode string).
"""
# -- STEP: Determine output size of all cells.
cell_lengths = []
all_rows = [table.headings] + table.rows
for row in all_rows:
lengths = [len(escape_cell(c)) for c in row]
cell_lengths.append(lengths)
# -- STEP: Determine max. output size for each column.
max_lengths = []
for col in range(0, len(cell_lengths[0])):
max_lengths.append(max([c[col] for c in cell_lengths]))
# -- STEP: Build textual table description.
lines = []
for r, row in enumerate(all_rows):
line = u"|"
for c, (cell, max_length) in enumerate(zip(row, max_lengths)):
pad_size = max_length - cell_lengths[r][c]
line += u" %s%s |" % (escape_cell(cell), " " * pad_size)
line += u"\n"
lines.append(line)
if indentation:
return indent(lines, indentation)
# -- OTHERWISE:
return u"".join(lines)
@staticmethod
def describe_docstring(doc_string, indentation=None):
"""
Provide a textual description of the multi-line text/triple-quoted
doc-string (as used w/ Gherkin).
:param doc_string: Multi-line text to use.
:param indentation: Line prefix to use (as string, if any).
:return: Textual table description (as unicode string).
"""
text = escape_triple_quotes(doc_string)
text = u'"""\n' + text + '\n"""\n'
if indentation:
text = indent(text, indentation)
return text
class ModelPrinter(ModelDescriptor):
def __init__(self, stream):
super(ModelPrinter, self).__init__()
self.stream = stream
def print_table(self, table, indentation=None):
self.stream.write(self.describe_table(table, indentation))
self.stream.flush()
def print_docstring(self, text, indentation=None):
self.stream.write(self.describe_docstring(text, indentation))
self.stream.flush()
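# -----------------------------------------------------------------------------
# USAGE SKETCH (illustrative, not part of the original module):
# -----------------------------------------------------------------------------
# describe_table() only needs an object exposing `headings` and `rows`, so a
# minimal stand-in is enough to demonstrate it (normally one would pass a
# behave.model.Table; the FakeTable here is an assumption for the example).
if __name__ == '__main__':
    import sys
    from collections import namedtuple
    FakeTable = namedtuple('FakeTable', ['headings', 'rows'])
    table = FakeTable(headings=[u'name', u'value'],
                      rows=[[u'alpha', u'1'], [u'beta|pipe', u'2']])
    ModelPrinter(sys.stdout).print_table(table, indentation=u'    ')
    # The '|' in "beta|pipe" comes out escaped and every column is padded
    # to its widest cell.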
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/behave/model_describe.py
|
Python
|
bsd-3-clause
| 3,364
|
#!/usr/bin/env python
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import heapq
import os
import platform
import random
import signal
import subprocess
# Base dir of the build products for Release and Debug.
OUT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))
def list_processes_linux():
"""Returns list of tuples (pid, command) of processes running in the same out
directory as this checkout.
"""
if platform.system() != 'Linux':
return []
try:
cmd = 'pgrep -fa %s' % OUT_DIR
output = subprocess.check_output(cmd, shell=True) or ''
processes = [
(int(line.split()[0]), line[line.index(OUT_DIR):])
for line in output.splitlines()
]
    # Filter out the odd entry whose command is just the out dir itself.
return [p for p in processes if p[1] != OUT_DIR]
except:
return []
def kill_processes_linux():
"""Kill stray processes on the system that started in the same out directory.
All swarming tasks share the same out directory location.
"""
if platform.system() != 'Linux':
return
for pid, cmd in list_processes_linux():
try:
print('Attempting to kill %d - %s' % (pid, cmd))
os.kill(pid, signal.SIGKILL)
except:
pass
class FixedSizeTopList():
"""Utility collection for gathering a fixed number of elements with the
biggest value for the given key. It employs a heap from which we pop the
smallest element when the collection is 'full'.
If you need a reversed behaviour (collect min values) just provide an
inverse key."""
def __init__(self, size, key=None):
self.size = size
self.key = key or (lambda x: x)
self.data = []
self.discriminator = 0
def add(self, elem):
elem_k = self.key(elem)
heapq.heappush(self.data, (elem_k, self.extra_key(), elem))
if len(self.data) > self.size:
heapq.heappop(self.data)
def extra_key(self):
# Avoid key clash in tuples sent to the heap.
# We want to avoid comparisons on the last element of the tuple
# since those elements might not be comparable.
self.discriminator += 1
return self.discriminator
def as_list(self):
original_data = [rec for (_, _, rec) in self.data]
return sorted(original_data, key=self.key, reverse=True)
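# Usage sketch (illustrative, not part of the original module): keep the
# three biggest (name, duration) records, then invert the key to collect
# minima instead, as the class docstring suggests. The data is made up.
if __name__ == '__main__':
  durations = [('a', 1.0), ('b', 9.0), ('c', 4.0), ('d', 7.0), ('e', 2.0)]
  slowest = FixedSizeTopList(3, key=lambda rec: rec[1])
  for rec in durations:
    slowest.add(rec)
  print(slowest.as_list())  # [('b', 9.0), ('d', 7.0), ('c', 4.0)]
  fastest = FixedSizeTopList(3, key=lambda rec: -rec[1])
  for rec in durations:
    fastest.add(rec)
  print(fastest.as_list())  # [('a', 1.0), ('e', 2.0), ('c', 4.0)]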
|
youtube/cobalt
|
third_party/v8/tools/testrunner/testproc/util.py
|
Python
|
bsd-3-clause
| 2,389
|
from django.utils.translation import gettext_lazy as _
from wagtail.admin.ui.tables import Column, StatusFlagColumn, TitleColumn
from wagtail.admin.views import generic
from wagtail.admin.viewsets.model import ModelViewSet
from wagtail.core.models import Site
from wagtail.core.permissions import site_permission_policy
from wagtail.sites.forms import SiteForm
class IndexView(generic.IndexView):
page_title = _("Sites")
add_item_label = _("Add a site")
context_object_name = 'sites'
default_ordering = 'hostname'
columns = [
TitleColumn('hostname', label=_("Site"), sort_key='hostname', url_name='wagtailsites:edit'),
Column('port', sort_key='port'),
Column('site_name'),
Column('root_page'),
StatusFlagColumn('is_default_site', label=_("Default?"), true_label=_("Default")),
]
class CreateView(generic.CreateView):
page_title = _("Add site")
success_message = _("Site '{0}' created.")
template_name = 'wagtailsites/create.html'
class EditView(generic.EditView):
success_message = _("Site '{0}' updated.")
error_message = _("The site could not be saved due to errors.")
delete_item_label = _("Delete site")
context_object_name = 'site'
template_name = 'wagtailsites/edit.html'
class DeleteView(generic.DeleteView):
success_message = _("Site '{0}' deleted.")
page_title = _("Delete site")
confirmation_message = _("Are you sure you want to delete this site?")
class SiteViewSet(ModelViewSet):
icon = 'site'
model = Site
permission_policy = site_permission_policy
index_view_class = IndexView
add_view_class = CreateView
edit_view_class = EditView
delete_view_class = DeleteView
def get_form_class(self, for_update=False):
return SiteForm
|
zerolab/wagtail
|
wagtail/sites/views.py
|
Python
|
bsd-3-clause
| 1,799
|
"""Tests for Gosper's algorithm for hypergeometric summation. """
from sympy import binomial, factorial, gamma, Poly, S, simplify, sqrt, exp, log, Symbol
from sympy.abc import a, b, j, k, m, n, r, x
from sympy.concrete.gosper import gosper_normal, gosper_sum, gosper_term
def test_gosper_normal():
assert gosper_normal(4*n + 5, 2*(4*n + 1)*(2*n + 3), n) == \
(Poly(S(1)/4, n), Poly(n + S(3)/2), Poly(n + S(1)/4))
def test_gosper_term():
assert gosper_term((4*k + 1)*factorial(
k)/factorial(2*k + 1), k) == (-k - S(1)/2)/(k + S(1)/4)
def test_gosper_sum():
assert gosper_sum(1, (k, 0, n)) == 1 + n
assert gosper_sum(k, (k, 0, n)) == n*(1 + n)/2
assert gosper_sum(k**2, (k, 0, n)) == n*(1 + n)*(1 + 2*n)/6
assert gosper_sum(k**3, (k, 0, n)) == n**2*(1 + n)**2/4
assert gosper_sum(2**k, (k, 0, n)) == 2*2**n - 1
assert gosper_sum(factorial(k), (k, 0, n)) is None
assert gosper_sum(binomial(n, k), (k, 0, n)) is None
assert gosper_sum(factorial(k)/k**2, (k, 0, n)) is None
assert gosper_sum((k - 3)*factorial(k), (k, 0, n)) is None
assert gosper_sum(k*factorial(k), k) == factorial(k)
assert gosper_sum(
k*factorial(k), (k, 0, n)) == n*factorial(n) + factorial(n) - 1
assert gosper_sum((-1)**k*binomial(n, k), (k, 0, n)) == 0
assert gosper_sum((
-1)**k*binomial(n, k), (k, 0, m)) == -(-1)**m*(m - n)*binomial(n, m)/n
assert gosper_sum((4*k + 1)*factorial(k)/factorial(2*k + 1), (k, 0, n)) == \
(2*factorial(2*n + 1) - factorial(n))/factorial(2*n + 1)
# issue 2934:
assert gosper_sum(
n*(n + a + b)*a**n*b**n/(factorial(n + a)*factorial(n + b)), \
(n, 0, m)) == -a*b*(exp(m*log(a))*exp(m*log(b))*factorial(a)* \
factorial(b) - factorial(a + m)*factorial(b + m))/(factorial(a)* \
factorial(b)*factorial(a + m)*factorial(b + m))
def test_gosper_sum_indefinite():
assert gosper_sum(k, k) == k*(k - 1)/2
assert gosper_sum(k**2, k) == k*(k - 1)*(2*k - 1)/6
assert gosper_sum(1/(k*(k + 1)), k) == -1/k
assert gosper_sum(-(27*k**4 + 158*k**3 + 430*k**2 + 678*k + 445)*gamma(2*k + 4)/(3*(3*k + 7)*gamma(3*k + 6)), k) == \
(3*k + 5)*(k**2 + 2*k + 5)*gamma(2*k + 4)/gamma(3*k + 6)
def test_gosper_sum_parametric():
assert gosper_sum(binomial(S(1)/2, m - j + 1)*binomial(S(1)/2, m + j), (j, 1, n)) == \
n*(1 + m - n)*(-1 + 2*m + 2*n)*binomial(S(1)/2, 1 + m - n)* \
binomial(S(1)/2, m + n)/(m*(1 + 2*m))
def test_gosper_sum_algebraic():
assert gosper_sum(
n**2 + sqrt(2), (n, 0, m)) == (m + 1)*(2*m**2 + m + 6*sqrt(2))/6
def test_gosper_sum_iterated():
f1 = binomial(2*k, k)/4**k
f2 = (1 + 2*n)*binomial(2*n, n)/4**n
f3 = (1 + 2*n)*(3 + 2*n)*binomial(2*n, n)/(3*4**n)
f4 = (1 + 2*n)*(3 + 2*n)*(5 + 2*n)*binomial(2*n, n)/(15*4**n)
f5 = (1 + 2*n)*(3 + 2*n)*(5 + 2*n)*(7 + 2*n)*binomial(2*n, n)/(105*4**n)
assert gosper_sum(f1, (k, 0, n)) == f2
assert gosper_sum(f2, (n, 0, n)) == f3
assert gosper_sum(f3, (n, 0, n)) == f4
assert gosper_sum(f4, (n, 0, n)) == f5
# the AeqB tests test expressions given in
# www.math.upenn.edu/~wilf/AeqB.pdf
def test_gosper_sum_AeqB_part1():
f1a = n**4
f1b = n**3*2**n
f1c = 1/(n**2 + sqrt(5)*n - 1)
f1d = n**4*4**n/binomial(2*n, n)
f1e = factorial(3*n)/(factorial(n)*factorial(n + 1)*factorial(n + 2)*27**n)
f1f = binomial(2*n, n)**2/((n + 1)*4**(2*n))
f1g = (4*n - 1)*binomial(2*n, n)**2/((2*n - 1)**2*4**(2*n))
f1h = n*factorial(n - S(1)/2)**2/factorial(n + 1)**2
g1a = m*(m + 1)*(2*m + 1)*(3*m**2 + 3*m - 1)/30
g1b = 26 + 2**(m + 1)*(m**3 - 3*m**2 + 9*m - 13)
g1c = (m + 1)*(m*(m**2 - 7*m + 3)*sqrt(5) - (
3*m**3 - 7*m**2 + 19*m - 6))/(2*m**3*sqrt(5) + m**4 + 5*m**2 - 1)/6
g1d = -S(2)/231 + 2*4**m*(m + 1)*(63*m**4 + 112*m**3 + 18*m**2 -
22*m + 3)/(693*binomial(2*m, m))
g1e = -S(9)/2 + (81*m**2 + 261*m + 200)*factorial(
3*m + 2)/(40*27**m*factorial(m)*factorial(m + 1)*factorial(m + 2))
g1f = (2*m + 1)**2*binomial(2*m, m)**2/(4**(2*m)*(m + 1))
g1g = -binomial(2*m, m)**2/4**(2*m)
g1h = -(2*m + 1)**2*(3*m + 4)*factorial(m - S(1)/2)**2/factorial(m + 1)**2
g = gosper_sum(f1a, (n, 0, m))
assert g is not None and simplify(g - g1a) == 0
g = gosper_sum(f1b, (n, 0, m))
assert g is not None and simplify(g - g1b) == 0
g = gosper_sum(f1c, (n, 0, m))
assert g is not None and simplify(g - g1c) == 0
g = gosper_sum(f1d, (n, 0, m))
assert g is not None and simplify(g - g1d) == 0
g = gosper_sum(f1e, (n, 0, m))
assert g is not None and simplify(g - g1e) == 0
g = gosper_sum(f1f, (n, 0, m))
assert g is not None and simplify(g - g1f) == 0
g = gosper_sum(f1g, (n, 0, m))
assert g is not None and simplify(g - g1g) == 0
g = gosper_sum(f1h, (n, 0, m))
assert g is not None and simplify(g - g1h) == 0
def test_gosper_sum_AeqB_part2():
f2a = n**2*a**n
f2b = (n - r/2)*binomial(r, n)
f2c = factorial(n - 1)**2/(factorial(n - x)*factorial(n + x))
g2a = -a*(a + 1)/(a - 1)**3 + a**(
m + 1)*(a**2*m**2 - 2*a*m**2 + m**2 - 2*a*m + 2*m + a + 1)/(a - 1)**3
g2b = (m - r)*binomial(r, m)/2
ff = factorial(1 - x)*factorial(1 + x)
g2c = 1/ff*(
1 - 1/x**2) + factorial(m)**2/(x**2*factorial(m - x)*factorial(m + x))
g = gosper_sum(f2a, (n, 0, m))
assert g is not None and simplify(g - g2a) == 0
g = gosper_sum(f2b, (n, 0, m))
assert g is not None and simplify(g - g2b) == 0
g = gosper_sum(f2c, (n, 1, m))
assert g is not None and simplify(g - g2c) == 0
def test_gosper_nan():
a = Symbol('a', positive=True)
b = Symbol('b', positive=True)
n = Symbol('n', integer=True)
m = Symbol('m', integer=True)
f2d = n*(n + a + b)*a**n*b**n/(factorial(n + a)*factorial(n + b))
g2d = 1/(factorial(a - 1)*factorial(
b - 1)) - a**(m + 1)*b**(m + 1)/(factorial(a + m)*factorial(b + m))
g = gosper_sum(f2d, (n, 0, m))
assert simplify(g - g2d) == 0
def test_gosper_sum_AeqB_part3():
f3a = 1/n**4
f3b = (6*n + 3)/(4*n**4 + 8*n**3 + 8*n**2 + 4*n + 3)
f3c = 2**n*(n**2 - 2*n - 1)/(n**2*(n + 1)**2)
f3d = n**2*4**n/((n + 1)*(n + 2))
f3e = 2**n/(n + 1)
f3f = 4*(n - 1)*(n**2 - 2*n - 1)/(n**2*(n + 1)**2*(n - 2)**2*(n - 3)**2)
f3g = (n**4 - 14*n**2 - 24*n - 9)*2**n/(n**2*(n + 1)**2*(n + 2)**2*
(n + 3)**2)
# g3a -> no closed form
g3b = m*(m + 2)/(2*m**2 + 4*m + 3)
g3c = 2**m/m**2 - 2
g3d = S(2)/3 + 4**(m + 1)*(m - 1)/(m + 2)/3
# g3e -> no closed form
g3f = -(-S(1)/16 + 1/((m - 2)**2*(m + 1)**2)) # the AeqB key is wrong
g3g = -S(2)/9 + 2**(m + 1)/((m + 1)**2*(m + 3)**2)
g = gosper_sum(f3a, (n, 1, m))
assert g is None
g = gosper_sum(f3b, (n, 1, m))
assert g is not None and simplify(g - g3b) == 0
g = gosper_sum(f3c, (n, 1, m - 1))
assert g is not None and simplify(g - g3c) == 0
g = gosper_sum(f3d, (n, 1, m))
assert g is not None and simplify(g - g3d) == 0
g = gosper_sum(f3e, (n, 0, m - 1))
assert g is None
g = gosper_sum(f3f, (n, 4, m))
assert g is not None and simplify(g - g3f) == 0
g = gosper_sum(f3g, (n, 1, m))
assert g is not None and simplify(g - g3g) == 0
|
hrashk/sympy
|
sympy/concrete/tests/test_gosper.py
|
Python
|
bsd-3-clause
| 7,307
|
from __future__ import print_function, absolute_import, division
import os
import shutil
from itertools import product
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from casa_formats_io import coordsys_to_astropy_wcs
from ..io.casa_masks import make_casa_mask
from .. import StokesSpectralCube, BooleanArrayMask
from .. import SpectralCube, VaryingResolutionSpectralCube
try:
import casatools
from casatools import image
CASA_INSTALLED = True
except ImportError:
try:
from taskinit import ia as image
CASA_INSTALLED = True
except ImportError:
CASA_INSTALLED = False
DATA = os.path.join(os.path.dirname(__file__), 'data')
def make_casa_testimage(infile, outname):
infile = str(infile)
outname = str(outname)
if not CASA_INSTALLED:
raise Exception("Attempted to make a CASA test image in a non-CASA "
"environment")
ia = image()
ia.fromfits(infile=infile, outfile=outname, overwrite=True)
ia.unlock()
ia.close()
ia.done()
cube = SpectralCube.read(infile)
if isinstance(cube, VaryingResolutionSpectralCube):
ia.open(outname)
        # populate the restoring beam with placeholder values
ia.setrestoringbeam(major={'value':1.0, 'unit':'arcsec'},
minor={'value':1.0, 'unit':'arcsec'},
pa={'value':90.0, 'unit':'deg'},
channel=len(cube.beams)-1,
polarization=-1,
)
# populate each beam (hard assumption of 1 poln)
for channum, beam in enumerate(cube.beams):
casabdict = {'major': {'value':beam.major.to(u.deg).value, 'unit':'deg'},
'minor': {'value':beam.minor.to(u.deg).value, 'unit':'deg'},
'positionangle': {'value':beam.pa.to(u.deg).value, 'unit':'deg'}
}
ia.setrestoringbeam(beam=casabdict, channel=channum, polarization=0)
ia.unlock()
ia.close()
ia.done()
@pytest.fixture
def filename(request):
return request.getfixturevalue(request.param)
@pytest.mark.parametrize(('memmap', 'bigendian'), product((False, True), (False, True)))
def test_casa_read_basic(memmap, bigendian):
# Check that SpectralCube.read works for an example CASA dataset stored
# in the tests directory. This test should NOT require CASA, whereas a
# number of tests below require CASA to generate test datasets. The present
# test is to ensure CASA is not required for reading.
if bigendian:
cube = SpectralCube.read(os.path.join(DATA, 'basic_bigendian.image'), memmap=memmap)
else:
cube = SpectralCube.read(os.path.join(DATA, 'basic.image'), memmap=memmap)
assert cube.shape == (3, 4, 5)
assert_allclose(cube.wcs.pixel_to_world_values(1, 2, 3),
[2.406271e+01, 2.993521e+01, 1.421911e+09])
# Carry out an operation to make sure the underlying data array works
cube.moment0()
# Slice the dataset
assert_quantity_allclose(cube.unmasked_data[0, 0, :],
[1, 1, 1, 1, 1] * u.Jy / u.beam)
assert_quantity_allclose(cube.unmasked_data[0, 1, 2], 1 * u.Jy / u.beam)
def test_casa_read_basic_nodask():
# For CASA datasets, the default when reading cubes is use_dask=True.
# Here we check that setting use_dask=False explicitly raises an error.
with pytest.raises(ValueError, match='Loading CASA datasets is not possible with use_dask=False'):
SpectralCube.read(os.path.join(DATA, 'basic.image'), use_dask=False)
def test_casa_read_basic_nomask():
# Make sure things work well if there is no mask in the data
cube = SpectralCube.read(os.path.join(DATA, 'nomask.image'))
assert cube.shape == (3, 4, 5)
assert_allclose(cube.wcs.pixel_to_world_values(1, 2, 3),
[2.406271e+01, 2.993521e+01, 1.421911e+09])
# Carry out an operation to make sure the underlying data array works
cube.moment0()
# Slice the dataset
assert_quantity_allclose(cube.unmasked_data[0, 0, :],
[1, 1, 1, 1, 1] * u.Jy / u.beam)
assert_quantity_allclose(cube.unmasked_data[0, 1, 2], 1 * u.Jy / u.beam)
# Slice the cube
assert_quantity_allclose(cube[:, 0, 0],
[1, 1, 1] * u.Jy / u.beam)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
@pytest.mark.parametrize('filename', ('data_adv', 'data_advs', 'data_sdav',
'data_vad', 'data_vsad'),
indirect=['filename'])
def test_casa_read(filename, tmp_path):
# Check that SpectralCube.read returns data with the same shape and values
# if read from CASA as if read from FITS.
cube = SpectralCube.read(filename)
make_casa_testimage(filename, tmp_path / 'casa.image')
casacube = SpectralCube.read(tmp_path / 'casa.image')
assert casacube.shape == cube.shape
assert_allclose(casacube.unmasked_data[:].value,
cube.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
@pytest.mark.parametrize('filename', ('data_adv', 'data_advs', 'data_sdav',
'data_vad', 'data_vsad'),
indirect=['filename'])
def test_casa_read_nomask(filename, tmp_path):
# As for test_casa_read, but we remove the mask to make sure
# that we can still read in the cubes
cube = SpectralCube.read(filename)
make_casa_testimage(filename, tmp_path / 'casa.image')
shutil.rmtree(tmp_path / 'casa.image' / 'mask0')
casacube = SpectralCube.read(tmp_path / 'casa.image')
assert casacube.shape == cube.shape
assert_allclose(casacube.unmasked_data[:].value,
cube.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_read_stokes(data_advs, tmp_path):
# Check that StokesSpectralCube.read returns data with the same shape and values
# if read from CASA as if read from FITS.
cube = StokesSpectralCube.read(data_advs)
make_casa_testimage(data_advs, tmp_path / 'casa.image')
casacube = StokesSpectralCube.read(tmp_path / 'casa.image')
assert casacube.I.shape == cube.I.shape
assert_allclose(casacube.I.unmasked_data[:].value,
cube.I.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_mask(data_adv, tmp_path):
# This tests the make_casa_mask function which can be used to create a mask
# file in an existing image.
cube = SpectralCube.read(data_adv)
mask_array = np.array([[True, False], [False, False], [True, True]])
bool_mask = BooleanArrayMask(mask=mask_array, wcs=cube._wcs,
shape=cube.shape)
cube = cube.with_mask(bool_mask)
make_casa_mask(cube, str(tmp_path / 'casa.mask'), add_stokes=False,
append_to_image=False, overwrite=True)
ia = casatools.image()
ia.open(str(tmp_path / 'casa.mask'))
casa_mask = ia.getchunk()
coords = ia.coordsys()
ia.unlock()
ia.close()
ia.done()
# Test masks
    # Mask array is broadcast to the cube shape. Mimic this, switch to ints,
    # and transpose to match the CASA image.
compare_mask = np.tile(mask_array, (4, 1, 1)).astype('int16').T
assert np.all(compare_mask == casa_mask)
# Test WCS info
# Convert back to an astropy wcs object so transforms are dealt with.
casa_wcs = coordsys_to_astropy_wcs(coords.torecord())
header = casa_wcs.to_header() # Invokes transform
# Compare some basic properties EXCLUDING the spectral axis
assert_allclose(cube.wcs.wcs.crval[:2], casa_wcs.wcs.crval[:2])
assert_allclose(cube.wcs.wcs.cdelt[:2], casa_wcs.wcs.cdelt[:2])
assert np.all(list(cube.wcs.wcs.cunit)[:2] == list(casa_wcs.wcs.cunit)[:2])
assert np.all(list(cube.wcs.wcs.ctype)[:2] == list(casa_wcs.wcs.ctype)[:2])
assert_allclose(cube.wcs.wcs.crpix, casa_wcs.wcs.crpix)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_mask_append(data_adv, tmp_path):
# This tests the append option for the make_casa_mask function
cube = SpectralCube.read(data_adv)
mask_array = np.array([[True, False], [False, False], [True, True]])
bool_mask = BooleanArrayMask(mask=mask_array, wcs=cube._wcs,
shape=cube.shape)
cube = cube.with_mask(bool_mask)
make_casa_testimage(data_adv, tmp_path / 'casa.image')
# in this case, casa.mask is the name of the mask, not its path
make_casa_mask(cube, 'casa.mask', append_to_image=True,
img=str(tmp_path / 'casa.image'), add_stokes=False, overwrite=True)
assert os.path.exists(tmp_path / 'casa.image/casa.mask')
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_beams(data_adv, data_adv_beams, tmp_path):
# Test both make_casa_testimage and the beam reading tools using casa's
# image reader
make_casa_testimage(data_adv, tmp_path / 'casa_adv.image')
make_casa_testimage(data_adv_beams, tmp_path / 'casa_adv_beams.image')
cube = SpectralCube.read(tmp_path / 'casa_adv.image', format='casa_image')
assert hasattr(cube, 'beam')
cube_beams = SpectralCube.read(tmp_path / 'casa_adv_beams.image', format='casa_image')
assert hasattr(cube_beams, 'beams')
assert isinstance(cube_beams, VaryingResolutionSpectralCube)
|
radio-astro-tools/spectral-cube
|
spectral_cube/tests/test_casafuncs.py
|
Python
|
bsd-3-clause
| 9,896
|
from test_support import verify, verbose, TestFailed
from string import join
from random import random, randint
# SHIFT should match the value in longintrepr.h for best testing.
SHIFT = 15
BASE = 2 ** SHIFT
MASK = BASE - 1
# Max number of base BASE digits to use in test cases. Doubling
# this will at least quadruple the runtime.
MAXDIGITS = 10
# build some special values
special = map(long, [0, 1, 2, BASE, BASE >> 1])
special.append(0x5555555555555555L)
special.append(0xaaaaaaaaaaaaaaaaL)
# some solid strings of one bits
p2 = 4L # 0 and 1 already added
for i in range(2*SHIFT):
special.append(p2 - 1)
p2 = p2 << 1
del p2
# add complements & negations
special = special + map(lambda x: ~x, special) + \
map(lambda x: -x, special)
# ------------------------------------------------------------ utilities
# Use check instead of assert so the test still does something
# under -O.
def check(ok, *args):
if not ok:
raise TestFailed, join(map(str, args), " ")
# Get quasi-random long consisting of ndigits digits (in base BASE).
# quasi == the most-significant digit will not be 0, and the number
# is constructed to contain long strings of 0 and 1 bits. These are
# more likely than random bits to provoke digit-boundary errors.
# The sign of the number is also random.
def getran(ndigits):
verify(ndigits > 0)
nbits_hi = ndigits * SHIFT
nbits_lo = nbits_hi - SHIFT + 1
answer = 0L
nbits = 0
r = int(random() * (SHIFT * 2)) | 1 # force 1 bits to start
while nbits < nbits_lo:
bits = (r >> 1) + 1
bits = min(bits, nbits_hi - nbits)
verify(1 <= bits <= SHIFT)
nbits = nbits + bits
answer = answer << bits
if r & 1:
answer = answer | ((1 << bits) - 1)
r = int(random() * (SHIFT * 2))
verify(nbits_lo <= nbits <= nbits_hi)
if random() < 0.5:
answer = -answer
return answer
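# Illustration (hand-derived from the loop above, with SHIFT == 15):
# getran(2) returns a long whose bit length falls in [16, 30], built from
# alternating solid runs of 0s and 1s, negated half the time.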
# Get random long consisting of ndigits random digits (relative to base
# BASE). The sign bit is also random.
def getran2(ndigits):
answer = 0L
for i in range(ndigits):
answer = (answer << SHIFT) | randint(0, MASK)
if random() < 0.5:
answer = -answer
return answer
# --------------------------------------------------------------- divmod
def test_division_2(x, y):
q, r = divmod(x, y)
q2, r2 = x/y, x%y
pab, pba = x*y, y*x
check(pab == pba, "multiplication does not commute for", x, y)
check(q == q2, "divmod returns different quotient than / for", x, y)
check(r == r2, "divmod returns different mod than % for", x, y)
check(x == q*y + r, "x != q*y + r after divmod on", x, y)
if y > 0:
check(0 <= r < y, "bad mod from divmod on", x, y)
else:
check(y < r <= 0, "bad mod from divmod on", x, y)
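# Worked instance of the invariants above, using Python's floored division:
# divmod(7L, -2L) == (-4L, -1L), and indeed 7 == (-4)*(-2) + (-1) with
# -2 < -1 <= 0.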
def test_division(maxdigits=MAXDIGITS):
print "long / * % divmod"
digits = range(1, maxdigits+1)
for lenx in digits:
x = getran(lenx)
for leny in digits:
y = getran(leny) or 1L
test_division_2(x, y)
# -------------------------------------------------------------- ~ & | ^
def test_bitop_identities_1(x):
check(x & 0 == 0, "x & 0 != 0 for", x)
check(x | 0 == x, "x | 0 != x for", x)
check(x ^ 0 == x, "x ^ 0 != x for", x)
check(x & -1 == x, "x & -1 != x for", x)
check(x | -1 == -1, "x | -1 != -1 for", x)
check(x ^ -1 == ~x, "x ^ -1 != ~x for", x)
check(x == ~~x, "x != ~~x for", x)
check(x & x == x, "x & x != x for", x)
check(x | x == x, "x | x != x for", x)
check(x ^ x == 0, "x ^ x != 0 for", x)
check(x & ~x == 0, "x & ~x != 0 for", x)
check(x | ~x == -1, "x | ~x != -1 for", x)
check(x ^ ~x == -1, "x ^ ~x != -1 for", x)
check(-x == 1 + ~x == ~(x-1), "not -x == 1 + ~x == ~(x-1) for", x)
for n in range(2*SHIFT):
p2 = 2L ** n
check(x << n >> n == x, "x << n >> n != x for", x, n)
check(x / p2 == x >> n, "x / p2 != x >> n for x n p2", x, n, p2)
check(x * p2 == x << n, "x * p2 != x << n for x n p2", x, n, p2)
check(x & -p2 == x >> n << n == x & ~(p2 - 1),
"not x & -p2 == x >> n << n == x & ~(p2 - 1) for x n p2",
x, n, p2)
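# Hand-checked instance of the power-of-two identities above: for x == 13L,
# n == 2, p2 == 4L we get 13 >> 2 << 2 == 12 == 13 & -4.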
def test_bitop_identities_2(x, y):
check(x & y == y & x, "x & y != y & x for", x, y)
check(x | y == y | x, "x | y != y | x for", x, y)
check(x ^ y == y ^ x, "x ^ y != y ^ x for", x, y)
check(x ^ y ^ x == y, "x ^ y ^ x != y for", x, y)
check(x & y == ~(~x | ~y), "x & y != ~(~x | ~y) for", x, y)
check(x | y == ~(~x & ~y), "x | y != ~(~x & ~y) for", x, y)
check(x ^ y == (x | y) & ~(x & y),
"x ^ y != (x | y) & ~(x & y) for", x, y)
    check(x ^ y == (x & ~y) | (~x & y),
          "x ^ y != (x & ~y) | (~x & y) for", x, y)
    check(x ^ y == (x | y) & (~x | ~y),
          "x ^ y != (x | y) & (~x | ~y) for", x, y)
def test_bitop_identities_3(x, y, z):
check((x & y) & z == x & (y & z),
"(x & y) & z != x & (y & z) for", x, y, z)
check((x | y) | z == x | (y | z),
"(x | y) | z != x | (y | z) for", x, y, z)
check((x ^ y) ^ z == x ^ (y ^ z),
"(x ^ y) ^ z != x ^ (y ^ z) for", x, y, z)
check(x & (y | z) == (x & y) | (x & z),
"x & (y | z) != (x & y) | (x & z) for", x, y, z)
check(x | (y & z) == (x | y) & (x | z),
"x | (y & z) != (x | y) & (x | z) for", x, y, z)
def test_bitop_identities(maxdigits=MAXDIGITS):
print "long bit-operation identities"
for x in special:
test_bitop_identities_1(x)
digits = range(1, maxdigits+1)
for lenx in digits:
x = getran(lenx)
test_bitop_identities_1(x)
for leny in digits:
y = getran(leny)
test_bitop_identities_2(x, y)
test_bitop_identities_3(x, y, getran((lenx + leny)/2))
# ------------------------------------------------- hex oct repr str atol
def slow_format(x, base):
if (x, base) == (0, 8):
# this is an oddball!
return "0L"
digits = []
sign = 0
if x < 0:
sign, x = 1, -x
while x:
x, r = divmod(x, base)
digits.append(int(r))
digits.reverse()
digits = digits or [0]
return '-'[:sign] + \
{8: '0', 10: '', 16: '0x'}[base] + \
join(map(lambda i: "0123456789ABCDEF"[i], digits), '') + \
"L"
def test_format_1(x):
from string import atol
for base, mapper in (8, oct), (10, repr), (16, hex):
got = mapper(x)
expected = slow_format(x, base)
check(got == expected, mapper.__name__, "returned",
got, "but expected", expected, "for", x)
check(atol(got, 0) == x, 'atol("%s", 0) !=' % got, x)
# str() has to be checked a little differently since there's no
# trailing "L"
got = str(x)
expected = slow_format(x, 10)[:-1]
    check(got == expected, "str", "returned",
          got, "but expected", expected, "for", x)
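# Two hand-checked slow_format values (both inputs occur in `special` above):
#   slow_format(255L, 16) == '0xFFL'        slow_format(-8L, 8) == '-010L'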
def test_format(maxdigits=MAXDIGITS):
print "long str/hex/oct/atol"
for x in special:
test_format_1(x)
for i in range(10):
for lenx in range(1, maxdigits+1):
x = getran(lenx)
test_format_1(x)
# ----------------------------------------------------------------- misc
def test_misc(maxdigits=MAXDIGITS):
print "long miscellaneous operations"
import sys
# check the extremes in int<->long conversion
hugepos = sys.maxint
hugeneg = -hugepos - 1
hugepos_aslong = long(hugepos)
hugeneg_aslong = long(hugeneg)
check(hugepos == hugepos_aslong, "long(sys.maxint) != sys.maxint")
check(hugeneg == hugeneg_aslong,
"long(-sys.maxint-1) != -sys.maxint-1")
# long -> int should not fail for hugepos_aslong or hugeneg_aslong
try:
check(int(hugepos_aslong) == hugepos,
"converting sys.maxint to long and back to int fails")
except OverflowError:
raise TestFailed, "int(long(sys.maxint)) overflowed!"
try:
check(int(hugeneg_aslong) == hugeneg,
"converting -sys.maxint-1 to long and back to int fails")
except OverflowError:
raise TestFailed, "int(long(-sys.maxint-1)) overflowed!"
# but long -> int should overflow for hugepos+1 and hugeneg-1
x = hugepos_aslong + 1
try:
int(x)
raise ValueError
except OverflowError:
pass
except:
raise TestFailed, "int(long(sys.maxint) + 1) didn't overflow"
x = hugeneg_aslong - 1
try:
int(x)
raise ValueError
except OverflowError:
pass
except:
raise TestFailed, "int(long(-sys.maxint-1) - 1) didn't overflow"
# ---------------------------------------------------------------- do it
test_division()
test_bitop_identities()
test_format()
test_misc()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.1/Lib/test/test_long.py
|
Python
|
mit
| 8,805
|
from django.views.generic import TemplateView
# All todos view
class Home( TemplateView ):
# Set the view template
template_name = 'index.html'
|
pombredanne/todomvc-django
|
todo/views.py
|
Python
|
mit
| 148
|
"""A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords",
"digits", "hexdigits", "octdigits", "printable", "punctuation",
"whitespace", "Formatter", "Template"]
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
"""capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.
"""
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
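# Illustration (not part of the module API): with sep omitted, whitespace
# runs collapse, so capwords('  aBc   dEf ') == 'Abc Def'; with an explicit
# sep it is preserved: capwords('a-b-c', '-') == 'A-B-C'.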
####################################################################
import re as _re
from collections import ChainMap as _ChainMap
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
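# Sketch of how the four groups above fire for the default Template
# (delimiter '$'): '$$' matches 'escaped', '$name' matches 'named',
# '${name}' matches 'braced', and a bare '$' before anything else falls
# through to 'invalid'.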
class Template(metaclass=_TemplateMetaclass):
"""A string class for supporting $-substitutions."""
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
flags = _re.IGNORECASE
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(keepends=True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(*args, **kws):
if not args:
raise TypeError("descriptor 'substitute' of 'Template' object "
"needs an argument")
        self, *args = args # allow the "self" keyword to be passed
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(*args, **kws):
if not args:
raise TypeError("descriptor 'safe_substitute' of 'Template' object "
"needs an argument")
        self, *args = args # allow the "self" keyword to be passed
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named') or mo.group('braced')
if named is not None:
try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    # characters.
return '%s' % (mapping[named],)
except KeyError:
return mo.group()
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return mo.group()
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
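    # Usage sketch (hypothetical strings, for illustration only):
    #   Template('$who likes $what').substitute(who='tim', what='kung pao')
    #     -> 'tim likes kung pao'
    #   Template('$who likes $what').safe_substitute(who='tim')
    #     -> 'tim likes $what'   (missing keys are left in place)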
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str.
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split
class Formatter:
def format(*args, **kwargs):
if not args:
raise TypeError("descriptor 'format' of 'Formatter' object "
"needs an argument")
        self, *args = args # allow the "self" keyword to be passed
        try:
            format_string, *args = args # allow the "format_string" keyword to be passed
except ValueError:
if 'format_string' in kwargs:
format_string = kwargs.pop('format_string')
import warnings
warnings.warn("Passing 'format_string' as keyword argument is "
"deprecated", DeprecationWarning, stacklevel=2)
else:
raise TypeError("format() missing 1 required positional "
"argument: 'format_string'") from None
return self.vformat(format_string, args, kwargs)
def vformat(self, format_string, args, kwargs):
used_args = set()
result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(used_args, args, kwargs)
return result
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth,
auto_arg_index=0):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# handle arg indexing when empty field_names are given.
if field_name == '':
if auto_arg_index is False:
raise ValueError('cannot switch from manual field '
'specification to automatic field '
'numbering')
field_name = str(auto_arg_index)
auto_arg_index += 1
elif field_name.isdigit():
if auto_arg_index:
raise ValueError('cannot switch from manual field '
'specification to automatic field '
'numbering')
# disable auto arg incrementing, if it gets
# used later on, then an exception will be raised
auto_arg_index = False
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# expand the format spec, if needed
format_spec, auto_arg_index = self._vformat(
format_spec, args, kwargs,
used_args, recursion_depth-1,
auto_arg_index=auto_arg_index)
# format the object and append to the result
result.append(self.format_field(obj, format_spec))
return ''.join(result), auto_arg_index
def get_value(self, key, args, kwargs):
if isinstance(key, int):
return args[key]
else:
return kwargs[key]
def check_unused_args(self, used_args, args, kwargs):
pass
def format_field(self, value, format_spec):
return format(value, format_spec)
def convert_field(self, value, conversion):
# do any conversion on the resulting object
if conversion is None:
return value
elif conversion == 's':
return str(value)
elif conversion == 'r':
return repr(value)
elif conversion == 'a':
return ascii(value)
raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
# literal_text can be zero length
# field_name can be None, in which case there's no
# object to format and output
# if field_name is not None, it is looked up, formatted
# with format_spec and conversion and then used
def parse(self, format_string):
return _string.formatter_parser(format_string)
# given a field_name, find the object it references.
# field_name: the field being looked up, e.g. "0.name"
# or "lookup[3]"
# used_args: a set of which args have been used
# args, kwargs: as passed in to vformat
def get_field(self, field_name, args, kwargs):
first, rest = _string.formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
# loop through the rest of the field_name, doing
# getattr or getitem as needed
for is_attr, i in rest:
if is_attr:
obj = getattr(obj, i)
else:
obj = obj[i]
return obj, first
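    # Usage sketch (illustrative values only): get_field drives lookups like
    #   Formatter().format('{0.imag} / {1[0]!r:>6}', 1j, ['x'])
    # where '0.imag' resolves an attribute and '1[0]' an index, giving
    # "1.0 /    'x'".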
|
jameswatt2008/jameswatt2008.github.io
|
python/Python核心编程/网络编程/截图和代码/概述、SOCKET/多进程copy文件/test/string.py
|
Python
|
gpl-2.0
| 11,854
|
from base import *
DIR = "headerop_add2"
HEADERS = [("X-What-Rocks", "Cherokee does"),
("X-What-Sucks", "Failed QA tests"),
("X-What-Is-It", "A successful test")]
CONF = """
vserver!1!rule!2560!match = directory
vserver!1!rule!2560!match!directory = /%(DIR)s
vserver!1!rule!2560!handler = dirlist
"""
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "Header Ops: Add multiple headers"
self.request = "GET /%s/ HTTP/1.0\r\n" %(DIR)
self.expected_error = 200
self.conf = CONF%(globals())
n = 2
for h,v in HEADERS:
self.conf += "vserver!1!rule!2560!header_op!%d!type = add\n" %(n)
self.conf += "vserver!1!rule!2560!header_op!%d!header = %s\n" %(n, h)
self.conf += "vserver!1!rule!2560!header_op!%d!value = %s\n" %(n, v)
n += 1
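        # For example, the first pass (n == 2) appends:
        #   vserver!1!rule!2560!header_op!2!type = add
        #   vserver!1!rule!2560!header_op!2!header = X-What-Rocks
        #   vserver!1!rule!2560!header_op!2!value = Cherokee does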
def CustomTest (self):
header = self.reply[:self.reply.find("\r\n\r\n")+2]
for h,v in HEADERS:
if not "%s: %s\r\n" %(h,v) in header:
return -1
return 0
def Prepare (self, www):
self.Mkdir (www, DIR)
|
cherokee/webserver
|
qa/256-header_op-add2.py
|
Python
|
gpl-2.0
| 1,201
|
"""Add theme to config
Revision ID: 58ee75910929
Revises: 1c22ceb384a7
Create Date: 2015-08-28 15:15:47.971807
"""
# revision identifiers, used by Alembic.
revision = '58ee75910929'
down_revision = '1c22ceb384a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("INSERT INTO config (category, key, value, description) VALUES ('general', 'theme', '\"zkpylons\"', 'The enabled theme to use. Should match the theme folder name (requires a server restart to take effect)')")
def downgrade():
op.execute("DELETE FROM config WHERE category='general' AND key='theme'")
|
iseppi/zookeepr
|
alembic/versions/20_58ee75910929_add_theme_to_config_.py
|
Python
|
gpl-2.0
| 603
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.modules import trakt
from resources.lib.modules import cleantitle
from resources.lib.modules import cleangenre
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import playcount
from resources.lib.modules import workers
from resources.lib.modules import views
from resources.lib.modules import utils
import os,sys,re,json,zipfile,StringIO,urllib,urllib2,urlparse,datetime
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict()
action = params.get('action')
control.moderator()
class seasons:
def __init__(self):
self.list = []
self.lang = control.apiLanguage()['tvdb']
self.showunaired = control.setting('showunaired') or 'true'
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.tvdb_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='
self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key.decode('base64'), '%s', '%s')
self.tvdb_by_imdb = 'http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s'
self.tvdb_by_query = 'http://thetvdb.com/api/GetSeries.php?seriesname=%s'
self.tvdb_image = 'http://thetvdb.com/banners/'
self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
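        # e.g. self.tvdb_info_link % ('76290', 'en') expands to
        # http://thetvdb.com/api/<decoded key>/series/76290/all/en.zip
        # (the series id is purely illustrative)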
def get(self, tvshowtitle, year, imdb, tvdb, idx=True, create_directory=True):
if control.window.getProperty('PseudoTVRunning') == 'True':
return episodes().get(tvshowtitle, year, imdb, tvdb)
if idx == True:
self.list = cache.get(self.tvdb_list, 24, tvshowtitle, year, imdb, tvdb, self.lang)
if create_directory == True: self.seasonDirectory(self.list)
return self.list
else:
self.list = self.tvdb_list(tvshowtitle, year, imdb, tvdb, 'en')
return self.list
def tvdb_list(self, tvshowtitle, year, imdb, tvdb, lang, limit=''):
try:
if imdb == '0':
try:
imdb = trakt.SearchTVShow(tvshowtitle, year, full=False)[0]
imdb = imdb.get('show', '0')
imdb = imdb.get('ids', {}).get('imdb', '0')
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
if not imdb: imdb = '0'
except:
imdb = '0'
if tvdb == '0' and not imdb == '0':
url = self.tvdb_by_imdb % imdb
result = client.request(url, timeout='10')
try: tvdb = client.parseDOM(result, 'seriesid')[0]
except: tvdb = '0'
try: name = client.parseDOM(result, 'SeriesName')[0]
except: name = '0'
dupe = re.compile('[***]Duplicate (\d*)[***]').findall(name)
if len(dupe) > 0: tvdb = str(dupe[0])
if tvdb == '': tvdb = '0'
if tvdb == '0':
url = self.tvdb_by_query % (urllib.quote_plus(tvshowtitle))
years = [str(year), str(int(year)+1), str(int(year)-1)]
tvdb = client.request(url, timeout='10')
tvdb = re.sub(r'[^\x00-\x7F]+', '', tvdb)
tvdb = client.replaceHTMLCodes(tvdb)
tvdb = client.parseDOM(tvdb, 'Series')
tvdb = [(x, client.parseDOM(x, 'SeriesName'), client.parseDOM(x, 'FirstAired')) for x in tvdb]
tvdb = [(x, x[1][0], x[2][0]) for x in tvdb if len(x[1]) > 0 and len(x[2]) > 0]
tvdb = [x for x in tvdb if cleantitle.get(tvshowtitle) == cleantitle.get(x[1])]
tvdb = [x[0][0] for x in tvdb if any(y in x[2] for y in years)][0]
tvdb = client.parseDOM(tvdb, 'seriesid')[0]
if tvdb == '': tvdb = '0'
except:
return
try:
if tvdb == '0': return
url = self.tvdb_info_link % (tvdb, 'en')
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % 'en')
artwork = zip.read('banners.xml')
zip.close()
dupe = client.parseDOM(result, 'SeriesName')[0]
dupe = re.compile('[***]Duplicate (\d*)[***]').findall(dupe)
if len(dupe) > 0:
tvdb = str(dupe[0]).encode('utf-8')
url = self.tvdb_info_link % (tvdb, 'en')
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % 'en')
artwork = zip.read('banners.xml')
zip.close()
if not lang == 'en':
url = self.tvdb_info_link % (tvdb, lang)
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result2 = zip.read('%s.xml' % lang)
zip.close()
else:
result2 = result
artwork = artwork.split('<Banner>')
artwork = [i for i in artwork if '<Language>en</Language>' in i and '<BannerType>season</BannerType>' in i]
artwork = [i for i in artwork if not 'seasonswide' in re.findall('<BannerPath>(.+?)</BannerPath>', i)[0]]
result = result.split('<Episode>')
result2 = result2.split('<Episode>')
item = result[0] ; item2 = result2[0]
episodes = [i for i in result if '<EpisodeNumber>' in i]
episodes = [i for i in episodes if not '<SeasonNumber>0</SeasonNumber>' in i]
episodes = [i for i in episodes if not '<EpisodeNumber>0</EpisodeNumber>' in i]
seasons = [i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i]
locals = [i for i in result2 if '<EpisodeNumber>' in i]
result = '' ; result2 = ''
if limit == '':
episodes = []
elif limit == '-1':
seasons = []
else:
episodes = [i for i in episodes if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i]
seasons = []
try: poster = client.parseDOM(item, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
try: status = client.parseDOM(item, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
try: studio = client.parseDOM(item, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: cast = client.parseDOM(item, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: label = client.parseDOM(item2, 'SeriesName')[0]
except: label = '0'
label = client.replaceHTMLCodes(label)
label = label.encode('utf-8')
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
unaired = ''
except:
pass
for item in seasons:
try:
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))):
unaired = 'true'
if self.showunaired != 'true': raise Exception()
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
thumb = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
try: thumb = client.parseDOM(thumb[0], 'BannerPath')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if thumb == '0': thumb = poster
self.list.append({'season': season, 'tvshowtitle': tvshowtitle, 'label': label, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb, 'unaired': unaired})
except:
pass
for item in episodes:
try:
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))):
unaired = 'true'
if self.showunaired != 'true': raise Exception()
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try:
local = client.parseDOM(item, 'id')[0]
local = [x for x in locals if '<id>%s</id>' % str(local) in x][0]
except:
local = item
label = client.parseDOM(local, 'EpisodeName')[0]
if label == '': label = '0'
label = client.replaceHTMLCodes(label)
label = label.encode('utf-8')
try: episodeplot = client.parseDOM(local, 'Overview')[0]
except: episodeplot = ''
if episodeplot == '': episodeplot = '0'
if episodeplot == '0': episodeplot = plot
episodeplot = client.replaceHTMLCodes(episodeplot)
try: episodeplot = episodeplot.encode('utf-8')
except: pass
self.list.append({'title': title, 'label': label, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': episodeplot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb, 'unaired': unaired})
except:
pass
return self.list
def seasonDirectory(self, items):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
try: indicators = playcount.getSeasonIndicators(items[0]['imdb'])
except: pass
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
labelMenu = control.lang(32055).encode('utf-8')
playRandom = control.lang(32535).encode('utf-8')
addToLibrary = control.lang(32551).encode('utf-8')
for i in items:
try:
label = '%s %s' % (labelMenu, i['season'])
try:
if i['unaired'] == 'true':
label = '[COLOR darkred][I]%s[/I][/COLOR]' % label
except:
pass
systitle = sysname = urllib.quote_plus(i['tvshowtitle'])
imdb, tvdb, year, season = i['imdb'], i['tvdb'], i['year'], i['season']
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'code': imdb, 'imdbnumber': imdb, 'imdb_id': imdb})
meta.update({'tvdb_id': tvdb})
meta.update({'mediatype': 'tvshow'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
if not 'duration' in i: meta.update({'duration': '60'})
elif i['duration'] == '0': meta.update({'duration': '60'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
try: meta.update({'tvshowtitle': i['label']})
except: pass
try:
if season in indicators: meta.update({'playcount': 1, 'overlay': 7})
else: meta.update({'playcount': 0, 'overlay': 6})
except:
pass
url = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s&season=%s' % (sysaddon, systitle, year, imdb, tvdb, season)
cm = []
cm.append((playRandom, 'RunPlugin(%s?action=random&rtype=episode&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s&season=%s)' % (sysaddon, urllib.quote_plus(systitle), urllib.quote_plus(year), urllib.quote_plus(imdb), urllib.quote_plus(tvdb), urllib.quote_plus(season))))
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
cm.append((watchedMenu, 'RunPlugin(%s?action=tvPlaycount&name=%s&imdb=%s&tvdb=%s&season=%s&query=7)' % (sysaddon, systitle, imdb, tvdb, season)))
cm.append((unwatchedMenu, 'RunPlugin(%s?action=tvPlaycount&name=%s&imdb=%s&tvdb=%s&season=%s&query=6)' % (sysaddon, systitle, imdb, tvdb, season)))
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&tvdb=%s&content=tvshow)' % (sysaddon, sysname, tvdb)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
cm.append((addToLibrary, 'RunPlugin(%s?action=tvshowToLibrary&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s)' % (sysaddon, systitle, year, imdb, tvdb)))
item = control.item(label=label)
art = {}
if 'thumb' in i and not i['thumb'] == '0':
art.update({'icon': i['thumb'], 'thumb': i['thumb'], 'poster': i['thumb']})
elif 'poster' in i and not i['poster'] == '0':
art.update({'icon': i['poster'], 'thumb': i['poster'], 'poster': i['poster']})
else:
art.update({'icon': addonPoster, 'thumb': addonPoster, 'poster': addonPoster})
if 'banner' in i and not i['banner'] == '0':
art.update({'banner': i['banner']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'banner': i['fanart']})
else:
art.update({'banner': addonBanner})
if settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setInfo(type='Video', infoLabels = meta)
video_streaminfo = {'codec': 'h264'}
item.addStreamInfo('video', video_streaminfo)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
try: control.property(syshandle, 'showplot', items[0]['plot'])
except: pass
control.content(syshandle, 'seasons')
control.directory(syshandle, cacheToDisc=True)
views.setView('seasons', {'skin.estuary': 55, 'skin.confluence': 500})
class episodes:
def __init__(self):
self.list = []
self.trakt_link = 'http://api.trakt.tv'
self.tvmaze_link = 'http://api.tvmaze.com'
self.tvdb_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.trakt_user = control.setting('trakt.user').strip()
self.lang = control.apiLanguage()['tvdb']
self.showunaired = control.setting('showunaired') or 'true'
self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key.decode('base64'), '%s', '%s')
self.tvdb_image = 'http://thetvdb.com/banners/'
self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
self.added_link = 'http://api.tvmaze.com/schedule'
#https://api.trakt.tv/calendars/all/shows/date[30]/31 #use this for new episodes?
#self.mycalendar_link = 'http://api.trakt.tv/calendars/my/shows/date[29]/60/'
self.mycalendar_link = 'http://api.trakt.tv/calendars/my/shows/date[30]/31/' #go back 30 and show all shows aired until tomorrow
self.trakthistory_link = 'http://api.trakt.tv/users/me/history/shows?limit=300'
self.progress_link = 'http://api.trakt.tv/users/me/watched/shows'
self.hiddenprogress_link = 'http://api.trakt.tv/users/hidden/progress_watched?limit=1000&type=show'
self.calendar_link = 'http://api.tvmaze.com/schedule?date=%s'
self.traktlists_link = 'http://api.trakt.tv/users/me/lists'
self.traktlikedlists_link = 'http://api.trakt.tv/users/likes/lists?limit=1000000'
self.traktlist_link = 'http://api.trakt.tv/users/%s/lists/%s/items'
def get(self, tvshowtitle, year, imdb, tvdb, season=None, episode=None, idx=True, create_directory=True):
try:
if idx == True:
if season == None and episode == None:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, '-1')
elif episode == None:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, season)
else:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, '-1')
num = [x for x,y in enumerate(self.list) if y['season'] == str(season) and y['episode'] == str(episode)][-1]
self.list = [y for x,y in enumerate(self.list) if x >= num]
if create_directory == True: self.episodeDirectory(self.list)
return self.list
else:
self.list = seasons().tvdb_list(tvshowtitle, year, imdb, tvdb, 'en', '-1')
return self.list
except:
pass
def calendar(self, url):
try:
try: url = getattr(self, url + '_link')
except: pass
if self.trakt_link in url and url == self.progress_link:
self.blist = cache.get(self.trakt_progress_list, 720, url, self.trakt_user, self.lang)
self.list = []
self.list = cache.get(self.trakt_progress_list, 0, url, self.trakt_user, self.lang)
elif self.trakt_link in url and url == self.mycalendar_link:
self.blist = cache.get(self.trakt_episodes_list, 720, url, self.trakt_user, self.lang)
self.list = []
self.list = cache.get(self.trakt_episodes_list, 0, url, self.trakt_user, self.lang)
elif self.trakt_link in url and '/users/' in url:
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
self.list = self.list[::-1]
elif self.trakt_link in url:
self.list = cache.get(self.trakt_list, 1, url, self.trakt_user)
elif self.tvmaze_link in url and url == self.added_link:
urls = [i['url'] for i in self.calendars(idx=False)][:5]
self.list = []
for url in urls:
self.list += cache.get(self.tvmaze_list, 720, url, True)
elif self.tvmaze_link in url:
self.list = cache.get(self.tvmaze_list, 1, url, False)
self.episodeDirectory(self.list)
return self.list
except:
pass
def widget(self):
if trakt.getTraktIndicatorsInfo() == True:
setting = control.setting('tv.widget.alt')
else:
setting = control.setting('tv.widget')
if setting == '2':
self.calendar(self.progress_link)
elif setting == '3':
self.calendar(self.mycalendar_link)
else:
self.calendar(self.added_link)
def calendars(self, idx=True):
m = control.lang(32060).encode('utf-8').split('|')
try: months = [(m[0], 'January'), (m[1], 'February'), (m[2], 'March'), (m[3], 'April'), (m[4], 'May'), (m[5], 'June'), (m[6], 'July'), (m[7], 'August'), (m[8], 'September'), (m[9], 'October'), (m[10], 'November'), (m[11], 'December')]
except: months = []
d = control.lang(32061).encode('utf-8').split('|')
try: days = [(d[0], 'Monday'), (d[1], 'Tuesday'), (d[2], 'Wednesday'), (d[3], 'Thursday'), (d[4], 'Friday'), (d[5], 'Saturday'), (d[6], 'Sunday')]
except: days = []
for i in range(0, 30):
try:
name = (self.datetime - datetime.timedelta(days = i))
name = (control.lang(32062) % (name.strftime('%A'), name.strftime('%d %B'))).encode('utf-8')
for m in months: name = name.replace(m[1], m[0])
for d in days: name = name.replace(d[1], d[0])
try: name = name.encode('utf-8')
except: pass
url = self.calendar_link % (self.datetime - datetime.timedelta(days = i)).strftime('%Y-%m-%d')
self.list.append({'name': name, 'url': url, 'image': 'calendar.png', 'action': 'calendar'})
except:
pass
if idx == True: self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
activity = trakt.getActivity()
except:
pass
try:
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
except:
pass
try:
self.list = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'calendar'})
self.addDirectory(self.list, queue=True)
return self.list
def trakt_list(self, url, user):
try:
for i in re.findall('date\[(\d+)\]', url):
url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))
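            # e.g. with a hypothetical run date of 2017-06-30, 'date[30]/31'
            # in the URL becomes '2017-05-31/31'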
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
itemlist = []
items = trakt.getTraktAsJson(u)
except:
return
for item in items:
try:
title = item['episode']['title']
if title == None or title == '': raise Exception()
title = client.replaceHTMLCodes(title)
season = item['episode']['season']
season = re.sub('[^0-9]', '', '%01d' % int(season))
if season == '0': raise Exception()
episode = item['episode']['number']
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
if episode == '0': raise Exception()
tvshowtitle = item['show']['title']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
year = item['show']['year']
year = re.sub('[^0-9]', '', str(year))
imdb = item['show']['ids']['imdb']
if imdb == None or imdb == '': imdb = '0'
else: imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
tvdb = item['show']['ids']['tvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
premiered = item['episode']['first_aired']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
studio = item['show']['network']
if studio == None: studio = '0'
genre = item['show']['genres']
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
try: duration = str(item['show']['runtime'])
except: duration = '0'
if duration == None: duration = '0'
try: rating = str(item['episode']['rating'])
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
try: votes = str(item['show']['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None: votes = '0'
mpaa = item['show']['certification']
if mpaa == None: mpaa = '0'
plot = item['episode']['overview']
if plot == None or plot == '': plot = item['show']['overview']
if plot == None or plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
try:
if self.lang == 'en': raise Exception()
item = trakt.getTVShowTranslation(imdb, lang=self.lang, season=season, episode=episode, full=True)
title = item.get('title') or title
plot = item.get('overview') or plot
tvshowtitle = trakt.getTVShowTranslation(imdb, lang=self.lang) or tvshowtitle
except:
pass
itemlist.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': 'Continuing', 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': '0', 'thumb': '0'})
except:
pass
itemlist = itemlist[::-1]
return itemlist
def trakt_progress_list(self, url, user, lang):
try:
url += '?extended=full'
result = trakt.getTraktAsJson(url)
items = []
except:
return
sortorder = control.setting('prgr.sortorder')
for item in result:
try:
num_1 = 0
for i in range(0, len(item['seasons'])):
if item['seasons'][i]['number'] > 0: num_1 += len(item['seasons'][i]['episodes'])
num_2 = int(item['show']['aired_episodes'])
if num_1 >= num_2: raise Exception()
season = str(item['seasons'][-1]['number'])
episode = [x for x in item['seasons'][-1]['episodes'] if 'number' in x]
episode = sorted(episode, key=lambda x: x['number'])
episode = str(episode[-1]['number'])
tvshowtitle = item['show']['title']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
year = item['show']['year']
year = re.sub('[^0-9]', '', str(year))
if int(year) > int(self.datetime.strftime('%Y')): raise Exception()
imdb = item['show']['ids']['imdb']
if imdb == None or imdb == '': imdb = '0'
tvdb = item['show']['ids']['tvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
last_watched = item['last_watched_at']
if last_watched == None or last_watched == '': last_watched = '0'
items.append({'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'snum': season, 'enum': episode, '_last_watched': last_watched})
except:
pass
try:
result = trakt.getTraktAsJson(self.hiddenprogress_link)
result = [str(i['show']['ids']['tvdb']) for i in result]
items = [i for i in items if not i['tvdb'] in result]
except:
pass
def items_list(i):
try:
item = [x for x in self.blist if x['tvdb'] == i['tvdb'] and x['snum'] == i['snum'] and x['enum'] == i['enum']][0]
item['action'] = 'episodes'
self.list.append(item)
return
except:
pass
try:
url = self.tvdb_info_link % (i['tvdb'], lang)
data = urllib2.urlopen(url, timeout=10).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % lang)
artwork = zip.read('banners.xml')
zip.close()
result = result.split('<Episode>')
item = [x for x in result if '<EpisodeNumber>' in x]
item2 = result[0]
num = [x for x,y in enumerate(item) if re.compile('<SeasonNumber>(.+?)</SeasonNumber>').findall(y)[0] == str(i['snum']) and re.compile('<EpisodeNumber>(.+?)</EpisodeNumber>').findall(y)[0] == str(i['enum'])][-1]
item = [y for x,y in enumerate(item) if x > num][0]
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
try: status = client.parseDOM(item2, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
unaired = ''
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))):
unaired = 'true'
if self.showunaired != 'true': raise Exception()
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
tvshowtitle = i['tvshowtitle']
imdb, tvdb = i['imdb'], i['tvdb']
year = i['year']
try: year = year.encode('utf-8')
except: pass
try: poster = client.parseDOM(item2, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item2, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item2, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: studio = client.parseDOM(item2, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item2, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item2, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item2, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try: cast = client.parseDOM(item2, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: plot = client.parseDOM(item, 'Overview')[0]
except: plot = ''
if plot == '':
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb, 'snum': i['snum'], 'enum': i['enum'], 'action': 'episodes', 'unaired': unaired, '_last_watched': i['_last_watched'], '_sort_key': max(i['_last_watched'],premiered)})
except:
pass
items = items[:100]
threads = []
for i in items: threads.append(workers.Thread(items_list, i))
[i.start() for i in threads]
[i.join() for i in threads]
try:
if sortorder == '0':
self.list = sorted(self.list, key=lambda k: k['premiered'], reverse=True)
else:
self.list = sorted(self.list, key=lambda k: k['_sort_key'], reverse=True)
except: pass
return self.list
def trakt_episodes_list(self, url, user, lang):
items = self.trakt_list(url, user)
def items_list(i):
try:
item = [x for x in self.blist if x['tvdb'] == i['tvdb'] and x['season'] == i['season'] and x['episode'] == i['episode']][0]
if item['poster'] == '0': raise Exception()
self.list.append(item)
return
except:
pass
try:
url = self.tvdb_info_link % (i['tvdb'], lang)
data = urllib2.urlopen(url, timeout=10).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % lang)
artwork = zip.read('banners.xml')
zip.close()
result = result.split('<Episode>')
item = [(re.findall('<SeasonNumber>%01d</SeasonNumber>' % int(i['season']), x), re.findall('<EpisodeNumber>%01d</EpisodeNumber>' % int(i['episode']), x), x) for x in result]
item = [x[2] for x in item if len(x[0]) > 0 and len(x[1]) > 0][0]
item2 = result[0]
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
try: status = client.parseDOM(item2, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
tvshowtitle = i['tvshowtitle']
imdb, tvdb = i['imdb'], i['tvdb']
year = i['year']
try: year = year.encode('utf-8')
except: pass
try: poster = client.parseDOM(item2, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item2, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item2, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: studio = client.parseDOM(item2, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item2, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item2, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item2, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try: cast = client.parseDOM(item2, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: plot = client.parseDOM(item, 'Overview')[0]
except: plot = ''
if plot == '':
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
except:
pass
items = items[:100]
threads = []
for i in items: threads.append(workers.Thread(items_list, i))
[i.start() for i in threads]
[i.join() for i in threads]
return self.list
def trakt_user_list(self, url, user):
try:
items = trakt.getTraktAsJson(url)
except:
pass
for item in items:
try:
try: name = item['list']['name']
except: name = item['name']
name = client.replaceHTMLCodes(name)
try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
except: url = ('me', item['ids']['slug'])
url = self.traktlist_link % url
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: utils.title_key(k['name']))
return self.list
def tvmaze_list(self, url, limit):
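        # Build an episode list from a TVMaze schedule feed, keeping only
        # English-language shows (and only scripted ones when limit is set).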
try:
result = client.request(url)
itemlist = []
items = json.loads(result)
except:
return
for item in items:
try:
if not 'english' in item['show']['language'].lower(): raise Exception()
if limit == True and not 'scripted' in item['show']['type'].lower(): raise Exception()
title = item['name']
if title == None or title == '': raise Exception()
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = item['season']
season = re.sub('[^0-9]', '', '%01d' % int(season))
if season == '0': raise Exception()
season = season.encode('utf-8')
episode = item['number']
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
if episode == '0': raise Exception()
episode = episode.encode('utf-8')
tvshowtitle = item['show']['name']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
tvshowtitle = tvshowtitle.encode('utf-8')
year = item['show']['premiered']
year = re.findall('(\d{4})', year)[0]
year = year.encode('utf-8')
imdb = item['show']['externals']['imdb']
if imdb == None or imdb == '': imdb = '0'
else: imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
tvdb = item['show']['externals']['thetvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
poster = '0'
try: poster = item['show']['image']['original']
except: poster = '0'
if poster == None or poster == '': poster = '0'
poster = poster.encode('utf-8')
try: thumb1 = item['show']['image']['original']
except: thumb1 = '0'
try: thumb2 = item['image']['original']
except: thumb2 = '0'
if thumb2 == None or thumb2 == '0': thumb = thumb1
else: thumb = thumb2
if thumb == None or thumb == '': thumb = '0'
thumb = thumb.encode('utf-8')
premiered = item['airdate']
try: premiered = re.findall('(\d{4}-\d{2}-\d{2})', premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
try: studio = item['show']['network']['name']
except: studio = '0'
if studio == None: studio = '0'
studio = studio.encode('utf-8')
try: genre = item['show']['genres']
except: genre = '0'
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = item['show']['runtime']
except: duration = '0'
if duration == None: duration = '0'
duration = str(duration)
duration = duration.encode('utf-8')
try: rating = item['show']['rating']['average']
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
rating = str(rating)
rating = rating.encode('utf-8')
try: plot = item['show']['summary']
except: plot = '0'
if plot == None: plot = '0'
plot = re.sub('<.+?>|</.+?>|\n', '', plot)
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
itemlist.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': 'Continuing', 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'thumb': thumb})
except:
pass
itemlist = itemlist[::-1]
return itemlist
def episodeDirectory(self, items):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'
indicators = playcount.getTVShowIndicators(refresh=True)
try: multi = [i['tvshowtitle'] for i in items]
except: multi = []
multi = len([x for y,x in enumerate(multi) if x not in multi[:y]])
multi = True if multi > 1 else False
try: sysaction = items[0]['action']
except: sysaction = ''
isFolder = False if not sysaction == 'episodes' else True
playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
tvshowBrowserMenu = control.lang(32071).encode('utf-8')
addToLibrary = control.lang(32551).encode('utf-8')
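        # Build one list item per episode: label, play/browse URL, context
        # menu entries, artwork fallbacks and video info labels.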
for i in items:
try:
if not 'label' in i: i['label'] = i['title']
if i['label'] == '0':
label = '%sx%02d . %s %s' % (i['season'], int(i['episode']), 'Episode', i['episode'])
else:
label = '%sx%02d . %s' % (i['season'], int(i['episode']), i['label'])
if multi == True:
label = '%s - %s' % (i['tvshowtitle'], label)
try:
if i['unaired'] == 'true':
label = '[COLOR darkred][I]%s[/I][/COLOR]' % label
except:
pass
imdb, tvdb, year, season, episode = i['imdb'], i['tvdb'], i['year'], i['season'], i['episode']
systitle = urllib.quote_plus(i['title'])
systvshowtitle = urllib.quote_plus(i['tvshowtitle'])
syspremiered = urllib.quote_plus(i['premiered'])
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'mediatype': 'episode'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, systvshowtitle)})
if not 'duration' in i: meta.update({'duration': '60'})
elif i['duration'] == '0': meta.update({'duration': '60'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
try: meta.update({'year': re.findall('(\d{4})', i['premiered'])[0]})
except: pass
try: meta.update({'title': i['label']})
except: pass
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&premiered=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, tvdb, season, episode, systvshowtitle, syspremiered, sysmeta, self.systime)
sysurl = urllib.quote_plus(url)
path = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&premiered=%s' % (sysaddon, systitle, year, imdb, tvdb, season, episode, systvshowtitle, syspremiered)
if isFolder == True:
url = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s' % (sysaddon, systvshowtitle, year, imdb, tvdb, season, episode)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
if multi == True:
cm.append((tvshowBrowserMenu, 'Container.Update(%s?action=seasons&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s,return)' % (sysaddon, systvshowtitle, year, imdb, tvdb)))
try:
overlay = int(playcount.getEpisodeOverlay(indicators, imdb, tvdb, season, episode))
if overlay == 7:
cm.append((unwatchedMenu, 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=6)' % (sysaddon, imdb, tvdb, season, episode)))
meta.update({'playcount': 1, 'overlay': 7})
else:
cm.append((watchedMenu, 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=7)' % (sysaddon, imdb, tvdb, season, episode)))
meta.update({'playcount': 0, 'overlay': 6})
except:
pass
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&tvdb=%s&content=tvshow)' % (sysaddon, systvshowtitle, tvdb)))
if isFolder == False:
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
cm.append((addToLibrary, 'RunPlugin(%s?action=tvshowToLibrary&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s)' % (sysaddon, systvshowtitle, year, imdb, tvdb)))
item = control.item(label=label)
art = {}
if 'poster' in i and not i['poster'] == '0':
art.update({'poster': i['poster'], 'tvshow.poster': i['poster'], 'season.poster': i['poster']})
else:
art.update({'poster': addonPoster})
if 'thumb' in i and not i['thumb'] == '0':
art.update({'icon': i['thumb'], 'thumb': i['thumb']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'icon': i['fanart'], 'thumb': i['fanart']})
elif 'poster' in i and not i['poster'] == '0':
art.update({'icon': i['poster'], 'thumb': i['poster']})
else:
art.update({'icon': addonFanart, 'thumb': addonFanart})
if 'banner' in i and not i['banner'] == '0':
art.update({'banner': i['banner']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'banner': i['fanart']})
else:
art.update({'banner': addonBanner})
if settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setProperty('IsPlayable', isPlayable)
item.setInfo(type='Video', infoLabels = meta)
video_streaminfo = {'codec': 'h264'}
item.addStreamInfo('video', video_streaminfo)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder)
except:
pass
control.content(syshandle, 'episodes')
control.directory(syshandle, cacheToDisc=True)
views.setView('episodes', {'skin.estuary': 55, 'skin.confluence': 504})
def addDirectory(self, items, queue=False):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonFanart, addonThumb, artPath = control.addonFanart(), control.addonThumb(), control.artPath()
queueMenu = control.lang(32065).encode('utf-8')
for i in items:
try:
name = i['name']
if i['image'].startswith('http'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
if queue == True:
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
item = control.item(label=name)
item.setArt({'icon': thumb, 'thumb': thumb})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
item.addContextMenuItems(cm)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
control.content(syshandle, 'addons')
control.directory(syshandle, cacheToDisc=True)
|
mrquim/mrquimrepo
|
script.module.exodus/lib/resources/lib/indexers/episodes.py
|
Python
|
gpl-2.0
| 67,539
|
from .main import IMDB
def start():
return IMDB()
config = [{
'name': 'imdb',
'groups': [
{
'tab': 'automation',
'name': 'imdb_automation',
'label': 'IMDB',
            'description': 'From any <strong>public</strong> IMDB watchlists. The URL should be the RSS link.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_urls_use',
'label': 'Use',
},
{
'name': 'automation_urls',
'label': 'url',
'type': 'combined',
'combine': ['automation_urls_use', 'automation_urls'],
},
],
},
],
}]
|
darren-rogan/CouchPotatoServer
|
couchpotato/core/providers/automation/imdb/__init__.py
|
Python
|
gpl-3.0
| 907
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: systemd
author:
- "Ansible Core Team"
version_added: "2.2"
short_description: Manage services.
description:
- Controls systemd services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['unit', 'service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
            - Whether the service should start on boot. B(At least one of state and enabled is required.)
masked:
required: false
choices: [ "yes", "no" ]
default: null
description:
            - Whether the unit should be masked or not. A masked unit is impossible to start.
daemon_reload:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run daemon-reload before doing any other operations, to make sure systemd has read any changes.
aliases: ['daemon-reload']
user:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run systemctl talking to the service manager of the calling user, rather than the service manager
of the system.
notes:
- One option other than name is required.
requirements:
- A system managed by systemd
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- systemd: state=started name=httpd
# Example action to stop service cron on debian, if running
- systemd: name=cron state=stopped
# Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
- systemd: state=restarted daemon_reload=yes name=crond
# Example action to reload service httpd, in all cases
- systemd: name=httpd state=reloaded
# Example action to enable service httpd and ensure it is not masked
- systemd:
name: httpd
enabled: yes
masked: no
# Example action to enable a timer for dnf-automatic
- systemd:
name: dnf-automatic.timer
state: started
enabled: True
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
'''
import os
import glob
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
# ===========================================
# Main control flow
def main():
# init
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['unit', 'service']),
state = dict(choices=[ 'started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled = dict(type='bool'),
masked = dict(type='bool'),
daemon_reload= dict(type='bool', default=False, aliases=['daemon-reload']),
user= dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
)
# initialize
systemctl = module.get_bin_path('systemctl')
if module.params['user']:
systemctl = systemctl + " --user"
unit = module.params['name']
rc = 0
out = err = ''
result = {
'name': unit,
'changed': False,
'status': {},
}
# Run daemon-reload first, if requested
if module.params['daemon_reload']:
(rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
if rc != 0:
module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
#TODO: check if service exists
(rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
if rc != 0:
module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, unit, err))
# load return of systemctl show into dictionary for easy access and return
k = None
multival = []
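    # Example of the multiline case handled below (taken from the RETURN
    # sample above):
    #   ExecReload={ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ... }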
for line in to_native(out).split('\n'): # systemd can have multiline values delimited with {}
if line.strip():
if k is None:
if '=' in line:
k,v = line.split('=', 1)
if v.lstrip().startswith('{'):
if not v.rstrip().endswith('}'):
multival.append(line)
continue
result['status'][k] = v.strip()
k = None
else:
if line.rstrip().endswith('}'):
result['status'][k] = '\n'.join(multival).strip()
multival = []
k = None
else:
multival.append(line)
if 'LoadState' in result['status'] and result['status']['LoadState'] == 'not-found':
        module.fail_json(msg='Could not find the requested service %r: %s' % (unit, err))
elif 'LoadError' in result['status']:
module.fail_json(msg="Failed to get the service status '%s': %s" % (unit, result['status']['LoadError']))
# mask/unmask the service, if requested
if module.params['masked'] is not None:
masked = (result['status']['LoadState'] == 'masked')
# Change?
if masked != module.params['masked']:
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
# do we need to enable the service?
enabled = False
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
# check systemctl result or if it is a init script
if rc == 0:
enabled = True
elif rc == 1:
# Deals with init scripts
# if both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
initscript = '/etc/init.d/' + unit
if os.path.exists(initscript) and os.access(initscript, os.X_OK) and \
(not out.startswith('disabled') or bool(glob.glob('/etc/rc?.d/S??' + unit))):
enabled = True
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
result['enabled'] = not enabled
if module.params['state'] is not None:
# default to desired state
result['state'] = module.params['state']
# What is current service state?
if 'ActiveState' in result['status']:
action = None
if module.params['state'] == 'started':
if result['status']['ActiveState'] != 'active':
action = 'start'
result['changed'] = True
elif module.params['state'] == 'stopped':
if result['status']['ActiveState'] == 'active':
action = 'stop'
result['changed'] = True
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
result['changed'] = True
if action:
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
else:
# this should not happen?
module.fail_json(msg="Service is in unknown state", status=result['status'])
module.exit_json(**result)
if __name__ == '__main__':
main()
|
tedder/ansible-modules-core
|
system/systemd.py
|
Python
|
gpl-3.0
| 14,880
|
# -*- coding: utf-8 -*-
{
'name': 'Booths/Exhibitors Bridge',
'category': 'Marketing/Events',
'version': '1.0',
'summary': 'Event Booths, automatically create a sponsor.',
'description': """
Automatically create a sponsor when renting a booth.
""",
'depends': ['website_event_exhibitor', 'website_event_booth'],
'data': [
'data/event_booth_category_data.xml',
'views/event_booth_category_views.xml',
'views/event_booth_views.xml',
'views/event_booth_registration_templates.xml',
'views/event_booth_templates.xml',
'views/mail_templates.xml'
],
'auto_install': True,
'assets': {
'web.assets_frontend': [
'/website_event_booth_exhibitor/static/src/js/booth_sponsor_details.js',
],
'web.assets_tests': [
'website_event_booth_exhibitor/static/tests/tours/website_event_booth_exhibitor.js',
],
},
'license': 'LGPL-3',
}
|
jeremiahyan/odoo
|
addons/website_event_booth_exhibitor/__manifest__.py
|
Python
|
gpl-3.0
| 974
|
# -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch'])
class View(object):
"""Alternative way to use view functions. A subclass has to implement
:meth:`dispatch_request` which is called with the view arguments from
the URL routing system. If :attr:`methods` is provided the methods
do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
method explicitly::
class MyView(View):
methods = ['GET']
def dispatch_request(self, name):
return 'Hello %s!' % name
app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))
When you want to decorate a pluggable view you will have to either do that
when the view function is created (by wrapping the return value of
:meth:`as_view`) or you can use the :attr:`decorators` attribute::
class SecretView(View):
methods = ['GET']
decorators = [superuser_required]
def dispatch_request(self):
...
The decorators stored in the decorators list are applied one after another
when the view function is created. Note that you can *not* use the class
based decorators since those would decorate the view class and not the
generated view function!
"""
    #: A list of methods this pluggable view can handle.
methods = None
#: The canonical way to decorate class-based views is to decorate the
#: return value of as_view(). However since this moves parts of the
#: logic from the class declaration to the place where it's hooked
    #: into the routing system, this attribute can be used instead.
#:
#: You can place one or more decorators in this list and whenever the
#: view function is created the result is automatically decorated.
#:
#: .. versionadded:: 0.8
decorators = []
def dispatch_request(self):
"""Subclasses have to override this method to implement the
actual view function code. This method is called with all
the arguments from the URL rule.
"""
raise NotImplementedError()
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# we attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
return view
class MethodViewType(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if 'methods' not in d:
methods = set(rv.methods or [])
for key, value in d.iteritems():
if key in http_method_funcs:
methods.add(key.upper())
# if we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the baseclass or another subclass of a base method view
# that does not introduce new methods).
if methods:
rv.methods = sorted(methods)
return rv
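# Illustration: with the metaclass above, a MethodView subclass that defines
# get() and post() automatically ends up with methods == ['GET', 'POST'].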
class MethodView(View):
"""Like a regular class-based view but that dispatches requests to
particular methods. For instance if you implement a method called
    :meth:`get` it means it will respond to ``'GET'`` requests and
the :meth:`dispatch_request` implementation will automatically
forward your request to that. Also :attr:`options` is set for you
automatically::
class CounterAPI(MethodView):
def get(self):
return session.get('counter', 0)
def post(self):
session['counter'] = session.get('counter', 0) + 1
return 'OK'
app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
"""
__metaclass__ = MethodViewType
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# if the request method is HEAD and we don't have a handler for it
# retry with GET
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
return meth(*args, **kwargs)
|
darren-rogan/CouchPotatoServer
|
libs/flask/views.py
|
Python
|
gpl-3.0
| 5,629
|
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Read content from txt file.
'''
import os, re
from calibre import prepare_string_for_xml, isbytestring
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.conversion.preprocess import DocAnalysis
from calibre.utils.cleantext import clean_ascii_chars
HTML_TEMPLATE = u'<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/><title>%s </title></head><body>\n%s\n</body></html>'
def clean_txt(txt):
'''
Run transformations on the text to put it into
consistent state.
'''
if isbytestring(txt):
txt = txt.decode('utf-8', 'replace')
# Strip whitespace from the end of the line. Also replace
# all line breaks with \n.
txt = '\n'.join([line.rstrip() for line in txt.splitlines()])
    # Replace whitespace at the beginning of the line with four spaces.
txt = re.sub('(?m)(?<=^)([ ]{2,}|\t+)(?=.)', ' ' * 4, txt)
# Condense redundant spaces
txt = re.sub('[ ]{2,}', ' ', txt)
# Remove blank space from the beginning and end of the document.
txt = re.sub('^\s+(?=.)', '', txt)
txt = re.sub('(?<=.)\s+$', '', txt)
# Remove excessive line breaks.
txt = re.sub('\n{5,}', '\n\n\n\n', txt)
    # Remove invalid ASCII chars: 0 to 8 and 11-14 to 24
txt = clean_ascii_chars(txt)
return txt
def split_txt(txt, epub_split_size_kb=0):
'''
Ensure there are split points for converting
to EPUB. A misdetected paragraph type can
result in the entire document being one giant
paragraph. In this case the EPUB parser will not
be able to determine where to split the file
    to accommodate the EPUB file size limitation
and will fail.
'''
    # Nothing to do if no split size is given
if epub_split_size_kb > 0:
if isinstance(txt, unicode):
txt = txt.encode('utf-8')
length_byte = len(txt)
        # Calculate the average chunk size for easy splitting as EPUB (+2 as a safety margin)
chunk_size = long(length_byte / (int(length_byte / (epub_split_size_kb * 1024) ) + 2 ))
        # If any chunk exceeds that size, insert break points
if (len(filter(lambda x: len(x) > chunk_size, txt.split('\n\n')))) :
txt = '\n\n'.join([split_string_separator(line, chunk_size)
for line in txt.split('\n\n')])
if isbytestring(txt):
txt = txt.decode('utf-8')
return txt
def convert_basic(txt, title='', epub_split_size_kb=0):
'''
Converts plain text to html by putting all paragraphs in
    <p> tags. It condenses text and retains blank lines when necessary.
Requires paragraphs to be in single line format.
'''
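    # For example, 'Line one\n\nLine two' becomes
    # '<p>Line one</p>\n<p>Line two</p>' wrapped in HTML_TEMPLATE.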
txt = clean_txt(txt)
txt = split_txt(txt, epub_split_size_kb)
lines = []
blank_count = 0
# Split into paragraphs based on having a blank line between text.
for line in txt.split('\n'):
if line.strip():
blank_count = 0
lines.append(u'<p>%s</p>' % prepare_string_for_xml(line.replace('\n', ' ')))
else:
blank_count += 1
if blank_count == 2:
lines.append(u'<p> </p>')
return HTML_TEMPLATE % (title, u'\n'.join(lines))
def convert_markdown(txt, title='', disable_toc=False):
from calibre.ebooks.markdown import markdown
extensions=['footnotes', 'tables']
if not disable_toc:
extensions.append('toc')
md = markdown.Markdown(
extensions,
safe_mode=False)
return HTML_TEMPLATE % (title, md.convert(txt))
def convert_textile(txt, title=''):
from calibre.ebooks.textile import textile
html = textile(txt, encoding='utf-8')
return HTML_TEMPLATE % (title, html)
def normalize_line_endings(txt):
txt = txt.replace('\r\n', '\n')
txt = txt.replace('\r', '\n')
return txt
def separate_paragraphs_single_line(txt):
txt = txt.replace('\n', '\n\n')
return txt
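# A print-formatted paragraph starts with a tab or two or more spaces; the
# substitution below inserts a newline before each such start so paragraph
# detection sees the blocks separately.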
def separate_paragraphs_print_formatted(txt):
txt = re.sub(u'(?miu)^(?P<indent>\t+|[ ]{2,})(?=.)', lambda mo: '\n%s' % mo.group('indent'), txt)
return txt
def separate_hard_scene_breaks(txt):
def sep_break(line):
if len(line.strip()) > 0:
return '\n%s\n' % line
else:
return line
txt = re.sub(u'(?miu)^[ \t-=~\/_]+$', lambda mo: sep_break(mo.group()), txt)
return txt
def block_to_single_line(txt):
txt = re.sub(r'(?<=.)\n(?=.)', ' ', txt)
return txt
def preserve_spaces(txt):
'''
    Replaces runs of multiple spaces with &nbsp; entities.
'''
txt = re.sub('(?P<space>[ ]{2,})', lambda mo: ' ' + (' ' * (len(mo.group('space')) - 1)), txt)
txt = txt.replace('\t', ' ')
return txt
def remove_indents(txt):
'''
Remove whitespace at the beginning of each line.
'''
txt = re.sub('(?miu)^\s+', '', txt)
return txt
def opf_writer(path, opf_name, manifest, spine, mi):
opf = OPFCreator(path, mi)
opf.create_manifest(manifest)
opf.create_spine(spine)
with open(os.path.join(path, opf_name), 'wb') as opffile:
opf.render(opffile)
def split_string_separator(txt, size):
'''
    Splits the text into ``size``-character chunks, inserting \n\n after the
    last full stop in each chunk so EPUB conversion has split points.
'''
if len(txt) > size:
txt = ''.join([re.sub(u'\.(?P<ends>[^.]*)$', '.\n\n\g<ends>',
txt[i:i+size], 1) for i in
xrange(0, len(txt), size)])
return txt
def detect_paragraph_type(txt):
'''
Tries to determine the paragraph type of the document.
block: Paragraphs are separated by a blank line.
single: Each line is a paragraph.
print: Each paragraph starts with a 2+ spaces or a tab
and ends when a new paragraph is reached.
unformatted: most lines have hard line breaks, few/no blank lines or indents
returns block, single, print, unformatted
'''
txt = txt.replace('\r\n', '\n')
txt = txt.replace('\r', '\n')
txt_line_count = len(re.findall('(?mu)^\s*.+$', txt))
# Check for hard line breaks - true if 55% of the doc breaks in the same region
docanalysis = DocAnalysis('txt', txt)
hardbreaks = docanalysis.line_histogram(.55)
if hardbreaks:
# Determine print percentage
tab_line_count = len(re.findall('(?mu)^(\t|\s{2,}).+$', txt))
print_percent = tab_line_count / float(txt_line_count)
# Determine block percentage
empty_line_count = len(re.findall('(?mu)^\s*$', txt))
block_percent = empty_line_count / float(txt_line_count)
# Compare the two types - the type with the larger number of instances wins
# in cases where only one or the other represents the vast majority of the document neither wins
if print_percent >= block_percent:
if .15 <= print_percent <= .75:
return 'print'
elif .15 <= block_percent <= .75:
return 'block'
# Assume unformatted text with hardbreaks if nothing else matches
return 'unformatted'
# return single if hardbreaks is false
return 'single'
def detect_formatting_type(txt):
'''
Tries to determine the formatting of the document.
markdown: Markdown formatting is used.
textile: Textile formatting is used.
heuristic: When none of the above formatting types are
detected heuristic is returned.
'''
# Keep a count of the number of format specific object
# that are found in the text.
markdown_count = 0
textile_count = 0
# Check for markdown
# Headings
markdown_count += len(re.findall('(?mu)^#+', txt))
markdown_count += len(re.findall('(?mu)^=+$', txt))
markdown_count += len(re.findall('(?mu)^-+$', txt))
# Images
markdown_count += len(re.findall('(?u)!\[.*?\](\[|\()', txt))
# Links
markdown_count += len(re.findall('(?u)^|[^!]\[.*?\](\[|\()', txt))
# Check for textile
# Headings
textile_count += len(re.findall(r'(?mu)^h[1-6]\.', txt))
# Block quote.
textile_count += len(re.findall(r'(?mu)^bq\.', txt))
# Images
textile_count += len(re.findall(r'(?mu)(?<=\!)\S+(?=\!)', txt))
# Links
textile_count += len(re.findall(r'"[^"]*":\S+', txt))
# paragraph blocks
textile_count += len(re.findall(r'(?mu)^p(<|<>|=|>)?\. ', txt))
# Decide if either markdown or textile is used in the text
# based on the number of unique formatting elements found.
if markdown_count > 5 or textile_count > 5:
if markdown_count > textile_count:
return 'markdown'
else:
return 'textile'
return 'heuristic'
|
yeyanchao/calibre
|
src/calibre/ebooks/txt/processor.py
|
Python
|
gpl-3.0
| 8,709
|
"""
# lint-amnesty, pylint: disable=django-not-configured
Bookmarks module.
"""
from collections import namedtuple
DEFAULT_FIELDS = [
'id',
'course_id',
'usage_id',
'block_type',
'created',
]
OPTIONAL_FIELDS = [
'display_name',
'path',
]
PathItem = namedtuple('PathItem', ['usage_key', 'display_name'])
|
edx/edx-platform
|
openedx/core/djangoapps/bookmarks/__init__.py
|
Python
|
agpl-3.0
| 390
|
from __future__ import unicode_literals
import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree
from .common import FileDownloader
from .http import HttpFD
from ..utils import (
struct_pack,
struct_unpack,
compat_urlparse,
format_bytes,
encodeFilename,
sanitize_open,
)
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return struct_unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return struct_unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return struct_unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res += char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size-header_end)
def read_asrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
return {
'segment_run': segments,
}
def read_afrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
# time scale
self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({
'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
return {
'fragments': fragments,
}
def read_abst(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
self.read_unsigned_int() # BootstrapinfoVersion
# Profile,Live,Update,Reserved
self.read(1)
# time scale
self.read_unsigned_int()
# CurrentMediaTime
self.read_unsigned_long_long()
# SmpteTimeCodeOffset
self.read_unsigned_long_long()
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
self.read_string()
# DrmData
self.read_string()
# MetaData
self.read_string()
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {
'segments': segments,
'fragments': fragments,
}
def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
# I've only found videos with one segment
segment_run_entry = segment_run_table['segment_run'][0]
n_frags = segment_run_entry[1]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
res.append((1, frag_number))
return res
def write_flv_header(stream, metadata):
"""Writes the FLV header and the metadata to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x05')
stream.write(b'\x00\x00\x00\x09')
# FLV File body
stream.write(b'\x00\x00\x00\x00')
# FLVTAG
# Script data
stream.write(b'\x12')
# Size of the metadata with 3 bytes
stream.write(struct_pack('!L', len(metadata))[1:])
stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
stream.write(metadata)
# Magic numbers extracted from the output files produced by AdobeHDS.php
    # (https://github.com/K-S-V/Scripts)
stream.write(b'\x00\x00\x01\x73')
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class HttpQuietDownloader(HttpFD):
def to_screen(self, *args, **kargs):
pass
class F4mFD(FileDownloader):
"""
A downloader for f4m manifests or AdobeHDS.
"""
def real_download(self, filename, info_dict):
man_url = info_dict['url']
self.to_screen('[download] Downloading f4m manifest')
manifest = self.ydl.urlopen(man_url).read()
self.report_destination(filename)
http_dl = HttpQuietDownloader(self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'test': self.params.get('test', False),
})
doc = etree.fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
boot_info = read_bootstrap_info(bootstrap)
fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
write_flv_header(dest_stream, metadata)
# This dict stores the download progress, it's updated by the progress
# hook
state = {
'downloaded_bytes': 0,
'frag_counter': 0,
}
start = time.time()
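        # Overall progress is the completed-fragment fraction plus the
        # in-flight fragment's share; the total size is estimated from the
        # current fragment's size times the number of fragments remaining.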
def frag_progress_hook(status):
frag_total_bytes = status.get('total_bytes', 0)
estimated_size = (state['downloaded_bytes'] +
(total_frags - state['frag_counter']) * frag_total_bytes)
if status['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_counter'] += 1
progress = self.calc_percent(state['frag_counter'], total_frags)
byte_counter = state['downloaded_bytes']
else:
frag_downloaded_bytes = status['downloaded_bytes']
byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
progress = self.calc_percent(state['frag_counter'], total_frags)
progress += frag_progress / float(total_frags)
eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
self.report_progress(progress, format_bytes(estimated_size),
status.get('speed'), eta)
http_dl.add_progress_hook(frag_progress_hook)
frags_filenames = []
for (seg_i, frag_i) in fragments_list:
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
frag_filename = '%s-%s' % (tmpfilename, name)
success = http_dl.download(frag_filename, {'url': url})
if not success:
return False
with open(frag_filename, 'rb') as down:
down_data = down.read()
reader = FlvReader(down_data)
while True:
_, box_type, box_data = reader.read_box_info()
if box_type == b'mdat':
dest_stream.write(box_data)
break
frags_filenames.append(frag_filename)
dest_stream.close()
self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start)
self.try_rename(tmpfilename, filename)
for frag_file in frags_filenames:
os.remove(frag_file)
fsize = os.path.getsize(encodeFilename(filename))
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
|
huangciyin/youtube-dl
|
youtube_dl/downloader/f4m.py
|
Python
|
unlicense
| 10,503
|
from cinder.exception import *
from cinder.i18n import _
class ProviderMultiVolumeError(CinderException):
msg_fmt = _("volume %(volume_id)s More than one provider_volume are found")
class ProviderMultiSnapshotError(CinderException):
msg_fmt = _("snapshot %(snapshot_id)s More than one provider_snapshot are found")
class ProviderCreateVolumeError(CinderException):
msg_fmt = _("volume %(volume_id)s create request failed,network or provider internal error")
class ProviderCreateSnapshotError(CinderException):
msg_fmt = _("snapshot %(snapshot_id)s create request failed,network or provider internal error")
class ProviderLocationError(CinderException):
msg_fmt = _("provider location error")
class ProviderExportVolumeError(CinderException):
msg_fmt = _("provider export volume error")
class ProviderVolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VgwHostNotFound(NotFound):
message = _("node of %(Vgw_id)s at provider cloud could not be found.")
|
hybrid-storage-dev/cinder-fs-111t-hybrid-cherry
|
volume/drivers/ec2/exception_ex.py
|
Python
|
apache-2.0
| 1,031
|
#!/usr/bin/env python
#
# Copyright 2013 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for depstree."""
__author__ = 'nnaze@google.com (Nathan Naze)'
import unittest
import jscompiler
class JsCompilerTestCase(unittest.TestCase):
"""Unit tests for jscompiler module."""
def testGetJsCompilerArgs(self):
args = jscompiler._GetJsCompilerArgs(
'path/to/jscompiler.jar',
1.6,
['path/to/src1.js', 'path/to/src2.js'],
['--test_jvm_flag'],
['--test_compiler_flag']
)
self.assertEqual(
['java', '-client', '--test_jvm_flag',
'-jar', 'path/to/jscompiler.jar',
'--js', 'path/to/src1.js',
'--js', 'path/to/src2.js', '--test_compiler_flag'],
args)
args = jscompiler._GetJsCompilerArgs(
'path/to/jscompiler.jar',
1.7,
['path/to/src1.js', 'path/to/src2.js'],
['--test_jvm_flag'],
['--test_compiler_flag'])
self.assertEqual(
['java', '-d32', '-client', '--test_jvm_flag',
'-jar', 'path/to/jscompiler.jar',
'--js', 'path/to/src1.js',
'--js', 'path/to/src2.js',
'--test_compiler_flag'],
args)
self.assertRaises(
jscompiler.JsCompilerError,
lambda: jscompiler._GetJsCompilerArgs(
'path/to/jscompiler.jar',
1.5,
['path/to/src1.js', 'path/to/src2.js'],
['--test_jvm_flag'],
['--test_compiler_flag']))
def testGetJavaVersion(self):
def assertVersion(expected, version_string):
      self.assertEquals(expected, jscompiler._ParseJavaVersion(version_string))
assertVersion(1.7, _TEST_JAVA_VERSION_STRING)
assertVersion(1.4, 'java version "1.4.0_03-ea"')
_TEST_JAVA_VERSION_STRING = """\
openjdk version "1.7.0-google-v5"
OpenJDK Runtime Environment (build 1.7.0-google-v5-64327-39803485)
OpenJDK Server VM (build 22.0-b10, mixed mode)
"""
if __name__ == '__main__':
unittest.main()
|
gregrperkins/closure-library
|
closure/bin/build/jscompiler_test.py
|
Python
|
apache-2.0
| 2,516
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
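# Piecewise modified Huber loss on the margin z = x * (2y - 1):
#   loss(z) = -4z          for z < -1
#   loss(z) = (1 - z)^2    for -1 <= z < 1
#   loss(z) = 0            otherwise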
def modified_huber_loss_forward(val):
if val < -1:
return -4. * val
elif val < 1:
return (1. - val) * (1. - val)
else:
return 0.
class TestModifiedHuberLossOp(OpTest):
def setUp(self):
self.op_type = 'modified_huber_loss'
samples_num = 32
x_np = np.random.uniform(-2., 2., (samples_num, 1)).astype('float32')
y_np = np.random.choice([0, 1], samples_num).reshape(
(samples_num, 1)).astype('float32')
product_res = x_np * (2. * y_np - 1.)
# keep away from the junction of piecewise function
for pos, val in np.ndenumerate(product_res):
while abs(val - 1.) < 0.05:
x_np[pos] = np.random.uniform(-2., 2.)
y_np[pos] = np.random.choice([0, 1])
product_res[pos] = x_np[pos] * (2 * y_np[pos] - 1)
val = product_res[pos]
self.inputs = {'X': x_np, 'Y': y_np}
loss = np.vectorize(modified_huber_loss_forward)(product_res)
self.outputs = {
'IntermediateVal': product_res.astype('float32'),
'Out': loss.reshape((samples_num, 1)).astype('float32')
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', max_relative_error=0.01)
if __name__ == '__main__':
unittest.main()
|
QiJune/Paddle
|
python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py
|
Python
|
apache-2.0
| 2,103
|
"""Support for Cisco IOS Routers."""
import logging
import re
from pexpect import pxssh
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = vol.All(
PARENT_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=""): cv.string,
vol.Optional(CONF_PORT): cv.port,
}
)
)
def get_scanner(hass, config):
"""Validate the configuration and return a Cisco scanner."""
scanner = CiscoDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class CiscoDeviceScanner(DeviceScanner):
"""This class queries a wireless router running Cisco IOS firmware."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.port = config.get(CONF_PORT)
self.password = config[CONF_PASSWORD]
self.last_results = {}
self.success_init = self._update_info()
_LOGGER.info("Initialized cisco_ios scanner")
def get_device_name(self, device):
"""Get the firmware doesn't save the name of the wireless device."""
return None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def _update_info(self):
"""
Ensure the information from the Cisco router is up to date.
Returns boolean if scanning successful.
"""
string_result = self._get_arp_data()
if string_result:
self.last_results = []
last_results = []
lines_result = string_result.splitlines()
            # Remove the first two lines, as they contain the arp command
            # and the arp table titles e.g.
# show ip arp
# Protocol Address | Age (min) | Hardware Addr | Type | Interface
lines_result = lines_result[2:]
for line in lines_result:
parts = line.split()
if len(parts) != 6:
continue
# ['Internet', '10.10.11.1', '-', '0027.d32d.0123', 'ARPA',
# 'GigabitEthernet0']
age = parts[2]
hw_addr = parts[3]
if age != "-":
mac = _parse_cisco_mac_address(hw_addr)
age = int(age)
if age < 1:
last_results.append(mac)
self.last_results = last_results
return True
return False
def _get_arp_data(self):
"""Open connection to the router and get arp entries."""
try:
cisco_ssh = pxssh.pxssh()
cisco_ssh.login(
self.host,
self.username,
self.password,
port=self.port,
auto_prompt_reset=False,
)
# Find the hostname
initial_line = cisco_ssh.before.decode("utf-8").splitlines()
router_hostname = initial_line[len(initial_line) - 1]
router_hostname += "#"
# Set the discovered hostname as prompt
regex_expression = f"(?i)^{router_hostname}".encode()
cisco_ssh.PROMPT = re.compile(regex_expression, re.MULTILINE)
# Allow full arp table to print at once
cisco_ssh.sendline("terminal length 0")
cisco_ssh.prompt(1)
cisco_ssh.sendline("show ip arp")
cisco_ssh.prompt(1)
devices_result = cisco_ssh.before
return devices_result.decode("utf-8")
except pxssh.ExceptionPxssh as px_e:
_LOGGER.error("Failed to login via pxssh: %s", px_e)
return None
def _parse_cisco_mac_address(cisco_hardware_addr):
"""
Parse a Cisco formatted HW address to normal MAC.
e.g. convert
001d.ec02.07ab
to:
00:1D:EC:02:07:AB
Takes in cisco_hwaddr: HWAddr String from Cisco ARP table
Returns a regular standard MAC address
"""
cisco_hardware_addr = cisco_hardware_addr.replace(".", "")
blocks = [
cisco_hardware_addr[x : x + 2] for x in range(0, len(cisco_hardware_addr), 2)
]
return ":".join(blocks).upper()
|
aronsky/home-assistant
|
homeassistant/components/cisco_ios/device_tracker.py
|
Python
|
apache-2.0
| 4,678
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import MaxAbsScaler
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("MaxAbsScalerExample")\
.getOrCreate()
# $example on$
dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
scaler = MaxAbsScaler(inputCol="features", outputCol="scaledFeatures")
# Compute summary statistics and generate MaxAbsScalerModel
scalerModel = scaler.fit(dataFrame)
    # Rescale each feature to the range [-1, 1].
scaledData = scalerModel.transform(dataFrame)
scaledData.show()
# $example off$
spark.stop()
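# How the rescaling works, as a worked example (values hypothetical):
# MaxAbsScaler divides each feature by the largest absolute value seen for
# that feature during fit(), so a column holding [2.0, -8.0, 4.0] has
# max-abs 8.0 and is transformed to [0.25, -1.0, 0.5], inside [-1, 1],
# with no shifting, so sparsity is preserved.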
|
mrchristine/spark-examples-dbc
|
src/main/python/ml/max_abs_scaler_example.py
|
Python
|
apache-2.0
| 1,515
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
class SoftmaxTest(tf.test.TestCase):
def _npSoftmax(self, features, log=False):
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features -
np.reshape(np.amax(features, axis=class_dim), [batch_size, 1]))
softmax = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
if log:
return np.log(softmax)
else:
return softmax
def _testSoftmax(self, np_features, log=False, use_gpu=False):
np_softmax = self._npSoftmax(np_features, log=log)
with self.test_session(use_gpu=use_gpu):
if log:
tf_softmax = tf.nn.log_softmax(np_features)
else:
tf_softmax = tf.nn.softmax(np_features)
out = tf_softmax.eval()
self.assertAllClose(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in each
# batch element.
self.assertAllClose(np.ones(out.shape[0]),
np.sum(out, axis=1))
def _testAll(self, features):
self._testSoftmax(features, use_gpu=False)
self._testSoftmax(features, log=True, use_gpu=False)
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5, atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5, atol=1.e-5)
def testShapeMismatch(self):
with self.assertRaises(ValueError):
tf.nn.softmax([0., 1., 2., 3.])
with self.assertRaises(ValueError):
tf.nn.log_softmax([0., 1., 2., 3.])
  def _testOverflow(self, use_gpu=False):
    if use_gpu:
      dtype = np.float32
    else:
      dtype = np.float64
    max_value = np.finfo(dtype).max
    features = np.array(
        [[1., 1., 1., 1.],
         [max_value, 1., 2., 3.]]).astype(dtype)
    with self.test_session(use_gpu=use_gpu):
      tf_log_softmax = tf.nn.log_softmax(features)
      out = tf_log_softmax.eval()
      self.assertAllClose(
          np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
                    [0, -max_value, -max_value, -max_value]]),
          out,
          rtol=1.e-5, atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testEmpty(self):
with self.test_session():
x = tf.constant([[]], shape=[0, 3])
self.assertEqual(0, tf.size(x).eval())
expected_y = np.array([]).reshape(0, 3)
np.testing.assert_array_equal(expected_y, tf.nn.softmax(x).eval())
if __name__ == "__main__":
tf.test.main()
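# Numerical note on the reference implementation above: _npSoftmax subtracts
# the per-row max before exponentiating, using the identity
#   softmax(x) = softmax(x - max(x)),
# so exp() never overflows. _testOverflow relies on this: for a row
# [max, 1, 2, 3] the shifted logits are [0, 1 - max, 2 - max, 3 - max], and
# the expected log-softmax row is therefore approximately [0, -max, -max, -max].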
|
awni/tensorflow
|
tensorflow/python/kernel_tests/softmax_op_test.py
|
Python
|
apache-2.0
| 4,506
|
import abc
import mock
import pytest
from addons.base.tests.utils import MockFolder
from django.utils import timezone
from framework.auth import Auth
from framework.exceptions import HTTPError
from nose.tools import (assert_equal, assert_false, assert_in, assert_is,
assert_is_none, assert_not_in, assert_raises,
assert_true)
from osf_tests.factories import ProjectFactory, UserFactory
from tests.utils import mock_auth
from addons.base import exceptions
from osf_tests.conftest import request_context
pytestmark = pytest.mark.django_db
class OAuthAddonModelTestSuiteMixinBase(object):
    __metaclass__ = abc.ABCMeta
@abc.abstractproperty
def short_name(self):
pass
@abc.abstractproperty
def full_name(self):
pass
@abc.abstractproperty
def ExternalAccountFactory(self):
pass
class OAuthAddonUserSettingTestSuiteMixin(OAuthAddonModelTestSuiteMixinBase):
def setUp(self):
self.node = ProjectFactory()
self.user = self.node.creator
self.external_account = self.ExternalAccountFactory()
self.user.external_accounts.add(self.external_account)
self.user.save()
self.user_settings = self.user.get_or_add_addon(self.short_name)
    def test_mergeability(self):
assert self.user_settings.can_be_merged
def test_merge_user_settings(self):
other_node = ProjectFactory()
other_user = other_node.creator
other_account = self.ExternalAccountFactory()
other_user.external_accounts.add(other_account)
other_user_settings = other_user.get_or_add_addon(self.short_name)
other_node_settings = other_node.get_or_add_addon(self.short_name, auth=Auth(other_user))
other_node_settings.set_auth(
user=other_user,
external_account=other_account
)
assert other_node_settings.has_auth
assert other_node._id not in self.user_settings.oauth_grants
assert other_node_settings.user_settings == other_user_settings
self.user.merge_user(other_user)
self.user.save()
other_node_settings.reload()
self.user_settings.reload()
assert other_node_settings.has_auth
assert other_node._id in self.user_settings.oauth_grants
assert other_node_settings.user_settings == self.user_settings
def test_grant_oauth_access_no_metadata(self):
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
)
self.user_settings.save()
assert self.user_settings.oauth_grants == {self.node._id: {self.external_account._id: {}}}
def test_grant_oauth_access_metadata(self):
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
self.user_settings.save()
assert self.user_settings.oauth_grants == {
self.node._id: {
self.external_account._id: {'folder': 'fake_folder_id'}
},
}
def test_verify_oauth_access_no_metadata(self):
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
)
self.user_settings.save()
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account
)
)
assert_false(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.ExternalAccountFactory()
)
)
def test_verify_oauth_access_metadata(self):
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
self.user_settings.save()
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
)
assert_false(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'another_folder_id'}
)
)
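# Illustrative sketch (names hypothetical): a concrete addon test case mixes
# this suite in and fills in the abstract members, e.g.:
#
#   class TestBoxUserSettings(OAuthAddonUserSettingTestSuiteMixin, unittest.TestCase):
#       short_name = 'box'
#       full_name = 'Box'
#       ExternalAccountFactory = BoxAccountFactory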
class OAuthAddonNodeSettingsTestSuiteMixin(OAuthAddonModelTestSuiteMixinBase):
@pytest.yield_fixture(autouse=True)
def _request_context(self, app):
context = app.test_request_context(headers={
'Remote-Addr': '146.9.219.56',
'User-Agent': 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:0.9.4.1) Gecko/20020518 Netscape6/6.2.3'
})
context.push()
yield context
context.pop()
@abc.abstractproperty
def NodeSettingsFactory(self):
pass
@abc.abstractproperty
def NodeSettingsClass(self):
pass
@abc.abstractproperty
def UserSettingsFactory(self):
pass
def _node_settings_class_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'folder_id': '1234567890',
'owner': self.node
}
def setUp(self):
super(OAuthAddonNodeSettingsTestSuiteMixin, self).setUp()
self.node = ProjectFactory()
self.user = self.node.creator
self.external_account = self.ExternalAccountFactory()
self.user.add_addon(self.short_name)
self.user.external_accounts.add(self.external_account)
self.user.save()
self.user_settings = self.user.get_addon(self.short_name)
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': '1234567890'}
)
self.user_settings.save()
self.node_settings = self.NodeSettingsFactory(
**self._node_settings_class_kwargs(self.node, self.user_settings)
)
self.node_settings.external_account = self.external_account
self.node_settings.save()
def tearDown(self):
super(OAuthAddonNodeSettingsTestSuiteMixin, self).tearDown()
self.user_settings.delete()
self.external_account.delete()
self.node.delete()
self.user.delete()
@pytest.mark.django_db
def test_configured_true(self):
assert_true(self.node_settings.has_auth)
assert_true(self.node_settings.complete)
assert_true(self.node_settings.configured)
def test_configured_false(self):
self.node_settings.clear_settings()
self.node_settings.save()
assert_false(self.node_settings.configured)
def test_complete_true(self):
assert_true(self.node_settings.has_auth)
assert_true(self.node_settings.complete)
def test_complete_has_auth_not_verified(self):
with mock_auth(self.user):
self.user_settings.revoke_oauth_access(self.external_account)
self.node_settings.reload()
assert_false(self.node_settings.has_auth)
assert_false(self.node_settings.complete)
assert_equal(
self.user_settings.oauth_grants,
{self.node._id: {}}
)
def test_revoke_remote_access_called(self):
with mock.patch.object(self.user_settings, 'revoke_remote_oauth_access') as mock_revoke:
with mock_auth(self.user):
self.user_settings.revoke_oauth_access(self.external_account)
assert_equal(mock_revoke.call_count, 1)
def test_revoke_remote_access_not_called(self):
user2 = UserFactory()
user2.external_accounts.add(self.external_account)
user2.save()
with mock.patch.object(self.user_settings, 'revoke_remote_oauth_access') as mock_revoke:
with mock_auth(self.user):
self.user_settings.revoke_oauth_access(self.external_account)
assert_equal(mock_revoke.call_count, 0)
def test_complete_auth_false(self):
self.node_settings.user_settings = None
assert_false(self.node_settings.has_auth)
assert_false(self.node_settings.complete)
def test_fields(self):
node_settings = self.NodeSettingsClass(owner=ProjectFactory(), user_settings=self.user_settings)
node_settings.save()
assert_true(node_settings.user_settings)
assert_equal(node_settings.user_settings.owner, self.user)
assert_true(hasattr(node_settings, 'folder_id'))
assert_true(hasattr(node_settings, 'user_settings'))
def test_folder_defaults_to_none(self):
node_settings = self.NodeSettingsClass(user_settings=self.user_settings)
node_settings.save()
assert_is_none(node_settings.folder_id)
def test_has_auth(self):
self.user.external_accounts = []
self.user_settings.reload()
node = ProjectFactory()
settings = self.NodeSettingsClass(user_settings=self.user_settings, owner=node)
settings.save()
assert_false(settings.has_auth)
self.user.external_accounts.add(self.external_account)
settings.set_auth(self.external_account, self.user)
settings.reload()
assert_true(settings.has_auth)
def test_clear_auth(self):
node_settings = self.NodeSettingsFactory()
node_settings.external_account = self.ExternalAccountFactory()
node_settings.user_settings = self.UserSettingsFactory()
node_settings.save()
node_settings.clear_auth()
assert_is_none(node_settings.external_account)
assert_is_none(node_settings.user_settings)
def test_clear_settings(self):
node_settings = self.NodeSettingsFactory()
node_settings.external_account = self.ExternalAccountFactory()
node_settings.user_settings = self.UserSettingsFactory()
node_settings.save()
node_settings.clear_settings()
assert_is_none(node_settings.folder_id)
def test_to_json(self):
settings = self.node_settings
user = UserFactory()
result = settings.to_json(user)
assert_equal(result['addon_short_name'], self.short_name)
def test_delete(self):
assert_true(self.node_settings.user_settings)
assert_true(self.node_settings.folder_id)
old_logs = list(self.node.logs.all())
self.node_settings.delete()
self.node_settings.save()
assert_is(self.node_settings.user_settings, None)
assert_is(self.node_settings.folder_id, None)
assert_true(self.node_settings.deleted)
assert_equal(list(self.node.logs.all()), list(old_logs))
def test_on_delete(self):
self.user.delete_addon(
self.user_settings.oauth_provider.short_name
)
self.node_settings.reload()
assert_is_none(self.node_settings.external_account)
assert_is_none(self.node_settings.user_settings)
def test_deauthorize(self):
assert_true(self.node_settings.user_settings)
assert_true(self.node_settings.folder_id)
self.node_settings.deauthorize(auth=Auth(self.user))
self.node_settings.save()
assert_is(self.node_settings.user_settings, None)
assert_is(self.node_settings.folder_id, None)
last_log = self.node.logs.first()
assert_equal(last_log.action, '{0}_node_deauthorized'.format(self.short_name))
params = last_log.params
assert_in('node', params)
assert_in('project', params)
def test_set_folder(self):
folder_id = '1234567890'
self.node_settings.set_folder(folder_id, auth=Auth(self.user))
self.node_settings.save()
# Folder was set
assert_equal(self.node_settings.folder_id, folder_id)
# Log was saved
last_log = self.node.logs.first()
assert_equal(last_log.action, '{0}_folder_selected'.format(self.short_name))
def test_set_user_auth(self):
node_settings = self.NodeSettingsFactory()
user_settings = self.UserSettingsFactory()
external_account = self.ExternalAccountFactory()
user_settings.owner.external_accounts.add(external_account)
user_settings.save()
node_settings.external_account = external_account
node_settings.set_auth(external_account, user_settings.owner)
node_settings.save()
assert_true(node_settings.has_auth)
assert_equal(node_settings.user_settings, user_settings)
# A log was saved
last_log = node_settings.owner.logs.first()
assert_equal(last_log.action, '{0}_node_authorized'.format(self.short_name))
log_params = last_log.params
assert_equal(log_params['node'], node_settings.owner._id)
assert_equal(last_log.user, user_settings.owner)
def test_serialize_credentials(self):
self.user_settings.external_accounts[0].oauth_key = 'key-11'
self.user_settings.save()
credentials = self.node_settings.serialize_waterbutler_credentials()
expected = {'token': self.node_settings.external_account.oauth_key}
assert_equal(credentials, expected)
def test_serialize_credentials_not_authorized(self):
self.node_settings.user_settings = None
self.node_settings.save()
with assert_raises(exceptions.AddonError):
self.node_settings.serialize_waterbutler_credentials()
def test_serialize_settings(self):
settings = self.node_settings.serialize_waterbutler_settings()
expected = {'folder': self.node_settings.folder_id}
assert_equal(settings, expected)
def test_serialize_settings_not_configured(self):
self.node_settings.clear_settings()
self.node_settings.save()
with assert_raises(exceptions.AddonError):
self.node_settings.serialize_waterbutler_settings()
def test_create_log(self):
action = 'file_added'
path = 'pizza.nii'
nlog = self.node.logs.count()
self.node_settings.create_waterbutler_log(
auth=Auth(user=self.user),
action=action,
metadata={'path': path, 'materialized': path},
)
self.node.reload()
assert_equal(self.node.logs.count(), nlog + 1)
assert_equal(
self.node.logs.latest().action,
'{0}_{1}'.format(self.short_name, action),
)
assert_equal(
self.node.logs.latest().params['path'],
path
)
def test_after_fork_by_authorized_user(self):
fork = ProjectFactory()
clone = self.node_settings.after_fork(
node=self.node, fork=fork, user=self.user_settings.owner
)
assert_equal(clone.user_settings, self.user_settings)
def test_after_fork_by_unauthorized_user(self):
fork = ProjectFactory()
user = UserFactory()
clone = self.node_settings.after_fork(
node=self.node, fork=fork, user=user,
save=True
)
assert_is(clone.user_settings, None)
def test_before_remove_contributor_message(self):
message = self.node_settings.before_remove_contributor(
self.node, self.user)
assert_true(message)
assert_in(self.user.fullname, message)
assert_in(self.node.project_or_component, message)
def test_after_remove_authorized_user_not_self(self):
message = self.node_settings.after_remove_contributor(
self.node, self.user_settings.owner)
self.node_settings.save()
assert_is_none(self.node_settings.user_settings)
assert_true(message)
assert_in('You can re-authenticate', message)
def test_after_remove_authorized_user_self(self):
auth = Auth(user=self.user_settings.owner)
message = self.node_settings.after_remove_contributor(
self.node, self.user_settings.owner, auth)
self.node_settings.save()
assert_is_none(self.node_settings.user_settings)
assert_true(message)
assert_not_in('You can re-authenticate', message)
def test_after_delete(self):
self.node.remove_node(Auth(user=self.node.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_is_none(self.node_settings.user_settings)
assert_is_none(self.node_settings.folder_id)
class OAuthCitationsTestSuiteMixinBase(OAuthAddonModelTestSuiteMixinBase):
@abc.abstractproperty
def ProviderClass(self):
pass
@abc.abstractproperty
def OAuthProviderClass(self):
pass
class OAuthCitationsNodeSettingsTestSuiteMixin(
OAuthAddonNodeSettingsTestSuiteMixin,
OAuthCitationsTestSuiteMixinBase):
def setUp(self):
super(OAuthCitationsNodeSettingsTestSuiteMixin, self).setUp()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
self.user_settings.save()
def test_fetch_folder_name_root(self):
self.node_settings.list_id = 'ROOT'
assert_equal(
self.node_settings.fetch_folder_name,
'All Documents'
)
def test_selected_folder_name_empty(self):
self.node_settings.list_id = None
assert_equal(
self.node_settings.fetch_folder_name,
''
)
def test_selected_folder_name(self):
# Mock the return from api call to get the folder's name
mock_folder = MockFolder()
name = None
with mock.patch.object(self.OAuthProviderClass, '_folder_metadata', return_value=mock_folder):
name = self.node_settings.fetch_folder_name
assert_equal(
name,
'Fake Folder'
)
def test_api_not_cached(self):
# The first call to .api returns a new object
with mock.patch.object(self.NodeSettingsClass, 'oauth_provider') as mock_api:
api = self.node_settings.api
mock_api.assert_called_once()
assert_equal(api, mock_api())
def test_api_cached(self):
# Repeated calls to .api returns the same object
with mock.patch.object(self.NodeSettingsClass, 'oauth_provider') as mock_api:
self.node_settings._api = 'testapi'
api = self.node_settings.api
assert_false(mock_api.called)
assert_equal(api, 'testapi')
############# Overrides ##############
# `pass` due to lack of waterbutler- #
# related events for citation addons #
######################################
def _node_settings_class_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'list_id': 'fake_folder_id',
'owner': self.node
}
def test_serialize_credentials(self):
pass
def test_serialize_credentials_not_authorized(self):
pass
def test_serialize_settings(self):
pass
def test_serialize_settings_not_configured(self):
pass
def test_create_log(self):
pass
def test_set_folder(self):
folder_id = 'fake-folder-id'
folder_name = 'fake-folder-name'
self.node_settings.clear_settings()
self.node_settings.save()
assert_is_none(self.node_settings.list_id)
provider = self.ProviderClass()
provider.set_config(
self.node_settings,
self.user,
folder_id,
folder_name,
auth=Auth(user=self.user),
)
# instance was updated
assert_equal(
self.node_settings.list_id,
'fake-folder-id',
)
# user_settings was updated
# TODO: the call to grant_oauth_access should be mocked
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake-folder-id'}
)
)
log = self.node.logs.latest()
assert_equal(log.action, '{}_folder_selected'.format(self.short_name))
assert_equal(log.params['folder_id'], folder_id)
assert_equal(log.params['folder_name'], folder_name)
@mock.patch('framework.status.push_status_message')
def test_remove_contributor_authorizer(self, mock_push_status):
contributor = UserFactory()
self.node.add_contributor(contributor, permissions=['read', 'write', 'admin'])
self.node.remove_contributor(self.node.creator, auth=Auth(user=contributor))
self.node_settings.reload()
self.user_settings.reload()
assert_false(self.node_settings.has_auth)
assert_false(self.user_settings.verify_oauth_access(self.node, self.external_account))
def test_remove_contributor_not_authorizer(self):
contributor = UserFactory()
self.node.add_contributor(contributor)
self.node.remove_contributor(contributor, auth=Auth(user=self.node.creator))
assert_true(self.node_settings.has_auth)
assert_true(self.user_settings.verify_oauth_access(self.node, self.external_account))
@mock.patch('framework.status.push_status_message')
def test_fork_by_authorizer(self, mock_push_status):
fork = self.node.fork_node(auth=Auth(user=self.node.creator))
self.user_settings.reload()
assert_true(fork.get_addon(self.short_name).has_auth)
assert_true(self.user_settings.verify_oauth_access(fork, self.external_account))
@mock.patch('framework.status.push_status_message')
def test_fork_not_by_authorizer(self, mock_push_status):
contributor = UserFactory()
self.node.add_contributor(contributor)
fork = self.node.fork_node(auth=Auth(user=contributor))
assert_false(fork.get_addon(self.short_name).has_auth)
assert_false(self.user_settings.verify_oauth_access(fork, self.external_account))
class CitationAddonProviderTestSuiteMixin(OAuthCitationsTestSuiteMixinBase):
@abc.abstractproperty
def ApiExceptionClass(self):
pass
def setUp(self):
super(CitationAddonProviderTestSuiteMixin, self).setUp()
self.provider = self.OAuthProviderClass()
@abc.abstractmethod
def test_handle_callback(self):
pass
def test_citation_lists(self):
mock_client = mock.Mock()
mock_folders = [MockFolder()]
mock_list = mock.Mock()
mock_list.items = mock_folders
mock_client.folders.list.return_value = mock_list
mock_client.collections.return_value = mock_folders
self.provider._client = mock_client
mock_account = mock.Mock()
self.provider.account = mock_account
res = self.provider.citation_lists(self.ProviderClass()._extract_folder)
assert_equal(res[1]['name'], mock_folders[0].name)
assert_equal(res[1]['id'], mock_folders[0].json['id'])
def test_client_not_cached(self):
# The first call to .client returns a new client
with mock.patch.object(self.OAuthProviderClass, '_get_client') as mock_get_client:
mock_account = mock.Mock()
mock_account.expires_at = timezone.now()
self.provider.account = mock_account
self.provider.client
assert_true(mock_get_client.called)
def test_client_cached(self):
# Repeated calls to .client returns the same client
with mock.patch.object(self.OAuthProviderClass, '_get_client') as mock_get_client:
self.provider._client = mock.Mock()
res = self.provider.client
assert_equal(res, self.provider._client)
assert_false(mock_get_client.called)
def test_has_access(self):
with mock.patch.object(self.OAuthProviderClass, '_get_client') as mock_get_client:
mock_client = mock.Mock()
mock_error = mock.PropertyMock()
mock_error.status_code = 403
mock_error.text = 'Mocked 403 ApiException'
mock_client.folders.list.side_effect = self.ApiExceptionClass(mock_error)
mock_client.collections.side_effect = self.ApiExceptionClass(mock_error)
mock_get_client.return_value = mock_client
with assert_raises(HTTPError) as exc_info:
self.provider.client
assert_equal(exc_info.exception.code, 403)
|
binoculars/osf.io
|
addons/base/tests/models.py
|
Python
|
apache-2.0
| 24,742
|
__author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
import unittest
from nose.plugins.attrib import attr
import os
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
from jnpr.junos import Device
from jnpr.junos.utils.fs import FS
from mock import patch, MagicMock, call
@attr('unit')
class TestFS(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(host='1.1.1.1', user='rick', password='password123',
gather_facts=False)
self.dev.open()
self.fs = FS(self.dev)
def test_cat_wrong_path_return_none(self):
path = 'test/report'
self.assertEqual(self.fs.cat(path), None)
def test_cat(self):
self.fs._dev.rpc.file_show = MagicMock(side_effect=self._mock_manager)
path = 'test/cat.txt'
self.assertTrue('testing cat functionality' in self.fs.cat(path))
self.fs._dev.rpc.file_show.assert_called_with(filename='test/cat.txt')
def test_cwd(self):
self.fs._dev.rpc.set_cli_working_directory = MagicMock()
folder = 'test/report'
self.fs.cwd(folder)
self.fs._dev.rpc.set_cli_working_directory.\
assert_called_with(directory='test/report')
@patch('jnpr.junos.Device.execute')
def test_pwd(self, mock_execute):
mock_execute.side_effect = MagicMock(side_effect=self._mock_manager)
self.fs.pwd()
self.assertEqual(self.fs.pwd(), '/cf/var/home/rick')
def test_checksum_return_none(self):
path = 'test/report'
self.assertEqual(self.fs.checksum(path), None)
def test_checksum_unknown_calc(self):
path = 'test/report'
self.assertRaises(ValueError, self.fs.checksum, path=path, calc='abc')
def test_checksum_return_rsp(self):
self.fs.dev.rpc.get_sha256_checksum_information = \
MagicMock(side_effect=self._mock_manager)
path = 'test/checksum'
self.assertEqual(self.fs.checksum(path, 'sha256'), 'xxxx')
self.fs.dev.rpc.get_sha256_checksum_information.\
assert_called_with(path='test/checksum')
def test_stat_calling___decode_file(self):
path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.stat(path),
{'owner': 'pqr', 'path': '/var/abc.sh',
'permissions': 755,
'permissions_text': '-rwxr-xr-x', 'size': 2,
'ts_date': 'Mar 13 06:54',
'ts_epoc': '1394693680',
'type': 'file'})
def test_stat_calling___decode_dir(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.stat(path),
{'path': '/var', 'type': 'dir', 'file_count': 1,
'size': 2})
def test_stat_return_none(self):
path = 'test/abc'
self.fs.dev.rpc.file_list = MagicMock()
self.fs.dev.rpc.file_list.find.return_value = 'output'
self.assertEqual(self.fs.stat(path), None)
def test_ls_calling___decode_file(self):
path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'owner': 'pqr', 'path': '/var/abc.sh',
'permissions': 755,
'permissions_text': '-rwxr-xr-x', 'size': 2,
'ts_date': 'Mar 13 06:54',
'ts_epoc': '1394693680',
'type': 'file'})
def test_ls_calling___decode_dir(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'files':
{'abc': {'permissions_text': 'drwxr-xr-x',
'ts_date': 'Feb 17 15:30',
'ts_epoc': '1392651039',
'owner': 'root', 'path': 'abc',
'size': 2, 'type': 'dir',
'permissions': 555}},
'path': '/var', 'type': 'dir',
'file_count': 1,
'size': 2})
def test_ls_return_none(self):
path = 'test/abc'
self.fs.dev.rpc.file_list = MagicMock()
self.fs.dev.rpc.file_list.find.return_value = 'output'
self.assertEqual(self.fs.ls(path), None)
@patch('jnpr.junos.utils.fs.FS._decode_file')
def test_ls_link_path_false(self, mock_decode_file):
mock_decode_file.get.return_value = False
path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.fs.ls(path, followlink=False)
        mock_decode_file.assert_has_calls([call().get('link')])
def test_ls_brief_true(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path, brief=True),
{'files': ['abc'], 'path': '/var',
'type': 'dir', 'file_count': 1, 'size': 2})
def test_ls_calling___decode_dir_type_symbolic_link(self):
path = 'test/stat/decode_symbolic_link'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'files':
{'abc': {'permissions_text': 'drwxr-xr-x',
'ts_date': 'Feb 17 15:30',
'link': 'symlink test',
'ts_epoc': '1392651039',
'owner': 'root', 'path': 'abc',
'size': 2, 'type': 'link',
'permissions': 555}},
'path': '/var', 'type': 'dir', 'file_count': 1,
'size': 2})
def test_rm_return_true(self):
self.fs.dev.rpc.file_delete = MagicMock(return_value=True)
path = 'test/abc'
self.assertTrue(self.fs.rm(path))
self.fs.dev.rpc.file_delete.assert_called_once_with(
path='test/abc')
def test_rm_return_false(self):
path = 'test/abc'
self.fs.dev.rpc.file_delete = MagicMock(return_value=False)
self.assertFalse(self.fs.rm(path))
self.fs.dev.rpc.file_delete.assert_called_once_with(
path='test/abc')
def test_copy_return_true(self):
self.fs.dev.rpc.file_copy = MagicMock()
initial = 'test/abc'
final = 'test/xyz'
self.assertTrue(self.fs.cp(initial, final))
self.fs.dev.rpc.file_copy.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_copy_return_false(self):
initial = 'test/abc'
final = 'test/xyz'
self.fs.dev.rpc.file_copy = MagicMock(side_effect=Exception)
self.assertFalse(self.fs.cp(initial, final))
self.fs.dev.rpc.file_copy.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_move_return_true(self):
self.fs.dev.rpc.file_rename = MagicMock(return_value=True)
initial = 'test/abc'
final = 'test/xyz'
self.assertTrue(self.fs.mv(initial, final))
self.fs.dev.rpc.file_rename.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_move_return_false(self):
initial = 'test/abc'
final = 'test/xyz'
self.fs.dev.rpc.file_rename = MagicMock(return_value=False)
self.assertFalse(self.fs.mv(initial, final))
self.fs.dev.rpc.file_rename.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_tgz_return_true(self):
src = 'test/tgz.txt'
dst = 'test/xyz'
self.fs.dev.rpc.file_archive = MagicMock(return_value=True)
self.assertTrue(self.fs.tgz(src, dst))
self.fs.dev.rpc.file_archive.assert_called_once_with(
source='test/tgz.txt',
destination='test/xyz', compress=True)
@patch('jnpr.junos.Device.execute')
def test_tgz_return_error(self, mock_execute):
mock_execute.side_effect = self._mock_manager
src = 'test/tgz.txt'
dst = 'test/xyz'
self.assertTrue('testing tgz' in self.fs.tgz(src, dst))
@patch('jnpr.junos.utils.fs.StartShell')
def test_rmdir(self, mock_StartShell):
path = 'test/rmdir'
        print(self.fs.rmdir(path))
calls = [
call().__enter__(),
call().__enter__().run('rmdir test/rmdir'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.utils.fs.StartShell')
def test_mkdir(self, mock_StartShell):
path = 'test/mkdir'
        print(self.fs.mkdir(path))
calls = [
call().__enter__(),
call().__enter__().run('mkdir -p test/mkdir'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.utils.fs.StartShell')
def test_symlink(self, mock_StartShell):
src = 'test/tgz.txt'
dst = 'test/xyz'
        print(self.fs.symlink(src, dst))
calls = [
call().__enter__(),
call().__enter__().run('ln -sf test/tgz.txt test/xyz'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.Device.execute')
def test_storage_usage(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_usage(),
{'/dev/abc':
{'avail_block': 234234,
'used_blocks': 2346455, 'used_pct': '1',
'mount': '/', 'total_blocks': 567431,
'avail': '2F', 'used': '481M',
'total': '4F'}})
@patch('jnpr.junos.Device.execute')
def test_storage_cleanup(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_cleanup(),
{'/var/abc.txt':
{'ts_date': 'Apr 25 10:38', 'size': 11}})
@patch('jnpr.junos.Device.execute')
def test_storage_cleanup_check(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_cleanup_check(),
{'/var/abc.txt':
{'ts_date': 'Apr 25 10:38', 'size': 11}})
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__),
'rpc-reply', fname)
        with open(fpath) as fp:
            foo = fp.read()
if (fname == 'get-rpc-error.xml' or
fname == 'get-index-error.xml' or
fname == 'get-system-core-dumps.xml'):
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())
elif (fname == 'show-configuration.xml' or
fname == 'show-system-alarms.xml'):
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())._NCElement__doc
else:
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())._NCElement__doc[0]
return rpc_reply
def _mock_manager(self, *args, **kwargs):
if kwargs:
# if 'path' in kwargs and 'detail' in kwargs:
# return self._read_file('dir_list_detail.xml')
if 'path' in kwargs:
if kwargs['path'] == 'test/stat/decode_dir':
return self._read_file('file-list_dir.xml')
elif kwargs['path'] == 'test/stat/decode_file':
return self._read_file('file-list_file.xml')
elif kwargs['path'] == 'test/checksum':
return self._read_file('checksum.xml')
elif kwargs['path'] == 'test/stat/decode_symbolic_link':
return self._read_file('file-list_symlink.xml')
if 'filename' in kwargs:
if kwargs['filename'] == 'test/cat.txt':
return self._read_file('file-show.xml')
device_params = kwargs['device_params']
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
elif args:
if args[0].tag == 'command':
if args[0].text == 'show cli directory':
return self._read_file('show-cli-directory.xml')
elif args[0].tag == 'get-system-storage':
return self._read_file('get-system-storage.xml')
elif args[0].tag == 'request-system-storage-cleanup':
return self._read_file('request-system-storage-cleanup.xml')
elif args[0].tag == 'file-archive':
return self._read_file('file-archive.xml')
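# Usage sketch against a live device (host and credentials hypothetical); the
# return shapes match what the assertions above expect:
#
#   dev = Device(host='router.example.com', user='rick', password='secret')
#   dev.open()
#   fs = FS(dev)
#   fs.ls('/var/tmp')                            # dict of dir metadata, or None
#   fs.checksum('/var/tmp/a.tgz', calc='sha256') # checksum string, or None
#   fs.cp('/var/tmp/a.tgz', '/var/tmp/b.tgz')    # True on success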
|
JamesNickerson/py-junos-eznc
|
tests/unit/utils/test_fs.py
|
Python
|
apache-2.0
| 13,790
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.sites.models import Site
from socialregistration.contrib.twitter.models import TwitterProfile
class TwitterAuth(ModelBackend):
def authenticate(self, twitter_id=None):
try:
return TwitterProfile.objects.get(
twitter_id=twitter_id,
site=Site.objects.get_current()
).user
except TwitterProfile.DoesNotExist:
return None
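# Usage sketch: register the backend in settings so that
# django.contrib.auth.authenticate(twitter_id=...) resolves through it
# (the dotted path follows this file's location):
#
#   AUTHENTICATION_BACKENDS = (
#       'socialregistration.contrib.twitter.auth.TwitterAuth',
#       'django.contrib.auth.backends.ModelBackend',
#   )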
|
lgapontes/django-socialregistration
|
socialregistration/contrib/twitter/auth.py
|
Python
|
mit
| 487
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.language = ['en']
self.domains = ['dailyreleases.net']
self.base_link = 'http://dailyreleases.net'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
            if url is None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
items += zip(client.parseDOM(post, 'a', attrs={'target': '_blank'}), client.parseDOM(post, 'a', ret='href', attrs={'target': '_blank'}))
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', name)[-1]
div = 1 if size.endswith(' GB') else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Dailyrls', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
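# Worked example of the parsing in sources() (post title hypothetical): for a
# release named 'Some.Movie.2016.1080p.BluRay.x265 (1.4 GB)' matched against
# title 'Some Movie' and year '2016', the format tokens yield
# quality='1080p', and info collects the size plus the HEVC tag,
# i.e. '1.40 GB | HEVC'.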
|
KodiColdkeys/coldkeys-addons
|
repository/plugin.video.white.devil/resources/lib/sources/dailyrls_wp_jh.py
|
Python
|
gpl-2.0
| 6,200
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SetRasterStyle.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsProcessingAlgorithm,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterFile,
QgsProcessingOutputRasterLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SetRasterStyle(QgisAlgorithm):
INPUT = 'INPUT'
STYLE = 'STYLE'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Raster tools')
def groupId(self):
return 'rastertools'
def __init__(self):
super().__init__()
def flags(self):
return super().flags() | QgsProcessingAlgorithm.FlagNoThreading | QgsProcessingAlgorithm.FlagDeprecated
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Raster layer')))
self.addParameter(QgsProcessingParameterFile(self.STYLE,
self.tr('Style file'), extension='qml'))
self.addOutput(QgsProcessingOutputRasterLayer(self.INPUT, self.tr('Styled')))
def name(self):
return 'setstyleforrasterlayer'
def displayName(self):
return self.tr('Set style for raster layer')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
style = self.parameterAsFile(parameters, self.STYLE, context)
        with open(style) as f:
            xml = f.read()
d = QDomDocument()
d.setContent(xml)
layer.importNamedStyle(d)
layer.triggerRepaint()
return {self.INPUT: layer}
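# Usage sketch from the QGIS Python console (paths hypothetical; the 'qgis:'
# provider prefix is assumed for the algorithm id returned by name() above):
#
#   import processing
#   processing.run('qgis:setstyleforrasterlayer',
#                  {'INPUT': '/path/to/raster.tif',
#                   'STYLE': '/path/to/style.qml'})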
|
physycom/QGIS
|
python/plugins/processing/algs/qgis/SetRasterStyle.py
|
Python
|
gpl-2.0
| 2,755
|