| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
hortonworks/hortonworks-sandbox | refs/heads/master | desktop/core/ext-py/Pygments-1.3.1/pygments/lexers/asm.py | 72 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.asm
~~~~~~~~~~~~~~~~~~~
Lexers for assembly languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer
from pygments.lexers.compiled import DLexer, CppLexer, CLexer
from pygments.token import *
__all__ = ['GasLexer', 'ObjdumpLexer','DObjdumpLexer', 'CppObjdumpLexer',
'CObjdumpLexer', 'LlvmLexer', 'NasmLexer']
class GasLexer(RegexLexer):
"""
For Gas (AT&T) assembly code.
"""
name = 'GAS'
aliases = ['gas']
filenames = ['*.s', '*.S']
mimetypes = ['text/x-gas']
#: double-quoted string literal (escaped quotes allowed)
string = r'"(\\"|[^"])*"'
char = r'[a-zA-Z$._0-9@]'
identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
number = r'(?:0[xX][a-zA-Z0-9]+|\d+)'
tokens = {
'root': [
include('whitespace'),
(identifier + ':', Name.Label),
(r'\.' + identifier, Name.Attribute, 'directive-args'),
(r'lock|rep(n?z)?|data\d+', Name.Attribute),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'directive-args': [
(identifier, Name.Constant),
(string, String),
('@' + identifier, Name.Attribute),
(number, Number.Integer),
(r'[\r\n]+', Text, '#pop'),
(r'#.*?$', Comment, '#pop'),
include('punctuation'),
include('whitespace')
],
'instruction-args': [
# For objdump-disassembled code, shouldn't occur in
# actual assembler input
('([a-z0-9]+)( )(<)('+identifier+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation)),
('([a-z0-9]+)( )(<)('+identifier+')([-+])('+number+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation, Number.Integer, Punctuation)),
# Address constants
(identifier, Name.Constant),
(number, Number.Integer),
# Registers
('%' + identifier, Name.Variable),
# Numeric constants
('$'+number, Number.Integer),
(r'[\r\n]+', Text, '#pop'),
(r'#.*?$', Comment, '#pop'),
include('punctuation'),
include('whitespace')
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'#.*?\n', Comment)
],
'punctuation': [
(r'[-*,.():]+', Punctuation)
]
}
def analyse_text(text):
return re.match(r'^\.\w+', text, re.M)
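# A minimal usage sketch (added for illustration; not part of the original
# module). `get_tokens` is the standard Pygments lexer entry point:
#
#     for ttype, value in GasLexer().get_tokens('.globl main\nmain:\n'):
#         print('%s %r' % (ttype, value))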
class ObjdumpLexer(RegexLexer):
"""
For the output of 'objdump -dr'
"""
name = 'objdump'
aliases = ['objdump']
filenames = ['*.objdump']
mimetypes = ['text/x-objdump']
hex = r'[0-9A-Za-z]'
tokens = {
'root': [
# File name & format:
('(.*?)(:)( +file format )(.*?)$',
bygroups(Name.Label, Punctuation, Text, String)),
# Section header
('(Disassembly of section )(.*?)(:)$',
bygroups(Text, Name.Label, Punctuation)),
# Function labels
# (With offset)
('('+hex+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation, Number.Hex, Punctuation)),
# (Without offset)
('('+hex+'+)( )(<)(.*?)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation)),
# Code line with disassembled instructions
('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *\t)([a-zA-Z].*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text,
using(GasLexer))),
# Code line with ascii
('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *)(.*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
# Continued code line, only raw opcodes without disassembled
# instruction
('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)$',
bygroups(Text, Name.Label, Text, Number.Hex)),
# Skipped a few bytes
(r'\t\.\.\.$', Text),
# Relocation line
# (With offset)
('(\t\t\t)('+hex+'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex + '+)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant, Punctuation, Number.Hex)),
# (Without offset)
('(\t\t\t)('+hex+'+:)( )([^\t]+)(\t)(.*?)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant)),
('[^\n]+\n', Other)
]
}
class DObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr' on compiled D files
"""
name = 'd-objdump'
aliases = ['d-objdump']
filenames = ['*.d-objdump']
mimetypes = ['text/x-d-objdump']
def __init__(self, **options):
super(DObjdumpLexer, self).__init__(DLexer, ObjdumpLexer, **options)
class CppObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr' on compiled C++ files
"""
name = 'cpp-objdump'
aliases = ['cpp-objdump', 'c++-objdumb', 'cxx-objdump']
filenames = ['*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump']
mimetypes = ['text/x-cpp-objdump']
def __init__(self, **options):
super(CppObjdumpLexer, self).__init__(CppLexer, ObjdumpLexer, **options)
class CObjdumpLexer(DelegatingLexer):
"""
For the output of 'objdump -Sr' on compiled C files
"""
name = 'c-objdump'
aliases = ['c-objdump']
filenames = ['*.c-objdump']
mimetypes = ['text/x-c-objdump']
def __init__(self, **options):
super(CObjdumpLexer, self).__init__(CLexer, ObjdumpLexer, **options)
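# Descriptive note on the three delegating lexers above, based on Pygments'
# DelegatingLexer semantics: the input is first lexed with ObjdumpLexer, and
# every span it emits as `Other` (the interleaved source lines produced by
# `objdump -S`) is re-lexed with the language lexer passed first (DLexer,
# CppLexer or CLexer), so disassembly and source each get proper highlighting.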
class LlvmLexer(RegexLexer):
"""
For LLVM assembly code.
"""
name = 'LLVM'
aliases = ['llvm']
filenames = ['*.ll']
mimetypes = ['text/x-llvm']
#: double-quoted string literal (no escape handling)
string = r'"[^"]*?"'
identifier = r'([-a-zA-Z$._][-a-zA-Z$._0-9]*|' + string + ')'
tokens = {
'root': [
include('whitespace'),
# Before keywords, because keywords are valid label names :(...
(r'^\s*' + identifier + r'\s*:', Name.Label),
include('keyword'),
(r'%' + identifier, Name.Variable),#Name.Identifier.Local),
(r'@' + identifier, Name.Variable.Global),#Name.Identifier.Global),
(r'%\d+', Name.Variable.Anonymous),#Name.Identifier.Anonymous),
(r'@\d+', Name.Variable.Global),#Name.Identifier.Anonymous),
(r'!' + identifier, Name.Variable),
(r'!\d+', Name.Variable.Anonymous),
(r'c?' + string, String),
(r'0[xX][a-fA-F0-9]+', Number),
(r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number),
(r'[=<>{}\[\]()*.,!]|x\b', Punctuation)
],
'whitespace': [
(r'(\n|\s)+', Text),
(r';.*?\n', Comment)
],
'keyword': [
# Regular keywords
(r'(begin|end'
r'|true|false'
r'|declare|define'
r'|global|constant'
r'|private|linker_private|internal|available_externally|linkonce'
r'|linkonce_odr|weak|weak_odr|appending|dllimport|dllexport'
r'|common|default|hidden|protected|extern_weak|external'
r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple'
r'|deplibs|datalayout|volatile|nuw|nsw|exact|inbounds|align'
r'|addrspace|section|alias|module|asm|sideeffect|gc|dbg'
r'|ccc|fastcc|coldcc|x86_stdcallcc|x86_fastcallcc|arm_apcscc'
r'|arm_aapcscc|arm_aapcs_vfpcc'
r'|cc|c'
r'|signext|zeroext|inreg|sret|nounwind|noreturn|noalias|nocapture'
r'|byval|nest|readnone|readonly'
r'|inlinehint|noinline|alwaysinline|optsize|ssp|sspreq|noredzone'
r'|noimplicitfloat|naked'
r'|type|opaque'
r'|eq|ne|slt|sgt|sle'
r'|sge|ult|ugt|ule|uge'
r'|oeq|one|olt|ogt|ole'
r'|oge|ord|uno|ueq|une'
r'|x'
# instructions
r'|add|fadd|sub|fsub|mul|fmul|udiv|sdiv|fdiv|urem|srem|frem|shl'
r'|lshr|ashr|and|or|xor|icmp|fcmp'
r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui'
r'|fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch'
r'|invoke|unwind|unreachable'
r'|malloc|alloca|free|load|store|getelementptr'
r'|extractelement|insertelement|shufflevector|getresult'
r'|extractvalue|insertvalue'
r')\b', Keyword),
# Types
(r'void|float|double|x86_fp80|fp128|ppc_fp128|label|metadata',
Keyword.Type),
# Integer types
(r'i[1-9]\d*', Keyword)
]
}
class NasmLexer(RegexLexer):
"""
For Nasm (Intel) assembly code.
"""
name = 'NASM'
aliases = ['nasm']
filenames = ['*.asm', '*.ASM']
mimetypes = ['text/x-nasm']
identifier = r'[a-zA-Z$._?][a-zA-Z0-9$._?#@~]*'
hexn = r'(?:0[xX][0-9a-fA-F]+|$0[0-9a-fA-F]*|[0-9]+[0-9a-fA-F]*h)'
octn = r'[0-7]+q'
binn = r'[01]+b'
decn = r'[0-9]+'
floatn = decn + r'\.e?' + decn
string = r'"(\\"|[^"])*"|' + r"'(\\'|[^'])*'"
declkw = r'(?:res|d)[bwdqt]|times'
register = (r'[a-d][lh]|e?[a-d]x|e?[sb]p|e?[sd]i|[c-gs]s|st[0-7]|'
r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]')
wordop = r'seg|wrt|strict'
type = r'byte|[dq]?word'
directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|'
r'EXPORT|LIBRARY|MODULE')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
(r'^\s*%', Comment.Preproc, 'preproc'),
(identifier + ':', Name.Label),
(r'(%s)(\s+)(equ)' % identifier,
bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration),
'instruction-args'),
(directives, Keyword, 'instruction-args'),
(declkw, Keyword.Declaration, 'instruction-args'),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'instruction-args': [
(string, String),
(hexn, Number.Hex),
(octn, Number.Oct),
(binn, Number),
(floatn, Number.Float),
(decn, Number.Integer),
include('punctuation'),
(register, Name.Builtin),
(identifier, Name.Variable),
(r'[\r\n]+', Text, '#pop'),
include('whitespace')
],
'preproc': [
(r'[^;\n]+', Comment.Preproc),
(r';.*?\n', Comment.Single, '#pop'),
(r'\n', Comment.Preproc, '#pop'),
],
'whitespace': [
(r'\n', Text),
(r'[ \t]+', Text),
(r';.*', Comment.Single)
],
'punctuation': [
(r'[,():\[\]]+', Punctuation),
(r'[&|^<>+*/%~-]+', Operator),
(r'[$]+', Keyword.Constant),
(wordop, Operator.Word),
(type, Keyword.Type)
],
}
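# A minimal runnable sketch (added for illustration; guarded so importing this
# module is unaffected). `highlight` and `TerminalFormatter` are standard
# Pygments APIs:
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    print(highlight('mov ax, 0x10\nint 0x80\n', NasmLexer(), TerminalFormatter()))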
|
wenottingham/ansible | refs/heads/devel | lib/ansible/executor/playbook_executor.py | 24 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_native, to_text
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.utils.path import makedirs_safe
from ansible.utils.ssh_functions import check_for_controlpersist
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class PlaybookExecutor:
'''
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
'''
def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self.passwords = passwords
self._unreachable_hosts = dict()
if options.listhosts or options.listtasks or options.listtags or options.syntax:
self._tqm = None
else:
self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)
# Note: We run this here to cache whether the default ansible ssh
# executable supports control persist. Sometime in the future we may
# need to enhance this to check that ansible_ssh_executable specified
# in inventory is also cached. We can't do this caching at the point
# where it is used (in task_executor) because that is post-fork and
# therefore would be discarded after every task.
check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def run(self):
'''
Run the given playbooks, based on the settings in each play, which
may limit the runs to serialized batches of hosts, etc.
'''
result = 0
entrylist = []
entry = {}
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
for play in plays:
if play._included_path is not None:
self._loader.set_basedir(play._included_path)
else:
self._loader.set_basedir(pb._basedir)
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
if play.vars_prompt:
for var in play.vars_prompt:
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
else: # we are either in --list-<option> or syntax check
play.vars[vname] = default
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
if self._options.syntax:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(new_play)
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
previously_failed = len(self._tqm._failed_hosts)
previously_unreachable = len(self._tqm._unreachable_hosts)
break_play = False
# we are actually running plays
for batch in self._get_serialized_batches(new_play):
if len(batch) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
break
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
# break the play if the result equals the special return code
if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
# check the number of failures here, to see if they're above the maximum
# failure percentage allowed, or if any errors are fatal. If either of those
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
if len(batch) == failed_hosts_count:
break_play = True
break
# update the previous counts so they don't accumulate incorrectly
# over multiple serial batches
previously_failed += len(self._tqm._failed_hosts) - previously_failed
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
# save the unreachable hosts from this batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
if break_play:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# send the stats callback for this playbook
if self._tqm is not None:
if C.RETRY_FILES_ENABLED:
retries = set(self._tqm._failed_hosts.keys())
retries.update(self._tqm._unreachable_hosts.keys())
retries = sorted(retries)
if len(retries) > 0:
if C.RETRY_FILES_SAVE_PATH:
basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)
elif playbook_path:
basedir = os.path.dirname(os.path.abspath(playbook_path))
else:
basedir = '~/'
(retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
filename = os.path.join(basedir, "%s.retry" % retry_name)
if self._generate_retry_inventory(filename, retries):
display.display("\tto retry, use: --limit @%s\n" % filename)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._tqm.cleanup()
if self._loader:
self._loader.cleanup_all_tmp_files()
if self._options.syntax:
display.display("No issues encountered")
return result
return result
def _get_serialized_batches(self, play):
'''
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
'''
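# Worked example (illustrative): with `serial: [1, 2]` and five matching
# hosts, the loop below yields [h1], [h2, h3], [h4, h5]; once the serial
# list is exhausted, its last value is reused until every host is batched.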
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts)
all_hosts_len = len(all_hosts)
# the serial value can be listed as a scalar or a list of
# scalars, so we make sure it's a list here
serial_batch_list = play.serial
if len(serial_batch_list) == 0:
serial_batch_list = [-1]
cur_item = 0
serialized_batches = []
while len(all_hosts) > 0:
# get the serial value from current item in the list
serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise grab a chunk of the hosts equal
# to the current serial item size
if serial <= 0:
serialized_batches.append(all_hosts)
break
else:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
# increment the current batch list item number, and if we've hit
# the end keep using the last element until we've consumed all of
# the hosts in the inventory
cur_item += 1
if cur_item > len(serial_batch_list) - 1:
cur_item = len(serial_batch_list) - 1
return serialized_batches
def _generate_retry_inventory(self, retry_path, replay_hosts):
'''
Called when a playbook run fails. It generates an inventory which allows
re-running on ONLY the failed hosts. This may duplicate some variable
information in group_vars/host_vars but that is ok, and expected.
'''
try:
makedirs_safe(os.path.dirname(retry_path))
with open(retry_path, 'w') as fd:
for x in replay_hosts:
fd.write("%s\n" % x)
except Exception as e:
display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_native(e)))
return False
return True
|
discoking/avalanche | refs/heads/master | avalance.py | 1 |
#!/usr/bin/env python
#This script discovers the VLANs on an 802.1Q trunk and prints them.
#For each new VLAN it adds a tagged interface, runs DHCP discovery, and starts Responder.
#This is a proof of concept and will be refined.
#Only works on Linux.
#Requires tcpdump to be installed.
import StringIO
import sys
import shlex
import subprocess
from string import Template
eth = "eth0"
responder = Template('./Responder/Responder.py -I $int -wr --lm')
cmd = "/usr/sbin/tcpdump -n -i eth0 -e"
args = shlex.split(cmd)
tcpdump = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print "Press CTRL-C to stop avalanche"
output = StringIO.StringIO()
running = True
vlans = set()
while running:
try:
data = tcpdump.stdout.readline()
if len(data):
packet = data.split()
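# Illustrative `tcpdump -e` line (exact output varies by version):
#   12:00:00.0 aa:bb:cc:dd:ee:ff > ff:ff:ff:ff:ff:ff, ethertype 802.1Q (0x8100), length 60: vlan 100, p 0, ...
# After split(), packet[5] == "802.1Q" and packet[10] == "100," (the VLAN id,
# with the trailing comma stripped below).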
if packet[5] == "802.1Q":
# print vlans
vlanTag = packet[10].rstrip(',')
if vlanTag not in vlans:
vlans.add(vlanTag)
print "added VLAN " + vlanTag
createInt = "ip link add link " + eth + " name " + eth + "." + vlanTag + " type vlan id " + vlanTag
bringUpInt = "ifconfig " + eth + "." + vlanTag + " up"
getIp = "dhclient " + eth + "." + vlanTag
args = shlex.split(createInt)
run = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
args = shlex.split(bringUpInt)
run = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
args = shlex.split(getIp)
run = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
responderCmd = responder.substitute(int=eth + "." + vlanTag)
args = shlex.split(responderCmd)
run = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
running = False
except KeyboardInterrupt:
tcpdump.kill()
data = tcpdump.stdout.readline()
running = False
|
WeblateOrg/weblate | refs/heads/main | weblate/trans/models/suggestion.py | 2 |
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from copy import copy
from django.conf import settings
from django.db import models, transaction
from django.db.models import Q, Sum
from django.utils.translation import gettext as _
from weblate.checks.models import CHECKS, Check
from weblate.trans.mixins import UserDisplayMixin
from weblate.trans.models.change import Change
from weblate.trans.util import split_plural
from weblate.utils import messages
from weblate.utils.antispam import report_spam
from weblate.utils.fields import JSONField
from weblate.utils.request import get_ip_address, get_user_agent_raw
from weblate.utils.state import STATE_TRANSLATED
class SuggestionManager(models.Manager):
# pylint: disable=no-init
def add(self, unit, target, request, vote=False):
"""Create new suggestion for this unit."""
from weblate.auth.models import get_anonymous
user = request.user if request else get_anonymous()
if unit.translated and unit.target == target:
return False
same_suggestions = self.filter(target=target, unit=unit)
# Do not rely on SQL alone, as MySQL compares strings case-insensitively
for same in same_suggestions:
if same.target == target:
if same.user == user or not vote:
return False
same.add_vote(request, Vote.POSITIVE)
return False
# Create the suggestion
suggestion = self.create(
target=target,
unit=unit,
user=user,
userdetails={
"address": get_ip_address(request),
"agent": get_user_agent_raw(request),
},
)
# Record in change
Change.objects.create(
unit=unit,
suggestion=suggestion,
action=Change.ACTION_SUGGESTION,
user=user,
target=target,
author=user,
)
# Add unit vote
if vote:
suggestion.add_vote(request, Vote.POSITIVE)
# Update suggestion stats
if user is not None:
user.profile.increase_count("suggested")
return suggestion
class SuggestionQuerySet(models.QuerySet):
def order(self):
return self.order_by("-timestamp")
def filter_access(self, user):
if user.is_superuser:
return self
return self.filter(
Q(unit__translation__component__project_id__in=user.allowed_project_ids)
& (
Q(unit__translation__component__restricted=False)
| Q(unit__translation__component_id__in=user.component_permissions)
)
)
class Suggestion(models.Model, UserDisplayMixin):
unit = models.ForeignKey("trans.Unit", on_delete=models.deletion.CASCADE)
target = models.TextField()
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
on_delete=models.deletion.CASCADE,
)
userdetails = JSONField()
timestamp = models.DateTimeField(auto_now_add=True)
votes = models.ManyToManyField(
settings.AUTH_USER_MODEL, through="Vote", related_name="user_votes"
)
objects = SuggestionManager.from_queryset(SuggestionQuerySet)()
class Meta:
app_label = "trans"
verbose_name = "string suggestion"
verbose_name_plural = "string suggestions"
def __str__(self):
return "suggestion for {} by {}".format(
self.unit, self.user.username if self.user else "unknown"
)
@transaction.atomic
def accept(self, request, permission="suggestion.accept"):
if not request.user.has_perm(permission, self.unit):
messages.error(request, _("Failed to accept suggestion!"))
return
# Skip if there is no change
if self.unit.target != self.target or self.unit.state < STATE_TRANSLATED:
if self.user and not self.user.is_anonymous:
author = self.user
else:
author = request.user
self.unit.translate(
request.user,
split_plural(self.target),
STATE_TRANSLATED,
author=author,
change_action=Change.ACTION_ACCEPT,
)
# Delete the suggestion
self.delete()
def delete_log(self, user, change=Change.ACTION_SUGGESTION_DELETE, is_spam=False):
"""Delete with logging change."""
if is_spam and self.userdetails:
report_spam(
self.userdetails["address"], self.userdetails["agent"], self.target
)
Change.objects.create(
unit=self.unit, action=change, user=user, target=self.target, author=user
)
self.delete()
def get_num_votes(self):
"""Return number of votes."""
return self.vote_set.aggregate(Sum("value"))["value__sum"] or 0
def add_vote(self, request, value):
"""Add (or updates) vote for a suggestion."""
if request is None or not request.user.is_authenticated:
return
vote, created = Vote.objects.get_or_create(
suggestion=self, user=request.user, defaults={"value": value}
)
if not created or vote.value != value:
vote.value = value
vote.save()
# Automatic accepting
required_votes = self.unit.translation.component.suggestion_autoaccept
if required_votes and self.get_num_votes() >= required_votes:
self.accept(request, "suggestion.vote")
def get_checks(self):
# Build fake unit to run checks
fake_unit = copy(self.unit)
fake_unit.target = self.target
fake_unit.state = STATE_TRANSLATED
source = fake_unit.get_source_plurals()
target = fake_unit.get_target_plurals()
result = []
for check, check_obj in CHECKS.target.items():
if check_obj.check_target(source, target, fake_unit):
result.append(Check(unit=fake_unit, dismissed=False, check=check))
return result
class Vote(models.Model):
"""Suggestion voting."""
suggestion = models.ForeignKey(Suggestion, on_delete=models.deletion.CASCADE)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE
)
value = models.SmallIntegerField(default=0)
POSITIVE = 1
NEGATIVE = -1
class Meta:
unique_together = ("suggestion", "user")
app_label = "trans"
verbose_name = "suggestion vote"
verbose_name_plural = "suggestion votes"
def __str__(self):
return f"{self.value:+d} for {self.suggestion} by {self.user.username}"
|
sn1k/app_mundial | refs/heads/master | lib/python2.7/site-packages/django/db/models/signals.py | 74 |
from django.apps import apps
from django.dispatch import Signal
from django.utils import six
class_prepared = Signal(providing_args=["class"])
class ModelSignal(Signal):
"""
Signal subclass that allows the sender to be lazily specified as a string
of the `app_label.ModelName` form.
"""
def __init__(self, *args, **kwargs):
super(ModelSignal, self).__init__(*args, **kwargs)
self.unresolved_references = {}
class_prepared.connect(self._resolve_references)
def _resolve_references(self, sender, **kwargs):
opts = sender._meta
reference = (opts.app_label, opts.object_name)
try:
receivers = self.unresolved_references.pop(reference)
except KeyError:
pass
else:
for receiver, weak, dispatch_uid in receivers:
super(ModelSignal, self).connect(
receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
)
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
if isinstance(sender, six.string_types):
try:
app_label, model_name = sender.split('.')
except ValueError:
raise ValueError(
"Specified sender must either be a model or a "
"model name of the 'app_label.ModelName' form."
)
try:
sender = apps.get_registered_model(app_label, model_name)
except LookupError:
ref = (app_label, model_name)
refs = self.unresolved_references.setdefault(ref, [])
refs.append((receiver, weak, dispatch_uid))
return
super(ModelSignal, self).connect(
receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
)
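# A minimal usage sketch (app/model names are hypothetical): a receiver can be
# connected before its model class exists by using the lazy string form,
#
#     pre_save.connect(my_receiver, sender='polls.Question')
#
# The reference is parked in `unresolved_references` and wired to the real
# model class once `class_prepared` fires for polls.Question.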
pre_init = ModelSignal(providing_args=["instance", "args", "kwargs"], use_caching=True)
post_init = ModelSignal(providing_args=["instance"], use_caching=True)
pre_save = ModelSignal(providing_args=["instance", "raw", "using", "update_fields"],
use_caching=True)
post_save = ModelSignal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True)
pre_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
post_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
m2m_changed = ModelSignal(providing_args=["action", "instance", "reverse", "model", "pk_set", "using"], use_caching=True)
pre_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"])
post_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"])
pre_syncdb = Signal(providing_args=["app", "create_models", "verbosity", "interactive", "db"])
post_syncdb = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive", "db"])
|
dancingdan/tensorflow | refs/heads/master | tensorflow/python/training/checkpoint_ops.py | 46 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_checkpoint_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
ops.NotDifferentiable("GenerateVocabRemapping")
ops.NotDifferentiable("LoadAndRemapMatrix")
def _load_and_remap_matrix(ckpt_path,
old_tensor_name,
new_row_vocab_offset,
num_rows_to_load,
new_col_vocab_size,
initializer,
old_row_vocab_size=-1,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
max_rows_in_memory=-1):
"""Loads a 2-D (matrix) `Tensor` from checkpoint.
Generates 1D-remappings for rows and columns using the
`GenerateVocabRemapping` op, and initializes any anticipated values with the
provided initializer. Then, uses the `LoadAndRemapMatrix` op to create a
matrix that loads existing values from the checkpoint, while filling out
"missing" values with the newly initialized values. See
contrib/framework/ops/checkpoint_ops.cc for more information on the wrapped
functionality (LoadAndRemapMatrix). This wrapper can be used to perform only
row remapping or only col remapping. If only row remapping is desired,
{new,old}_col_vocab_file should be `None`, and vice versa for column
remapping.
NOTE: This only supports div-partitioning the vocabulary on the 1st dimension
(row axis) via `new_row_vocab_offset`.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_row_vocab_offset: A 0-indexed integer representing what line to
start reading at in the new row vocabulary. Used for partitioned
variables.
num_rows_to_load: Number of rows to load for the new vocabulary (note: to
support variable partitioning and partial loading, this does not need to
be the same as the number of entries in `new_row_vocab_file`).
new_col_vocab_size: Number of columns to load - should be the same as the
number of entries in `new_col_vocab_file`, since we don't support
partitioning along the column axis.
initializer: Callable initializer function that accepts a 1-D tensor as the
arg to specify the shape of the returned tensor. Used to initialize
missing values.
old_row_vocab_size: The number of entries to consider in the old vocabulary.
With the default value of -1, the entire old row vocabulary file will be
used. Otherwise, only the first `old_row_vocab_size` entries will be
considered for remapping. Must be smaller than the length of
`old_row_vocab_file`. NOTE: we do not provide an equivalent
`old_col_vocab_size` for classes.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis - in which case, `new_row_vocab_offset` and
`num_rows_to_load` work under the assumption that the new row vocab is the
same as the old row vocab.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis - in which case, `new_col_vocab_size` works
under the assumption that the new col vocab is the same as the old col
vocab.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A Tensor of shape `[num_rows_to_load + num_row_oov_buckets,
new_col_vocab_size + num_col_oov_buckets]`, with values loaded from the
specified tensor in the checkpoint, and any missing or OOV values
initialized with the given `initializer`.
Raises:
ValueError: If `num_row_oov_buckets` or `num_col_oov_buckets` < 0.
ValueError: If either `old_row_vocab_file` or `new_row_vocab_file` is
provided, while the other is not. Same for `old_col_vocab_file` and
`new_col_vocab_file`.
ValueError: If neither row vocabs or col vocabs are provided.
"""
if num_row_oov_buckets < 0:
raise ValueError("num_row_oov_buckets must be >= 0, but received %d" %
num_row_oov_buckets)
if num_col_oov_buckets < 0:
raise ValueError("num_col_oov_buckets must be >= 0, but received %d" %
num_col_oov_buckets)
if bool(old_row_vocab_file) != bool(new_row_vocab_file):
raise ValueError(
"old_row_vocab_file and new_row_vocab_file must both be specified or "
"left unspecified. old_row_vocab_file='{}', new_row_vocab_file='{}'".
format(old_row_vocab_file, new_row_vocab_file))
if bool(old_col_vocab_file) != bool(new_col_vocab_file):
raise ValueError(
"old_col_vocab_file and new_col_vocab_file must both be specified or "
"left unspecified. old_col_vocab_file='{}', new_col_vocab_file='{}'".
format(old_col_vocab_file, new_col_vocab_file))
remap_rows = new_row_vocab_file and old_row_vocab_file
remap_cols = new_col_vocab_file and old_col_vocab_file
if not (remap_rows or remap_cols):
raise ValueError(
"Must provide either row or column vocab files. If no remapping is "
"necessary, consider using `tf.contrib.framework.init_from_checkpoint` "
"instead.")
num_rows_present = num_rows_to_load
if remap_rows:
row_remapping, num_rows_present = (
gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=new_row_vocab_file,
old_vocab_file=old_row_vocab_file,
new_vocab_offset=new_row_vocab_offset,
num_new_vocab=num_rows_to_load,
old_vocab_size=old_row_vocab_size))
else:
# Even when the rows are not being reordered, we still need to generate a
# remapping to account for initializing partitioned Variables (when
# new_row_vocab_offset is non-zero).
row_remapping = math_ops.range(
new_row_vocab_offset,
new_row_vocab_offset + num_rows_to_load,
dtype=dtypes.int64)
col_remapping = []
num_cols_present = new_col_vocab_size
if remap_cols:
col_remapping, num_cols_present = (
gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=new_col_vocab_file,
old_vocab_file=old_col_vocab_file,
new_vocab_offset=0, # Offset is unused for cols (no partitioning).
num_new_vocab=new_col_vocab_size))
init_vals = initializer([
num_rows_to_load * new_col_vocab_size -
num_rows_present * num_cols_present, 1
])
return_tensor = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
row_remapping=row_remapping,
col_remapping=col_remapping,
initializing_values=init_vals,
num_rows=num_rows_to_load,
num_cols=new_col_vocab_size,
max_rows_in_memory=max_rows_in_memory)
# Add OOV row(s) and column(s).
if num_row_oov_buckets > 0:
init_row_oov_val = initializer([num_row_oov_buckets, new_col_vocab_size])
init_row_oov_val = ops.convert_to_tensor(init_row_oov_val)
return_tensor = array_ops.concat([return_tensor, init_row_oov_val], 0)
if num_col_oov_buckets > 0:
# We need to add any row OOV to the new column shape.
init_col_oov_val = initializer(
[num_rows_to_load + num_row_oov_buckets, num_col_oov_buckets])
init_col_oov_val = ops.convert_to_tensor(init_col_oov_val)
return_tensor = array_ops.concat([return_tensor, init_col_oov_val], 1)
return return_tensor
def _load_and_remap_matrix_initializer(ckpt_path,
old_tensor_name,
new_row_vocab_size,
new_col_vocab_size,
old_row_vocab_size=-1,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
r"""Returns a var initializer for loading and remapping a 2-D (matrix) tensor.
The returned initializer loads a 2-D (matrix) `Tensor` with name
`old_tensor_name` from the checkpoint at `ckpt_path`. It will reorder the
rows/columns according to the specified vocab files and append additional
out-of-vocabulary rows/columns according to the number of OOV buckets.
The format of the file at the `{old,new}_{row,col}_vocab_file` path should be
a text file, with each line containing a single entity within the vocabulary.
Let the function `line_of(f, "x")` return the 0-indexed line number of the
entity "x" in file f, and the function `entity_at(f, i)` return the entity at
line i of file f. Then, row i of the new output matrix will be taken from row
`line_of(old_row_vocab_file, entity_at(new_row_vocab_file, i))` of the old
matrix. If any entity in `new_row_vocab_file` is not found in
`old_row_vocab_file`, that row is considered a "missing" row, and its values
will be initialized using the `initializer` arg. The same logic also applies
for the columns.
For example, assuming that:
* `old_row_vocab_file` contains "mercury\nvenus\nmars"
* `new_row_vocab_file` contains "venus\njupiter\nmercury"
* `old_col_vocab_file` contains "good\nbetter\nbest"
* `new_col_vocab_file` contains "good\nbest\nfantastic"
* `initializer` returns the natural numbers `[1, 2, 3, 4, ...]`
* `w(i, j)` represents the value from row i, column j of the old matrix
Then the new output matrix will look like:
`[[w(1, 0), w(1, 2), 1],
[2, 3, 4],
[w(0, 0), w(0, 2), 5]]`
If we further specify that:
* `num_row_oov_buckets` == 2
* `num_col_oov_buckets` == 1
Then the new output matrix will look like:
`[[w(1, 0), w(1, 2), 1, 12],
[2, 3, 4, 13],
[w(0, 0), w(0, 2), 5, 14],
[6, 7, 8, 15],
[9, 10, 11, 16]]`
If `{old,new}_row_vocab_file` are None, we assume that the old and new row
vocab files are the same, and no row remapping is done. If
`{old,new}_col_vocab_file` are None, we assume that the old and new column
vocab files are the same, and no column remapping is done.
The returned initializer only supports div-partitioning along the row axis. It
does not support partitioning along the column axis (as this is not common in
practice) or mod-partitioning.
NOTE: When this is used to warm-start variables, client code should use
`tf.lookup.index_table_from_tensor()` like
contrib/layers/python/layers/feature_column.py does, as opposed to
`tf.feature_to_id()` - in order to ensure the underlying lookup tables are the
same.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_row_vocab_size: `int` specifying the number of entries in
`new_row_vocab_file`. If no row remapping is needed (no row vocab
provided), this should be equal to the number of rows to load from the old
matrix (which can theoretically be smaller than the number of rows in the
old matrix).
new_col_vocab_size: `int` specifying the number of entries in
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
old_row_vocab_size: The number of entries to consider in the old vocabulary.
With the default value of -1, the entire old row vocabulary file will be
used. Otherwise, only the first `old_row_vocab_size` entries will be
considered for remapping. Must be smaller than the length of
`old_row_vocab_file`. NOTE: we do not provide an equivalent
`old_col_vocab_size` for classes.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
initializer: Initializer function to initialize missing values. Accepts a
1-D tensor as the arg to specify the shape of the returned tensor. If
`None`, defaults to using `zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function that should be used to initialize a
(potentially partitioned) `Variable` whose complete shape is
`[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
num_col_oov_buckets]`.
Raises:
TypeError: If `initializer` is specified but not callable.
"""
if initializer is None:
# TODO(b/25671353): Consider using sqrt(6/(fan_in + fan_out)) instead, from
# Glorot and Bengio, 2010.
initializer = init_ops.zeros_initializer()
if not callable(initializer):
raise TypeError(
"initializer must be callable, instead of being {} of type {}.".format(
initializer, type(initializer)))
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
"""Variable initializer.
Args:
shape: Shape of `Tensor` to return. Should include OOV on both axes.
dtype: Must be float32.
partition_info: variable_scope._PartitionInfo.
Returns:
`Tensor` of shape `shape`.
Raises:
TypeError: If `dtype` is anything other than float32.
ValueError: For shape mismatch upon invocation.
"""
# Sanity checks.
if dtype != dtypes.float32:
raise TypeError(
"Currently, only float32 is supported. Received dtype: {}".format(
dtype))
if len(shape) != 2:
raise ValueError("Expected 2-dim shape, but received: {}".format(shape))
if shape[0] <= 0:
raise ValueError(
"Expected 1st dim of shape to be > 0, but received shape: {}".format(
shape))
if shape[1] != (new_col_vocab_size + num_col_oov_buckets):
raise ValueError(
"Expected 2nd dim of shape to be new_col_vocab_size ({}) + "
"num_col_oov_buckets ({}) = {}, but received shape: {}".format(
new_col_vocab_size, num_col_oov_buckets,
new_col_vocab_size + num_col_oov_buckets, shape))
offset = 0
if partition_info is not None:
offset = partition_info.single_offset(shape)
if offset + shape[0] > new_row_vocab_size + num_row_oov_buckets:
raise ValueError(
"Trying to initialize {} additional rows after {} rows have already "
"been initialized, which would exceed expected total row count of "
"new_row_vocab_size ({}) + num_row_oov_buckets ({}) = {}.".format(
shape[0], offset, new_row_vocab_size, num_row_oov_buckets,
new_row_vocab_size + num_row_oov_buckets))
row_oov_buckets_to_use = min(shape[0],
max(0, offset + shape[0] - new_row_vocab_size))
num_rows_to_load = shape[0] - row_oov_buckets_to_use
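# Worked example (illustrative): with new_row_vocab_size=10,
# num_row_oov_buckets=2, and a partition of shape [6, ...] at offset 6,
# row_oov_buckets_to_use = min(6, max(0, 6 + 6 - 10)) = 2 and
# num_rows_to_load = 4: the partition's first 4 rows are loaded from the
# checkpoint and its last 2 rows become freshly initialized OOV buckets.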
# We may be operating on an OOV-only partition, in which case we newly
# initialize all rows of this partition.
if offset > new_row_vocab_size:
if shape[0] != row_oov_buckets_to_use:
raise ValueError(
"Partitioned variable offset is greater than new vocab size and "
"not operating on OOV-only partition.")
return initializer(shape)
return _load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
new_row_vocab_offset=offset,
num_rows_to_load=num_rows_to_load,
new_col_vocab_size=new_col_vocab_size,
initializer=initializer,
old_row_vocab_size=old_row_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=row_oov_buckets_to_use,
num_col_oov_buckets=num_col_oov_buckets,
max_rows_in_memory=max_rows_in_memory)
return _initializer
def _load_embedding_initializer(ckpt_path,
embedding_tensor_name,
new_vocab_size,
embedding_dim,
old_vocab_file,
new_vocab_file,
old_vocab_size=-1,
num_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Returns a variable initializer for loading pre-trained embeddings.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
embedding weights and remapping according to the provided vocab files. See
docs for `load_and_remap_matrix_initializer()` for more details.
NOTE: Only for use with div-partitioned variables / vocabularies.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
embedding_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_vocab_size: Number of entries in the new vocab.
embedding_dim: `int` specifying the dimension of the embedding vectors from
the checkpoint. Must match the number of columns in the old embedding
matrix.
old_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old vocabulary file.
new_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new vocabulary file.
old_vocab_size: The number of entries to consider in the old vocabulary.
With the default value of -1, the entire old row vocabulary file will be
used. Otherwise, only the first `old_vocab_size` entries will be
considered for remapping. Must be smaller than the length of
`old_row_vocab_file`.
num_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
specify the shape of the returned tensor. If `None`, defaults to using
`truncated_normal_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function.
"""
if initializer is None:
# TODO(b/25671353): This should be kept in sync with the stddev used by
# feature_column.py's _EmbeddingColumn.
initializer = init_ops.truncated_normal_initializer(
stddev=1.0 / math.sqrt(embedding_dim))
return _load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=embedding_tensor_name,
new_row_vocab_size=new_vocab_size,
new_col_vocab_size=embedding_dim,
old_row_vocab_size=old_vocab_size,
old_row_vocab_file=old_vocab_file,
new_row_vocab_file=new_vocab_file,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=num_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
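# A minimal usage sketch (paths and sizes are hypothetical): the returned
# function is intended to be passed as a variable initializer, e.g.
#
#     init = _load_embedding_initializer(
#         ckpt_path='/tmp/model.ckpt', embedding_tensor_name='embeddings',
#         new_vocab_size=100, embedding_dim=16,
#         old_vocab_file='/tmp/old_vocab.txt',
#         new_vocab_file='/tmp/new_vocab.txt')
#     emb = tf.get_variable('embeddings', shape=[100, 16], initializer=init)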
|
yk5/incubator-airflow | refs/heads/master | airflow/contrib/kubernetes/pod_generator.py | 10 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.kubernetes.pod import Pod
import uuid
from airflow.contrib.kubernetes.volume_mount import VolumeMount # noqa
from airflow.contrib.kubernetes.volume import Volume # noqa
class PodGenerator:
"""Contains Kubernetes Airflow Worker configuration logic"""
def __init__(self, kube_config=None):
self.kube_config = kube_config
self.volumes = []
self.volume_mounts = []
self.init_containers = []
def add_init_container(self,
name,
image,
security_context,
init_environment,
volume_mounts
):
"""
Adds an init container to the launched pod. useful for pre-
Args:
name (str):
image (str):
security_context (dict):
init_environment (dict):
volume_mounts (dict):
Returns:
"""
self.init_containers.append(
{
'name': name,
'image': image,
'securityContext': security_context,
'env': init_environment,
'volumeMounts': volume_mounts
}
)
def _get_init_containers(self):
return self.init_containers
def add_volume(self, volume):
"""
Args:
volume (Volume):
"""
self._add_volume(name=volume.name, configs=volume.configs)
def _add_volume(self, name, configs):
"""
Args:
name (str):
configs (dict): Configurations for the volume.
Could be used to define PersistentVolumeClaim, ConfigMap, etc...
Returns:
"""
volume_map = {'name': name}
for k, v in configs.items():
volume_map[k] = v
self.volumes.append(volume_map)
def add_volume_with_configmap(self, name, config_map):
self.volumes.append(
{
'name': name,
'configMap': config_map
}
)
def _add_mount(self,
name,
mount_path,
sub_path,
read_only):
"""
Args:
name (str):
mount_path (str):
sub_path (str):
read_only:
Returns:
"""
self.volume_mounts.append({
'name': name,
'mountPath': mount_path,
'subPath': sub_path,
'readOnly': read_only
})
def add_mount(self,
volume_mount):
"""
Args:
volume_mount (VolumeMount):
"""
self._add_mount(
name=volume_mount.name,
mount_path=volume_mount.mount_path,
sub_path=volume_mount.sub_path,
read_only=volume_mount.read_only
)
def _get_volumes_and_mounts(self):
return self.volumes, self.volume_mounts
def _get_image_pull_secrets(self):
"""Extracts any image pull secrets for fetching container(s)"""
if not self.kube_config.image_pull_secrets:
return []
return self.kube_config.image_pull_secrets.split(',')
def make_pod(self, namespace, image, pod_id, cmds, arguments, labels):
volumes, volume_mounts = self._get_volumes_and_mounts()
worker_init_container_spec = self._get_init_containers()
return Pod(
namespace=namespace,
name=pod_id + "-" + str(uuid.uuid1())[:8],
image=image,
cmds=cmds,
args=arguments,
labels=labels,
envs={},
secrets=[],
# service_account_name=self.kube_config.worker_service_account_name,
# image_pull_secrets=self.kube_config.image_pull_secrets,
init_containers=worker_init_container_spec,
volumes=volumes,
volume_mounts=volume_mounts,
resources=None
)
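# A minimal usage sketch (all values hypothetical); Volume and VolumeMount come
# from the imports at the top of this module:
#
#     generator = PodGenerator()
#     generator.add_volume(Volume(name='data', configs={'emptyDir': {}}))
#     pod = generator.make_pod(namespace='default', image='airflow:latest',
#                              pod_id='example-task', cmds=['airflow'],
#                              arguments=['run'], labels={'airflow-worker': '1'})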
|
Britefury/scikit-image | refs/heads/master | skimage/io/tests/test_freeimage.py | 33 |
import os
import skimage as si
import skimage.io as sio
from skimage import data_dir
import numpy as np
from numpy.testing import *
from numpy.testing.decorators import skipif
from tempfile import NamedTemporaryFile
try:
import skimage.io._plugins.freeimage_plugin as fi
FI_available = True
sio.use_plugin('freeimage')
except RuntimeError:
FI_available = False
np.random.seed(0)
def setup_module(self):
"""The effect of the `plugin.use` call may be overridden by later imports.
Call `use_plugin` directly before the tests to ensure that freeimage is
used.
"""
try:
sio.use_plugin('freeimage')
except RuntimeError:
pass
def teardown():
sio.reset_plugins()
@skipif(not FI_available)
def test_imread():
img = sio.imread(os.path.join(si.data_dir, 'color.png'))
assert img.shape == (370, 371, 3)
assert all(img[274, 135] == [0, 130, 253])
@skipif(not FI_available)
def test_imread_truncated_jpg():
assert_raises((RuntimeError, ValueError),
sio.imread,
os.path.join(si.data_dir, 'truncated.jpg'))
@skipif(not FI_available)
def test_imread_uint16():
expected = np.load(os.path.join(si.data_dir, 'chessboard_GRAY_U8.npy'))
img = sio.imread(os.path.join(si.data_dir, 'chessboard_GRAY_U16.tif'))
assert img.dtype == np.uint16
assert_array_almost_equal(img, expected)
@skipif(not FI_available)
def test_imread_uint16_big_endian():
expected = np.load(os.path.join(si.data_dir, 'chessboard_GRAY_U8.npy'))
img = sio.imread(os.path.join(si.data_dir, 'chessboard_GRAY_U16B.tif'))
assert img.dtype == np.uint16
assert_array_almost_equal(img, expected)
@skipif(not FI_available)
def test_write_multipage():
shape = (64, 64, 64)
x = np.ones(shape, dtype=np.uint8) * np.random.rand(*shape) * 255
x = x.astype(np.uint8)
f = NamedTemporaryFile(suffix='.tif')
fname = f.name
f.close()
fi.write_multipage(x, fname)
y = fi.read_multipage(fname)
assert_array_equal(x, y)
class TestSave:
def roundtrip(self, dtype, x, suffix):
f = NamedTemporaryFile(suffix='.' + suffix)
fname = f.name
f.close()
sio.imsave(fname, x)
y = sio.imread(fname)
assert_array_equal(y, x)
@skipif(not FI_available)
def test_imsave_roundtrip(self):
for shape, dtype, format in [
[(10, 10), (np.uint8, np.uint16), ('tif', 'png')],
[(10, 10), (np.float32,), ('tif',)],
[(10, 10, 3), (np.uint8, np.uint16), ('png',)],
[(10, 10, 4), (np.uint8, np.uint16), ('png',)]
]:
tests = [(d, f) for d in dtype for f in format]
for d, f in tests:
x = np.ones(shape, dtype=d) * np.random.rand(*shape)
if not np.issubdtype(d, float):
x = (x * 255).astype(d)
yield self.roundtrip, d, x, f
@skipif(not FI_available)
def test_metadata():
meta = fi.read_metadata(os.path.join(si.data_dir, 'multipage.tif'))
assert meta[('EXIF_MAIN', 'Orientation')] == 1
assert meta[('EXIF_MAIN', 'Software')].startswith('I')
meta = fi.read_multipage_metadata(os.path.join(si.data_dir,
'multipage.tif'))
assert len(meta) == 2
assert meta[0][('EXIF_MAIN', 'Orientation')] == 1
assert meta[1][('EXIF_MAIN', 'Software')].startswith('I')
@skipif(not FI_available)
def test_collection():
pattern = [os.path.join(data_dir, pic)
for pic in ['camera.png', 'color.png', 'multipage.tif']]
images = sio.ImageCollection(pattern[:-1])
assert len(images) == 2
assert len(images[:]) == 2
images = sio.ImageCollection(pattern)
assert len(images) == 3
assert len(images[:]) == 3
if __name__ == "__main__":
run_module_suite()
|
biodrone/plex-desk | refs/heads/master | desk/flask/lib/python3.4/site-packages/sqlalchemy/testing/suite/test_ddl.py | 203 |
from .. import fixtures, config, util
from ..config import requirements
from ..assertions import eq_
from sqlalchemy import Table, Column, Integer, String
class TableDDLTest(fixtures.TestBase):
__backend__ = True
def _simple_fixture(self):
return Table('test_table', self.metadata,
Column('id', Integer, primary_key=True,
autoincrement=False),
Column('data', String(50))
)
def _underscore_fixture(self):
return Table('_test_table', self.metadata,
Column('id', Integer, primary_key=True,
autoincrement=False),
Column('_data', String(50))
)
def _simple_roundtrip(self, table):
with config.db.begin() as conn:
conn.execute(table.insert().values((1, 'some data')))
result = conn.execute(table.select())
eq_(
result.first(),
(1, 'some data')
)
@requirements.create_table
@util.provide_metadata
def test_create_table(self):
table = self._simple_fixture()
table.create(
config.db, checkfirst=False
)
self._simple_roundtrip(table)
@requirements.drop_table
@util.provide_metadata
def test_drop_table(self):
table = self._simple_fixture()
table.create(
config.db, checkfirst=False
)
table.drop(
config.db, checkfirst=False
)
@requirements.create_table
@util.provide_metadata
def test_underscore_names(self):
table = self._underscore_fixture()
table.create(
config.db, checkfirst=False
)
self._simple_roundtrip(table)
__all__ = ('TableDDLTest', )
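# Pattern note (illustrative sketch, not part of the suite): additional tests
# in this file pair a ``requirements`` guard with ``util.provide_metadata`` so
# the Table binds to fresh metadata and is cleaned up afterwards, e.g.:
#
#   @requirements.create_table
#   @util.provide_metadata
#   def test_create_and_roundtrip(self):
#       table = self._simple_fixture()
#       table.create(config.db, checkfirst=False)
#       self._simple_roundtrip(table)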
|
ojarva/django-websocket-redis
|
refs/heads/master
|
ws4redis/websocket.py
|
1
|
# -*- coding: utf-8 -*-
# This code was generously pilfered from https://bitbucket.org/Jeffrey/gevent-websocket
# written by Jeffrey Gelens (http://noppo.pro/) and licensed under the Apache License, Version 2.0
import six
import struct
from socket import error as socket_error
from django.core.handlers.wsgi import logger
from ws4redis.utf8validator import Utf8Validator
from ws4redis.exceptions import WebSocketError, FrameTooLargeException
class WebSocket(object):
__slots__ = ('_closed', 'stream', 'utf8validator', 'utf8validate_last')
OPCODE_CONTINUATION = 0x00
OPCODE_TEXT = 0x01
OPCODE_BINARY = 0x02
OPCODE_CLOSE = 0x08
OPCODE_PING = 0x09
OPCODE_PONG = 0x0a
def __init__(self, wsgi_input):
self._closed = False
self.stream = Stream(wsgi_input)
self.utf8validator = Utf8Validator()
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def _decode_bytes(self, bytestring):
"""
Internal method used to convert the utf-8 encoded bytestring into unicode.
If the conversion fails, the socket will be closed.
"""
if not bytestring:
return u''
try:
return bytestring.decode('utf-8')
except UnicodeDecodeError:
self.close(1007)
raise
def _encode_bytes(self, text):
"""
:returns: The utf-8 byte string equivalent of `text`.
"""
if isinstance(text, six.binary_type):
return text
if not isinstance(text, six.text_type):
text = six.text_type(text or '')
return text.encode('utf-8')
def _is_valid_close_code(self, code):
"""
:returns: Whether the returned close code is a valid hybi return code.
"""
if code < 1000:
return False
if 1004 <= code <= 1006:
return False
if 1012 <= code <= 1016:
return False
if code == 1100:
# not sure about this one but the autobahn fuzzer requires it.
return False
if 2000 <= code <= 2999:
return False
return True
def get_file_descriptor(self):
"""Return the file descriptor for the given websocket"""
return self.stream.fileno
@property
def closed(self):
return self._closed
def handle_close(self, header, payload):
"""
Called when a close frame has been decoded from the stream.
:param header: The decoded `Header`.
:param payload: The bytestring payload associated with the close frame.
"""
if not payload:
self.close(1000, None)
return
if len(payload) < 2:
raise WebSocketError('Invalid close frame: {0} {1}'.format(header, payload))
code = struct.unpack('!H', str(payload[:2]))[0]
payload = payload[2:]
if payload:
validator = Utf8Validator()
val = validator.validate(payload)
if not val[0]:
raise UnicodeError
if not self._is_valid_close_code(code):
raise WebSocketError('Invalid close code {0}'.format(code))
self.close(code, payload)
def handle_ping(self, header, payload):
self.send_frame(payload, self.OPCODE_PONG)
def handle_pong(self, header, payload):
pass
def read_frame(self):
"""
Block until a full frame has been read from the socket.
        This is an internal method; calling it directly will not clean up
        correctly if an exception is raised. Use `receive` instead.
:return: The header and payload as a tuple.
"""
header = Header.decode_header(self.stream)
if header.flags:
raise WebSocketError
if not header.length:
return header, ''
try:
payload = self.stream.read(header.length)
except socket_error:
payload = ''
except Exception:
# TODO log out this exception
payload = ''
if len(payload) != header.length:
raise WebSocketError('Unexpected EOF reading frame payload')
if header.mask:
payload = header.unmask_payload(payload)
return header, payload
def validate_utf8(self, payload):
# Make sure the frames are decodable independently
self.utf8validate_last = self.utf8validator.validate(payload)
if not self.utf8validate_last[0]:
raise UnicodeError("Encountered invalid UTF-8 while processing "
"text message at payload octet index "
"{0:d}".format(self.utf8validate_last[3]))
def read_message(self):
"""
Return the next text or binary message from the socket.
        This is an internal method; calling it directly will not clean up
        correctly if an exception is raised. Use `receive` instead.
"""
opcode = None
message = ""
while True:
header, payload = self.read_frame()
f_opcode = header.opcode
if f_opcode in (self.OPCODE_TEXT, self.OPCODE_BINARY):
# a new frame
if opcode:
raise WebSocketError("The opcode in non-fin frame is expected to be zero, got {0!r}".format(f_opcode))
# Start reading a new message, reset the validator
self.utf8validator.reset()
self.utf8validate_last = (True, True, 0, 0)
opcode = f_opcode
elif f_opcode == self.OPCODE_CONTINUATION:
if not opcode:
raise WebSocketError("Unexpected frame with opcode=0")
elif f_opcode == self.OPCODE_PING:
self.handle_ping(header, payload)
continue
elif f_opcode == self.OPCODE_PONG:
self.handle_pong(header, payload)
continue
elif f_opcode == self.OPCODE_CLOSE:
self.handle_close(header, payload)
return
else:
raise WebSocketError("Unexpected opcode={0!r}".format(f_opcode))
if opcode == self.OPCODE_TEXT:
self.validate_utf8(payload)
message += payload
if header.fin:
break
if opcode == self.OPCODE_TEXT:
self.validate_utf8(message)
return message
else:
return bytearray(message)
def receive(self):
"""
Read and return a message from the stream. If `None` is returned, then
the socket is considered closed/errored.
"""
if self._closed:
raise WebSocketError("Connection is already closed")
try:
return self.read_message()
except UnicodeError:
logger.info('websocket.receive: UnicodeError')
self.close(1007)
except WebSocketError:
logger.info('websocket.receive: WebSocketError')
self.close(1002)
except Exception as e:
logger.info('websocket.receive: Unknown error %s', e)
            raise
def flush(self):
"""
        Flush the websocket. This implementation intentionally does nothing.
"""
pass
def send_frame(self, message, opcode):
"""
Send a frame over the websocket with message as its payload
"""
if self._closed:
raise WebSocketError("Connection is already closed")
if opcode == self.OPCODE_TEXT:
message = self._encode_bytes(message)
elif opcode == self.OPCODE_BINARY:
message = six.binary_type(message)
header = Header.encode_header(True, opcode, '', len(message), 0)
try:
self.stream.write(header + message)
except socket_error:
raise WebSocketError("Socket is dead")
    def send(self, message, binary=None):
        """
        Send a message over the websocket; when `binary` is None, the frame
        type is inferred from the type of `message`.
        """
if binary is None:
binary = not isinstance(message, six.string_types)
opcode = self.OPCODE_BINARY if binary else self.OPCODE_TEXT
try:
self.send_frame(message, opcode)
except WebSocketError:
raise WebSocketError("Socket is dead")
def close(self, code=1000, message=''):
"""
Close the websocket and connection, sending the specified code and
message. The underlying socket object is _not_ closed, that is the
responsibility of the initiator.
"""
try:
message = self._encode_bytes(message)
self.send_frame(
struct.pack('!H%ds' % len(message), code, message),
opcode=self.OPCODE_CLOSE)
except WebSocketError:
# Failed to write the closing frame but it's ok because we're
# closing the socket anyway.
logger.debug("Failed to write closing frame -> closing socket")
finally:
logger.debug("Closed WebSocket")
self._closed = True
self.stream = None
class Stream(object):
"""
    Wraps the handler's socket/rfile attributes and makes them into a file-like
    object that can be read from/written to by the lower level websocket api.
"""
__slots__ = ('read', 'write', 'fileno')
def __init__(self, wsgi_input):
if six.PY2:
self.read = wsgi_input._sock.recv
self.write = wsgi_input._sock.sendall
else:
self.read = wsgi_input.raw._sock.recv
self.write = wsgi_input.raw._sock.sendall
self.fileno = wsgi_input.fileno()
class Header(object):
__slots__ = ('fin', 'mask', 'opcode', 'flags', 'length')
FIN_MASK = 0x80
OPCODE_MASK = 0x0f
MASK_MASK = 0x80
LENGTH_MASK = 0x7f
RSV0_MASK = 0x40
RSV1_MASK = 0x20
RSV2_MASK = 0x10
# bitwise mask that will determine the reserved bits for a frame header
HEADER_FLAG_MASK = RSV0_MASK | RSV1_MASK | RSV2_MASK
def __init__(self, fin=0, opcode=0, flags=0, length=0):
self.mask = ''
self.fin = fin
self.opcode = opcode
self.flags = flags
self.length = length
def mask_payload(self, payload):
payload = bytearray(payload)
mask = bytearray(self.mask)
        for i in range(self.length):
payload[i] ^= mask[i % 4]
return str(payload)
# it's the same operation
unmask_payload = mask_payload
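    # Illustrative sketch (not part of the original module): XOR with the same
    # 4-byte mask is its own inverse, so masking twice restores the payload;
    # that is why unmask_payload can simply alias mask_payload. Using this
    # module's Python 2 str semantics:
    #
    #   h = Header(length=5)
    #   h.mask = '\x01\x02\x03\x04'
    #   masked = h.mask_payload('hello')
    #   assert h.unmask_payload(masked) == 'hello'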
def __repr__(self):
return ("<Header fin={0} opcode={1} length={2} flags={3} at "
"0x{4:x}>").format(self.fin, self.opcode, self.length,
self.flags, id(self))
@classmethod
def decode_header(cls, stream):
"""
Decode a WebSocket header.
:param stream: A file like object that can be 'read' from.
:returns: A `Header` instance.
"""
read = stream.read
data = read(2)
if len(data) != 2:
raise WebSocketError("Unexpected EOF while decoding header")
first_byte, second_byte = struct.unpack('!BB', data)
header = cls(
fin=first_byte & cls.FIN_MASK == cls.FIN_MASK,
opcode=first_byte & cls.OPCODE_MASK,
flags=first_byte & cls.HEADER_FLAG_MASK,
length=second_byte & cls.LENGTH_MASK)
has_mask = second_byte & cls.MASK_MASK == cls.MASK_MASK
if header.opcode > 0x07:
if not header.fin:
raise WebSocketError('Received fragmented control frame: {0!r}'.format(data))
# Control frames MUST have a payload length of 125 bytes or less
if header.length > 125:
raise FrameTooLargeException('Control frame cannot be larger than 125 bytes: {0!r}'.format(data))
if header.length == 126:
# 16 bit length
data = read(2)
if len(data) != 2:
raise WebSocketError('Unexpected EOF while decoding header')
header.length = struct.unpack('!H', data)[0]
elif header.length == 127:
# 64 bit length
data = read(8)
if len(data) != 8:
raise WebSocketError('Unexpected EOF while decoding header')
header.length = struct.unpack('!Q', data)[0]
if has_mask:
mask = read(4)
if len(mask) != 4:
raise WebSocketError('Unexpected EOF while decoding header')
header.mask = mask
return header
@classmethod
def encode_header(cls, fin, opcode, mask, length, flags):
"""
Encodes a WebSocket header.
:param fin: Whether this is the final frame for this opcode.
:param opcode: The opcode of the payload, see `OPCODE_*`
:param mask: Whether the payload is masked.
:param length: The length of the frame.
:param flags: The RSV* flags.
:return: A bytestring encoded header.
"""
first_byte = opcode
second_byte = 0
extra = ''
if fin:
first_byte |= cls.FIN_MASK
if flags & cls.RSV0_MASK:
first_byte |= cls.RSV0_MASK
if flags & cls.RSV1_MASK:
first_byte |= cls.RSV1_MASK
if flags & cls.RSV2_MASK:
first_byte |= cls.RSV2_MASK
# now deal with length complexities
if length < 126:
second_byte += length
elif length <= 0xffff:
second_byte += 126
extra = struct.pack('!H', length)
elif length <= 0xffffffffffffffff:
second_byte += 127
extra = struct.pack('!Q', length)
else:
raise FrameTooLargeException
if mask:
second_byte |= cls.MASK_MASK
extra += mask
return chr(first_byte) + chr(second_byte) + extra
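# Worked example (illustrative, not part of the original module): for a final
# text frame with a 300-byte payload, encode_header(True, 0x01, '', 300, 0)
# sets first_byte = 0x80 | 0x01 = 0x81 (FIN plus text opcode) and, because
# 126 <= 300 <= 0xffff, second_byte = 126 followed by the real length as a
# network-order unsigned short:
#
#   hdr = Header.encode_header(True, WebSocket.OPCODE_TEXT, '', 300, 0)
#   assert hdr == '\x81\x7e' + struct.pack('!H', 300)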
|
nicksergeant/snipt-old
|
refs/heads/master
|
django_authopenid/urls.py
|
1
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext as _
urlpatterns = patterns('django_authopenid.views',
# yadis rdf
url(r'^yadis.xrdf$', 'xrdf', name='yadis_xrdf'),
# manage account registration
url(r'^%s$' % _('signin/'), 'signin', name='user_signin'),
url(r'^%s$' % _('signout/'), 'signout', name='user_signout'),
url(r'^%s%s$' % (_('signin/'), _('complete/')), 'complete_signin',
name='user_complete_signin'),
url(r'^%s$' % _('register/'), 'register', name='user_register'),
url(r'^%s$' % _('signup/'), 'signup', name='user_signup'),
url(r'^%s$' % _('sendpw/'), 'sendpw', name='user_sendpw'),
url(r'^%s%s$' % (_('password/'), _('confirm/')), 'confirmchangepw',
name='user_confirmchangepw'),
# manage account settings
url(r'^$', 'account_settings', name='user_account_settings'),
url(r'^%s$' % _('password/'), 'changepw', name='user_changepw'),
url(r'^%s$' % _('email/'), 'changeemail', name='user_changeemail'),
url(r'^%s$' % _('openid/'), 'changeopenid', name='user_changeopenid'),
url(r'^%s$' % _('delete/'), 'delete', name='user_delete'),
)
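# Illustrative usage (assuming the default English locale): the named patterns
# above can be resolved elsewhere with Django's reverse(), e.g.
#
#   from django.core.urlresolvers import reverse
#   reverse('user_signin')  # -> '<prefix>/signin/' wherever this urlconf is included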
|
dd00/commandergenius
|
refs/heads/dd00
|
project/jni/python/src/Lib/test/test_userdict.py
|
56
|
# Check every path through every method of UserDict
from test import test_support, mapping_tests
import UserDict
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(mapping_tests.TestHashMappingProtocol):
type2test = UserDict.IterableUserDict
def test_all(self):
# Test constructors
u = UserDict.UserDict()
u0 = UserDict.UserDict(d0)
u1 = UserDict.UserDict(d1)
u2 = UserDict.IterableUserDict(d2)
uu = UserDict.UserDict(u)
uu0 = UserDict.UserDict(u0)
uu1 = UserDict.UserDict(u1)
uu2 = UserDict.UserDict(u2)
# keyword arg constructor
self.assertEqual(UserDict.UserDict(one=1, two=2), d2)
# item sequence constructor
self.assertEqual(UserDict.UserDict([('one',1), ('two',2)]), d2)
self.assertEqual(UserDict.UserDict(dict=[('one',1), ('two',2)]), d2)
# both together
self.assertEqual(UserDict.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
# alternate constructor
self.assertEqual(UserDict.UserDict.fromkeys('one two'.split()), d4)
self.assertEqual(UserDict.UserDict().fromkeys('one two'.split()), d4)
self.assertEqual(UserDict.UserDict.fromkeys('one two'.split(), 1), d5)
self.assertEqual(UserDict.UserDict().fromkeys('one two'.split(), 1), d5)
self.assert_(u1.fromkeys('one two'.split()) is not u1)
self.assert_(isinstance(u1.fromkeys('one two'.split()), UserDict.UserDict))
self.assert_(isinstance(u2.fromkeys('one two'.split()), UserDict.IterableUserDict))
# Test __repr__
self.assertEqual(str(u0), str(d0))
self.assertEqual(repr(u1), repr(d1))
self.assertEqual(`u2`, `d2`)
# Test __cmp__ and __len__
all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
self.assertEqual(cmp(a, b), cmp(len(a), len(b)))
# Test __getitem__
self.assertEqual(u2["one"], 1)
self.assertRaises(KeyError, u1.__getitem__, "two")
# Test __setitem__
u3 = UserDict.UserDict(u2)
u3["two"] = 2
u3["three"] = 3
# Test __delitem__
del u3["three"]
self.assertRaises(KeyError, u3.__delitem__, "three")
# Test clear
u3.clear()
self.assertEqual(u3, {})
# Test copy()
u2a = u2.copy()
self.assertEqual(u2a, u2)
u2b = UserDict.UserDict(x=42, y=23)
u2c = u2b.copy() # making a copy of a UserDict is special cased
self.assertEqual(u2b, u2c)
class MyUserDict(UserDict.UserDict):
def display(self): print self
m2 = MyUserDict(u2)
m2a = m2.copy()
self.assertEqual(m2a, m2)
# SF bug #476616 -- copy() of UserDict subclass shared data
m2['foo'] = 'bar'
self.assertNotEqual(m2a, m2)
# Test keys, items, values
self.assertEqual(u2.keys(), d2.keys())
self.assertEqual(u2.items(), d2.items())
self.assertEqual(u2.values(), d2.values())
# Test has_key and "in".
for i in u2.keys():
self.assert_(u2.has_key(i))
self.assert_(i in u2)
self.assertEqual(u1.has_key(i), d1.has_key(i))
self.assertEqual(i in u1, i in d1)
self.assertEqual(u0.has_key(i), d0.has_key(i))
self.assertEqual(i in u0, i in d0)
# Test update
t = UserDict.UserDict()
t.update(u2)
self.assertEqual(t, u2)
class Items:
def items(self):
return (("x", 42), ("y", 23))
t = UserDict.UserDict()
t.update(Items())
self.assertEqual(t, {"x": 42, "y": 23})
# Test get
for i in u2.keys():
self.assertEqual(u2.get(i), u2[i])
self.assertEqual(u1.get(i), d1.get(i))
self.assertEqual(u0.get(i), d0.get(i))
# Test "in" iteration.
for i in xrange(20):
u2[i] = str(i)
ikeys = []
for k in u2:
ikeys.append(k)
keys = u2.keys()
self.assertEqual(set(ikeys), set(keys))
# Test setdefault
t = UserDict.UserDict()
self.assertEqual(t.setdefault("x", 42), 42)
self.assert_(t.has_key("x"))
self.assertEqual(t.setdefault("x", 23), 42)
# Test pop
t = UserDict.UserDict(x=42)
self.assertEqual(t.pop("x"), 42)
self.assertRaises(KeyError, t.pop, "x")
self.assertEqual(t.pop("x", 1), 1)
t["x"] = 42
self.assertEqual(t.pop("x", 1), 42)
# Test popitem
t = UserDict.UserDict(x=42)
self.assertEqual(t.popitem(), ("x", 42))
self.assertRaises(KeyError, t.popitem)
def test_missing(self):
# Make sure UserDict doesn't have a __missing__ method
self.assertEqual(hasattr(UserDict, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
class D(UserDict.UserDict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assert_(2 not in d)
self.assert_(2 not in d.keys())
self.assertEqual(d[2], 42)
class E(UserDict.UserDict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(UserDict.UserDict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
UserDict.UserDict.__init__(self)
f = F()
try:
f[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(UserDict.UserDict):
pass
g = G()
try:
g[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
##########################
# Test Dict Mixin
class SeqDict(UserDict.DictMixin):
"""Dictionary lookalike implemented with lists.
Used to test and demonstrate DictMixin
"""
def __init__(self, other=None, **kwargs):
self.keylist = []
self.valuelist = []
if other is not None:
for (key, value) in other:
self[key] = value
for (key, value) in kwargs.iteritems():
self[key] = value
def __getitem__(self, key):
try:
i = self.keylist.index(key)
except ValueError:
raise KeyError
return self.valuelist[i]
def __setitem__(self, key, value):
try:
i = self.keylist.index(key)
self.valuelist[i] = value
except ValueError:
self.keylist.append(key)
self.valuelist.append(value)
def __delitem__(self, key):
try:
i = self.keylist.index(key)
except ValueError:
raise KeyError
self.keylist.pop(i)
self.valuelist.pop(i)
def keys(self):
return list(self.keylist)
def copy(self):
d = self.__class__()
for key, value in self.iteritems():
d[key] = value
return d
@classmethod
def fromkeys(cls, keys, value=None):
d = cls()
for key in keys:
d[key] = value
return d
class UserDictMixinTest(mapping_tests.TestMappingProtocol):
type2test = SeqDict
def test_all(self):
## Setup test and verify working of the test class
# check init
s = SeqDict()
# exercise setitem
s[10] = 'ten'
s[20] = 'twenty'
s[30] = 'thirty'
# exercise delitem
del s[20]
# check getitem and setitem
self.assertEqual(s[10], 'ten')
# check keys() and delitem
self.assertEqual(s.keys(), [10, 30])
## Now, test the DictMixin methods one by one
# has_key
self.assert_(s.has_key(10))
self.assert_(not s.has_key(20))
# __contains__
self.assert_(10 in s)
self.assert_(20 not in s)
# __iter__
self.assertEqual([k for k in s], [10, 30])
# __len__
self.assertEqual(len(s), 2)
# iteritems
self.assertEqual(list(s.iteritems()), [(10,'ten'), (30, 'thirty')])
# iterkeys
self.assertEqual(list(s.iterkeys()), [10, 30])
# itervalues
self.assertEqual(list(s.itervalues()), ['ten', 'thirty'])
# values
self.assertEqual(s.values(), ['ten', 'thirty'])
# items
self.assertEqual(s.items(), [(10,'ten'), (30, 'thirty')])
# get
self.assertEqual(s.get(10), 'ten')
self.assertEqual(s.get(15,'fifteen'), 'fifteen')
self.assertEqual(s.get(15), None)
# setdefault
self.assertEqual(s.setdefault(40, 'forty'), 'forty')
self.assertEqual(s.setdefault(10, 'null'), 'ten')
del s[40]
# pop
self.assertEqual(s.pop(10), 'ten')
self.assert_(10 not in s)
s[10] = 'ten'
self.assertEqual(s.pop("x", 1), 1)
s["x"] = 42
self.assertEqual(s.pop("x", 1), 42)
# popitem
k, v = s.popitem()
self.assert_(k not in s)
s[k] = v
# clear
s.clear()
self.assertEqual(len(s), 0)
# empty popitem
self.assertRaises(KeyError, s.popitem)
# update
s.update({10: 'ten', 20:'twenty'})
self.assertEqual(s[10], 'ten')
self.assertEqual(s[20], 'twenty')
# cmp
self.assertEqual(s, {10: 'ten', 20:'twenty'})
t = SeqDict()
t[20] = 'twenty'
t[10] = 'ten'
self.assertEqual(s, t)
def test_main():
test_support.run_unittest(
UserDictTest,
UserDictMixinTest
)
if __name__ == "__main__":
test_main()
|
arju88nair/projectCulminate
|
refs/heads/master
|
venv/lib/python3.5/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py
|
356
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
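# Usage sketch (illustrative; assumes a standalone html5lib install rather than
# this vendored copy, plus genshi itself):
#
#   import html5lib
#   doc = html5lib.parse("<p>hello</p>", treebuilder="etree")
#   walker = html5lib.getTreeWalker("etree")
#   events = to_genshi(walker(doc))
#   # ``events`` yields genshi (kind, data, pos) tuples, consumable by
#   # genshi.core.Stream(events).render("html")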
|
neillc/memberdb-ng
|
refs/heads/master
|
backend/views/members.py
|
1
|
from datetime import datetime
from flask import flash, redirect, render_template, request
from flask_mail import Message
from flask_login import login_required, current_user
from backend import app, db, mail
from backend.models import Members
from backend.models.members import OrgMembers, MemberTypes
app.api_manager.create_api(
Members,
methods=['GET', 'POST', 'PATCH', 'DELETE'],
collection_name='members'
)
@app.route('/confirm-membership/<id>')
def confirm_membership(id):
org_id = app.config['ORG_ID']
member = Members.query.filter_by(id=id).one()
pending_member_type = MemberTypes.query.filter_by(description='Pending').one()
already_confirmed = [
org_member for org_member
in member.organisations
if org_member.member_type.description ==
pending_member_type.description
]
if already_confirmed:
message = 'Already Confirmed!'
else:
org_member = OrgMembers(
member_id=id,
org_id=org_id,
member_type=pending_member_type,
start_date=datetime.now()
)
db.session.add(org_member)
db.session.commit()
message = 'Application Confirmed'
return render_template('confirmed.html', message=message)
@app.route('/approve-members')
def approve_members():
    pending_approval = Members.query.join(OrgMembers).\
        filter(OrgMembers.expiry.is_(None)).join(MemberTypes).\
        filter(MemberTypes.description == 'Pending').all()
return render_template('approve_members.html', members=pending_approval)
@app.route('/approve-member/<id>')
def approve_member(id):
member = Members.query.get(id)
approved_member_type = MemberTypes.query.filter_by(
description='Approved').one()
for org in member.organisations:
if org.member_type.description == 'Pending':
            org.expiry = datetime.now()
member.organisations.append(
OrgMembers(
member_type=approved_member_type,
org_id=1, # TODO: this should not be hard coded
start_date=datetime.now()
)
    )
    db.session.commit()  # persist the approval before notifying the member
flash('Member ({member.first_name} {member.last_name}) has been approved'.
format(member=member))
message_body = render_template(
'mail/membership_approved.tpl',
member=member
)
subject = "Your application for membership of " + \
"{org_name} has been approved"
subject = subject.format(org_name='Linux Australia')
mail.send(
Message(
subject=subject,
body=message_body,
recipients=[member.email]
)
)
return redirect('/approve-members')
@app.route('/profile', methods=['POST', 'GET'])
@login_required
def profile():
from hashlib import md5
import random
import string
from backend.forms import ProfileForm
from backend.models.members import Passwd
member = Members.query.get(current_user.id)
if request.method == 'POST':
form = ProfileForm(request.form)
else:
form = ProfileForm()
form.id.data = member.id
form.email.data = member.email
form.first_name.data = member.first_name
form.middle_name.data = member.middle_name
form.last_name.data = member.last_name
form.DOB.data = member.DOB
form.sex.data = member.sex
form.address1.data = member.address1
form.address2.data = member.address2
form.suburb.data = member.suburb
form.postcode.data = member.postcode
form.state.data = member.state
form.country.data = member.country
form.phone_home.data = member.phone_home
form.phone_mobile.data = member.phone_mobile
if request.method == 'POST' and form.validate_on_submit():
flash('Profile edited')
if form.id.data:
member = Members.query.get(form.id.data)
else:
member = Members()
member.first_name = form.first_name.data
member.middle_name = form.middle_name.data
member.last_name = form.last_name.data
member.DOB = form.DOB.data
member.sex = form.sex.data
member.address1 = form.address1.data
member.address2 = form.address2.data
member.suburb = form.suburb.data
member.postcode = form.postcode.data
member.state = form.state.data
member.country = form.country.data
member.email = form.email.data
member.phone_home = form.phone_home.data
member.phone_mobile = form.phone_mobile.data
if form.password.data:
salt = ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.digits
) for _ in range(5))
member.passwd[0].salt = salt
member.passwd[0].password = md5(
salt.encode() +
form.password.data.encode()
).hexdigest()
db.session.commit()
return render_template('profile.html', form=form)
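def _check_password(member, candidate):
    """Illustrative sketch, not wired into any route above: verify a candidate
    password against the salted-md5 scheme used by profile(), where the stored
    hash is md5(salt + password)."""
    from hashlib import md5
    stored = member.passwd[0]
    return stored.password == md5(
        stored.salt.encode() + candidate.encode()
    ).hexdigest()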
# @app.route('/members')
# def index():
# members = []
# for member in app.session.query(app.Members).all():
# if member.date_entered:
# date_entered = member.date_entered.strftime('%Y-%m-%d %H:%M:%s')
# else:
# date_entered = None
#
# members.append({
# 'id':member.id,
# 'date_entered': date_entered,
# 'first_name': member.first_name, 'middle_name':member.middle_name,
# 'last_name': member.last_name,
# 'DOB': member.DOB.strftime('%Y-%m-%d HH:mm:ss') \
# if member.DOB else None,
# 'sex': member.sex,
# 'address1': member.address1, 'address2':member.address2,
# 'suburb': member.suburb, 'postcode':member.postcode,
# 'state': member.state, 'country':member.country,
# 'email': member.email, 'phone_home':member.phone_home,
# 'phone_mobile': member.phone_mobile
# })
# return json.dumps(members)
#
|
hn8841182/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/unittest/__main__.py
|
737
|
"""Main entry point"""
import sys
if sys.argv[0].endswith("__main__.py"):
import os.path
# We change sys.argv[0] to make help message more useful
# use executable without path, unquoted
# (it's just a hint anyway)
# (if you have spaces in your executable you get what you deserve!)
executable = os.path.basename(sys.executable)
sys.argv[0] = executable + " -m unittest"
del os
__unittest = True
from .main import main, TestProgram, USAGE_AS_MAIN
TestProgram.USAGE = USAGE_AS_MAIN
main(module=None)
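# Typical invocations that end up in this entry point (illustrative):
#
#   python -m unittest test_module
#   python -m unittest test_module.TestClass.test_method
#   python -m unittest discover -s tests -p "test_*.py"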
|
olafhauk/mne-python
|
refs/heads/master
|
examples/forward/plot_source_space_morphing.py
|
19
|
"""
=========================
Use source space morphing
=========================
This example shows how to use source space morphing (as opposed to
:class:`~mne.SourceEstimate` morphing) to create data that can be compared
between subjects.
.. warning:: Source space morphing will likely lead to source spaces that are
less evenly sampled than source spaces created for individual
subjects. Use with caution and check effects on localization
before use.
"""
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
#          Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import mne
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem',
'sample-5120-bem-sol.fif')
fname_src_fs = op.join(subjects_dir, 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
# Get relevant channel information
info = mne.io.read_info(raw_fname)
info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False,
exclude=[]))
# Morph fsaverage's source space to sample
src_fs = mne.read_source_spaces(fname_src_fs)
src_morph = mne.morph_source_spaces(src_fs, subject_to='sample',
subjects_dir=subjects_dir)
# Compute the forward with our morphed source space
fwd = mne.make_forward_solution(info, trans=fname_trans,
src=src_morph, bem=fname_bem)
mag_map = mne.sensitivity_map(fwd, ch_type='mag')
# Return this SourceEstimate (on sample's surfaces) to fsaverage's surfaces
mag_map_fs = mag_map.to_original_src(src_fs, subjects_dir=subjects_dir)
# Plot the result, which tracks the sulcal-gyral folding; outliers may occur,
# so we place the color limit cutoff at the 99th percentile.
kwargs = dict(clim=dict(kind='percent', lims=[0, 50, 99]),
# no smoothing, let's see the dipoles on the cortex.
smoothing_steps=1, hemi='rh', views=['lat'])
# Now note that the dipoles on fsaverage are almost equidistant while
# morphing will distribute the dipoles unevenly across the given subject's
# cortical surface to achieve the closest approximation to the average brain.
# Our testing code suggests a correlation of higher than 0.99.
brain_subject = mag_map.plot( # plot forward in subject source space (morphed)
time_label='Morphed', subjects_dir=subjects_dir, **kwargs)
brain_fs = mag_map_fs.plot( # plot forward in original source space (remapped)
time_label='Remapped', subjects_dir=subjects_dir, **kwargs)
|
dredgar/ss-info-fetcher
|
refs/heads/master
|
lib/bs4/tests/test_soup.py
|
62
|
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""
from pdb import set_trace
import logging
import unittest
import sys
import tempfile
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
SoupStrainer,
NamespacedAttribute,
)
import bs4.dammit
from bs4.dammit import (
EntitySubstitution,
UnicodeDammit,
EncodingDetector,
)
from bs4.testing import (
SoupTest,
skipIf,
)
import warnings
try:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
LXML_PRESENT = True
except ImportError, e:
LXML_PRESENT = False
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))
class TestConstructor(SoupTest):
def test_short_unicode_input(self):
data = u"<h1>éé</h1>"
soup = self.soup(data)
self.assertEqual(u"éé", soup.h1.string)
def test_embedded_null(self):
data = u"<h1>foo\0bar</h1>"
soup = self.soup(data)
self.assertEqual(u"foo\0bar", soup.h1.string)
def test_exclude_encodings(self):
utf8_data = u"Räksmörgås".encode("utf-8")
soup = self.soup(utf8_data, exclude_encodings=["utf-8"])
self.assertEqual("windows-1252", soup.original_encoding)
class TestWarnings(SoupTest):
    def _assert_no_parser_specified(self, s, is_there=True):
v = s.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:80])
self.assertTrue(v)
def test_warning_if_no_parser_specified(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>")
msg = str(w[0].message)
self._assert_no_parser_specified(msg)
def test_warning_if_parser_specified_too_vague(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>", "html")
msg = str(w[0].message)
self._assert_no_parser_specified(msg)
def test_no_warning_if_explicit_parser_specified(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>", "html.parser")
self.assertEqual([], w)
def test_parseOnlyThese_renamed_to_parse_only(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
msg = str(w[0].message)
self.assertTrue("parseOnlyThese" in msg)
self.assertTrue("parse_only" in msg)
self.assertEqual(b"<b></b>", soup.encode())
def test_fromEncoding_renamed_to_from_encoding(self):
with warnings.catch_warnings(record=True) as w:
utf8 = b"\xc3\xa9"
soup = self.soup(utf8, fromEncoding="utf8")
msg = str(w[0].message)
self.assertTrue("fromEncoding" in msg)
self.assertTrue("from_encoding" in msg)
self.assertEqual("utf8", soup.original_encoding)
def test_unrecognized_keyword_argument(self):
self.assertRaises(
TypeError, self.soup, "<a>", no_such_argument=True)
def test_disk_file_warning(self):
filehandle = tempfile.NamedTemporaryFile()
filename = filehandle.name
try:
with warnings.catch_warnings(record=True) as w:
soup = self.soup(filename)
msg = str(w[0].message)
self.assertTrue("looks like a filename" in msg)
finally:
filehandle.close()
# The file no longer exists, so Beautiful Soup will no longer issue the warning.
with warnings.catch_warnings(record=True) as w:
soup = self.soup(filename)
self.assertEqual(0, len(w))
def test_url_warning_with_bytes_url(self):
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup(b"http://www.crummybytes.com/")
# Be aware this isn't the only warning that can be raised during
        # execution.
self.assertTrue(any("looks like a URL" in str(w.message)
for w in warning_list))
def test_url_warning_with_unicode_url(self):
with warnings.catch_warnings(record=True) as warning_list:
# note - this url must differ from the bytes one otherwise
# python's warnings system swallows the second warning
soup = self.soup(u"http://www.crummyunicode.com/")
self.assertTrue(any("looks like a URL" in str(w.message)
for w in warning_list))
def test_url_warning_with_bytes_and_space(self):
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup(b"http://www.crummybytes.com/ is great")
self.assertFalse(any("looks like a URL" in str(w.message)
for w in warning_list))
def test_url_warning_with_unicode_and_space(self):
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup(u"http://www.crummyuncode.com/ is great")
self.assertFalse(any("looks like a URL" in str(w.message)
for w in warning_list))
class TestSelectiveParsing(SoupTest):
def test_parse_with_soupstrainer(self):
markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
strainer = SoupStrainer("b")
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")
class TestEntitySubstitution(unittest.TestCase):
"""Standalone tests of the EntitySubstitution class."""
def setUp(self):
self.sub = EntitySubstitution
def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
# are substituted, and no others.
s = u"foo\u2200\N{SNOWMAN}\u00f5bar"
        self.assertEqual(self.sub.substitute_html(s),
                         u"foo&forall;\N{SNOWMAN}&otilde;bar")
def test_smart_quote_substitution(self):
# MS smart quotes are a common source of frustration, so we
# give them a special test.
quotes = b"\x91\x92foo\x93\x94"
dammit = UnicodeDammit(quotes)
        self.assertEqual(self.sub.substitute_html(dammit.markup),
                         "&lsquo;&rsquo;foo&ldquo;&rdquo;")
    def test_xml_conversion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
s = 'Welcome to "my bar"'
self.assertEqual(self.sub.substitute_xml(s, False), s)
def test_xml_attribute_quoting_normally_uses_double_quotes(self):
self.assertEqual(self.sub.substitute_xml("Welcome", True),
'"Welcome"')
self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
'"Bob\'s Bar"')
def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
s = 'Welcome to "my bar"'
self.assertEqual(self.sub.substitute_xml(s, True),
"'Welcome to \"my bar\"'")
def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
s = 'Welcome to "Bob\'s Bar"'
self.assertEqual(
self.sub.substitute_xml(s, True),
'"Welcome to "Bob\'s Bar""')
def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
quoted = 'Welcome to "Bob\'s Bar"'
self.assertEqual(self.sub.substitute_xml(quoted), quoted)
def test_xml_quoting_handles_angle_brackets(self):
self.assertEqual(
self.sub.substitute_xml("foo<bar>"),
"foo<bar>")
def test_xml_quoting_handles_ampersands(self):
self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&T")
def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
self.assertEqual(
self.sub.substitute_xml("ÁT&T"),
"&Aacute;T&T")
def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
self.assertEqual(
            self.sub.substitute_xml_containing_entities("&Aacute;T&T"),
            "&Aacute;T&amp;T")
def test_quotes_not_html_substituted(self):
"""There's no need to do this except inside attribute values."""
text = 'Bob\'s "bar"'
self.assertEqual(self.sub.substitute_html(text), text)
class TestEncodingConversion(SoupTest):
# Test Beautiful Soup's ability to decode and encode from various
# encodings.
def setUp(self):
super(TestEncodingConversion, self).setUp()
self.unicode_data = u'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
self.utf8_data = self.unicode_data.encode("utf-8")
# Just so you know what it looks like.
self.assertEqual(
self.utf8_data,
b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>')
def test_ascii_in_unicode_out(self):
# ASCII input is converted to Unicode. The original_encoding
# attribute is set to 'utf-8', a superset of ASCII.
chardet = bs4.dammit.chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
# Disable chardet, which will realize that the ASCII is ASCII.
bs4.dammit.chardet_dammit = noop
ascii = b"<foo>a</foo>"
soup_from_ascii = self.soup(ascii)
unicode_output = soup_from_ascii.decode()
self.assertTrue(isinstance(unicode_output, unicode))
self.assertEqual(unicode_output, self.document_for(ascii.decode()))
self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
def test_unicode_in_unicode_out(self):
# Unicode input is left alone. The original_encoding attribute
# is not set.
soup_from_unicode = self.soup(self.unicode_data)
self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!')
self.assertEqual(soup_from_unicode.original_encoding, None)
def test_utf8_in_unicode_out(self):
# UTF-8 input is converted to Unicode. The original_encoding
# attribute is set.
soup_from_utf8 = self.soup(self.utf8_data)
self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!')
def test_utf8_out(self):
# The internal data structures can be encoded as UTF-8.
soup_from_unicode = self.soup(self.unicode_data)
self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)
@skipIf(
PYTHON_3_PRE_3_2,
"Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
def test_attribute_name_containing_unicode_characters(self):
markup = u'<div><a \N{SNOWMAN}="snowman"></a></div>'
self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8"))
class TestUnicodeDammit(unittest.TestCase):
"""Standalone tests of UnicodeDammit."""
def test_unicode_input(self):
markup = u"I'm already Unicode! \N{SNOWMAN}"
dammit = UnicodeDammit(markup)
self.assertEqual(dammit.unicode_markup, markup)
def test_smart_quotes_to_unicode(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup)
self.assertEqual(
dammit.unicode_markup, u"<foo>\u2018\u2019\u201c\u201d</foo>")
def test_smart_quotes_to_xml_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="xml")
self.assertEqual(
dammit.unicode_markup, "<foo>‘’“”</foo>")
def test_smart_quotes_to_html_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="html")
self.assertEqual(
dammit.unicode_markup, "<foo>‘’“”</foo>")
def test_smart_quotes_to_ascii(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
self.assertEqual(
dammit.unicode_markup, """<foo>''""</foo>""")
def test_detect_utf8(self):
utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
dammit = UnicodeDammit(utf8)
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
self.assertEqual(dammit.unicode_markup, u'Sacr\xe9 bleu! \N{SNOWMAN}')
def test_convert_hebrew(self):
hebrew = b"\xed\xe5\xec\xf9"
dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8')
self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9')
def test_dont_see_smart_quotes_where_there_are_none(self):
utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
dammit = UnicodeDammit(utf_8)
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)
def test_ignore_inappropriate_codecs(self):
utf8_data = u"Räksmörgås".encode("utf-8")
dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
def test_ignore_invalid_codecs(self):
utf8_data = u"Räksmörgås".encode("utf-8")
for bad_encoding in ['.utf8', '...', 'utF---16.!']:
dammit = UnicodeDammit(utf8_data, [bad_encoding])
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
def test_exclude_encodings(self):
# This is UTF-8.
utf8_data = u"Räksmörgås".encode("utf-8")
# But if we exclude UTF-8 from consideration, the guess is
# Windows-1252.
dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
self.assertEqual(dammit.original_encoding.lower(), 'windows-1252')
# And if we exclude that, there is no valid guess at all.
dammit = UnicodeDammit(
utf8_data, exclude_encodings=["utf-8", "windows-1252"])
self.assertEqual(dammit.original_encoding, None)
def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
detected = EncodingDetector(
b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
encodings = list(detected.encodings)
assert u'utf-\N{REPLACEMENT CHARACTER}' in encodings
def test_detect_html5_style_meta_tag(self):
for data in (
b'<html><meta charset="euc-jp" /></html>',
b"<html><meta charset='euc-jp' /></html>",
b"<html><meta charset=euc-jp /></html>",
b"<html><meta charset=euc-jp/></html>"):
dammit = UnicodeDammit(data, is_html=True)
self.assertEqual(
"euc-jp", dammit.original_encoding)
def test_last_ditch_entity_replacement(self):
# This is a UTF-8 document that contains bytestrings
# completely incompatible with UTF-8 (ie. encoded with some other
# encoding).
#
# Since there is no consistent encoding for the document,
# Unicode, Dammit will eventually encode the document as UTF-8
# and encode the incompatible characters as REPLACEMENT
# CHARACTER.
#
# If chardet is installed, it will detect that the document
# can be converted into ISO-8859-1 without errors. This happens
# to be the wrong encoding, but it is a consistent encoding, so the
# code we're testing here won't run.
#
# So we temporarily disable chardet if it's present.
doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
chardet = bs4.dammit.chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
bs4.dammit.chardet_dammit = noop
dammit = UnicodeDammit(doc)
self.assertEqual(True, dammit.contains_replacement_characters)
self.assertTrue(u"\ufffd" in dammit.unicode_markup)
soup = BeautifulSoup(doc, "html.parser")
self.assertTrue(soup.contains_replacement_characters)
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
def test_byte_order_mark_removed(self):
# A document written in UTF-16LE will have its byte order marker stripped.
data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
dammit = UnicodeDammit(data)
self.assertEqual(u"<a>áé</a>", dammit.unicode_markup)
self.assertEqual("utf-16le", dammit.original_encoding)
def test_detwingle(self):
# Here's a UTF8 document.
utf8 = (u"\N{SNOWMAN}" * 3).encode("utf8")
# Here's a Windows-1252 document.
windows_1252 = (
u"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")
# Through some unholy alchemy, they've been stuck together.
doc = utf8 + windows_1252 + utf8
# The document can't be turned into UTF-8:
self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")
# Unicode, Dammit thinks the whole document is Windows-1252,
# and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"
# But if we run it through fix_embedded_windows_1252, it's fixed:
fixed = UnicodeDammit.detwingle(doc)
self.assertEqual(
u"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8"))
def test_detwingle_ignores_multibyte_characters(self):
# Each of these characters has a UTF-8 representation ending
# in \x93. \x93 is a smart quote if interpreted as
# Windows-1252. But our code knows to skip over multibyte
# UTF-8 characters, so they'll survive the process unscathed.
for tricky_unicode_char in (
u"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
u"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
u"\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
):
input = tricky_unicode_char.encode("utf8")
self.assertTrue(input.endswith(b'\x93'))
output = UnicodeDammit.detwingle(input)
self.assertEqual(output, input)
class TestNamedspacedAttribute(SoupTest):
def test_name_may_be_none(self):
a = NamespacedAttribute("xmlns", None)
self.assertEqual(a, "xmlns")
def test_attribute_is_equivalent_to_colon_separated_string(self):
a = NamespacedAttribute("a", "b")
self.assertEqual("a:b", a)
def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
a = NamespacedAttribute("a", "b", "c")
b = NamespacedAttribute("a", "b", "c")
self.assertEqual(a, b)
# The actual namespace is not considered.
c = NamespacedAttribute("a", "b", None)
self.assertEqual(a, c)
# But name and prefix are important.
d = NamespacedAttribute("a", "z", "c")
self.assertNotEqual(a, d)
e = NamespacedAttribute("z", "b", "c")
self.assertNotEqual(a, e)
class TestAttributeValueWithCharsetSubstitution(unittest.TestCase):
    def test_charset_meta_attribute_value(self):
value = CharsetMetaAttributeValue("euc-jp")
self.assertEqual("euc-jp", value)
self.assertEqual("euc-jp", value.original_value)
self.assertEqual("utf8", value.encode("utf8"))
def test_content_meta_attribute_value(self):
value = ContentMetaAttributeValue("text/html; charset=euc-jp")
self.assertEqual("text/html; charset=euc-jp", value)
self.assertEqual("text/html; charset=euc-jp", value.original_value)
self.assertEqual("text/html; charset=utf8", value.encode("utf8"))
|
chauffer/pyamazonclouddrive
|
refs/heads/master
|
test-cojp.py
|
10
|
#!/usr/bin/env python
#
# Copyright (c) 2011 anatanokeitai.com(sakurai_youhei)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import pyacd
pyacd.set_amazon_domain("www.amazon.co.jp")
import os, sys
sys.path.insert(0,os.path.dirname(__file__))
import test
test.main()
|
2014c2g2/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/xml/dom/minidom.py
|
727
|
"""Simple implementation of the Level 1 DOM.
Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import io
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
namespaceURI = None # this is non-null only for elements and attributes
parentNode = None
ownerDocument = None
nextSibling = None
previousSibling = None
prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
def __bool__(self):
return True
def toxml(self, encoding=None):
return self.toprettyxml("", "", encoding)
def toprettyxml(self, indent="\t", newl="\n", encoding=None):
if encoding is None:
writer = io.StringIO()
else:
writer = io.TextIOWrapper(io.BytesIO(),
encoding=encoding,
errors="xmlcharrefreplace",
newline='\n')
if self.nodeType == Node.DOCUMENT_NODE:
# Can pass encoding only to document, to put it into XML header
self.writexml(writer, "", indent, newl, encoding)
else:
self.writexml(writer, "", indent, newl)
if encoding is None:
return writer.getvalue()
else:
return writer.detach().getvalue()
def hasChildNodes(self):
return bool(self.childNodes)
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.insertBefore(c, refChild)
### The DOM does not clearly specify what to return in this case
return newChild
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
if newChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index-1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, node):
if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(node.childNodes):
self.appendChild(c)
### The DOM does not clearly specify what to return in this case
return node
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
elif node.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
if node.parentNode is not None:
node.parentNode.removeChild(node)
_append_child(self, node)
node.nextSibling = None
return node
def replaceChild(self, newChild, oldChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
refChild = oldChild.nextSibling
self.removeChild(oldChild)
return self.insertBefore(newChild, refChild)
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild is oldChild:
return
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
try:
index = self.childNodes.index(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes[index] = newChild
newChild.parentNode = self
oldChild.parentNode = None
if (newChild.nodeType in _nodeTypes_with_children
or oldChild.nodeType in _nodeTypes_with_children):
_clear_id_cache(self)
newChild.nextSibling = oldChild.nextSibling
newChild.previousSibling = oldChild.previousSibling
oldChild.nextSibling = None
oldChild.previousSibling = None
if newChild.previousSibling:
newChild.previousSibling.nextSibling = newChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
return oldChild
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
oldChild.parentNode = None
return oldChild
def normalize(self):
L = []
for child in self.childNodes:
if child.nodeType == Node.TEXT_NODE:
if not child.data:
# empty text node; discard
if L:
L[-1].nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = child.previousSibling
child.unlink()
elif L and L[-1].nodeType == child.nodeType:
# collapse text node
node = L[-1]
node.data = node.data + child.data
node.nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = node
child.unlink()
else:
L.append(child)
else:
L.append(child)
if child.nodeType == Node.ELEMENT_NODE:
child.normalize()
self.childNodes[:] = L
def cloneNode(self, deep):
return _clone_node(self, deep, self.ownerDocument or self)
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def _get_localName(self):
# Overridden in Element and Attr where localName can be Non-Null
return None
# Node interfaces from Level 3 (WD 9 April 2002)
def isSameNode(self, other):
return self is other
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return None
# The "user data" functions use a dictionary that is only present
# if some user data has been set, so be careful not to assume it
# exists.
def getUserData(self, key):
try:
return self._user_data[key][0]
except (AttributeError, KeyError):
return None
def setUserData(self, key, data, handler):
old = None
try:
d = self._user_data
except AttributeError:
d = {}
self._user_data = d
if key in d:
old = d[key][0]
if data is None:
# ignore handlers passed for None
handler = None
if old is not None:
del d[key]
else:
d[key] = (data, handler)
return old
def _call_user_data_handler(self, operation, src, dst):
if hasattr(self, "_user_data"):
for key, (data, handler) in list(self._user_data.items()):
if handler is not None:
handler.handle(operation, key, data, src, dst)
# minidom-specific API:
def unlink(self):
self.parentNode = self.ownerDocument = None
if self.childNodes:
for child in self.childNodes:
child.unlink()
self.childNodes = NodeList()
self.previousSibling = None
self.nextSibling = None
# A Node is its own context manager, to ensure that an unlink() call occurs.
# This is similar to how a file object works.
def __enter__(self):
return self
def __exit__(self, et, ev, tb):
self.unlink()
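    # Example (illustrative): used as a context manager, a parsed document is
    # unlinked automatically on exit, breaking its internal reference cycles:
    #
    #   with xml.dom.minidom.parseString("<a><b/></a>") as dom:
    #       print(dom.documentElement.tagName)  # -> 'a'
    #   # dom.unlink() has been called here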
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.previousSibling = last
last.nextSibling = node
childNodes.append(node)
node.parentNode = self
def _in_document(node):
# return True iff node is part of a document tree
while node is not None:
if node.nodeType == Node.DOCUMENT_NODE:
return True
node = node.parentNode
return False
def _write_data(writer, data):
"Writes datachars to writer."
if data:
        data = data.replace("&", "&amp;").replace("<", "&lt;"). \
               replace("\"", "&quot;").replace(">", "&gt;")
writer.write(data)
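# Example (illustrative): after the escaping above,
# _write_data(w, 'a < b & "c"') writes 'a &lt; b &amp; &quot;c&quot;' to w.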
def _get_elements_by_tagName_helper(parent, name, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE and \
(name == "*" or node.tagName == name):
rc.append(node)
_get_elements_by_tagName_helper(node, name, rc)
return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if ((localName == "*" or node.localName == localName) and
(nsURI == "*" or node.namespaceURI == nsURI)):
rc.append(node)
_get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
return rc
class DocumentFragment(Node):
nodeType = Node.DOCUMENT_FRAGMENT_NODE
nodeName = "#document-fragment"
nodeValue = None
attributes = None
parentNode = None
_child_node_types = (Node.ELEMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.NOTATION_NODE)
def __init__(self):
self.childNodes = NodeList()
class Attr(Node):
__slots__=('_name', '_value', 'namespaceURI',
'_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement')
nodeType = Node.ATTRIBUTE_NODE
attributes = None
specified = False
_is_id = False
_child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
prefix=None):
self.ownerElement = None
self._name = qName
self.namespaceURI = namespaceURI
self._prefix = prefix
self.childNodes = NodeList()
# Add the single child node that represents the value of the attr
self.childNodes.append(Text())
# nodeValue and value are set elsewhere
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.nodeName.split(":", 1)[-1]
    def _get_specified(self):
        return self.specified
    def _get_name(self):
        return self._name
def _set_name(self, value):
self._name = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
nodeName = name = property(_get_name, _set_name)
def _get_value(self):
return self._value
    def _set_value(self, value):
        self._value = value
        self.childNodes[0].data = value
        if self.ownerElement is not None:
            _clear_id_cache(self.ownerElement)
nodeValue = value = property(_get_value, _set_value)
def _get_prefix(self):
return self._prefix
def _set_prefix(self, prefix):
nsuri = self.namespaceURI
if prefix == "xmlns":
if nsuri and nsuri != XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix for the wrong namespace")
self._prefix = prefix
if prefix is None:
newName = self.localName
else:
newName = "%s:%s" % (prefix, self.localName)
if self.ownerElement:
_clear_id_cache(self.ownerElement)
self.name = newName
prefix = property(_get_prefix, _set_prefix)
def unlink(self):
# This implementation does not call the base implementation
# since most of that is not needed, and the expense of the
# method call is not warranted. We duplicate the removal of
# children, but that's all we needed from the base class.
elem = self.ownerElement
if elem is not None:
del elem._attrs[self.nodeName]
del elem._attrsNS[(self.namespaceURI, self.localName)]
if self._is_id:
self._is_id = False
elem._magic_id_nodes -= 1
self.ownerDocument._magic_id_count -= 1
for child in self.childNodes:
child.unlink()
del self.childNodes[:]
def _get_isId(self):
if self._is_id:
return True
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return False
info = doc._get_elem_info(elem)
if info is None:
return False
if self.namespaceURI:
return info.isIdNS(self.namespaceURI, self.localName)
else:
return info.isId(self.nodeName)
def _get_schemaType(self):
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return _no_type
info = doc._get_elem_info(elem)
if info is None:
return _no_type
if self.namespaceURI:
return info.getAttributeTypeNS(self.namespaceURI, self.localName)
else:
return info.getAttributeType(self.nodeName)
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
Ordering is imposed artificially and does not reflect the order of
attributes as found in an input document.
"""
__slots__ = ('_attrs', '_attrsNS', '_ownerElement')
def __init__(self, attrs, attrsNS, ownerElement):
self._attrs = attrs
self._attrsNS = attrsNS
self._ownerElement = ownerElement
def _get_length(self):
return len(self._attrs)
def item(self, index):
try:
return self[list(self._attrs.keys())[index]]
except IndexError:
return None
def items(self):
L = []
for node in self._attrs.values():
L.append((node.nodeName, node.value))
return L
def itemsNS(self):
L = []
for node in self._attrs.values():
L.append(((node.namespaceURI, node.localName), node.value))
return L
def __contains__(self, key):
if isinstance(key, str):
return key in self._attrs
else:
return key in self._attrsNS
def keys(self):
return self._attrs.keys()
def keysNS(self):
return self._attrsNS.keys()
def values(self):
return self._attrs.values()
def get(self, name, value=None):
return self._attrs.get(name, value)
__len__ = _get_length
def _cmp(self, other):
if self._attrs is getattr(other, "_attrs", None):
return 0
else:
return (id(self) > id(other)) - (id(self) < id(other))
def __eq__(self, other):
return self._cmp(other) == 0
def __ge__(self, other):
return self._cmp(other) >= 0
def __gt__(self, other):
return self._cmp(other) > 0
def __le__(self, other):
return self._cmp(other) <= 0
def __lt__(self, other):
return self._cmp(other) < 0
def __ne__(self, other):
return self._cmp(other) != 0
def __getitem__(self, attname_or_tuple):
if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
# same as set
def __setitem__(self, attname, value):
if isinstance(value, str):
try:
node = self._attrs[attname]
except KeyError:
node = Attr(attname)
node.ownerDocument = self._ownerElement.ownerDocument
self.setNamedItem(node)
node.value = value
else:
if not isinstance(value, Attr):
raise TypeError("value must be a string or Attr object")
node = value
self.setNamedItem(node)
def getNamedItem(self, name):
try:
return self._attrs[name]
except KeyError:
return None
def getNamedItemNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)]
except KeyError:
return None
def removeNamedItem(self, name):
n = self.getNamedItem(name)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrs[n.nodeName]
del self._attrsNS[(n.namespaceURI, n.localName)]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def removeNamedItemNS(self, namespaceURI, localName):
n = self.getNamedItemNS(namespaceURI, localName)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrsNS[(n.namespaceURI, n.localName)]
del self._attrs[n.nodeName]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def setNamedItem(self, node):
if not isinstance(node, Attr):
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
old = self._attrs.get(node.name)
if old:
old.unlink()
self._attrs[node.name] = node
self._attrsNS[(node.namespaceURI, node.localName)] = node
node.ownerElement = self._ownerElement
_clear_id_cache(node.ownerElement)
return old
def setNamedItemNS(self, node):
return self.setNamedItem(node)
def __delitem__(self, attname_or_tuple):
node = self[attname_or_tuple]
_clear_id_cache(node.ownerElement)
node.unlink()
def __getstate__(self):
return self._attrs, self._attrsNS, self._ownerElement
def __setstate__(self, state):
self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
doc="Number of nodes in the NamedNodeMap.")
AttributeList = NamedNodeMap
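# Illustrative sketch (not part of the module) of the NamedNodeMap interface:
# reads and writes go straight through to the owning element's dictionaries.
def _example_named_node_map():
    from xml.dom.minidom import parseString
    elem = parseString('<r a="1" b="2"/>').documentElement
    attrs = elem.attributes
    print(attrs.length, sorted(attrs.keys()))  # -> 2 ['a', 'b']
    attrs["c"] = "3"                           # __setitem__ creates an Attr
    print(elem.getAttribute("c"))              # -> 3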
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
__slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix',
'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS',
'nextSibling', 'previousSibling')
nodeType = Node.ELEMENT_NODE
nodeValue = None
schemaType = _no_type
_magic_id_nodes = 0
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
localName=None):
self.parentNode = None
self.tagName = self.nodeName = tagName
self.prefix = prefix
self.namespaceURI = namespaceURI
self.childNodes = NodeList()
self.nextSibling = self.previousSibling = None
# Attribute dictionaries are lazily created
# attributes are double-indexed:
# tagName -> Attribute
# URI,localName -> Attribute
        # in the future: consider lazy generation
        # of attribute objects; this is too tricky
        # for now because of headaches with
        # namespaces.
self._attrs = None
self._attrsNS = None
def _ensure_attributes(self):
if self._attrs is None:
self._attrs = {}
self._attrsNS = {}
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.tagName.split(":", 1)[-1]
def _get_tagName(self):
return self.tagName
def unlink(self):
if self._attrs is not None:
for attr in list(self._attrs.values()):
attr.unlink()
self._attrs = None
self._attrsNS = None
Node.unlink(self)
def getAttribute(self, attname):
if self._attrs is None:
return ""
try:
return self._attrs[attname].value
except KeyError:
return ""
def getAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return ""
try:
return self._attrsNS[(namespaceURI, localName)].value
except KeyError:
return ""
def setAttribute(self, attname, value):
attr = self.getAttributeNode(attname)
if attr is None:
attr = Attr(attname)
attr.value = value # also sets nodeValue
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
elif value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
def setAttributeNS(self, namespaceURI, qualifiedName, value):
prefix, localname = _nssplit(qualifiedName)
attr = self.getAttributeNodeNS(namespaceURI, localname)
if attr is None:
attr = Attr(qualifiedName, namespaceURI, localname, prefix)
attr.value = value
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
else:
if value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
if attr.prefix != prefix:
attr.prefix = prefix
attr.nodeName = qualifiedName
def getAttributeNode(self, attrname):
if self._attrs is None:
return None
return self._attrs.get(attrname)
def getAttributeNodeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return None
return self._attrsNS.get((namespaceURI, localName))
def setAttributeNode(self, attr):
if attr.ownerElement not in (None, self):
raise xml.dom.InuseAttributeErr("attribute node already owned")
self._ensure_attributes()
old1 = self._attrs.get(attr.name, None)
if old1 is not None:
self.removeAttributeNode(old1)
old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
if old2 is not None and old2 is not old1:
self.removeAttributeNode(old2)
_set_attribute_node(self, attr)
if old1 is not attr:
# It might have already been part of this node, in which case
# it doesn't represent a change, and should not be returned.
return old1
if old2 is not attr:
return old2
setAttributeNodeNS = setAttributeNode
def removeAttribute(self, name):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrs[name]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrsNS[(namespaceURI, localName)]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNode(self, node):
if node is None:
raise xml.dom.NotFoundErr()
try:
self._attrs[node.name]
except KeyError:
raise xml.dom.NotFoundErr()
_clear_id_cache(self)
node.unlink()
# Restore this since the node is still useful and otherwise
# unlinked
node.ownerDocument = self.ownerDocument
removeAttributeNodeNS = removeAttributeNode
def hasAttribute(self, name):
if self._attrs is None:
return False
return name in self._attrs
def hasAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return False
return (namespaceURI, localName) in self._attrsNS
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def __repr__(self):
return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent+addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s"%(newl))
def _get_attributes(self):
self._ensure_attributes()
return NamedNodeMap(self._attrs, self._attrsNS, self)
def hasAttributes(self):
if self._attrs:
return True
else:
return False
# DOM Level 3 attributes, based on the 22 Oct 2002 draft
def setIdAttribute(self, name):
idAttr = self.getAttributeNode(name)
self.setIdAttributeNode(idAttr)
def setIdAttributeNS(self, namespaceURI, localName):
idAttr = self.getAttributeNodeNS(namespaceURI, localName)
self.setIdAttributeNode(idAttr)
def setIdAttributeNode(self, idAttr):
if idAttr is None or not self.isSameNode(idAttr.ownerElement):
raise xml.dom.NotFoundErr()
if _get_containing_entref(self) is not None:
raise xml.dom.NoModificationAllowedErr()
if not idAttr._is_id:
idAttr._is_id = True
self._magic_id_nodes += 1
self.ownerDocument._magic_id_count += 1
_clear_id_cache(self)
defproperty(Element, "attributes",
doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
_clear_id_cache(element)
element._ensure_attributes()
element._attrs[attr.name] = attr
element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
# This creates a circular reference, but Element.unlink()
# breaks the cycle since the references to the attribute
# dictionaries are tossed.
attr.ownerElement = element
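# Illustrative sketch (not part of the module) of the Element attribute API
# defined above; note that a missing attribute reads as "" rather than None.
def _example_element_attributes():
    from xml.dom.minidom import Document
    elem = Document().createElement("item")
    elem.setAttribute("id", "n1")
    print(elem.hasAttribute("id"), elem.getAttribute("id"))  # -> True n1
    elem.removeAttribute("id")
    print(repr(elem.getAttribute("id")))                     # -> ''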
class Childless:
"""Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
__slots__ = ()
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes cannot have children")
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
def removeChild(self, oldChild):
raise xml.dom.NotFoundErr(
self.nodeName + " nodes do not have children")
def normalize(self):
# For childless nodes, normalize() has nothing to do.
pass
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
__slots__ = ('target', 'data')
def __init__(self, target, data):
self.target = target
self.data = data
# nodeValue is an alias for data
def _get_nodeValue(self):
return self.data
def _set_nodeValue(self, value):
        self.data = value
nodeValue = property(_get_nodeValue, _set_nodeValue)
# nodeName is an alias for target
def _get_nodeName(self):
return self.target
def _set_nodeName(self, value):
self.target = value
nodeName = property(_get_nodeName, _set_nodeName)
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
__slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling')
def __init__(self):
self.ownerDocument = self.parentNode = None
self.previousSibling = self.nextSibling = None
self._data = ''
Node.__init__(self)
def _get_length(self):
return len(self.data)
__len__ = _get_length
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
data = nodeValue = property(_get_data, _set_data)
def __repr__(self):
data = self.data
if len(data) > 10:
dotdotdot = "..."
else:
dotdotdot = ""
return '<DOM %s node "%r%s">' % (
self.__class__.__name__, data[0:10], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
return self.data[offset:offset+count]
def appendData(self, arg):
self.data = self.data + arg
def insertData(self, offset, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if arg:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = self.data[:offset] + self.data[offset+count:]
def replaceData(self, offset, count, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
__slots__ = ()
nodeType = Node.TEXT_NODE
nodeName = "#text"
attributes = None
def splitText(self, offset):
if offset < 0 or offset > len(self.data):
raise xml.dom.IndexSizeErr("illegal offset value")
newText = self.__class__()
newText.data = self.data[offset:]
newText.ownerDocument = self.ownerDocument
next = self.nextSibling
if self.parentNode and self in self.parentNode.childNodes:
if next is None:
self.parentNode.appendChild(newText)
else:
self.parentNode.insertBefore(newText, next)
self.data = self.data[:offset]
return newText
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s" % (indent, self.data, newl))
# DOM Level 3 (WD 9 April 2002)
def _get_wholeText(self):
L = [self.data]
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.insert(0, n.data)
n = n.previousSibling
else:
break
n = self.nextSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.append(n.data)
n = n.nextSibling
else:
break
return ''.join(L)
def replaceWholeText(self, content):
# XXX This needs to be seriously changed if minidom ever
# supports EntityReference nodes.
parent = self.parentNode
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.previousSibling
parent.removeChild(n)
n = next
else:
break
n = self.nextSibling
if not content:
parent.removeChild(self)
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.nextSibling
parent.removeChild(n)
n = next
else:
break
if content:
self.data = content
return self
else:
return None
def _get_isWhitespaceInElementContent(self):
if self.data.strip():
return False
elem = _get_containing_element(self)
if elem is None:
return False
info = self.ownerDocument._get_elem_info(elem)
if info is None:
return False
else:
return info.isElementContent()
defproperty(Text, "isWhitespaceInElementContent",
doc="True iff this text node contains only whitespace"
" and is in element content.")
defproperty(Text, "wholeText",
doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ELEMENT_NODE:
return c
c = c.parentNode
return None
def _get_containing_entref(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ENTITY_REFERENCE_NODE:
return c
c = c.parentNode
return None
class Comment(CharacterData):
nodeType = Node.COMMENT_NODE
nodeName = "#comment"
def __init__(self, data):
CharacterData.__init__(self)
self._data = data
def writexml(self, writer, indent="", addindent="", newl=""):
if "--" in self.data:
raise ValueError("'--' is not allowed in a comment node")
writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
__slots__ = ()
nodeType = Node.CDATA_SECTION_NODE
nodeName = "#cdata-section"
def writexml(self, writer, indent="", addindent="", newl=""):
if self.data.find("]]>") >= 0:
raise ValueError("']]>' not allowed in a CDATA section")
writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
__slots__ = '_seq',
def __init__(self, seq=()):
# seq should be a list or tuple
self._seq = seq
def __len__(self):
return len(self._seq)
def _get_length(self):
return len(self._seq)
def getNamedItem(self, name):
for n in self._seq:
if n.nodeName == name:
return n
def getNamedItemNS(self, namespaceURI, localName):
for n in self._seq:
if n.namespaceURI == namespaceURI and n.localName == localName:
return n
def __getitem__(self, name_or_tuple):
if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
if node is None:
raise KeyError(name_or_tuple)
return node
def item(self, index):
if index < 0:
return None
try:
return self._seq[index]
except IndexError:
return None
def removeNamedItem(self, name):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def removeNamedItemNS(self, namespaceURI, localName):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItem(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItemNS(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def __getstate__(self):
return [self._seq]
def __setstate__(self, state):
self._seq = state[0]
defproperty(ReadOnlySequentialNamedNodeMap, "length",
doc="Number of entries in the NamedNodeMap.")
class Identified:
"""Mix-in class that supports the publicId and systemId attributes."""
__slots__ = 'publicId', 'systemId'
def _identified_mixin_init(self, publicId, systemId):
self.publicId = publicId
self.systemId = systemId
def _get_publicId(self):
return self.publicId
def _get_systemId(self):
return self.systemId
class DocumentType(Identified, Childless, Node):
nodeType = Node.DOCUMENT_TYPE_NODE
nodeValue = None
name = None
publicId = None
systemId = None
internalSubset = None
def __init__(self, qualifiedName):
self.entities = ReadOnlySequentialNamedNodeMap()
self.notations = ReadOnlySequentialNamedNodeMap()
if qualifiedName:
prefix, localname = _nssplit(qualifiedName)
self.name = localname
self.nodeName = self.name
def _get_internalSubset(self):
return self.internalSubset
def cloneNode(self, deep):
if self.ownerDocument is None:
# it's ok
clone = DocumentType(None)
clone.name = self.name
clone.nodeName = self.name
operation = xml.dom.UserDataHandler.NODE_CLONED
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in self.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
clone.notations._seq.append(notation)
n._call_user_data_handler(operation, n, notation)
for e in self.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
                    e._call_user_data_handler(operation, e, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
return None
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("<!DOCTYPE ")
writer.write(self.name)
if self.publicId:
writer.write("%s PUBLIC '%s'%s '%s'"
% (newl, self.publicId, newl, self.systemId))
elif self.systemId:
writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
if self.internalSubset is not None:
writer.write(" [")
writer.write(self.internalSubset)
writer.write("]")
writer.write(">"+newl)
class Entity(Identified, Node):
attributes = None
nodeType = Node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
def __init__(self, name, publicId, systemId, notation):
self.nodeName = name
self.notationName = notation
self.childNodes = NodeList()
self._identified_mixin_init(publicId, systemId)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_encoding(self):
return self.encoding
def _get_version(self):
return self.version
def appendChild(self, newChild):
raise xml.dom.HierarchyRequestErr(
"cannot append children to an entity node")
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
"cannot insert children below an entity node")
def removeChild(self, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot remove children from an entity node")
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
nodeType = Node.NOTATION_NODE
nodeValue = None
def __init__(self, name, publicId, systemId):
self.nodeName = name
self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
_features = [("core", "1.0"),
("core", "2.0"),
("core", None),
("xml", "1.0"),
("xml", "2.0"),
("xml", None),
("ls-load", "3.0"),
("ls-load", None),
]
def hasFeature(self, feature, version):
if version == "":
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.parentNode is not None:
raise xml.dom.WrongDocumentErr(
"doctype object owned by another DOM tree")
doc = self._create_document()
add_root_element = not (namespaceURI is None
and qualifiedName is None
and doctype is None)
if not qualifiedName and add_root_element:
# The spec is unclear what to raise here; SyntaxErr
# would be the other obvious candidate. Since Xerces raises
# InvalidCharacterErr, and since SyntaxErr is not listed
# for createDocument, that seems to be the better choice.
# XXX: need to check for illegal characters here and in
# createElement.
            # DOM Level III clears this up when talking about the return value
            # of this function.  If namespaceURI, qName and doctype are all
            # None, the document is returned without a document element;
            # otherwise, if doctype or namespaceURI are not None,
            # we go back to the above problem.
raise xml.dom.InvalidCharacterErr("Element with no name")
if add_root_element:
prefix, localname = _nssplit(qualifiedName)
if prefix == "xml" \
and namespaceURI != "http://www.w3.org/XML/1998/namespace":
raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise xml.dom.NamespaceErr(
"illegal use of prefix without namespaces")
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
if doctype:
doctype.parentNode = doctype.ownerDocument = doc
doc.doctype = doctype
doc.implementation = self
return doc
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
# DOM Level 3 (WD 9 April 2002)
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return None
# internal
def _create_document(self):
return Document()
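# Illustrative sketch (not part of the module) of building a document through
# the DOMImplementation factory methods above:
def _example_create_document():
    from xml.dom.minidom import getDOMImplementation
    impl = getDOMImplementation()
    doctype = impl.createDocumentType("html", None, None)
    doc = impl.createDocument(None, "html", doctype)
    print(doc.documentElement.tagName, doc.doctype.name)  # -> html html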
class ElementInfo(object):
"""Object that represents content-model information for an element.
This implementation is not expected to be used in practice; DOM
builders should provide implementations which do the right thing
using information available to it.
"""
__slots__ = 'tagName',
def __init__(self, name):
self.tagName = name
def getAttributeType(self, aname):
return _no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return _no_type
def isElementContent(self):
return False
def isEmpty(self):
"""Returns true iff this element is declared to have an EMPTY
content model."""
return False
def isId(self, aname):
"""Returns true iff the named attribute is a DTD-style ID."""
return False
def isIdNS(self, namespaceURI, localName):
"""Returns true iff the identified attribute is a DTD-style ID."""
return False
def __getstate__(self):
return self.tagName
def __setstate__(self, state):
self.tagName = state
def _clear_id_cache(node):
if node.nodeType == Node.DOCUMENT_NODE:
node._id_cache.clear()
node._id_search_stack = None
elif _in_document(node):
node.ownerDocument._id_cache.clear()
node.ownerDocument._id_search_stack= None
class Document(Node, DocumentLS):
__slots__ = ('_elem_info', 'doctype',
'_id_search_stack', 'childNodes', '_id_cache')
_child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
implementation = DOMImplementation()
nodeType = Node.DOCUMENT_NODE
nodeName = "#document"
nodeValue = None
attributes = None
parentNode = None
previousSibling = nextSibling = None
# Document attributes from Level 3 (WD 9 April 2002)
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
_magic_id_count = 0
def __init__(self):
self.doctype = None
self.childNodes = NodeList()
# mapping of (namespaceURI, localName) -> ElementInfo
# and tagName -> ElementInfo
self._elem_info = {}
self._id_cache = {}
self._id_search_stack = None
def _get_elem_info(self, element):
if element.namespaceURI:
key = element.namespaceURI, element.localName
else:
key = element.tagName
return self._elem_info.get(key)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_doctype(self):
return self.doctype
def _get_documentURI(self):
return self.documentURI
def _get_encoding(self):
return self.encoding
def _get_errorHandler(self):
return self.errorHandler
def _get_standalone(self):
return self.standalone
def _get_strictErrorChecking(self):
return self.strictErrorChecking
def _get_version(self):
return self.version
def appendChild(self, node):
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
if node.parentNode is not None:
# This needs to be done before the next test since this
# may *be* the document element, in which case it should
# end up re-ordered to the end.
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE \
and self._get_documentElement():
raise xml.dom.HierarchyRequestErr(
"two document elements disallowed")
return Node.appendChild(self, node)
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
oldChild.nextSibling = oldChild.previousSibling = None
oldChild.parentNode = None
if self.documentElement is oldChild:
self.documentElement = None
return oldChild
def _get_documentElement(self):
for node in self.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
return node
def unlink(self):
if self.doctype is not None:
self.doctype.unlink()
self.doctype = None
Node.unlink(self)
def cloneNode(self, deep):
if not deep:
return None
clone = self.implementation.createDocument(None, None, None)
clone.encoding = self.encoding
clone.standalone = self.standalone
clone.version = self.version
for n in self.childNodes:
childclone = _clone_node(n, deep, clone)
assert childclone.ownerDocument.isSameNode(clone)
clone.childNodes.append(childclone)
if childclone.nodeType == Node.DOCUMENT_NODE:
assert clone.documentElement is None
elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
assert clone.doctype is None
clone.doctype = childclone
childclone.parentNode = clone
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
self, clone)
return clone
def createDocumentFragment(self):
d = DocumentFragment()
d.ownerDocument = self
return d
def createElement(self, tagName):
e = Element(tagName)
e.ownerDocument = self
return e
def createTextNode(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
t = Text()
t.data = data
t.ownerDocument = self
return t
def createCDATASection(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
c = CDATASection()
c.data = data
c.ownerDocument = self
return c
def createComment(self, data):
c = Comment(data)
c.ownerDocument = self
return c
def createProcessingInstruction(self, target, data):
p = ProcessingInstruction(target, data)
p.ownerDocument = self
return p
def createAttribute(self, qName):
a = Attr(qName)
a.ownerDocument = self
a.value = ""
return a
def createElementNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
e = Element(qualifiedName, namespaceURI, prefix)
e.ownerDocument = self
return e
def createAttributeNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
a = Attr(qualifiedName, namespaceURI, localName, prefix)
a.ownerDocument = self
a.value = ""
return a
# A couple of implementation-specific helpers to create node types
# not supported by the W3C DOM specs:
def _create_entity(self, name, publicId, systemId, notationName):
e = Entity(name, publicId, systemId, notationName)
e.ownerDocument = self
return e
def _create_notation(self, name, publicId, systemId):
n = Notation(name, publicId, systemId)
n.ownerDocument = self
return n
def getElementById(self, id):
if id in self._id_cache:
return self._id_cache[id]
if not (self._elem_info or self._magic_id_count):
return None
stack = self._id_search_stack
if stack is None:
# we never searched before, or the cache has been cleared
stack = [self.documentElement]
self._id_search_stack = stack
elif not stack:
# Previous search was completed and cache is still valid;
# no matching node.
return None
result = None
while stack:
node = stack.pop()
# add child elements to stack for continued searching
stack.extend([child for child in node.childNodes
if child.nodeType in _nodeTypes_with_children])
# check this node
info = self._get_elem_info(node)
if info:
# We have to process all ID attributes before
# returning in order to get all the attributes set to
# be IDs using Element.setIdAttribute*().
for attr in node.attributes.values():
if attr.namespaceURI:
if info.isIdNS(attr.namespaceURI, attr.localName):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif info.isId(attr.name):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif node._magic_id_nodes == 1:
break
elif node._magic_id_nodes:
for attr in node.attributes.values():
if attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
if result is not None:
break
return result
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def isSupported(self, feature, version):
return self.implementation.hasFeature(feature, version)
def importNode(self, node, deep):
if node.nodeType == Node.DOCUMENT_NODE:
raise xml.dom.NotSupportedErr("cannot import document nodes")
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise xml.dom.NotSupportedErr("cannot import document type nodes")
return _clone_node(node, deep, self)
def writexml(self, writer, indent="", addindent="", newl="", encoding=None):
if encoding is None:
writer.write('<?xml version="1.0" ?>'+newl)
else:
writer.write('<?xml version="1.0" encoding="%s"?>%s' % (
encoding, newl))
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
# DOM Level 3 (WD 9 April 2002)
def renameNode(self, n, namespaceURI, name):
if n.ownerDocument is not self:
raise xml.dom.WrongDocumentErr(
"cannot rename nodes from other documents;\n"
"expected %s,\nfound %s" % (self, n.ownerDocument))
if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
raise xml.dom.NotSupportedErr(
"renameNode() only applies to element and attribute nodes")
if namespaceURI != EMPTY_NAMESPACE:
if ':' in name:
prefix, localName = name.split(':', 1)
if ( prefix == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE):
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix")
else:
if ( name == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE
and n.nodeType == Node.ATTRIBUTE_NODE):
raise xml.dom.NamespaceErr(
"illegal use of the 'xmlns' attribute")
prefix = None
localName = name
else:
prefix = None
localName = None
if n.nodeType == Node.ATTRIBUTE_NODE:
element = n.ownerElement
if element is not None:
is_id = n._is_id
element.removeAttributeNode(n)
else:
element = None
n.prefix = prefix
n._localName = localName
n.namespaceURI = namespaceURI
n.nodeName = name
if n.nodeType == Node.ELEMENT_NODE:
n.tagName = name
else:
# attribute node
n.name = name
if element is not None:
element.setAttributeNode(n)
if is_id:
element.setIdAttributeNode(n)
# It's not clear from a semantic perspective whether we should
# call the user data handlers for the NODE_RENAMED event since
# we're re-using the existing node. The draft spec has been
# interpreted as meaning "no, don't call the handler unless a
# new node is created."
return n
defproperty(Document, "documentElement",
doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
                    e._call_user_data_handler(operation, e, entity)
else:
# Note the cloning of Document and DocumentType nodes is
# implementation specific. minidom handles those cases
# directly in the cloneNode() methods.
raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
    # Check for _call_user_data_handler() since this could conceivably be
    # used with other DOM implementations (one of the FourThought DOMs,
    # perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
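# Illustrative sketch (not part of the module) of _clone_node() through its
# public entry points, cloneNode() and Document.importNode():
def _example_clone_and_import():
    from xml.dom.minidom import parseString
    src = parseString('<a b="1"><c/></a>')
    copy = src.documentElement.cloneNode(deep=True)
    print(copy.toxml())  # -> <a b="1"><c/></a>
    other = parseString("<root/>")
    imported = other.importNode(src.documentElement, True)
    print(imported.ownerDocument is other)  # -> True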
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
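# Illustrative note (not part of the module): callers always unpack the
# two-item result, which hides the fact that the prefixed case yields a list
# while the unprefixed case yields a tuple.
def _example_nssplit():
    prefix, local = _nssplit("xsl:template")
    assert (prefix, local) == ("xsl", "template")
    assert _nssplit("template") == (None, "template")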
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize})
def parseString(string, parser=None):
"""Parse a file into a DOM from a string."""
if parser is None:
from xml.dom import expatbuilder
return expatbuilder.parseString(string)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parseString, (string,),
{'parser': parser})
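# Illustrative sketch (not part of the module) of the two parsing entry
# points above; parse() accepts a filename or a file-like object.
def _example_parse_entry_points():
    import io
    from xml.dom.minidom import parse, parseString
    dom_a = parseString("<greeting>hi</greeting>")
    dom_b = parse(io.StringIO("<greeting>hi</greeting>"))
    print(dom_a.documentElement.firstChild.data)  # -> hi
    print(dom_a.toxml() == dom_b.toxml())         # -> True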
def getDOMImplementation(features=None):
if features:
if isinstance(features, str):
features = domreg._parse_feature_string(features)
for f, v in features:
if not Document.implementation.hasFeature(f, v):
return None
return Document.implementation
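# Illustrative sketch (not part of the module) of feature queries against the
# implementation object returned above:
def _example_feature_queries():
    from xml.dom.minidom import getDOMImplementation
    impl = getDOMImplementation()
    print(impl.hasFeature("core", "2.0"))            # -> True
    print(getDOMImplementation("core 2.0") is impl)  # -> True (feature string)
    print(impl.hasFeature("traversal", "2.0"))       # -> False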
|
jelugbo/hebs_master
|
refs/heads/master
|
lms/envs/devgroups/h_cs50.py
|
74
|
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .courses import *
DATABASES = course_db_for('HarvardX/CS50x/2012')
|
LudovicRousseau/pyscard
|
refs/heads/master
|
smartcard/test/framework/testcase_ulist.py
|
1
|
#! /usr/bin/env python3
"""Unit tests for ulist
This test case can be executed individually, or with all other test cases
through testsuite_framework.py.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from smartcard.ulist import ulist
class C(ulist):
def __onadditem__(self, item):
#print '+', item
pass
def __onremoveitem__(self, item):
#print '-', item
pass
class testcase_ulist(unittest.TestCase):
"""Test smartcard.ulist."""
def testcase_ulist_init(self):
"""tests constructor"""
c = C([1, 2, 3, 3, 4, 5, 5])
self.assertEqual([1, 2, 3, 4, 5], c)
c = C(['one', 'two', 'three', 'one'])
self.assertEqual(['one', 'two', 'three'], c)
def testcase_ulist_add(self):
"""tests l=l+other"""
seed = [1, 2, 3]
c = C(seed)
self.assertEqual(seed, c)
c = c + []
self.assertEqual(seed, c)
c = c + 4
self.assertEqual(seed + [4], c)
c = c + 4
self.assertEqual(seed + [4], c)
c = c + 'word'
self.assertEqual(seed + [4] + ['word'], c)
seed = ['one', 'two', 'three']
c = C(seed)
self.assertEqual(seed, c)
c = c + ['four', 'five']
self.assertEqual(seed + ['four', 'five'], c)
def testcase_ulist_iadd(self):
"""tests l+=other"""
seed = [1, 2, 3]
c = C(seed)
self.assertEqual(seed, c)
c += []
self.assertEqual(seed, c)
c += 4
self.assertEqual(seed + [4], c)
c += 4
self.assertEqual(seed + [4], c)
c += [4, 3, 2, 1]
self.assertEqual(seed + [4], c)
c += 'word'
self.assertEqual(seed + [4] + ['word'], c)
seed = ['one', 'two', 'three']
c = C(seed)
self.assertEqual(seed, c)
c += ['four', 'five']
self.assertEqual(seed + ['four', 'five'], c)
def testcase_ulist_radd(self):
"""tests l=other+l"""
seed = [1, 2, 3]
c = C(seed)
self.assertEqual(seed, c)
l = [] + c
self.assertEqual(seed, l)
l = [3] + c
self.assertEqual(seed, c)
self.assertEqual(seed, l)
l = [3, 3, 4, 4] + c
self.assertEqual(seed, c)
self.assertEqual(seed + [4], l)
l = [4] + ['word'] + c
self.assertEqual(seed, c)
self.assertEqual(seed + [4] + ['word'], l)
def testcase_ulist_append(self):
seed = [1, 2, 3]
c = C(seed)
c.append(4)
self.assertEqual(seed + [4], c)
c.append(4)
self.assertEqual(seed + [4], c)
c.append('word')
self.assertEqual(seed + [4] + ['word'], c)
def testcase_ulist_insert(self):
seed = [1, 2, 3]
c = C(seed)
c.insert(0, 0)
self.assertEqual([0] + seed, c)
c.insert(1, 0)
self.assertEqual([0] + seed, c)
def testcase_ulist_pop(self):
seed = [1, 2, 3]
c = C(seed)
c.pop()
self.assertEqual(c, [1, 2])
c.pop(1)
self.assertEqual(c, [1])
def testcase_ulist_remove(self):
seed = [1, 2, 3]
c = C(seed)
c.remove(2)
self.assertEqual(c, [1, 3])
c.remove(1)
self.assertEqual(c, [3])
def suite():
suite1 = unittest.makeSuite(testcase_ulist)
    return unittest.TestSuite((suite1,))
if __name__ == '__main__':
unittest.main()
|
oscarolar/odoo
|
refs/heads/master
|
addons/account/wizard/account_report_general_ledger.py
|
75
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_report_general_ledger(osv.osv_memory):
_inherit = "account.common.account.report"
_name = "account.report.general.ledger"
_description = "General Ledger Report"
_columns = {
'landscape': fields.boolean("Landscape Mode"),
'initial_balance': fields.boolean('Include Initial Balances',
            help='If you selected to filter by date or period, this field allows you to add a row displaying the amount of debit/credit/balance that precedes the filter you\'ve set.'),
        'amount_currency': fields.boolean("With Currency", help="It adds the currency column to the report if the currency differs from the company currency."),
'sortby': fields.selection([('sort_date', 'Date'), ('sort_journal_partner', 'Journal & Partner')], 'Sort by', required=True),
'journal_ids': fields.many2many('account.journal', 'account_report_general_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'landscape': True,
'amount_currency': True,
'sortby': 'sort_date',
'initial_balance': False,
}
def onchange_fiscalyear(self, cr, uid, ids, fiscalyear=False, context=None):
res = {}
if not fiscalyear:
res['value'] = {'initial_balance': False}
return res
def _print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['landscape', 'initial_balance', 'amount_currency', 'sortby'])[0])
        if not data['form']['fiscalyear_id']:  # GTK client problem: onchange is not taken into account when saving the record
data['form'].update({'initial_balance': False})
if data['form']['landscape'] is False:
data['form'].pop('landscape')
return self.pool['report'].get_action(cr, uid, [], 'account.report_generalledger', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
VigTech/Vigtech-Services
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/utils/functional.py
|
36
|
import copy
import operator
import sys
import warnings
from functools import total_ordering, wraps
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves import copyreg
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args + moreargs), **dict(kwargs, **morekwargs))
return _curried
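# Illustrative sketch (not part of the module): curry() simply pre-binds
# positional and keyword arguments of the wrapped callable.
def _example_curry():
    def greet(greeting, name, punct="!"):
        return "%s, %s%s" % (greeting, name, punct)
    hello = curry(greet, "Hello")
    print(hello("world"))             # -> Hello, world!
    print(hello("world", punct="?"))  # -> Hello, world?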
def memoize(func, cache, num_args):
"""
Wrap a function so that results for any argument tuple are stored in
'cache'. Note that the args to the function must be usable as dictionary
keys.
Only the first num_args are considered when creating the key.
"""
warnings.warn("memoize wrapper is deprecated and will be removed in "
"Django 1.9. Use django.utils.lru_cache instead.",
RemovedInDjango19Warning, stacklevel=2)
@wraps(func)
def wrapper(*args):
mem_args = args[:num_args]
if mem_args in cache:
return cache[mem_args]
result = func(*args)
cache[mem_args] = result
return result
return wrapper
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
Optional ``name`` argument allows you to make cached properties of other
    methods. (e.g. url = cached_property(get_absolute_url, name='url'))
"""
def __init__(self, func, name=None):
self.func = func
self.__doc__ = getattr(func, '__doc__')
self.name = name or func.__name__
def __get__(self, instance, type=None):
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
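# Illustrative sketch (not part of the module): the first access computes the
# value and stores it in the instance __dict__, shadowing the descriptor, so
# later accesses never call the wrapped method again.
def _example_cached_property():
    class Report(object):
        runs = 0
        @cached_property
        def total(self):
            self.runs += 1
            return 42
    r = Report()
    print(r.total, r.total, r.runs)  # -> 42 42 1 (computed once)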
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
Turns any callable into a lazy evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__prepared = False
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if not self.__prepared:
self.__prepare_class__()
self.__prepared = True
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
@classmethod
def __prepare_class__(cls):
for resultclass in resultclasses:
for type_ in resultclass.mro():
for method_name in type_.__dict__.keys():
# All __promise__ return the same wrapper method, they
# look up the correct implementation when called.
if hasattr(cls, method_name):
continue
meth = cls.__promise__(method_name)
setattr(cls, method_name, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = six.text_type in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), (
"Cannot call lazy() with both bytes and text return types.")
if cls._delegate_text:
if six.PY3:
cls.__str__ = cls.__text_cast
else:
cls.__unicode__ = cls.__text_cast
cls.__str__ = cls.__bytes_cast_encoded
elif cls._delegate_bytes:
if six.PY3:
cls.__bytes__ = cls.__bytes_cast
else:
cls.__str__ = cls.__bytes_cast
@classmethod
def __promise__(cls, method_name):
# Builds a wrapper around some magic method
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
return getattr(res, method_name)(*args, **kw)
return __wrapper__
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __bytes_cast_encoded(self):
return func(*self.__args, **self.__kw).encode('utf-8')
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __ne__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() != other
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
return hash(self.__cast())
def __mod__(self, rhs):
if self._delegate_bytes and six.PY2:
return bytes(self) % rhs
elif self._delegate_text:
return six.text_type(self) % rhs
return self.__cast() % rhs
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
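# Illustrative sketch (not part of the module): the proxy defers the call
# until a method of the declared result class is actually used, and (as the
# docstring says) re-evaluates on every access.
def _example_lazy():
    def expensive():
        print("evaluating")
        return "value"
    proxy = lazy(expensive, str)()
    print("created")      # "evaluating" has not been printed yet
    print(proxy.upper())  # -> evaluating, then VALUE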
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
lazy_func = lazy(func, *resultclasses)
@wraps(func)
def wrapper(*args, **kwargs):
for arg in list(args) + list(six.itervalues(kwargs)):
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy_func(*args, **kwargs)
return wrapper
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if self._wrapped is empty:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialize the wrapped object.
"""
raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. It also appears to stop __reduce__ from being
# called. So, we define __getstate__ in a way that cooperates with the way
# that pickle interprets this class. This fails when the wrapped class is
# a builtin, but it is better than nothing.
def __getstate__(self):
if self._wrapped is empty:
self._setup()
return self._wrapped.__dict__
# Python 3.3 will call __reduce__ when pickling; this method is needed
# to serialize and deserialize correctly.
@classmethod
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __reduce_ex__(self, proto):
if proto >= 2:
# On Py3, since the default protocol is 3, pickle uses the
# ``__newobj__`` method (& more efficient opcodes) for writing.
return (self.__newobj__, (self.__class__,), self.__getstate__())
else:
# On Py2, the default protocol is 0 (for back-compat) & the above
# code fails miserably (see regression test). Instead, we return
# exactly what's returned if there's no ``__reduce__`` method at
# all.
return (copyreg._reconstructor, (self.__class__, object, None), self.__getstate__())
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use type(self), not self.__class__, because the
# latter is proxied.
result = type(self)()
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
if six.PY3:
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
__bool__ = new_method_proxy(bool)
else:
__str__ = new_method_proxy(str)
__unicode__ = new_method_proxy(unicode)
__nonzero__ = new_method_proxy(bool)
# Introspection support
__dir__ = new_method_proxy(dir)
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
# Dictionary methods support
__getitem__ = new_method_proxy(operator.getitem)
__setitem__ = new_method_proxy(operator.setitem)
__delitem__ = new_method_proxy(operator.delitem)
__len__ = new_method_proxy(len)
__contains__ = new_method_proxy(operator.contains)
# Workaround for http://bugs.python.org/issue12370
_super = super
class SimpleLazyObject(LazyObject):
"""
A lazy object initialized from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
_super(SimpleLazyObject, self).__init__()
def _setup(self):
self._wrapped = self._setupfunc()
# Return a meaningful representation of the lazy object for debugging
# without evaluating the wrapped object.
def __repr__(self):
if self._wrapped is empty:
repr_attr = self._setupfunc
else:
repr_attr = self._wrapped
return '<%s: %r>' % (type(self).__name__, repr_attr)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
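def _simple_lazy_object_example():
    # A minimal sketch (not part of the original module): the factory runs
    # only on first access to the wrapped object. The list-building lambda
    # is a hypothetical stand-in for an expensive lookup.
    calls = []
    lazy_list = SimpleLazyObject(lambda: calls.append(1) or [1, 2, 3])
    assert calls == []           # instantiation alone evaluates nothing
    assert len(lazy_list) == 3   # first access triggers _setup()
    assert lazy_list[0] == 1     # later access reuses the wrapped object
    assert calls == [1]          # the factory ran exactly once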
class lazy_property(property):
"""
A property that works with subclasses by wrapping the decorated
functions of the base class.
"""
def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
if fget is not None:
@wraps(fget)
def fget(instance, instance_type=None, name=fget.__name__):
return getattr(instance, name)()
if fset is not None:
@wraps(fset)
def fset(instance, value, name=fset.__name__):
return getattr(instance, name)(value)
if fdel is not None:
@wraps(fdel)
def fdel(instance, name=fdel.__name__):
return getattr(instance, name)()
return property(fget, fset, fdel, doc)
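def _lazy_property_example():
    # A minimal sketch (not part of the original module): because
    # lazy_property re-fetches the accessor by name on the instance, a
    # subclass can override the getter, unlike a plain base-class property.
    # Both classes here are hypothetical.
    class Base(object):
        def _get_label(self):
            return 'base'
        label = lazy_property(_get_label)
    class Child(Base):
        def _get_label(self):
            return 'child'
    assert Base().label == 'base'
    assert Child().label == 'child'  # a plain property would return 'base'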
def partition(predicate, values):
"""
    Splits the values into two lists, based on the return value of the
    predicate (False goes in the first list, True in the second). e.g.:
    >>> partition(lambda x: x > 3, range(5))
    ([0, 1, 2, 3], [4])
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
if sys.version_info >= (2, 7, 2):
from functools import total_ordering
else:
    # For Python < 2.7.2: total_ordering in versions prior to 2.7.2 is buggy.
# See http://bugs.python.org/issue10042 for details. For these versions use
# code borrowed from Python 2.7.3.
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
('__lt__', lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
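def _total_ordering_example():
    # A minimal sketch (not part of the original module): given __lt__ and
    # __eq__, the decorator fills in the remaining rich comparisons.
    @total_ordering
    class Version(object):
        def __init__(self, n):
            self.n = n
        def __lt__(self, other):
            return self.n < other.n
        def __eq__(self, other):
            return self.n == other.n
    assert Version(1) <= Version(2)  # __le__ derived from __lt__/__eq__
    assert Version(2) >= Version(2)  # __ge__ derived as well
    assert Version(3) > Version(1)   # and __gt__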
|
Spiderlover/Toontown
|
refs/heads/master
|
toontown/speedchat/TTSCBoardingMenu.py
|
6
|
from direct.showbase import PythonUtil
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCMenuHolder import SCMenuHolder
from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal
from otp.otpbase import OTPLocalizer
BoardingMenuGuide = [(OTPLocalizer.BoardingMenuSections[0], []),
(OTPLocalizer.BoardingMenuSections[1], []),
(OTPLocalizer.BoardingMenuSections[2], []),
(OTPLocalizer.BoardingMenuSections[3], [5005,
5006,
5007,
5008,
5009])]
GroupPhrases = [5000,
5001,
5002,
5003,
5004]
ZoneIdsToMsgs = {10000: [GroupPhrases, [5100, 5101, 5102], [5200, 5201, 5202]],
10100: [GroupPhrases, [5103], [5203]],
11100: [GroupPhrases, [5104], [5204]],
11200: [GroupPhrases, [5105, 5106], [5205, 5206]],
12000: [GroupPhrases, [5107, 5108, 5109], [5207, 5208, 5209]],
12100: [GroupPhrases, [5110], [5210]],
13100: [GroupPhrases, [5111], [5211]],
13200: [GroupPhrases, [5112,
5113,
5114,
5115], [5212,
5213,
5214,
5215]]}
class TTSCBoardingMenu(SCMenu):
def __init__(self, zoneId):
SCMenu.__init__(self)
self.__boardingMessagesChanged(zoneId)
def destroy(self):
SCMenu.destroy(self)
def clearMenu(self):
SCMenu.clearMenu(self)
def __boardingMessagesChanged(self, zoneId):
self.clearMenu()
try:
lt = base.localAvatar
except:
return
for count in xrange(len(BoardingMenuGuide)):
section = BoardingMenuGuide[count]
if section[0] == -1:
for phrase in section[1]:
if phrase not in OTPLocalizer.SpeedChatStaticText:
print 'warning: tried to link boarding phrase %s which does not seem to exist' % phrase
break
self.append(SCStaticTextTerminal(phrase))
else:
menu = SCMenu()
phrases = ZoneIdsToMsgs[zoneId][count]
for phrase in phrases:
if phrase not in OTPLocalizer.SpeedChatStaticText:
print 'warning: tried to link boarding phrase %s which does not seem to exist' % phrase
break
menu.append(SCStaticTextTerminal(phrase))
menuName = str(section[0])
self.append(SCMenuHolder(menuName, menu))
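# A minimal sketch (not part of the original module): each zoneId in
# ZoneIdsToMsgs maps to one phrase-id list per guide section, and
# __boardingMessagesChanged resolves those ids through
# OTPLocalizer.SpeedChatStaticText. The zone id below (10000) is used
# purely as an illustration.
def _dumpBoardingPhrases(zoneId = 10000):
    for phrases in ZoneIdsToMsgs[zoneId]:
        print 'phrase ids: %s' % (phrases,)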
|
lofar-astron/factor
|
refs/heads/master
|
factor/scripts/copy_column.py
|
3
|
#! /usr/bin/env python
"""
Script to copy a column between MS files
"""
import argparse
from argparse import RawTextHelpFormatter
import casacore.tables as pt
import numpy
import sys
import os
def copy_column_to_ms(ms, inputcol, outputcol, ms_from=None, use_compression=False):
"""
Copies one column to another, within an MS file or between two MS files
Parameters
----------
ms : str
MS file receiving copy
inputcol : str
Column name to copy from
outputcol : str
Column name to copy to
    ms_from : str, optional
        MS file to copy from. If None, the column is copied internally
    use_compression : bool, optional
        If True, add the output column with Dysco (DyscoStMan) compression
    """
t = pt.table(ms, readonly=False, ack=False)
if ms_from is not None:
tf = pt.table(ms_from, readonly=False, ack=False)
data = tf.getcol(inputcol)
desc = tf.getcoldesc(inputcol)
else:
data = t.getcol(inputcol)
desc = t.getcoldesc(inputcol)
# Add the output column if needed
if outputcol not in t.colnames():
if use_compression:
# Set DyscoStMan to be storage manager for DATA and WEIGHT_SPECTRUM
# We use a visibility bit rate of 16 and truncation of 1.5 sigma to keep the
# compression noise below ~ 0.01 mJy, as estimated from Fig 4 of
# Offringa (2016). For the weights, we use a bit rate of 12, as
# recommended in Sec 4.4 of Offringa (2016)
desc['name'] = outputcol
dmi = {
'SPEC': {
'dataBitCount': numpy.uint32(16),
'distribution': 'TruncatedGaussian',
'distributionTruncation': 1.5,
'normalization': 'RF',
'weightBitCount': numpy.uint32(12)},
'NAME': '{}_dm'.format(outputcol),
'SEQNR': 1,
'TYPE': 'DyscoStMan'}
desc['option'] = 1 # make a Direct column
t.addcols(desc, dmi)
else:
desc['name'] = outputcol
t.addcols(desc)
if use_compression:
# Replace flagged values with NaNs before compression
flags = t.getcol('FLAG')
flagged = numpy.where(flags)
data[flagged] = numpy.NaN
t.putcol(outputcol, data)
t.flush()
t.close()
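# A minimal usage sketch (not part of the original script), assuming a
# hypothetical MS named 'example.ms' that has a DATA column; this copies
# DATA to CORRECTED_DATA within the same file, adding the column if needed:
#
#     copy_column_to_ms('example.ms', 'DATA', 'CORRECTED_DATA')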
def copy_column_to_bands(mslist, ms_from, inputcol, outputcol):
"""
Copies one column from an MS file to multiple MS files (bands)
Parameters
----------
mslist : list
MS files receiving copy
ms_from : str
MS file to copy from
inputcol : str
Column name to copy from
outputcol : str
Column name to copy to
"""
datain = pt.table(ms_from)
data = datain.getcol(inputcol, nrow=1)
    numberofchans = int(numpy.shape(data)[1])
    chanperms = numberofchans // len(mslist)
for ms_id, ms in enumerate(mslist):
if os.path.isdir(ms):
data = datain.getcolslice(inputcol, [chanperms*ms_id,0], [(chanperms*(ms_id+1))-1,3])
dataout = pt.table(ms, readonly=False)
dataout.putcol(outputcol, data)
dataout.flush()
dataout.close()
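def _band_slice_example(numberofchans=64, nbands=4):
    # A minimal sketch (not part of the original script) of the slicing
    # arithmetic used above: band i receives channels
    # [chanperms*i, chanperms*(i+1) - 1], a contiguous, non-overlapping split.
    chanperms = numberofchans // nbands
    slices = [(chanperms * i, chanperms * (i + 1) - 1) for i in range(nbands)]
    assert slices == [(0, 15), (16, 31), (32, 47), (48, 63)]
    return slices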
def copy_column_from_bands(mslist, ms_to, inputcol, outputcol):
"""
Copies one column from multiple MS files (bands) to a single MS file
Note: the bands are assumed to be ordered by frequency, with a nonexisting
file (e.g., 'dummy.ms') denoting missing bands
Parameters
----------
mslist : list
MS files to copy from
ms_to : str
MS file receiving copy
inputcol : str
Column name to copy from
outputcol : str
Column name to copy to
"""
dataout = pt.table(ms_to, readonly=False)
data = dataout.getcol(outputcol, nrow=1)
    numberofchans = int(numpy.shape(data)[1])
    chanperms = numberofchans // len(mslist)
for ms_id, ms in enumerate(mslist):
if os.path.isdir(ms):
datain = pt.table(ms, readonly=True)
data = datain.getcol(inputcol)
dataout.putcolslice(outputcol, data, [chanperms*ms_id,0], [(chanperms*(ms_id+1))-1,3])
datain.close()
dataout.flush()
dataout.close()
def main(ms_from, ms_to, column_from, column_to, do_copy=True, use_compression=False):
"""
Copy a column between MS files
Parameters
----------
ms_from : str
Name of MS file to copy from
ms_to : str or list
Name of MS file or list of MS files to copy to. May be given as a list
        or as a string (e.g., '[ms1, ms2]')
column_from : str
Name of column to copy from
column_to : str
Name of column to copy to
do_copy : bool, optional
If False, the copy is NOT done (used to skip a copy step in a pipeline)
use_compression : bool, optional
If True, use Dysco compression
"""
if type(do_copy) is str:
if do_copy.lower() == 'true':
do_copy = True
else:
do_copy = False
if type(use_compression) is str:
if use_compression.lower() == 'true':
use_compression = True
else:
use_compression = False
if not do_copy:
print('Copy skipped (do_copy = False)')
return
if type(ms_to) is str:
if '[' in ms_to:
ms_to = ms_to.strip('[]').split(',')
ms_to = [m.strip() for m in ms_to]
if type(ms_from) is str:
if '[' in ms_from:
ms_from = ms_from.strip('[]').split(',')
ms_from = [m.strip() for m in ms_from]
if type(ms_to) is list and type(ms_from) is list:
print('ERROR: ms_from and ms_to cannot both be lists')
sys.exit(1)
if type(ms_to) is list:
# List means call copy_column_to_bands()
copy_column_to_bands(ms_to, ms_from, column_from, column_to)
elif type(ms_from) is list:
# List means call copy_column_from_bands()
copy_column_from_bands(ms_from, ms_to, column_from, column_to)
else:
if ms_to == ms_from:
ms_from = None
copy_column_to_ms(ms_to, column_from, column_to, ms_from, use_compression)
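def _parse_mslist_example():
    # A minimal sketch (not part of the original script) of the string-to-list
    # handling in main(): a bracketed string becomes a list of stripped names.
    ms_to = '[band1.ms, band2.ms]'
    if '[' in ms_to:
        ms_to = [m.strip() for m in ms_to.strip('[]').split(',')]
    assert ms_to == ['band1.ms', 'band2.ms']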
if __name__ == '__main__':
descriptiontext = "Copy a column between MS files.\n"
parser = argparse.ArgumentParser(description=descriptiontext, formatter_class=RawTextHelpFormatter)
parser.add_argument('ms_from', help='name of the MS file to copy from')
parser.add_argument('ms_to', help='name of the MS file to copy to')
parser.add_argument('column_from', help='name of the column to copy from')
parser.add_argument('column_to', help='name of the column to copy to')
parser.add_argument('do_copy', help='Copy is done only if True')
args = parser.parse_args()
main(args.ms_from, args.ms_to, args.column_from, args.column_to,
do_copy=args.do_copy)
|
russellmayhew/satchmo
|
refs/heads/master
|
satchmo/apps/payment/views/cron.py
|
8
|
from datetime import datetime, timedelta
from decimal import Decimal
from django.http import HttpResponse
from django.utils.translation import ugettext, ugettext_lazy as _
from livesettings import config_get_group, config_value
from satchmo_store.shop.models import Order, OrderItem, OrderPayment
from satchmo_utils.views import bad_or_missing
import logging
log = logging.getLogger('payment.views.cron')
def cron_rebill(request=None):
"""Rebill customers with expiring recurring subscription products
This can either be run via a url with GET key authentication or
directly from a shell script.
"""
    # TODO: support retry billing for failed transactions
if request is not None:
if not config_value('PAYMENT', 'ALLOW_URL_REBILL'):
return bad_or_missing(request, _("Feature is not enabled."))
if 'key' not in request.GET or request.GET['key'] != config_value('PAYMENT','CRON_KEY'):
return HttpResponse("Authentication Key Required")
expiring_subscriptions = OrderItem.objects.filter(expire_date__gte=datetime.now()).order_by('order', 'id', 'expire_date')
for item in expiring_subscriptions:
if item.product.is_subscription:#TODO - need to add support for products with trial but non-recurring
if item.product.subscriptionproduct.recurring_times and item.product.subscriptionproduct.recurring_times + item.product.subscriptionproduct.get_trial_terms().count() == OrderItem.objects.filter(order=item.order, product=item.product).count():
continue
if item.expire_date == datetime.date(datetime.now()) and item.completed:
if item.id == OrderItem.objects.filter(product=item.product, order=item.order).order_by('-id')[0].id:
                    #bill => add orderitem, recalculate total, process card
new_order_item = OrderItem(order=item.order, product=item.product, quantity=item.quantity, unit_price=item.unit_price, line_item_price=item.line_item_price)
#if product is recurring, set subscription end
if item.product.subscriptionproduct.recurring:
new_order_item.expire_date = item.product.subscriptionproduct.calc_expire_date()
#check if product has 2 or more trial periods and if the last one paid was a trial or a regular payment.
ordercount = item.order.orderitem_set.all().count()
if item.product.subscriptionproduct.get_trial_terms().count() > 1 and item.unit_price == item.product.subscriptionproduct.get_trial_terms(ordercount - 1).price:
                        new_order_item.unit_price = item.product.subscriptionproduct.get_trial_terms(ordercount).price
new_order_item.line_item_price = new_order_item.quantity * new_order_item.unit_price
new_order_item.expire_date = item.product.subscriptionproduct.get_trial_terms(ordercount).calc_expire_date()
new_order_item.save()
item.order.recalculate_total()
# if new_order_item.product.subscriptionproduct.is_shippable == 3:
# item.order.total = item.order.total - item.order.shipping_cost
# item.order.save()
payments = item.order.payments.all()[0]
#list of ipn based payment modules. Include processors that use 3rd party recurring billing.
ipn_based = ['PAYPAL']
                    if payments.payment not in ipn_based and item.order.balance > 0:
#run card
#Do the credit card processing here & if successful, execute the success_handler
from livesettings import config_get_group
payment_module = config_get_group('PAYMENT_%s' % payments.payment)
credit_processor = payment_module.MODULE.load_module('processor')
processor = credit_processor.PaymentProcessor(payment_module)
processor.prepare_data(item.order)
result = processor.process()
if result.payment:
reason_code = result.payment.reason_code
else:
reason_code = "unknown"
log.info("""Processing %s recurring transaction with %s
Order #%i
Results=%s
Response=%s
Reason=%s""",
payment_module.LABEL.value,
payment_module.KEY.value,
item.order.id,
result.success,
reason_code,
result.message)
if result.success:
#success handler
item.order.add_status(status='New', notes = ugettext("Subscription Renewal Order successfully submitted"))
new_order_item.completed = True
new_order_item.save()
orderpayment = OrderPayment(order=item.order, amount=item.order.balance, payment=unicode(payment_module.KEY.value))
orderpayment.save()
return HttpResponse()
|
blambov/cassandra
|
refs/heads/trunk
|
pylib/cqlshlib/test/winpty.py
|
62
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from cStringIO import StringIO
from Queue import Queue, Empty
class WinPty:
def __init__(self, stdin):
self._s = stdin
self._q = Queue()
def _read_next_char(stdin, queue):
while True:
char = stdin.read(1) # potentially blocking read
if char:
queue.put(char)
else:
break
self._t = Thread(target=_read_next_char, args=(self._s, self._q))
self._t.daemon = True
self._t.start() # read characters asynchronously from stdin
def read(self, blksize=-1, timeout=1):
buf = StringIO()
count = 0
try:
while count < blksize or blksize == -1:
                ch = self._q.get(block=timeout is not None, timeout=timeout)
                buf.write(ch)
count = count + 1
except Empty:
pass
return buf.getvalue()
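def _winpty_example():
    # A minimal sketch (not part of the original module): any file-like
    # object with a blocking read(1) can stand in for stdin; a StringIO is
    # used here so the timeout-based, non-blocking read can be demonstrated.
    fake_stdin = StringIO("hello")
    pty = WinPty(fake_stdin)
    assert pty.read(blksize=5, timeout=1) == "hello"
    assert pty.read(blksize=1, timeout=0.1) == ""  # nothing left; times out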
|
tridao/cvxpy
|
refs/heads/master
|
cvxpy/atoms/lambda_sum_largest.py
|
12
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.expressions.expression import Expression
from cvxpy.expressions.variables import Semidef
from cvxpy.atoms.lambda_max import lambda_max
from cvxpy.atoms.affine.trace import trace
def lambda_sum_largest(X, k):
"""Sum of the largest k eigenvalues.
"""
X = Expression.cast_to_const(X)
if X.size[0] != X.size[1]:
raise ValueError("First argument must be a square matrix.")
elif int(k) != k or k <= 0:
raise ValueError("Second argument must be a positive integer.")
"""
S_k(X) denotes lambda_sum_largest(X, k)
    t >= k lambda_max(X - Z) + trace(Z), Z is PSD
implies
t >= ks + trace(Z)
Z is PSD
sI >= X - Z (PSD sense)
which implies
t >= ks + trace(Z) >= S_k(sI + Z) >= S_k(X)
We use the fact that
S_k(X) = sup_{sets of k orthonormal vectors u_i}\sum_{i}u_i^T X u_i
and if Z >= X in PSD sense then
\sum_{i}u_i^T Z u_i >= \sum_{i}u_i^T X u_i
We have equality when s = lambda_k and Z diagonal
with Z_{ii} = (lambda_i - lambda_k)_+
"""
Z = Semidef(X.size[0])
return k*lambda_max(X - Z) + trace(Z)
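# A minimal usage sketch (not part of the original module), written against
# the old cvxpy API this file targets and assuming a solver with SDP
# support is installed; the matrix A is purely illustrative:
#
#     import numpy
#     from cvxpy import Minimize, Problem
#     A = numpy.diag([3.0, 2.0, 1.0])
#     expr = lambda_sum_largest(A, 2)      # k*lambda_max(A - Z) + trace(Z)
#     Problem(Minimize(expr)).solve()      # ~= 5.0, the sum of the two
#                                          # largest eigenvalues of A (3 + 2)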
|
roaet/quark
|
refs/heads/master
|
quark/tests/cache/test_security_groups_client.py
|
2
|
# Copyright 2014 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import json
import mock
import netaddr
from oslo_config import cfg
import redis
from quark.agent.xapi import VIF
from quark.cache import security_groups_client as sg_client
from quark.db import models
from quark.environment import Capabilities
from quark import exceptions as q_exc
from quark.tests import test_base
CONF = cfg.CONF
class TestRedisSecurityGroupsClient(test_base.TestBase):
def setUp(self):
super(TestRedisSecurityGroupsClient, self).setUp()
# Forces the connection pool to be recreated on every test
sg_client.SecurityGroupsClient.connection_pool = None
temp_envcaps = [Capabilities.SECURITY_GROUPS, Capabilities.EGRESS]
CONF.set_override('environment_capabilities', temp_envcaps, 'QUARK')
def tearDown(self):
CONF.clear_override('environment_capabilities', 'QUARK')
@mock.patch("uuid.uuid4")
@mock.patch("quark.cache.redis_base.TwiceRedis")
def test_apply_rules(self, strict_redis, uuid4):
client = sg_client.SecurityGroupsClient()
device_id = "device"
uuid4.return_value = "uuid"
mac_address = netaddr.EUI("AA:BB:CC:DD:EE:FF")
client.apply_rules(device_id, mac_address.value, [])
self.assertTrue(client._client.master.hset.called)
redis_key = client.vif_key(device_id, mac_address.value)
rule_dict = {"rules": []}
client._client.master.hset.assert_any_call(
redis_key, sg_client.SECURITY_GROUP_HASH_ATTR,
json.dumps(rule_dict))
client._client.master.hset.assert_any_call(
redis_key, sg_client.SECURITY_GROUP_ACK, False)
@mock.patch("uuid.uuid4")
@mock.patch("quark.cache.redis_base.TwiceRedis")
def test_delete_vif(self, strict_redis, uuid4):
client = sg_client.SecurityGroupsClient()
device_id = "device"
uuid4.return_value = "uuid"
mac_address = netaddr.EUI("AA:BB:CC:DD:EE:FF")
redis_key = client.vif_key(device_id, mac_address.value)
client.delete_vif(device_id, mac_address)
client._client.master.delete.assert_called_with(redis_key)
def test_apply_rules_set_fails_gracefully(self):
port_id = 1
mac_address = netaddr.EUI("AA:BB:CC:DD:EE:FF")
conn_err = redis.ConnectionError
with mock.patch("quark.cache.security_groups_client."
"redis_base.ClientBase") as redis_mock:
mocked_redis_cli = mock.MagicMock()
redis_mock.return_value = mocked_redis_cli
client = sg_client.SecurityGroupsClient()
mocked_redis_cli.master.hset.side_effect = conn_err
with self.assertRaises(q_exc.RedisConnectionFailure):
client.apply_rules(port_id, mac_address.value, [])
@mock.patch(
"quark.cache.security_groups_client.redis_base.TwiceRedis")
def test_serialize_group_no_rules(self, strict_redis):
client = sg_client.SecurityGroupsClient()
group = models.SecurityGroup()
payload = client.serialize_groups([group])
self.assertEqual([], payload)
@mock.patch(
"quark.cache.security_groups_client.redis_base.TwiceRedis")
def test_serialize_group_with_rules(self, strict_redis):
rule_dict = {"ethertype": 0x800, "protocol": 6, "port_range_min": 80,
"port_range_max": 443, "direction": "ingress"}
client = sg_client.SecurityGroupsClient()
group = models.SecurityGroup()
rule = models.SecurityGroupRule()
rule.update(rule_dict)
group.rules.append(rule)
payload = client.serialize_groups([group])
rule = payload[0]
self.assertEqual(0x800, rule["ethertype"])
self.assertEqual(6, rule["protocol"])
self.assertEqual(80, rule["port start"])
self.assertEqual(443, rule["port end"])
self.assertEqual("allow", rule["action"])
self.assertEqual("ingress", rule["direction"])
self.assertEqual("", rule["source network"])
self.assertEqual("", rule["destination network"])
@mock.patch(
"quark.cache.security_groups_client.redis_base.TwiceRedis")
def test_serialize_group_with_rules_and_remote_network(self, strict_redis):
rule_dict = {"ethertype": 0x800, "protocol": 1, "direction": "ingress",
"remote_ip_prefix": "192.168.0.0/24"}
client = sg_client.SecurityGroupsClient()
group = models.SecurityGroup()
rule = models.SecurityGroupRule()
rule.update(rule_dict)
group.rules.append(rule)
payload = client.serialize_groups([group])
rule = payload[0]
self.assertEqual(0x800, rule["ethertype"])
self.assertEqual(1, rule["protocol"])
self.assertEqual(None, rule["icmp type"])
self.assertEqual(None, rule["icmp code"])
self.assertEqual("allow", rule["action"])
self.assertEqual("ingress", rule["direction"])
self.assertEqual("::ffff:192.168.0.0/120", rule["source network"])
self.assertEqual("", rule["destination network"])
@mock.patch(
"quark.cache.security_groups_client.redis_base.TwiceRedis")
def test_serialize_group_egress_rules(self, strict_redis):
rule_dict = {"ethertype": 0x800, "protocol": 1,
"direction": "egress",
"remote_ip_prefix": "192.168.0.0/24"}
client = sg_client.SecurityGroupsClient()
group = models.SecurityGroup()
rule = models.SecurityGroupRule()
rule.update(rule_dict)
group.rules.append(rule)
payload = client.serialize_groups([group])
rule = payload[0]
self.assertEqual(0x800, rule["ethertype"])
self.assertEqual(1, rule["protocol"])
self.assertEqual(None, rule["icmp type"])
self.assertEqual(None, rule["icmp code"])
self.assertEqual("allow", rule["action"])
self.assertEqual("egress", rule["direction"])
self.assertEqual("::ffff:192.168.0.0/120", rule["destination network"])
self.assertEqual("", rule["source network"])
@mock.patch(
"quark.cache.security_groups_client.redis_base.TwiceRedis")
def test_serialize_filters_source_v4_net(self, strict_redis):
rule_dict = {"ethertype": 0x800, "protocol": 1, "direction": "ingress",
"remote_ip_prefix": "192.168.0.0/0"}
client = sg_client.SecurityGroupsClient()
group = models.SecurityGroup()
rule = models.SecurityGroupRule()
rule.update(rule_dict)
group.rules.append(rule)
payload = client.serialize_groups([group])
rule = payload[0]
self.assertEqual(0x800, rule["ethertype"])
self.assertEqual(1, rule["protocol"])
self.assertEqual(None, rule["icmp type"])
self.assertEqual(None, rule["icmp code"])
self.assertEqual("allow", rule["action"])
self.assertEqual("ingress", rule["direction"])
self.assertEqual("", rule["source network"])
self.assertEqual("", rule["destination network"])
@mock.patch(
"quark.cache.security_groups_client.redis_base.TwiceRedis")
def test_serialize_filters_source_v6_net(self, strict_redis):
rule_dict = {"ethertype": 0x86DD, "protocol": 58,
"direction": "ingress",
"remote_ip_prefix": "feed::/0"}
client = sg_client.SecurityGroupsClient()
group = models.SecurityGroup()
rule = models.SecurityGroupRule()
rule.update(rule_dict)
group.rules.append(rule)
payload = client.serialize_groups([group])
rule = payload[0]
self.assertEqual(0x86DD, rule["ethertype"])
self.assertEqual(58, rule["protocol"])
self.assertEqual(None, rule["icmp type"])
self.assertEqual(None, rule["icmp code"])
self.assertEqual("allow", rule["action"])
self.assertEqual("ingress", rule["direction"])
self.assertEqual("", rule["source network"])
self.assertEqual("", rule["destination network"])
@mock.patch(
"quark.cache.security_groups_client.redis_base.TwiceRedis")
def test_serialize_filters_dest_v4_net(self, strict_redis):
rule_dict = {"ethertype": 0x800, "protocol": 1, "direction": "egress",
"remote_ip_prefix": "192.168.0.0/0"}
client = sg_client.SecurityGroupsClient()
group = models.SecurityGroup()
rule = models.SecurityGroupRule()
rule.update(rule_dict)
group.rules.append(rule)
payload = client.serialize_groups([group])
rule = payload[0]
self.assertEqual(0x800, rule["ethertype"])
self.assertEqual(1, rule["protocol"])
self.assertEqual(None, rule["icmp type"])
self.assertEqual(None, rule["icmp code"])
self.assertEqual("allow", rule["action"])
self.assertEqual("egress", rule["direction"])
self.assertEqual("", rule["source network"])
self.assertEqual("", rule["destination network"])
@mock.patch(
"quark.cache.security_groups_client.redis_base.TwiceRedis")
def test_serialize_filters_dest_v6_net_(self, strict_redis):
rule_dict = {"ethertype": 0x86DD, "protocol": 58,
"direction": "egress",
"remote_ip_prefix": "feed::/0"}
client = sg_client.SecurityGroupsClient()
group = models.SecurityGroup()
rule = models.SecurityGroupRule()
rule.update(rule_dict)
group.rules.append(rule)
payload = client.serialize_groups([group])
rule = payload[0]
self.assertEqual(0x86DD, rule["ethertype"])
self.assertEqual(58, rule["protocol"])
self.assertEqual(None, rule["icmp type"])
self.assertEqual(None, rule["icmp code"])
self.assertEqual("allow", rule["action"])
self.assertEqual("egress", rule["direction"])
self.assertEqual("", rule["source network"])
self.assertEqual("", rule["destination network"])
class TestRedisForAgent(test_base.TestBase):
def setUp(self):
super(TestRedisForAgent, self).setUp()
patch = mock.patch("quark.cache.security_groups_client.redis_base."
"TwiceRedis")
self.MockSentinel = patch.start()
self.addCleanup(patch.stop)
@mock.patch(
"quark.cache.security_groups_client.SecurityGroupsClient.get_fields")
def test_get_security_group_states_empty(self, mock_get_fields):
rc = sg_client.SecurityGroupsClient()
mock_get_fields.return_value = []
group_states = rc.get_security_group_states([])
mock_get_fields.assert_called_once_with([],
sg_client.SECURITY_GROUP_ACK)
self.assertEqual(group_states, {})
@mock.patch(
"quark.cache.security_groups_client.SecurityGroupsClient.get_fields")
def test_get_security_group_states_nonempty(self, mock_get_fields):
rc = sg_client.SecurityGroupsClient()
mock_get_fields.return_value = [
None,
'{}',
'{"%s": False}' % sg_client.SECURITY_GROUP_ACK,
'{"%s": True}' % sg_client.SECURITY_GROUP_ACK,
'{"%s": "1-2-3"}' % sg_client.SECURITY_GROUP_ACK]
recs = [{"MAC": 2}, {"MAC": 4}, {"MAC": 6}, {"MAC": 8}, {"MAC": 0}]
new_interfaces = ([VIF(1, recs[0], 9), VIF(3, recs[1], 0),
VIF(5, recs[2], 1), VIF(7, recs[3], 2),
VIF(9, recs[4], 3)])
group_states = rc.get_security_group_states(new_interfaces)
mock_get_fields.assert_called_once_with(
["1.000000000002", "3.000000000004", "5.000000000006",
"7.000000000008", "9.000000000000"],
sg_client.SECURITY_GROUP_ACK)
self.assertEqual(group_states, {new_interfaces[2]: False,
new_interfaces[3]: True})
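def _vif_key_format_example():
    # A minimal sketch (not part of the original tests) of the key shape
    # asserted above: device id, a dot, then the MAC value zero-padded to
    # twelve hex digits, so device 1 with MAC 2 becomes "1.000000000002".
    assert "%s.%012x" % (1, 2) == "1.000000000002"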
|
gsarma/PyOpenWorm
|
refs/heads/dev
|
OpenWormData/__init__.py
|
1
|
from rdflib.namespace import Namespace
BIO_ENT_NS = Namespace('http://openworm.org/entities/bio#')
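def _namespace_example():
    # A minimal sketch (not part of the original module): an rdflib Namespace
    # builds URIRefs by item or attribute access; 'neuron' is a hypothetical
    # local name, not one defined by this package.
    assert str(BIO_ENT_NS['neuron']) == 'http://openworm.org/entities/bio#neuron'
    assert BIO_ENT_NS['neuron'] == BIO_ENT_NS.neuron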
|
bgxavier/nova
|
refs/heads/master
|
nova/tests/unit/fake_crypto.py
|
78
|
# Copyright 2012 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def ensure_ca_filesystem():
pass
def fetch_ca(project_id=None):
rootca = """-----BEGIN CERTIFICATE-----
MIICyzCCAjSgAwIBAgIJAIJ/UoFWKoOUMA0GCSqGSIb3DQEBBAUAME4xEjAQBgNV
BAoTCU5PVkEgUk9PVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMK
Q2FsaWZvcm5pYTELMAkGA1UEBhMCVVMwHhcNMTIxMDAyMTg1NzQ1WhcNMTMxMDAy
MTg1NzQ1WjBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWlu
IFZpZXcxEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTMIGfMA0GCSqG
SIb3DQEBAQUAA4GNADCBiQKBgQCg0Bn8WSqbJF3QNTZUxo1TzmFBxuqvhjZLKbnQ
IiShdVIWUK7RC8frq8FJI7dgJNmvkIBn9njABWDoZmurQRCzD65yCSbUc4R2ea5H
IK4wQIui0CJykvMBNjAe3bzztVVs8/ccDTsjtqq3F/KeQkKzQVfSWBrJSmYtG5tO
G+dOSwIDAQABo4GwMIGtMAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFCljRfaNOsA/
9mHuq0io7Lt83FtaMH4GA1UdIwR3MHWAFCljRfaNOsA/9mHuq0io7Lt83FtaoVKk
UDBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWluIFZpZXcx
EzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTggkAgn9SgVYqg5QwDQYJ
KoZIhvcNAQEEBQADgYEAEbpJOOlpKCh5omwfAwAfFg1ml4h/FJiCH3PETmOCc+3l
CtWTBd4MG8AoH7A3PU2JKAGVQ5XWo6+ihpW1RgfQpCnloI6vIeGcws+rSLnlzULt
IvfCJpRg7iQdR3jZGt3295behtP1GsCqipJEulOkOaEIs8iLlXgSOG94Mkwlb4Q=
-----END CERTIFICATE-----
"""
return rootca
def generate_x509_cert(user_id, project_id, bits=1024):
pk = """-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQC4h2d63ijt9l0fIBRY37D3Yj2FYajCMUlftSoHNA4lEw0uTXnH
Jjbd0j7HNlSADWeAMuaoSDNp7CIsXMt6iA/ASN5nFFTZlLRqIzYoI0RHiiSJjvSG
d1n4Yrar1eC8tK3Rld1Zo6rj6tOuIxfFVJajJVZykCAHjGNNvulgfhBXFwIDAQAB
AoGBAIjfxx4YU/vO1lwUC4OwyS92q3OYcPk6XdakJryZHDTb4NcLmNzjt6bqIK7b
2enyB2fMWdNRWvGiueZ2HmiRLDyOGsAVdEsHvL4qbr9EZGTqC8Qxx+zTevWWf6pB
F1zxzbXNQDFZDf9kVsSLCkbMHITnW1k4MrM++9gfCO3WrfehAkEA4nd8TyCCZazq
KMOQwFLTNaiVLeTXCtvGopl4ZNiKYZ1qI3KDXb2wbAyArFuERlotxFlylXpwtlMo
SlI/C/sYqwJBANCX1sdfRJq8DpdP44ThWqOkWFLB9rBiwyyBt8746fX8amwr8eyz
H44/z5GT/Vyp8qFsjkuDzeP93eeDnr2qE0UCP1zipRnPO6x4P5J4o+Y+EmLvwkAQ
nCLYAaCvUbILHrbq2Z2wWjEYnEO03RHUd2xjkGH4TgcBMTmW4e+ZzEIduwJACnIw
LVfWBbG5QVac3EC021EVoz9XbUnk4Eu2usS4Yrs7USN6QBJQWD1V1cKFg6h3ICJh
leKJ4wsJm9h5kKH9yQJBAN8CaX223MlTSuBOVuIOwNA+09iLfx4UCLiH1fGMKDpe
xVcmkM3qCnTqNxrAPSFdT9IyB3IXiaLWbvzl7MfiOwQ=
-----END RSA PRIVATE KEY-----
"""
csr = """Certificate:
Data:
Version: 1 (0x0)
Serial Number: 23 (0x17)
Signature Algorithm: md5WithRSAEncryption
Issuer: O=NOVA ROOT, L=Mountain View, ST=California, C=US
Validity
Not Before: Oct 2 19:31:45 2012 GMT
Not After : Oct 2 19:31:45 2013 GMT
Subject: C=US, ST=California, O=OpenStack, OU=NovaDev, """
"""CN=openstack-fake-2012-10-02T19:31:45Z
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
RSA Public Key: (1024 bit)
Modulus (1024 bit):
00:b8:87:67:7a:de:28:ed:f6:5d:1f:20:14:58:df:
b0:f7:62:3d:85:61:a8:c2:31:49:5f:b5:2a:07:34:
0e:25:13:0d:2e:4d:79:c7:26:36:dd:d2:3e:c7:36:
54:80:0d:67:80:32:e6:a8:48:33:69:ec:22:2c:5c:
cb:7a:88:0f:c0:48:de:67:14:54:d9:94:b4:6a:23:
36:28:23:44:47:8a:24:89:8e:f4:86:77:59:f8:62:
b6:ab:d5:e0:bc:b4:ad:d1:95:dd:59:a3:aa:e3:ea:
d3:ae:23:17:c5:54:96:a3:25:56:72:90:20:07:8c:
63:4d:be:e9:60:7e:10:57:17
Exponent: 65537 (0x10001)
Signature Algorithm: md5WithRSAEncryption
32:82:ff:8b:92:0e:8d:9c:6b:ce:7e:fe:34:16:2a:4c:47:4f:
c7:28:a2:33:1e:48:56:2e:4b:e8:e8:e3:48:b1:3d:a3:43:21:
ef:83:e7:df:e2:10:91:7e:9a:c0:4d:1e:96:68:2b:b9:f7:84:
7f:ec:84:8a:bf:bc:5e:50:05:d9:ce:4a:1a:bf:d2:bf:0c:d1:
7e:ec:64:c3:a5:37:78:a3:a6:2b:a1:b7:1c:cc:c8:b9:78:61:
98:50:3c:e6:28:34:f1:0e:62:bb:b5:d7:a1:dd:1f:38:c6:0d:
58:9f:81:67:ff:9c:32:fc:52:7e:6d:8c:91:43:49:fe:e3:48:
bb:40
-----BEGIN CERTIFICATE-----
MIICMzCCAZwCARcwDQYJKoZIhvcNAQEEBQAwTjESMBAGA1UEChMJTk9WQSBST09U
MRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQIEwpDYWxpZm9ybmlhMQsw
CQYDVQQGEwJVUzAeFw0xMjEwMDIxOTMxNDVaFw0xMzEwMDIxOTMxNDVaMHYxCzAJ
BgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRIwEAYDVQQKEwlPcGVuU3Rh
Y2sxEDAOBgNVBAsTB05vdmFEZXYxLDAqBgNVBAMTI29wZW5zdGFjay1mYWtlLTIw
MTItMTAtMDJUMTk6MzE6NDVaMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC4
h2d63ijt9l0fIBRY37D3Yj2FYajCMUlftSoHNA4lEw0uTXnHJjbd0j7HNlSADWeA
MuaoSDNp7CIsXMt6iA/ASN5nFFTZlLRqIzYoI0RHiiSJjvSGd1n4Yrar1eC8tK3R
ld1Zo6rj6tOuIxfFVJajJVZykCAHjGNNvulgfhBXFwIDAQABMA0GCSqGSIb3DQEB
BAUAA4GBADKC/4uSDo2ca85+/jQWKkxHT8coojMeSFYuS+jo40ixPaNDIe+D59/i
EJF+msBNHpZoK7n3hH/shIq/vF5QBdnOShq/0r8M0X7sZMOlN3ijpiuhtxzMyLl4
YZhQPOYoNPEOYru116HdHzjGDVifgWf/nDL8Un5tjJFDSf7jSLtA
-----END CERTIFICATE-----
"""
return pk, csr
def get_x509_cert_and_fingerprint():
fingerprint = "a1:6f:6d:ea:a6:36:d0:3a:c6:eb:b6:ee:07:94:3e:2a:90:98:2b:c9"
certif = (
"-----BEGIN CERTIFICATE-----\n"
"MIIDIjCCAgqgAwIBAgIJAIE8EtWfZhhFMA0GCSqGSIb3DQEBCwUAMCQxIjAgBgNV\n"
"BAMTGWNsb3VkYmFzZS1pbml0LXVzZXItMTM1NTkwHhcNMTUwMTI5MTgyMzE4WhcN\n"
"MjUwMTI2MTgyMzE4WjAkMSIwIAYDVQQDExljbG91ZGJhc2UtaW5pdC11c2VyLTEz\n"
"NTU5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv4lv95ofkXLIbALU\n"
"UEb1f949TYNMUvMGNnLyLgGOY+D61TNG7RZn85cRg9GVJ7KDjSLN3e3LwH5rgv5q\n"
"pU+nM/idSMhG0CQ1lZeExTsMEJVT3bG7LoU5uJ2fJSf5+hA0oih2M7/Kap5ggHgF\n"
"h+h8MWvDC9Ih8x1aadkk/OEmJsTrziYm0C/V/FXPHEuXfZn8uDNKZ/tbyfI6hwEj\n"
"nLz5Zjgg29n6tIPYMrnLNDHScCwtNZOcnixmWzsxCt1bxsAEA/y9gXUT7xWUf52t\n"
"2+DGQbLYxo0PHjnPf3YnFXNavfTt+4c7ZdHhOQ6ZA8FGQ2LJHDHM1r2/8lK4ld2V\n"
"qgNTcQIDAQABo1cwVTATBgNVHSUEDDAKBggrBgEFBQcDAjA+BgNVHREENzA1oDMG\n"
"CisGAQQBgjcUAgOgJQwjY2xvdWRiYXNlLWluaXQtdXNlci0xMzU1OUBsb2NhbGhv\n"
"c3QwDQYJKoZIhvcNAQELBQADggEBAHHX/ZUOMR0ZggQnfXuXLIHWlffVxxLOV/bE\n"
"7JC/dtedHqi9iw6sRT5R6G1pJo0xKWr2yJVDH6nC7pfxCFkby0WgVuTjiu6iNRg2\n"
"4zNJd8TGrTU+Mst+PPJFgsxrAY6vjwiaUtvZ/k8PsphHXu4ON+oLurtVDVgog7Vm\n"
"fQCShx434OeJj1u8pb7o2WyYS5nDVrHBhlCAqVf2JPKu9zY+i9gOG2kimJwH7fJD\n"
"xXpMIwAQ+flwlHR7OrE0L8TNcWwKPRAY4EPcXrT+cWo1k6aTqZDSK54ygW2iWtni\n"
"ZBcstxwcB4GIwnp1DrPW9L2gw5eLe1Sl6wdz443TW8K/KPV9rWQ=\n"
"-----END CERTIFICATE-----\n")
return certif, fingerprint
def get_ssh_public_key():
public_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
"B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
"RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
"9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
"pSxsIbECHw== Generated-by-Nova")
return public_key
|
mdhaber/scipy
|
refs/heads/master
|
scipy/cluster/tests/test_hierarchy.py
|
12
|
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
import pytest
from pytest import raises as assert_raises
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette, cut_tree, optimal_leaf_ordering,
_order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
from scipy.spatial.distance import pdist
from scipy.cluster._hierarchy import Heap
from . import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib # type: ignore[import]
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt # type: ignore[import]
have_matplotlib = True
except Exception:
have_matplotlib = False
class TestLinkage:
def test_linkage_non_finite_elements_in_distance_matrix(self):
# Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
# Exception expected.
y = np.zeros((6,))
y[0] = np.nan
assert_raises(ValueError, linkage, y)
def test_linkage_empty_distance_matrix(self):
        # Tests linkage(Y) where Y is an empty condensed distance matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted']:
self.check_linkage_tdist(method)
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
self.check_linkage_q(method)
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
metric="euclidean")
Z = linkage(y, method)
assert_allclose(Z, expectedZ, atol=1e-06)
def test_compare_with_trivial(self):
rng = np.random.RandomState(0)
n = 20
X = rng.rand(n, 2)
d = pdist(X)
for method, code in _LINKAGE_METHODS.items():
Z_trivial = _hierarchy.linkage(d, n, code)
Z = linkage(d, method)
assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
def test_optimal_leaf_ordering(self):
Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
assert_allclose(Z, expectedZ, atol=1e-10)
class TestLinkageTies:
_expectations = {
'single': np.array([[0, 1, 1.41421356, 2],
[2, 3, 1.41421356, 3]]),
'complete': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.82842712, 3]]),
'average': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'weighted': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'centroid': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'median': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'ward': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.44948974, 3]]),
}
def test_linkage_ties(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']:
self.check_linkage_ties(method)
def check_linkage_ties(self, method):
X = np.array([[-1, -1], [0, 0], [1, 1]])
Z = linkage(X, method=method)
expectedZ = self._expectations[method]
assert_allclose(Z, expectedZ, atol=1e-06)
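def _linkage_row_example():
    # A minimal sketch (not part of the original tests) of how to read a
    # linkage row [idx_i, idx_j, dist, size]: merged clusters are numbered
    # from n upward, so the second row below joins point 2 with cluster 3
    # (itself the merge of points 0 and 1).
    X = np.array([[-1, -1], [0, 0], [1, 1]])
    Z = linkage(X, method='single')
    assert_allclose(Z, [[0, 1, 1.41421356, 2],
                        [2, 3, 1.41421356, 3]], atol=1e-6)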
class TestInconsistent:
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
self.check_inconsistent_tdist(depth)
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance:
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion:
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster:
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fclusterdata(t, 'inconsistent')
for t in hierarchy_test_data.fcluster_distance:
self.check_fclusterdata(t, 'distance')
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fclusterdata(t, 'maxclust')
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fcluster(t, 'inconsistent')
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster(t, 'distance')
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster(t, 'maxclust')
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster_monocrit(t)
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster_maxclust_monocrit(t)
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders:
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic:
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc)
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc, True, 5)
def test_is_isomorphic_7(self):
# Regression test for gh-6271
assert_(not is_isomorphic([1, 2, 3], [1, 1, 1]))
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in range(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage:
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_linkage_various_size(nrow, ncol, valid)
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
        # Tests is_valid_linkage(Z) with linkage matrices of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent:
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_im_various_size(nrow, ncol, valid)
def check_is_valid_im_various_size(self, nrow, ncol, valid):
        # Tests is_valid_im(R) with inconsistency matrices of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage:
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList:
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
self.check_leaves_list_Q(method)
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond:
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in range(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
        for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
                       list(zip(list(range(16, 21)), list(range(2, 7))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in range(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic:
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists:
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxdists_Q_linkage(method)
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts:
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxinconsts_Q_linkage(method)
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat:
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
self.check_maxRstat_invalid_index(i)
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
self.check_maxRstat_empty_linkage(i)
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
self.check_maxRstat_difrow_linkage(i)
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
self.check_maxRstat_one_cluster_linkage(i)
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
self.check_maxRstat_Q_linkage(method, i)
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram:
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
def test_labels_as_array_or_list(self):
# test for gh-12418
Z = linkage(hierarchy_test_data.ytdist, 'single')
labels = np.array([1, 3, 2, 6, 4, 5])
result1 = dendrogram(Z, labels=labels, no_plot=True)
result2 = dendrogram(Z, labels=labels.tolist(), no_plot=True)
assert result1 == result2
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_valid_label_size(self):
link = np.array([
[0, 1, 1.0, 4],
[2, 3, 1.0, 5],
[4, 5, 2.0, 6],
])
plt.figure()
with pytest.raises(ValueError) as exc_info:
dendrogram(link, labels=list(range(100)))
assert "Dimensions of Z and labels must be consistent."\
in str(exc_info.value)
with pytest.raises(
ValueError,
match="Dimensions of Z and labels must be consistent."):
dendrogram(link, labels=[])
plt.close()
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
self.check_dendrogram_plot(orientation)
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4],
'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
}
fig = plt.figure()
ax = fig.add_subplot(221)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
assert_equal(R1, expected)
# test that dendrogram accepts and handle the leaf_font_size and
# leaf_rotation keywords
dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20, leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_rotation(), 90)
assert_equal(testlabel.get_size(), 20)
dendrogram(Z, ax=ax, orientation=orientation,
leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_rotation(), 90)
dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_size(), 20)
plt.close()
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
        R = dendrogram(Z, p=2, truncate_mode='lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['C0'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9],
'leaves_color_list': ['C0', 'C0'],
})
        R = dendrogram(Z, p=2, truncate_mode='mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7],
'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
})
def test_dendrogram_colors(self):
# Tests dendrogram plots with alternate colors
Z = linkage(hierarchy_test_data.ytdist, 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
# reset color palette (global list)
set_link_color_palette(None)
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
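    # A minimal worked example of the recurrence below (illustrative): for
    # Z = [[0, 1, 0.3, 2], [2, 3, 0.4, 3]] (n = 3), row 0 merges two leaves,
    # so B[0] = max(0, 0, 0.3) = 0.3; row 1 merges leaf 2 with cluster 3
    # (i.e. row 0), so B[1] = max(B[0], 0, 0.4) = 0.4, and maxdists(Z)
    # should equal [0.3, 0.4].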
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in range(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
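    # Same bottom-up recurrence as calculate_maximum_distances, except that
    # column k of the inconsistency matrix R (by default column 3, the
    # inconsistency coefficient) is propagated up the tree instead of the
    # merge distance Z[i, 2].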
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in range(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def test_unsupported_uncondensed_distance_matrix_linkage_warning():
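    # [[0, 1], [1, 0]] is symmetric, hollow and non-negative, so it looks like
    # a square (uncondensed) distance matrix rather than an observation
    # matrix; linkage is expected to emit a ClusterWarning for such input.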
assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]])
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
assert_raises(ValueError, linkage, [[1, 1], [1, 1]],
method=method, metric='cityblock')
def test_2x2_linkage():
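    # A condensed distance vector [1] and a 2x2 observation matrix whose two
    # rows lie at Euclidean distance 1 must yield identical linkages.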
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
def test_node_compare():
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
Z = scipy.cluster.hierarchy.ward(X)
tree = to_tree(Z)
assert_(tree > tree.get_left())
assert_(tree.get_right() > tree.get_left())
assert_(tree.get_right() == tree.get_right())
assert_(tree.get_right() != tree.get_left())
def test_cut_tree():
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
Z = scipy.cluster.hierarchy.ward(X)
cutree = cut_tree(Z)
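    # With no arguments, cut_tree returns an (nobs, nobs) matrix whose column
    # j holds the cluster labels obtained by cutting the tree into nobs - j
    # clusters: column 0 puts every observation in its own cluster and the
    # last column puts everything in a single cluster.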
assert_equal(cutree[:, 0], np.arange(nobs))
assert_equal(cutree[:, -1], np.zeros(nobs))
assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1))
assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5))
assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]))
assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]))
nodes = _order_cluster_tree(Z)
heights = np.array([node.dist for node in nodes])
assert_equal(cutree[:, np.searchsorted(heights, [5])],
cut_tree(Z, height=5))
assert_equal(cutree[:, np.searchsorted(heights, [5, 10])],
cut_tree(Z, height=[5, 10]))
assert_equal(cutree[:, np.searchsorted(heights, [10, 5])],
cut_tree(Z, height=[10, 5]))
def test_optimal_leaf_ordering():
# test with the distance vector y
Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist),
hierarchy_test_data.ytdist)
expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
assert_allclose(Z, expectedZ, atol=1e-10)
# test with the observation matrix X
Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'),
hierarchy_test_data.X)
expectedZ = hierarchy_test_data.linkage_X_ward_olo
assert_allclose(Z, expectedZ, atol=1e-06)
def test_Heap():
values = np.array([2, -1, 0, -1.5, 3])
heap = Heap(values)
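    # Heap is a binary min-heap over `values`; get_min() returns a dict whose
    # 'key' is the index of the smallest value and whose 'value' is that value
    # (here -1.5 at index 3).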
pair = heap.get_min()
assert_equal(pair['key'], 3)
assert_equal(pair['value'], -1.5)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], -1)
heap.change_value(1, 2.5)
pair = heap.get_min()
assert_equal(pair['key'], 2)
assert_equal(pair['value'], 0)
heap.remove_min()
heap.remove_min()
heap.change_value(1, 10)
pair = heap.get_min()
assert_equal(pair['key'], 4)
assert_equal(pair['value'], 3)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], 10)
|
CamAndPineapple/starrynight
|
refs/heads/master
|
node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
|
395
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalDependencies_excluded': 'file3',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
            # Custom conversion: Culture 1003 (decimal) becomes '0x03eb' (hex).
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
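            # DefaultCharIsUnsigned ('true' in the MSVS input above) has no
            # MSBuild setting of its own; it is folded into AdditionalOptions
            # as the '/J' compiler flag, hence the appended ' /J'.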
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
|
proxysh/Safejumper-for-Desktop
|
refs/heads/master
|
buildmac/Resources/env/lib/python2.7/site-packages/setuptools/command/install_egg_info.py
|
412
|
from distutils import log, dir_util
import os
from setuptools import Command
from setuptools import namespaces
from setuptools.archive_util import unpack_archive
import pkg_resources
class install_egg_info(namespaces.Installer, Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',
('install_dir', 'install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name() + '.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = []
def run(self):
self.run_command('egg_info')
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink, (self.target,), "Removing " + self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(
self.copytree, (), "Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src, dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/', 'CVS/':
if src.startswith(skip) or '/' + skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
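# Hedged usage note (illustrative; the paths below are not part of this
# module): the command is normally driven by setuptools' install command,
# but can also be invoked directly, e.g.
#   python setup.py install_egg_info --install-dir build/site-packages
# which copies <name>.egg-info into the target directory, skipping any
# .svn/ and CVS/ entries via the skimmer filter above.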
|
EvanzzzZ/mxnet
|
refs/heads/master
|
plugin/opencv/opencv.py
|
15
|
# coding: utf-8
# pylint: disable=too-many-arguments,no-member,invalid-name
"""Opencv plugin for mxnet"""
import math
import random
import ctypes
import cv2
import mxnet as mx
from mxnet.base import _LIB
from mxnet.base import mx_uint, NDArrayHandle, check_call
def imdecode(str_img, flag=1):
"""Decode image from str buffer.
Wrapper for cv2.imdecode that uses mx.nd.NDArray
Parameters
----------
str_img : str
str buffer read from image file
flag : int
same as flag for cv2.imdecode
Returns
-------
img : NDArray
        decoded image in (height, width, channels)
with BGR color channel order
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img),
mx_uint(len(str_img)),
flag, ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
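# Hedged usage sketch (file name is illustrative only):
#   with open('cat.jpg', 'rb') as f:
#       img = imdecode(f.read())  # NDArray in BGR channel order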
def resize(src, size, interpolation=cv2.INTER_LINEAR):
"""Decode image from str buffer.
Wrapper for cv2.imresize that uses mx.nd.NDArray
Parameters
----------
src : NDArray
        image in (height, width, channels)
size : tuple
target size in (width, height)
interpolation : int
        same as interpolation for cv2.resize
Returns
-------
img : NDArray
resized image
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]),
interpolation, ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0):
"""Pad image border
Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray
Parameters
----------
src : NDArray
        Image in (height, width, channels).
Others are the same with cv2.copyMakeBorder
Returns
-------
img : NDArray
padded image
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVcopyMakeBorder(src.handle, ctypes.c_int(top), ctypes.c_int(bot),
ctypes.c_int(left), ctypes.c_int(right),
ctypes.c_int(border_type), ctypes.c_double(value),
ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
def scale_down(src_size, size):
"""Scale down crop size if it's bigger than image size"""
w, h = size
sw, sh = src_size
if sh < h:
w, h = float(w*sh)/h, sh
if sw < w:
w, h = sw, float(h*sw)/w
return int(w), int(h)
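# Worked example (hedged): scale_down((100, 80), (120, 60)) -> (100, 50);
# the requested 120x60 crop is scaled by 100/120 so it fits the 100x80
# source while keeping the 2:1 aspect ratio.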
def fixed_crop(src, x0, y0, w, h, size=None, interpolation=cv2.INTER_CUBIC):
"""Crop src at fixed location, and (optionally) resize it to size"""
out = mx.nd.crop(src, begin=(y0, x0, 0), end=(y0+h, x0+w, int(src.shape[2])))
if size is not None and (w, h) != size:
out = resize(out, size, interpolation=interpolation)
return out
def random_crop(src, size):
"""Randomly crop src with size. Upsample result if src is smaller than size"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h)
def color_normalize(src, mean, std):
"""Normalize src with mean and std"""
src -= mean
src /= std
return src
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)):
"""Randomly crop src with size. Randomize area and aspect ratio"""
h, w, _ = src.shape
area = w*h
for _ in range(10):
new_area = random.uniform(min_area, 1.0) * area
new_ratio = random.uniform(*ratio)
        # solve new_w * new_h == new_area with new_w / new_h == new_ratio
        new_w = int(math.sqrt(new_area * new_ratio))
        new_h = int(math.sqrt(new_area / new_ratio))
if random.uniform(0., 1.) < 0.5:
new_w, new_h = new_h, new_w
if new_w > w or new_h > h:
continue
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h)
return random_crop(src, size)
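# Worked example (hedged): for a 100x100 source (area 10000), a sampled
# new_area of 5000 and new_ratio of 4/3 give
#   new_w = int(math.sqrt(5000 * 4.0 / 3.0))  # 81
#   new_h = int(math.sqrt(5000 * 3.0 / 4.0))  # 61
# so new_w * new_h is close to new_area and new_w / new_h to new_ratio.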
class ImageListIter(mx.io.DataIter):
"""An example image iterator using opencv plugin"""
def __init__(self, root, flist, batch_size, size, mean=None):
mx.io.DataIter.__init__(self)
self.root = root
self.list = [line.strip() for line in open(flist).readlines()]
self.cur = 0
self.batch_size = batch_size
self.size = size
if mean is not None:
self.mean = mx.nd.array(mean)
else:
self.mean = None
def reset(self):
"""Reset iterator position to 0"""
self.cur = 0
def next(self):
"""Move iterator position forward"""
batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3))
i = self.cur
for i in range(self.cur, min(len(self.list), self.cur+self.batch_size)):
str_img = open(self.root+self.list[i]+'.jpg').read()
img = imdecode(str_img, 1)
img, _ = random_crop(img, self.size)
batch[i - self.cur] = img
batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2))
ret = mx.io.DataBatch(data=[batch],
label=[],
                              pad=self.batch_size - (i + 1 - self.cur),
                              index=None)
        # advance past the last index consumed by this batch
        self.cur = i + 1
return ret
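# Hedged usage sketch (paths and values are illustrative, not part of
# this module):
#   it = ImageListIter(root='./images/', flist='train.lst',
#                      batch_size=32, size=(224, 224))
#   it.reset()
#   batch = it.next()  # mx.io.DataBatch with data in NCHW layout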
|
captainpete/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-entrypointsymbol.py
|
342
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure entrypointsymbol setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('entrypointsymbol.gyp', chdir=CHDIR)
test.build('entrypointsymbol.gyp', 'test_ok', chdir=CHDIR)
test.build('entrypointsymbol.gyp', 'test_fail', chdir=CHDIR, status=1)
test.pass_test()
|
jaja14/project4
|
refs/heads/master
|
lib/flask/testsuite/test_apps/config_module_app.py
|
1257
|
import os
import flask
here = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(__name__)
|
arkanus/selenium-sunbro
|
refs/heads/master
|
setup.py
|
1
|
# -*- coding: utf-8 -*-
from setuptools import setup
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
README = os.path.join(ROOT, 'README.md')
try:
long_description = open(README).read()
except IOError:
long_description = None
setup(
name='selenium-sunbro',
version='0.0.4',
author='Marcos Sánchez',
author_email='arkanus@gmail.com',
description="Easily create page objects with a declarative syntax",
long_description=long_description,
py_modules=['sunbro'],
url='https://github.com/arkanus/selenium-sunbro',
license='Mozilla Public License 2.0 (MPL 2.0)',
    keywords='selenium webdriver page object',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
],
install_requires=['selenium'],
extras_require={'UIAutomator': ['uiautomator']},
test_suite="tests",
)
|
wldcordeiro/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/service-workers/service-worker/resources/fetch-access-control.py
|
78
|
import base64
import json
def main(request, response):
headers = []
headers.append(('X-ServiceWorker-ServerHeader', 'SetInTheServer'))
if "ACAOrigin" in request.GET:
for item in request.GET["ACAOrigin"].split(","):
headers.append(("Access-Control-Allow-Origin", item))
for suffix in ["Headers", "Methods", "Credentials"]:
query = "ACA%s" % suffix
header = "Access-Control-Allow-%s" % suffix
if query in request.GET:
headers.append((header, request.GET[query]))
if "ACEHeaders" in request.GET:
headers.append(("Access-Control-Expose-Headers", request.GET[query]))
if ("Auth" in request.GET and not request.auth.username) or "AuthFail" in request.GET:
status = 401
headers.append(('WWW-Authenticate', 'Basic realm="Restricted"'))
body = 'Authentication canceled'
return status, headers, body
if "PNGIMAGE" in request.GET:
headers.append(("Content-Type", "image/png"))
body = base64.decodestring("iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAARnQU1B"
"AACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAAhSURBVDhPY3wro/KfgQLABKXJBqMG"
"jBoAAqMGDLwBDAwAEsoCTFWunmQAAAAASUVORK5CYII=")
return headers, body
username = request.auth.username if request.auth.username else "undefined"
password = request.auth.password if request.auth.username else "undefined"
cookie = request.cookies['cookie'].value if 'cookie' in request.cookies else "undefined"
files = []
for key, values in request.POST.iteritems():
assert len(values) == 1
value = values[0]
if not hasattr(value, "file"):
continue
data = value.file.read()
files.append({"key": key,
"name": value.file.name,
"type": value.type,
"error": 0, #TODO,
"size": len(data),
"content": data})
get_data = {key:request.GET[key] for key,value in request.GET.iteritems()}
post_data = {key:request.POST[key] for key,value in request.POST.iteritems()
if not hasattr(request.POST[key], "file")}
headers_data = {key:request.headers[key] for key,value in request.headers.iteritems()}
data = {"jsonpResult": "success",
"method": request.method,
"headers": headers_data,
"body": request.body,
"files": files,
"GET": get_data,
"POST": post_data,
"username": username,
"password": password,
"cookie": cookie}
return headers, "report( %s )" % json.dumps(data)
|
tkerola/chainer
|
refs/heads/master
|
tests/chainer_tests/datasets_tests/test_concatenated_dataset.py
|
13
|
import numpy as np
import six
import unittest
from chainer.datasets import ConcatenatedDataset
from chainer import testing
@testing.parameterize(
# basic usage
{'datasets': (
np.random.uniform(size=(5, 3, 48, 32)),
np.random.uniform(size=(15, 3, 64, 48)),
)},
# more than two datasets
{'datasets': (
np.random.uniform(size=(5, 3, 48, 32)),
np.random.uniform(size=(15, 3, 16, 48)),
np.random.uniform(size=(20, 3, 5, 5)),
)},
# single dataset
{'datasets': (
np.random.uniform(size=(5, 3, 48, 32)),
)},
# no dataset
{'datasets': ()},
# some datasets are empty
{'datasets': (
np.random.uniform(size=(5, 3, 48, 32)),
[],
np.random.uniform(size=(20, 3, 5, 5)),
[],
)},
# all datasets are empty
{'datasets': ([], [], [])},
)
class TestConcatenatedDataset(unittest.TestCase):
def setUp(self):
self.concatenated_dataset = ConcatenatedDataset(*self.datasets)
self.expected_dataset = [
sample for dataset in self.datasets for sample in dataset]
def test_concatenated_dataset(self):
self.assertEqual(
len(self.concatenated_dataset), len(self.expected_dataset))
for i, expected in enumerate(self.expected_dataset):
np.testing.assert_equal(self.concatenated_dataset[i], expected)
def test_concatenated_dataset_slice(self):
concatenated_slice = self.concatenated_dataset[1:8:2]
        expected_slice = self.expected_dataset[1:8:2]
self.assertEqual(
len(concatenated_slice), len(expected_slice))
for concatenated, expected in six.moves.zip(
concatenated_slice, expected_slice):
np.testing.assert_equal(concatenated, expected)
testing.run_module(__name__, __file__)
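# Hedged usage sketch of the API under test (values illustrative):
#   ds = ConcatenatedDataset([1, 2], [3, 4, 5])
#   len(ds)  # -> 5
#   ds[3]    # -> 4  (indexing continues into the second dataset)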
|
julian-seward1/servo
|
refs/heads/master
|
tests/wpt/css-tests/css-fonts-3_dev/xhtml1/support/fonts/makegsubfonts.py
|
1616
|
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
	mapping.append(tag)
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
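# Hedged layout note (derived from the cp bookkeeping above): each feature
# tag occupies a block of 4 codepoints starting at baseCodepoint (0xe000).
# In the lookup 3 font the block is laid out as
#   cp + 0: tag.default   cp + 1: tag.alt1
#   cp + 2: tag.alt2      cp + 3: tag.alt3
# while the lookup 1 font places tag.pass at cp and tag.fail at cp + 1,
# then skips ahead so both fonts share the same per-feature stride of 4,
# matching the "cp += 4" used when emitting the javascript data.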
|
jlanecox/googletest
|
refs/heads/master
|
googletest/test/googletest-catch-exceptions-test.py
|
81
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes googletest-catch-exceptions-test_ and
googletest-catch-exceptions-ex-test_ (programs written with
Google Test) and verifies their output.
"""
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the googletest-catch-exceptions-ex-test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'googletest-catch-exceptions-ex-test_')
# Path to the googletest-catch-exceptions-no-ex-test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'googletest-catch-exceptions-no-ex-test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestSuite()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestSuite()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exceptions thrown affect the remainder of the test workflow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assertTrue(
'C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor' in EX_BINARY_OUTPUT,
EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assertTrue(
'C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor' in EX_BINARY_OUTPUT,
EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInDestructorTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestSuite()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInConstructorTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTestSuiteTest constructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTestSuiteTest destructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTestSuiteTest::SetUp() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTestSuiteTest::TearDown() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTestSuiteTest test body '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestSuite()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in SetUp()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTest destructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInSetUpTest::TearDown() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in TearDown()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTearDownTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTearDownTest destructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
' thrown in the test body' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTestBodyTest::TearDownTestSuite() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTestBodyTest destructor '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
self.assertTrue(
'CxxExceptionInTestBodyTest::TearDown() '
'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assertTrue(
'Unknown C++ exception thrown in the test body' in EX_BINARY_OUTPUT,
EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
|
jbair34/moose
|
refs/heads/devel
|
gui/gui/InputFileTreeWidget.py
|
31
|
#!/usr/bin/python
import os, sys, getopt
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
from OptionsGUI import OptionsGUI
from GenSyntax import *
from ActionSyntax import *
from ParamTable import *
from CommentEditor import *
import MeshInfoFactory
from readInputFile import readInputFile, GPNode
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class InputFileTreeWidget(QtGui.QTreeWidget):
tree_changed = QtCore.Signal()
mesh_item_changed = QtCore.Signal(QtGui.QTreeWidgetItem)
def __init__(self, input_file_widget, win_parent=None):
QtGui.QTreeWidget.__init__(self, win_parent)
self.comment = ''
self.input_file_widget = input_file_widget
self.application = self.input_file_widget.application
self.action_syntax = self.input_file_widget.action_syntax
self.setExpandsOnDoubleClick(False)
self.setMinimumWidth(200)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.connect(self,QtCore.SIGNAL('customContextMenuRequested(QPoint)'), self._newContext)
self.addHardPathsToTree()
self.header().close()
QtCore.QObject.connect(self,
QtCore.SIGNAL("itemDoubleClicked(QTreeWidgetItem *, int)"),
self._doubleClickedItem)
QtCore.QObject.connect(self,
QtCore.SIGNAL("itemChanged(QTreeWidgetItem*, int)"),
self._itemChanged)
QtCore.QObject.connect(self,
QtCore.SIGNAL("currentItemChanged(QTreeWidgetItem*, QTreeWidgetItem*)"),
self._currentItemChanged)
def addHardPathsToTree(self):
# Add every hard path
for path in self.action_syntax.hard_paths:
self._recursivelyAddTreeItems(path.split('/'), self)
def loadData(self, counter, progress, main_sections):
QtCore.QObject.disconnect(self, QtCore.SIGNAL("itemChanged(QTreeWidgetItem*, int)"), self._itemChanged)
progress.setMaximum(counter+len(main_sections))
for section_name, section_node in main_sections.items():
counter+=1
progress.setValue(counter)
self._addDataRecursively(self, section_node)
self.addHardPathsToTree() # We do this here because * paths might add more paths underneath some of the paths
self._updateOtherGUIElements()
QtCore.QObject.connect(self, QtCore.SIGNAL("itemChanged(QTreeWidgetItem*, int)"), self._itemChanged)
def generatePathFromItem(self, item):
from_parent = ''
if item.parent():
from_parent = self.generatePathFromItem(item.parent())
return from_parent + '/' + str(item.text(0))
  def findChildItemWithName(self, parent, name):
    ''' Looks for a child item of parent named "name"; returns None if there is no such child. '''
try: # This will fail when we're dealing with the QTreeWidget itself
num_children = parent.childCount()
except:
num_children = parent.topLevelItemCount()
for i in range(num_children):
child = None
try: # This will fail when we're dealing with the QTreeWidget itself
child = parent.child(i)
except:
child = parent.topLevelItem(i)
if child.text(0) == name:
return child
return None
def getMeshItemData(self):
mesh_item = self.findChildItemWithName(self, 'Mesh')
data = None
try:
return mesh_item.table_data
except:
pass
return None
def getMeshFileName(self):
mesh_data = self.getMeshItemData()
if mesh_data:
if 'file' in mesh_data:
return mesh_data['file']
else:
return None
def getOutputItemData(self):
output_item = self.findChildItemWithName(self, 'Outputs')
data = None
try:
return output_item.table_data
except:
pass
return None
  ##
  # Build the lists of output file and block names
  # Returns [output_file_names, output_block_names] for the Outputs block
def getOutputFileAndBlockNames(self):
# Storage for file_base as a common parameter
common_file_base = ''
output_file_names = []
output_block_names = []
# Find the Outputs block items and the names of the sub-blocks
outputs = self.findChildItemWithName(self, 'Outputs')
outputs_children = self.getChildNames(outputs)
    # Extract the common file_base parameter, falling back to a default
    # Make sure that the node has table_data before going on...not all do!
if hasattr(outputs, 'table_data'):
output_data = outputs.table_data
if 'file_base' in output_data:
common_file_base = output_data['file_base']
else:
common_file_base = 'peacock_run_tmp'
# Check for short-cut syntax (i.e., exodus = true)
if outputs.table_data and 'exodus' in outputs.table_data and outputs.table_data['exodus'] == 'true':
if common_file_base == 'peacock_run_tmp':
output_file_names.append(common_file_base + '_out.e')
else:
output_file_names.append(common_file_base + '.e')
output_block_names.append('exodus')
# Loop through each of the sub-blocks and grab the data, if type = Exodus
for item in outputs_children:
# Extract the data for the sub-block
child = self.findChildItemWithName(outputs, item)
output_data = child.table_data
# If the object is active (checked), it contains output_data, and is of type = Exodus, then extract the filename
if child.checkState(0) > 0 and ('type' in output_data) and (output_data['type'] == 'Exodus'):
file_base = common_file_base + "_" + output_data['Name']
# Check for file_base
if ('file_base' in output_data) and (output_data['file_base'] != ''):
file_base = output_data['file_base']
# Check for oversampling and appending of '_oversample'
        if ('oversample' in output_data) and (output_data['oversample'] != '0') and ('append_oversample' in output_data) and (output_data['append_oversample'] != '0'):
file_base = file_base + '_oversample'
# Append the file_base and object name to the lists
output_file_names.append(file_base + '.e')
output_block_names.append(output_data['Name'])
# FIXME: Hack to make raven and r7 work for now
if 'raven' in self.input_file_widget.app_path or 'r7' in self.input_file_widget.app_path:
output_file_names = [common_file_base + '_displaced.e']
output_block_names = ['']
# Return the list of file and block names
return [output_file_names, output_block_names]
def _itemHasEditableParameters(self, item):
this_path = self.generatePathFromItem(item)
this_path = '/' + self.action_syntax.getPath(this_path) # Get the real action path associated with this item
yaml_entry = self.input_file_widget.yaml_data.findYamlEntry(this_path)
has_type_subblock = False
if 'subblocks' in yaml_entry and yaml_entry['subblocks']:
for sb in yaml_entry['subblocks']:
if '<type>' in sb['name']:
has_type_subblock = True
if ('parameters' in yaml_entry and yaml_entry['parameters'] != None) or has_type_subblock or this_path == '/GlobalParams':
return True
def _addDataRecursively(self, parent_item, node):
is_active = 'active' not in node.parent.params or node.name in node.parent.params['active'].split(' ')
table_data = node.params
table_data['Name'] = node.name
param_comments = node.param_comments
comment = '\n'.join(node.comments)
new_child = self.findChildItemWithName(parent_item, table_data['Name'])
if not new_child: # If we didn't find a child that already matched then create a new child
new_child = QtGui.QTreeWidgetItem(parent_item)
new_child.setText(0,table_data['Name'])
# parent_item.addChild(new_child)
new_child.table_data = {}
new_child.param_comments = []
new_child.comment = ''
has_params = False
# See if there are any actual parameters for this item
for name,value in node.params.items():
if name != 'active':
has_params = True
if has_params:
new_child.table_data = copy.deepcopy(table_data)
if 'active' in new_child.table_data:
del new_child.table_data['active']
new_child.param_comments = param_comments
new_child.comment = comment
new_child.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable)
if is_active:
new_child.setCheckState(0, QtCore.Qt.Checked)
else:
new_child.setCheckState(0, QtCore.Qt.Unchecked)
if new_child.text(0) == 'Mesh':
if 'type' not in new_child.table_data:
new_child.table_data['type'] = 'FileMesh'
self.mesh_item_changed.emit(new_child)
if new_child.text(0) == 'Problem':
if 'type' not in new_child.table_data:
new_child.table_data['type'] = 'FEProblem'
for child, child_node in node.children.items():
self._addDataRecursively(new_child, child_node)
def _recursivelyAddTreeItems(self, split_path, parent):
this_piece = split_path[0]
this_item = None
found_it = False
is_star = False
if this_piece == '*':
found_it = True
is_star = True
num_children = 0
try: # This will fail when we're dealing with the QTreeWidget itself
num_children = parent.childCount()
except:
num_children = parent.topLevelItemCount()
for i in range(num_children):
child = None
try: # This will fail when we're dealing with the QTreeWidget itself
child = parent.child(i)
except:
child = parent.topLevelItem(i)
if child.text(0) == this_piece:
this_item = child
found_it = True
if not found_it:
# Add it
this_item = QtGui.QTreeWidgetItem(parent)
this_item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable)
this_item.setCheckState(0, QtCore.Qt.Unchecked)
this_item.setText(0, this_piece)
this_item.table_data = {}
this_item.param_comments = []
this_item.comment = ''
this_path = self.generatePathFromItem(this_item)
if self.action_syntax.hasStar(this_path):
this_item.setForeground(0, QtCore.Qt.blue)
if len(split_path) > 1:
if not is_star:
self._recursivelyAddTreeItems(split_path[1:], this_item)
else: # If it is a star and there are children - then add it to all of the children
for i in range(num_children):
child = None
try: # This will fail when we're dealing with the QTreeWidget itself
child = parent.child(i)
except:
child = parent.topLevelItem(i)
self._recursivelyAddTreeItems(split_path[1:], child)
def getChildNames(self, parent):
if not parent:
return []
try: # This will fail when we're dealing with the QTreeWidget itself
num_children = parent.childCount()
except:
num_children = parent.topLevelItemCount()
children_names = []
for i in range(num_children):
child = None
try: # This will fail when we're dealing with the QTreeWidget itself
child = parent.child(i)
except:
child = parent.topLevelItem(i)
children_names.append(child.text(0))
return children_names
def getChildNamesOfPathRecurse(self, current_item, path_pieces):
if not len(path_pieces):
children = self.getChildNames(current_item)
if not children:
return []
return children
next_item = self.findChildItemWithName(current_item, path_pieces[0])
if not next_item:
return []
return self.getChildNamesOfPathRecurse(next_item, path_pieces[1:])
  def getChildNamesOfPath(self, path):
    ''' Pass in a path, get out the child names underneath that path.
        Returns an empty list on failure. '''
path_pieces = path.strip('/').split('/')
return self.getChildNamesOfPathRecurse(self, path_pieces)
def _doubleClickedItem(self, item, column):
# Make sure the syntax is up to date
self.input_file_widget.recache()
this_path = self.generatePathFromItem(item)
if not self.action_syntax.isPath(this_path) or self._itemHasEditableParameters(item):
already_had_data = False
try:
item.table_data # If this fails we will jump to "except"...
except:
item.table_data = None
parent_path = ''
this_path_is_hard = False
if self.action_syntax.isPath(this_path):
this_path_is_hard = True
this_path = '/' + self.action_syntax.getPath(this_path) # Get the real action path associated with this item
parent_path = this_path
else:
parent_path = self.generatePathFromItem(item.parent())
parent_path = '/' + self.action_syntax.getPath(parent_path)
yaml_entry = self.input_file_widget.yaml_data.findYamlEntry(parent_path)
global_params = {}
global_params_item = self.findChildItemWithName(self, 'GlobalParams')
# Don't pass in global_params for the GlobalParams block!
if global_params_item and 'GlobalParams' not in this_path:
global_params = global_params_item.table_data
# Hack!
if 'Outputs' in this_path:
new_gui = OptionsGUI(yaml_entry, self.action_syntax, item.text(column), item.table_data, item.param_comments, item.comment, False, self.application.typeOptions(), global_params, this_path_is_hard)
else:
new_gui = OptionsGUI(yaml_entry, self.action_syntax, item.text(column), item.table_data, item.param_comments, item.comment, False, self.application.typeOptions(), global_params, False)
if item.table_data:
new_gui.incoming_data = item.table_data
if new_gui.exec_():
item.table_data = new_gui.result()
item.param_comments = new_gui.param_table.param_comments
item.comment = new_gui.param_table.comment
if not self.action_syntax.isPath(this_path): # Don't change the name of hard paths
item.setText(0,item.table_data['Name'])
item.setCheckState(0, QtCore.Qt.Checked)
if item.text(0) == 'Mesh':
self.mesh_item_changed.emit(item)
self._updateOtherGUIElements()
def _itemChanged(self, item, column):
self._updateOtherGUIElements()
def _deleteCurrentItem(self):
item = self.currentItem()
parent = item.parent()
if parent:
parent.removeChild(item)
else: #Must be a top level item
      self.takeTopLevelItem(self.indexOfTopLevelItem(item))
self.addHardPathsToTree() # We do this here because they might have removed a hard path... but there is no way to get them back
self._updateOtherGUIElements()
def _editComment(self):
item = self.currentItem()
ce = CommentEditor(item)
if ce.exec_():
self._itemChanged(item, 0)
def _addItem(self):
# Make sure the syntax is up to date
self.input_file_widget.recache()
item = self.currentItem()
this_path = self.generatePathFromItem(item)
this_path = '/' + self.action_syntax.getPath(this_path) # Get the real action path associated with this item
yaml_entry = self.input_file_widget.yaml_data.findYamlEntry(this_path)
global_params = {}
global_params_item = self.findChildItemWithName(self, 'GlobalParams')
if global_params_item:
global_params = global_params_item.table_data
self.new_gui = OptionsGUI(yaml_entry, self.action_syntax, item.text(0), None, None, None, False, self.application.typeOptions(), global_params, False)
if self.new_gui.exec_():
table_data = self.new_gui.result()
param_comments = self.new_gui.param_table.param_comments
comment = self.new_gui.param_table.comment
new_child = QtGui.QTreeWidgetItem(item)
new_child.setText(0,table_data['Name'])
new_child.table_data = table_data
new_child.param_comments = param_comments
new_child.comment = comment
new_child.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable)
new_child.setCheckState(0, QtCore.Qt.Checked)
item.addChild(new_child)
item.setCheckState(0, QtCore.Qt.Checked)
item.setExpanded(True)
self.setCurrentItem(new_child)
if item.text(0) == 'Mesh':
self.mesh_item_changed.emit(item)
self._updateOtherGUIElements()
self.addHardPathsToTree() # We do this here because * paths might add more paths underneath the item we just added
def _newContext(self, pos):
global_pos = self.mapToGlobal(pos)
item = self.itemAt(pos)
this_path = self.generatePathFromItem(item)
menu = QtGui.QMenu(self)
# Don't allow deletion of hard paths
if self.action_syntax.hasStar(this_path): # If it is a hard path allow them to add a child
add_action = QtGui.QAction("Add...", self)
add_action.triggered.connect(self._addItem)
menu.addAction(add_action)
else:
delete_action = QtGui.QAction("Delete", self)
delete_action.triggered.connect(self._deleteCurrentItem)
menu.addAction(delete_action)
comment_action = QtGui.QAction("Edit Comment...", self)
comment_action.triggered.connect(self._editComment)
menu.addAction(comment_action)
menu.popup(global_pos)
def _updateOtherGUIElements(self):
self.tree_changed.emit()
self.input_file_widget.input_file_textbox.updateTextBox()
# Update the output selection box (TODO: This only needs to run when [Outputs] is changed)
if hasattr(self.application.main_window, "visualize_widget"):
self._output_file_names = []
self._output_block_names = []
self.application.main_window.visualize_widget.updateOutputControl()
def _currentItemChanged(self, current, previous):
if not current:
return
if 'boundary' in current.table_data:
self.input_file_widget.mesh_render_widget.highlightBoundary(current.table_data['boundary'])
elif 'master' in current.table_data:
if 'slave' in current.table_data:
self.input_file_widget.mesh_render_widget.highlightBoundary(current.table_data['master']+' '+current.table_data['slave'])
elif 'block' in current.table_data:
self.input_file_widget.mesh_render_widget.highlightBlock(current.table_data['block'])
elif previous and hasattr(previous, 'table_data') and ('boundary' in previous.table_data or 'block' in previous.table_data or ('master' in previous.table_data and 'slave' in previous.table_data)):
self.input_file_widget.mesh_render_widget.clearHighlight()
|
aliyun/oss-ftp
|
refs/heads/master
|
python27/unix/lib/urllib3/util/response.py
|
199
|
from __future__ import absolute_import
from ..packages.six.moves import http_client as httplib
from ..exceptions import HeaderParsingError
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
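# A minimal usage sketch (illustrative only, not part of urllib3). The
# helper name below is hypothetical; it exercises the ``.closed`` probe
# of is_fp_closed() on a plain in-memory file object.
def _demo_is_fp_closed():  # pragma: no cover
    import io
    buf = io.BytesIO(b"payload")
    assert not is_fp_closed(buf)  # open: ``.closed`` is False
    buf.close()
    assert is_fp_closed(buf)      # closed: ``.closed`` is True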
def assert_header_parsing(headers):
"""
Asserts whether all headers have been successfully parsed.
Extracts encountered errors from the result of parsing headers.
Only works on Python 3.
:param headers: Headers to verify.
:type headers: `httplib.HTTPMessage`.
:raises urllib3.exceptions.HeaderParsingError:
If parsing errors are found.
"""
# This will fail silently if we pass in the wrong kind of parameter.
# To make debugging easier add an explicit check.
if not isinstance(headers, httplib.HTTPMessage):
raise TypeError('expected httplib.Message, got {0}.'.format(
type(headers)))
defects = getattr(headers, 'defects', None)
get_payload = getattr(headers, 'get_payload', None)
unparsed_data = None
if get_payload: # Platform-specific: Python 3.
unparsed_data = get_payload()
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response):
"""
Checks whether the request of a response was a HEAD request.
Handles the quirks of AppEngine.
:param response:
:type response: :class:`httplib.HTTPResponse`
"""
# FIXME: Can we do this somehow without accessing private httplib _method?
method = response._method
if isinstance(method, int): # Platform-specific: Appengine
return method == 3
return method.upper() == 'HEAD'
|
serpilliere/miasm
|
refs/heads/master
|
example/expression/simplification_add.py
|
5
|
from __future__ import print_function
import miasm.expression.expression as m2_expr
from miasm.expression.simplifications import expr_simp
from pdb import pm
print("""
Expression simplification demo: Adding a simplification:
a + a + a == a * 3
More detailed examples can be found in miasm/expression/simplification*.
""")
# Define the simplification method
## @expr_simp is the current expression simplifier instance
## (for recursive simplifications)
## @expr is the expression to (perhaps) simplify
def simp_add_mul(expr_simp, expr):
"Naive Simplification: a + a + a == a * 3"
# Match the expected form
## isinstance(expr, m2_expr.ExprOp) is not needed: simplifications are
## attached to expression types
if expr.op == "+" and \
len(expr.args) == 3 and \
expr.args.count(expr.args[0]) == len(expr.args):
# Effective simplification
return m2_expr.ExprOp("*", expr.args[0],
m2_expr.ExprInt(3, expr.args[0].size))
else:
# Do not simplify
return expr
a = m2_expr.ExprId('a', 32)
base_expr = a + a + a
print("Without adding the simplification:")
print("\t%s = %s" % (base_expr, expr_simp(base_expr)))
# Enable pass
expr_simp.enable_passes({m2_expr.ExprOp: [simp_add_mul]})
print("After adding the simplification:")
print("\t%s = %s" % (base_expr, expr_simp(base_expr)))
# Fails automatically if the simplification was not applied
assert(expr_simp(base_expr) == m2_expr.ExprOp("*", a,
m2_expr.ExprInt(3, a.size)))
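# A further sketch in the same spirit (hypothetical, not part of the
# original demo): the same pass mechanism can rewrite a + a into a * 2.
# Note the default passes may already reduce this form; the point here is
# purely to illustrate the mechanism.
def simp_add_two(expr_simp, expr):
    "Naive simplification: a + a == a * 2"
    if expr.op == "+" and \
       len(expr.args) == 2 and \
       expr.args[0] == expr.args[1]:
        return m2_expr.ExprOp("*", expr.args[0],
                              m2_expr.ExprInt(2, expr.args[0].size))
    return expr
expr_simp.enable_passes({m2_expr.ExprOp: [simp_add_two]})
print("\t%s = %s" % (a + a, expr_simp(a + a)))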
|
xxshutong/openerp-7.0
|
refs/heads/master
|
openerp/addons/process/__openerp__.py
|
65
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Enterprise Process',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
This module shows the basic processes involved in the selected modules and the sequence in which they occur.
======================================================================================================
**Note:** This applies to the modules containing modulename_process.xml.
**e.g.** product/process/product_process.xml.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['web'],
'data': [
'security/ir.model.access.csv',
'process_view.xml'
],
'demo': [],
'installable': True,
'images': ['images/process_nodes.jpeg','images/process_transitions.jpeg', 'images/processes.jpeg'],
'js': [
'static/src/js/process.js'
],
'css': [
'static/src/css/process.css'
],
'qweb': [
'static/src/xml/*.xml'
],
'auto_install': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
wjr1005/p2pool
|
refs/heads/master
|
wstools/tests/test_wstools.py
|
308
|
#!/usr/bin/env python
############################################################################
# Joshua R. Boverhof, David W. Robertson, LBNL
# See LBNLCopyright for copyright notice!
###########################################################################
import unittest, tarfile, os, ConfigParser
import test_wsdl
SECTION='files'
CONFIG_FILE = 'config.txt'
def extractFiles(section, option):
config = ConfigParser.ConfigParser()
config.read(CONFIG_FILE)
archives = config.get(section, option)
archives = eval(archives)
for file in archives:
tar = tarfile.open(file)
if not os.access(tar.membernames[0], os.R_OK):
for i in tar.getnames():
tar.extract(i)
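# Sketch of the expected config.txt layout (the archive name below is
# hypothetical). Note the option value is eval()'d above, so it must be
# a Python list literal:
#
#   [files]
#   archives = ['wsdl_archive.tar']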
def makeTestSuite():
suite = unittest.TestSuite()
suite.addTest(test_wsdl.makeTestSuite("services_by_file"))
return suite
def main():
extractFiles(SECTION, 'archives')
unittest.main(defaultTest="makeTestSuite")
if __name__ == "__main__" : main()
|
danakj/chromium
|
refs/heads/master
|
build/gyp_chromium_test.py
|
27
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(os.path.join(SRC_DIR, 'third_party', 'pymock'))
import mock
import gyp_chromium
class TestGetOutputDirectory(unittest.TestCase):
@mock.patch('os.environ', {})
@mock.patch('sys.argv', [__file__])
def testDefaultValue(self):
self.assertEqual(gyp_chromium.GetOutputDirectory(), 'out')
@mock.patch('os.environ', {'GYP_GENERATOR_FLAGS': 'output_dir=envfoo'})
@mock.patch('sys.argv', [__file__])
def testEnvironment(self):
self.assertEqual(gyp_chromium.GetOutputDirectory(), 'envfoo')
@mock.patch('os.environ', {'GYP_GENERATOR_FLAGS': 'output_dir=envfoo'})
@mock.patch('sys.argv', [__file__, '-Goutput_dir=cmdfoo'])
def testGFlagOverridesEnv(self):
self.assertEqual(gyp_chromium.GetOutputDirectory(), 'cmdfoo')
@mock.patch('os.environ', {})
@mock.patch('sys.argv', [__file__, '-G', 'output_dir=foo'])
def testGFlagWithSpace(self):
self.assertEqual(gyp_chromium.GetOutputDirectory(), 'foo')
class TestGetGypVars(unittest.TestCase):
@mock.patch('os.environ', {})
def testDefault(self):
self.assertEqual(gyp_chromium.GetGypVars([]), {})
@mock.patch('os.environ', {})
@mock.patch('sys.argv', [__file__, '-D', 'foo=bar'])
def testDFlags(self):
self.assertEqual(gyp_chromium.GetGypVars([]), {'foo': 'bar'})
@mock.patch('os.environ', {})
@mock.patch('sys.argv', [__file__, '-D', 'foo'])
def testDFlagsNoValue(self):
self.assertEqual(gyp_chromium.GetGypVars([]), {'foo': '1'})
@mock.patch('os.environ', {})
@mock.patch('sys.argv', [__file__, '-D', 'foo=bar', '-Dbaz'])
def testDFlagMulti(self):
self.assertEqual(gyp_chromium.GetGypVars([]), {'foo': 'bar', 'baz': '1'})
if __name__ == '__main__':
unittest.main()
|
ChinaQuants/pyfolio
|
refs/heads/master
|
pyfolio/plotting.py
|
1
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from collections import OrderedDict
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import matplotlib.lines as mlines
from sklearn import preprocessing
from . import utils
from . import timeseries
from . import pos
from . import _seaborn as sns
from . import txn
from .utils import APPROX_BDAYS_PER_MONTH
from functools import wraps
def plotting_context(func):
"""Decorator to set plotting context during function call."""
@wraps(func)
def call_w_context(*args, **kwargs):
set_context = kwargs.pop('set_context', True)
if set_context:
with context():
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
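# Illustrative sketch (the function name is hypothetical, not part of
# pyfolio): anything wrapped with @plotting_context gains a
# ``set_context`` keyword argument.
#
#     @plotting_context
#     def plot_foo(returns, ax=None):
#         ...
#
#     plot_foo(rets)                     # drawn inside pyfolio's context
#     plot_foo(rets, set_context=False)  # drawn with the caller's style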
def context(context='notebook', font_scale=1.5, rc=None):
"""Create pyfolio default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
some custom settings. Usually you would use it in a with-context.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by factor font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
is being used and will be added to any
rc passed in, unless explicitly overridden.
Returns
-------
seaborn plotting context
Example
-------
>>> with pyfolio.plotting.context(font_scale=2):
>>> pyfolio.create_full_tear_sheet()
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97'}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale,
rc=rc)
def plot_rolling_fama_french(
returns,
factor_returns=None,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best',
ax=None, **kwargs):
"""Plots rolling Fama-French single factor betas.
Specifically, plots SMB, HML, and UMD vs. date with a legend.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame, optional
data set containing the Fama-French risk factors. See
utils.load_portfolio_risk_factors.
rolling_window : int, optional
The days window over which to compute the beta.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
ax.set_title(
"Rolling Fama-French Single Factor Betas (%.0f-month)" % (
rolling_window / APPROX_BDAYS_PER_MONTH
)
)
ax.set_ylabel('beta')
rolling_beta = timeseries.rolling_fama_french(
returns,
factor_returns=factor_returns,
rolling_window=rolling_window)
rolling_beta.plot(alpha=0.7, ax=ax, **kwargs)
ax.axhline(0.0, color='black')
ax.legend(['Small-Caps (SMB)',
'High-Growth (HML)',
'Momentum (UMD)'],
loc=legend_loc)
ax.set_ylim((-2.0, 2.0))
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.axhline(0.0, color='black')
ax.set_xlabel('')
return ax
def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):
"""
Plots a heatmap of returns by month.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
monthly_ret_table = timeseries.aggregate_returns(returns,
'monthly')
monthly_ret_table = monthly_ret_table.unstack()
monthly_ret_table = np.round(monthly_ret_table, 3)
sns.heatmap(
monthly_ret_table.fillna(0) *
100.0,
annot=True,
annot_kws={
"size": 9},
alpha=1.0,
center=0.0,
cbar=False,
cmap=matplotlib.cm.RdYlGn,
ax=ax, **kwargs)
ax.set_ylabel('Year')
ax.set_xlabel('Month')
ax.set_title("Monthly Returns (%)")
return ax
def plot_annual_returns(returns, ax=None, **kwargs):
"""
Plots a bar graph of returns by year.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
ann_ret_df = pd.DataFrame(
timeseries.aggregate_returns(
returns,
'yearly'))
ax.axvline(
100 *
ann_ret_df.values.mean(),
color='steelblue',
linestyle='--',
lw=4,
alpha=0.7)
(100 * ann_ret_df.sort_index(ascending=False)
).plot(ax=ax, kind='barh', alpha=0.70, **kwargs)
ax.axvline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylabel('Year')
ax.set_xlabel('Returns')
ax.set_title("Annual Returns")
ax.legend(['mean'])
return ax
def plot_monthly_returns_dist(returns, ax=None, **kwargs):
"""
Plots a distribution of monthly returns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
monthly_ret_table = timeseries.aggregate_returns(returns, 'monthly')
ax.hist(
100 * monthly_ret_table,
color='orangered',
alpha=0.80,
bins=20,
**kwargs)
ax.axvline(
100 * monthly_ret_table.mean(),
color='gold',
linestyle='--',
lw=4,
alpha=1.0)
ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)
ax.legend(['mean'])
ax.set_ylabel('Number of months')
ax.set_xlabel('Returns')
ax.set_title("Distribution of Monthly Returns")
return ax
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
"""Plots total amount of stocks with an active position, either short
or long.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.apply(lambda x: np.sum(x != 0), axis='columns')
df_holdings_by_month = df_holdings.resample('1M', how='mean')
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3,
alpha=1.0)
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, net'],
loc=legend_loc)
ax.set_title('Holdings per Day')
ax.set_ylabel('Amount of holdings per day')
ax.set_xlabel('')
return ax
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
"""
Plots cumulative returns highlighting top drawdown periods.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdowns periods to plot (default 10).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = timeseries.cum_returns(returns, starting_value=1.0)
df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)
df_cum_rets.plot(ax=ax, **kwargs)
lim = ax.get_ylim()
colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
for i, (peak, recovery) in df_drawdowns[
['peak date', 'recovery date']].iterrows():
if pd.isnull(recovery):
recovery = returns.index[-1]
ax.fill_between((peak, recovery),
lim[0],
lim[1],
alpha=.4,
color=colors[i])
ax.set_title('Top %i Drawdown Periods' % top)
ax.set_ylabel('Cumulative returns')
ax.legend(['Portfolio'], loc='upper left')
ax.set_xlabel('')
return ax
def plot_drawdown_underwater(returns, ax=None, **kwargs):
"""Plots how far underwaterr returns are over time, or plots current
drawdown vs. date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.percentage)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = timeseries.cum_returns(returns, starting_value=1.0)
running_max = np.maximum.accumulate(df_cum_rets)
underwater = -100 * ((running_max - df_cum_rets) / running_max)
(underwater).plot(ax=ax, kind='area', color='coral', alpha=0.7, **kwargs)
ax.set_ylabel('Drawdown')
ax.set_title('Underwater Plot')
ax.set_xlabel('')
return ax
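# Worked example (illustrative numbers): if the cumulative value peaked
# at 1.25 and currently sits at 1.00, the plotted drawdown is
# -100 * (1.25 - 1.00) / 1.25 = -20%.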
def plot_perf_stats(returns, factor_returns, ax=None):
"""Create box plot of some performance metrics of the strategy.
The width of the box whiskers is determined by a bootstrap.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame, optional
data set containing the Fama-French risk factors. See
utils.load_portfolio_risk_factors.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
bootstrap_values = timeseries.perf_stats_bootstrap(returns,
factor_returns,
return_stats=False)
bootstrap_values = bootstrap_values.drop('kurtosis', axis='columns')
sns.boxplot(bootstrap_values, orient='h', ax=ax)
return ax
def show_perf_stats(returns, factor_returns, live_start_date=None,
bootstrap=False):
"""Prints some performance metrics of the strategy.
- Shows amount of time the strategy has been run in backtest and
out-of-sample (in live trading).
- Shows Omega ratio, max drawdown, Calmar ratio, annual return,
stability, Sharpe ratio, annual volatility, alpha, and beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
live_start_date : datetime, optional
The point in time when the strategy began live trading, after
its backtest period.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
bootstrap : boolean (optional)
Whether to perform bootstrap analysis for the performance
metrics.
- For more information, see timeseries.perf_stats_bootstrap
"""
if bootstrap:
perf_func = timeseries.perf_stats_bootstrap
else:
perf_func = timeseries.perf_stats
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
returns_backtest = returns[returns.index < live_start_date]
returns_live = returns[returns.index > live_start_date]
perf_stats_live = np.round(perf_func(
returns_live,
factor_returns=factor_returns), 2)
perf_stats_all = np.round(perf_func(
returns,
factor_returns=factor_returns), 2)
print('Out-of-Sample Months: ' +
str(int(len(returns_live) / APPROX_BDAYS_PER_MONTH)))
else:
returns_backtest = returns
print('Backtest Months: ' +
str(int(len(returns_backtest) / APPROX_BDAYS_PER_MONTH)))
perf_stats = np.round(perf_func(
returns_backtest,
factor_returns=factor_returns), 2)
if live_start_date is not None:
perf_stats = pd.concat(OrderedDict([
('Backtest', perf_stats),
('Out of sample', perf_stats_live),
('All history', perf_stats_all),
]), axis=1)
print(perf_stats)
def plot_rolling_returns(returns,
factor_returns=None,
live_start_date=None,
cone_std=None,
legend_loc='best',
volatility_match=False,
cone_function=timeseries.forecast_cone_bootstrap,
ax=None, **kwargs):
"""
Plots cumulative rolling returns versus a benchmark.
Backtest returns are in green, and out-of-sample (live trading)
returns are in red.
Additionally, a non-parametric cone plot may be added to the
out-of-sample returns region.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of a risk factor.
- This is in the same style as returns.
live_start_date : datetime, optional
The date when the strategy began live trading, after
its backtest period. This date should be normalized.
cone_std : float, or tuple, optional
If float, The standard deviation to use for the cone plots.
If tuple, Tuple of standard deviation values to use for the cone plots
- See timeseries.forecast_cone_bounds for more details.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
volatility_match : bool, optional
Whether to normalize the volatility of the returns to those of the
benchmark returns. This helps compare strategies with different
volatilities. Requires passing of benchmark_rets.
cone_function : function, optional
Function to use when generating forecast probability cone.
The function signature must follow the form:
def cone(in_sample_returns (pd.Series),
days_to_project_forward (int),
cone_std= (float, or tuple),
starting_value= (int, or float))
See timeseries.forecast_cone_bootstrap for an example.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
ax.set_ylabel('Cumulative returns')
ax.set_xlabel('')
if volatility_match and factor_returns is None:
raise ValueError('volatility_match requires passing of'
'factor_returns.')
elif volatility_match and factor_returns is not None:
bmark_vol = factor_returns.loc[returns.index].std()
returns = (returns / returns.std()) * bmark_vol
cum_rets = timeseries.cum_returns(returns, 1.0)
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
if factor_returns is not None:
cum_factor_returns = timeseries.cum_returns(
factor_returns[cum_rets.index], 1.0)
cum_factor_returns.plot(lw=2, color='gray',
label=factor_returns.name, alpha=0.60,
ax=ax, **kwargs)
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
is_cum_returns = cum_rets.loc[cum_rets.index < live_start_date]
oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date]
else:
is_cum_returns = cum_rets
oos_cum_returns = pd.Series([])
is_cum_returns.plot(lw=3, color='forestgreen', alpha=0.6,
label='Backtest', ax=ax, **kwargs)
if len(oos_cum_returns) > 0:
oos_cum_returns.plot(lw=4, color='red', alpha=0.6,
label='Live', ax=ax, **kwargs)
if cone_std is not None:
if isinstance(cone_std, (float, int)):
cone_std = [cone_std]
is_returns = returns.loc[returns.index < live_start_date]
cone_bounds = cone_function(
is_returns,
len(oos_cum_returns),
cone_std=cone_std,
starting_value=is_cum_returns[-1])
cone_bounds = cone_bounds.set_index(oos_cum_returns.index)
for std in cone_std:
ax.fill_between(cone_bounds.index,
cone_bounds[float(std)],
cone_bounds[float(-std)],
color='steelblue', alpha=0.5)
if legend_loc is not None:
ax.legend(loc=legend_loc)
ax.axhline(1.0, linestyle='--', color='black', lw=2)
return ax
def plot_rolling_beta(returns, factor_returns, legend_loc='best',
ax=None, **kwargs):
"""
Plots the rolling 6-month and 12-month beta versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.set_title("Rolling Portfolio Beta to " + str(factor_returns.name))
ax.set_ylabel('Beta')
rb_1 = timeseries.rolling_beta(
returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6)
rb_1.plot(color='steelblue', lw=3, alpha=0.6, ax=ax, **kwargs)
rb_2 = timeseries.rolling_beta(
returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 12)
rb_2.plot(color='grey', lw=3, alpha=0.4, ax=ax, **kwargs)
ax.set_ylim((-2.5, 2.5))
ax.axhline(rb_1.mean(), color='steelblue', linestyle='--', lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=2)
ax.set_xlabel('')
ax.legend(['6-mo',
'12-mo'],
loc=legend_loc)
return ax
def plot_rolling_sharpe(returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best', ax=None, **kwargs):
"""
Plots the rolling Sharpe ratio versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_window : int, optional
The days window over which to compute the sharpe ratio.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
rolling_sharpe_ts = timeseries.rolling_sharpe(
returns, rolling_window)
rolling_sharpe_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,
**kwargs)
ax.set_title('Rolling Sharpe ratio (6-month)')
ax.axhline(
rolling_sharpe_ts.mean(),
color='steelblue',
linestyle='--',
lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylim((-3.0, 6.0))
ax.set_ylabel('Sharpe ratio')
ax.set_xlabel('')
ax.legend(['Sharpe', 'Average'],
loc=legend_loc)
return ax
def plot_gross_leverage(returns, gross_lev, ax=None, **kwargs):
"""Plots gross leverage versus date.
Gross leverage is the sum of long and short exposure per share
divided by net asset value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
gross_lev : pd.Series, optional
The leverage of a strategy.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
gross_lev.plot(alpha=0.8, lw=0.5, color='g', legend=False, ax=ax,
**kwargs)
ax.axhline(gross_lev.mean(), color='g', linestyle='--', lw=3,
alpha=1.0)
ax.set_title('Gross Leverage')
ax.set_ylabel('Gross Leverage')
ax.set_xlabel('')
return ax
def plot_exposures(returns, positions_alloc, ax=None, **kwargs):
"""Plots a cake chart of the long and short exposure.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions_alloc : pd.DataFrame
Portfolio allocation of positions. See
pos.get_percent_alloc.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
df_long_short = pos.get_long_short_pos(positions_alloc)
df_long_short.plot(
kind='area', color=['lightblue', 'green'], alpha=1.0,
ax=ax, **kwargs)
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_title("Long/Short Exposure")
ax.set_ylabel('Exposure')
ax.set_xlabel('')
return ax
def show_and_plot_top_positions(returns, positions_alloc,
show_and_plot=2, hide_positions=False,
legend_loc='real_best', ax=None,
**kwargs):
"""Prints and/or plots the exposures of the top 10 held positions of
all time.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions_alloc : pd.DataFrame
Portfolio allocation of positions. See pos.get_percent_alloc.
show_and_plot : int, optional
By default, this is 2, and both prints and plots.
If this is 0, it will only plot; if 1, it will only print.
hide_positions : bool, optional
If True, will not output any symbol names.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
By default, the legend will display below the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes, conditional
The axes that were plotted on.
"""
df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs(
positions_alloc)
if show_and_plot == 1 or show_and_plot == 2:
print("\n")
print('Top 10 long positions of all time (and max%)')
print(pd.DataFrame(df_top_long).index.values)
print(np.round(pd.DataFrame(df_top_long)[0].values, 3))
print("\n")
print('Top 10 short positions of all time (and max%)')
print(pd.DataFrame(df_top_short).index.values)
print(np.round(pd.DataFrame(df_top_short)[0].values, 3))
print("\n")
print('Top 10 positions of all time (and max%)')
print(pd.DataFrame(df_top_abs).index.values)
print(np.round(pd.DataFrame(df_top_abs)[0].values, 3))
print("\n")
_, _, df_top_abs_all = pos.get_top_long_short_abs(
positions_alloc, top=9999)
print('All positions ever held')
print(pd.DataFrame(df_top_abs_all).index.values)
print(np.round(pd.DataFrame(df_top_abs_all)[0].values, 3))
print("\n")
if show_and_plot == 0 or show_and_plot == 2:
if ax is None:
ax = plt.gca()
positions_alloc[df_top_abs.index].plot(
title='Portfolio Allocation Over Time, Only Top 10 Holdings',
alpha=0.4, ax=ax, **kwargs)
# Place legend below plot, shrink plot by 20%
if legend_loc == 'real_best':
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(
loc='upper center', frameon=True, bbox_to_anchor=(
0.5, -0.14), ncol=5)
else:
ax.legend(loc=legend_loc)
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_ylabel('Exposure by stock')
if hide_positions:
ax.legend_.remove()
return ax
def plot_max_median_position_concentration(positions, ax=None, **kwargs):
"""
Plots the max and median of long and short position concentrations
over time.
Parameters
----------
positions : pd.DataFrame
The positions that the strategy takes over time.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
alloc_summary = pos.get_max_median_position_concentration(positions)
colors = ['mediumblue', 'steelblue', 'tomato', 'firebrick']
alloc_summary.plot(linewidth=1, color=colors, alpha=0.6, ax=ax)
ax.legend(loc='center left')
ax.set_ylabel('Exposure')
ax.set_title('Long/Short Max and Median Position Concentration')
return ax
def plot_sector_allocations(returns, sector_alloc, ax=None, **kwargs):
"""Plots the sector exposures of the portfolio over time.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
sector_alloc : pd.DataFrame
Portfolio allocation of positions. See pos.get_sector_alloc.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
sector_alloc.plot(title='Sector Allocation Over Time',
alpha=0.4, ax=ax, **kwargs)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(
loc='upper center', frameon=True, bbox_to_anchor=(
0.5, -0.14), ncol=5)
ax.set_xlim((sector_alloc.index[0], sector_alloc.index[-1]))
ax.set_ylabel('Exposure by sector')
return ax
def plot_return_quantiles(returns, df_weekly, df_monthly, ax=None, **kwargs):
"""Creates a box plot of daily, weekly, and monthly return
distributions.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
df_weekly : pd.Series
Weekly returns of the strategy, noncumulative.
- See timeseries.aggregate_returns.
df_monthly : pd.Series
Monthly returns of the strategy, noncumulative.
- See timeseries.aggregate_returns.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
sns.boxplot(data=[returns, df_weekly, df_monthly],
ax=ax, **kwargs)
ax.set_xticklabels(['daily', 'weekly', 'monthly'])
ax.set_title('Return quantiles')
return ax
def show_return_range(returns, df_weekly):
"""
Print monthly return and weekly return standard deviations.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
df_weekly : pd.Series
Weekly returns of the strategy, noncumulative.
- See timeseries.aggregate_returns.
"""
two_sigma_daily = returns.mean() - 2 * returns.std()
two_sigma_weekly = df_weekly.mean() - 2 * df_weekly.std()
var_sigma = pd.Series([two_sigma_daily, two_sigma_weekly],
index=['2-sigma returns daily',
'2-sigma returns weekly'])
print(np.round(var_sigma, 3))
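# Worked example (illustrative numbers): with a mean daily return of
# 0.1% and a daily standard deviation of 1%, the reported 2-sigma daily
# return is 0.001 - 2 * 0.01 = -0.019, i.e. roughly -1.9%.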
def plot_turnover(returns, transactions, positions,
legend_loc='best', ax=None, **kwargs):
"""Plots turnover vs. date.
Turnover is the number of shares traded for a period as a fraction
of total shares.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.one_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_turnover = txn.get_turnover(positions, transactions)
df_turnover_by_month = df_turnover.resample("M")
df_turnover.plot(color='steelblue', alpha=1.0, lw=0.5, ax=ax, **kwargs)
df_turnover_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_turnover.mean(), color='steelblue', linestyle='--', lw=3, alpha=1.0)
ax.legend(['Daily turnover',
'Average daily turnover, by month',
'Average daily turnover, net'],
loc=legend_loc)
ax.set_title('Daily Turnover')
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_ylim((0, 1))
ax.set_ylabel('Turnover')
ax.set_xlabel('')
return ax
def plot_slippage_sweep(returns, transactions, positions,
slippage_params=(3, 8, 10, 12, 15, 20, 50),
ax=None, **kwargs):
"""Plots a equity curves at different per-dollar slippage assumptions.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
slippage_params: tuple
Slippage parameters to apply to the return time series (in
basis points).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(positions, transactions,
period=None, average=False)
slippage_sweep = pd.DataFrame()
for bps in slippage_params:
adj_returns = txn.adjust_returns_for_slippage(returns, turnover, bps)
label = str(bps) + " bps"
slippage_sweep[label] = timeseries.cum_returns(adj_returns, 1)
slippage_sweep.plot(alpha=1.0, lw=0.5, ax=ax)
ax.set_title('Cumulative Returns Given Additional Per-Dollar Slippage')
ax.set_ylabel('')
ax.legend(loc='center left')
return ax
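# Back-of-the-envelope (illustrative numbers, assuming slippage scales
# linearly with turnover): at 10 bps of slippage per dollar traded and a
# daily turnover of 0.5, returns are reduced by roughly
# 0.5 * 10 / 10000 = 0.0005, i.e. 5 bps per day.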
def plot_slippage_sensitivity(returns, transactions, positions,
ax=None, **kwargs):
"""Plots curve relating per-dollar slippage to average annual returns.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(positions, transactions,
period=None, average=False)
avg_returns_given_slippage = pd.Series()
for bps in range(1, 100):
adj_returns = txn.adjust_returns_for_slippage(returns, turnover, bps)
avg_returns = timeseries.annual_return(
adj_returns)
avg_returns_given_slippage.loc[bps] = avg_returns
avg_returns_given_slippage.plot(alpha=1.0, lw=2, ax=ax)
ax.set(title='Average Annual Returns Given Additional Per-Dollar Slippage',
xticks=np.arange(0, 100, 10),
ylabel='Average Annual Return',
xlabel='Per-Dollar Slippage (bps)')
return ax
def plot_daily_turnover_hist(transactions, positions,
ax=None, **kwargs):
"""Plots a histogram of daily turnover rates.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(positions, transactions, period=None)
sns.distplot(turnover, ax=ax, **kwargs)
ax.set_title('Distribution of Daily Turnover Rates')
ax.set_xlabel('Turnover Rate')
return ax
def plot_daily_volume(returns, transactions, ax=None, **kwargs):
"""Plots trading volume per day vs. date.
Also displays all-time daily average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
daily_txn = txn.get_txn_vol(transactions)
daily_txn.txn_shares.plot(alpha=1.0, lw=0.5, ax=ax, **kwargs)
ax.axhline(daily_txn.txn_shares.mean(), color='steelblue',
linestyle='--', lw=3, alpha=1.0)
ax.set_title('Daily Trading Volume')
df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
ax.set_xlim((df_cum_rets.index[0], df_cum_rets.index[-1]))
ax.set_ylabel('Amount of shares traded')
ax.set_xlabel('')
return ax
def plot_daily_returns_similarity(returns_backtest, returns_live,
title='', scale_kws=None, ax=None,
**kwargs):
"""Plots overlapping distributions of in-sample (backtest) returns
and out-of-sample (live trading) returns.
Parameters
----------
returns_backtest : pd.Series
Daily returns of the strategy's backtest, noncumulative.
returns_live : pd.Series
Daily returns of the strategy's live trading, noncumulative.
title : str, optional
The title to use for the plot.
scale_kws : dict, optional
Additional arguments passed to preprocessing.scale.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
if scale_kws is None:
scale_kws = {}
sns.kdeplot(preprocessing.scale(returns_backtest, **scale_kws),
bw='scott', shade=True, label='backtest',
color='forestgreen', ax=ax, **kwargs)
sns.kdeplot(preprocessing.scale(returns_live, **scale_kws),
bw='scott', shade=True, label='out-of-sample',
color='red', ax=ax, **kwargs)
ax.set_title(title)
return ax
def show_worst_drawdown_periods(returns, top=5):
"""Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdowns periods to plot (default 5).
"""
print('\nWorst Drawdown Periods')
drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
drawdown_df['net drawdown in %'] = list(
map(utils.round_two_dec_places, drawdown_df['net drawdown in %']))
print(drawdown_df.sort('net drawdown in %', ascending=False))
def plot_monthly_returns_timeseries(returns, ax=None, **kwargs):
"""
Plots monthly returns as a timeseries.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
def cumulate_returns(x):
return timeseries.cum_returns(x)[-1]
if ax is None:
ax = plt.gca()
monthly_rets = returns.resample('M', how=cumulate_returns).to_period()
sns.barplot(x=monthly_rets.index,
y=monthly_rets.values,
color='steelblue')
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
# only show x-labels on year boundary
xticks_coord = []
xticks_label = []
count = 0
for i in monthly_rets.index:
if i.month == 1:
xticks_label.append(i)
xticks_coord.append(count)
# plot yearly boundary line
ax.axvline(count, color='gray', ls='--', alpha=0.3)
count += 1
ax.axhline(0.0, color='darkgray', ls='-')
ax.set_xticks(xticks_coord)
ax.set_xticklabels(xticks_label)
return ax
def plot_round_trip_life_times(round_trips, ax=None):
"""
Plots timespans and directions of round trip trades.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.subplot()
symbols = round_trips.symbol.unique()
symbol_idx = pd.Series(np.arange(len(symbols)), index=symbols)
for symbol, sym_round_trips in round_trips.groupby('symbol'):
for _, row in sym_round_trips.iterrows():
c = 'b' if row.long else 'r'
y_ix = symbol_idx[symbol]
ax.plot([row['open_dt'], row['close_dt']],
[y_ix, y_ix], color=c)
ax.set_yticklabels(symbols)
red_line = mlines.Line2D([], [], color='r', label='Short')
blue_line = mlines.Line2D([], [], color='b', label='Long')
ax.legend(handles=[red_line, blue_line], loc=0)
return ax
def show_profit_attribution(round_trips):
"""
Prints the share of total PnL contributed by each
traded name.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
"""
total_pnl = round_trips['pnl'].sum()
pct_profit_attribution = round_trips.groupby(
'symbol')['pnl'].sum() / total_pnl
print('\nProfitability (PnL / PnL total) per name:')
print(pct_profit_attribution.sort(inplace=False, ascending=False))
def plot_prob_profit_trade(round_trips, ax=None):
"""
Plots a probability distribution for the event of making
a profitable trade.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
x = np.linspace(0, 1., 500)
round_trips['profitable'] = round_trips.pnl > 0
dist = sp.stats.beta(round_trips.profitable.sum(),
(~round_trips.profitable).sum())
y = dist.pdf(x)
lower_perc = dist.ppf(.025)
upper_perc = dist.ppf(.975)
lower_plot = dist.ppf(.001)
upper_plot = dist.ppf(.999)
if ax is None:
ax = plt.subplot()
ax.plot(x, y)
ax.axvline(lower_perc, color='0.5')
ax.axvline(upper_perc, color='0.5')
ax.set(xlabel='Probability making a profitable decision', ylabel='Belief',
xlim=(lower_plot, upper_plot), ylim=(0, y.max() + 1.))
return ax
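# Worked example (illustrative numbers): with 60 profitable and 40
# losing round trips, the belief plotted above is Beta(60, 40), centered
# near 0.6 with a 95% interval of roughly (0.50, 0.70).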
|
rsharo/floodlight
|
refs/heads/master
|
example/graphDeps.py
|
148
|
#!/usr/bin/python
import urllib2
import json
import sys
def simple_json_get(url):
return json.loads(urllib2.urlopen(url).read())
def shorten(s):
return s.replace('net.floodlightcontroller','n.f'
).replace('com.bigswitch','c.b')
def usage(s):
sys.stderr.write("Usage:\ngrahDeps.py hostname [port]\n%s" % s)
sys.stderr.write("\n\n\n\n writes data to 'hostname.dot' for use with graphviz\n")
sys.exit(1)
if __name__ == '__main__':
host='localhost'
port=8080
if len(sys.argv) == 1 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
usage("need to specify hostname")
host = sys.argv[1]
if len(sys.argv) > 2:
port = int(sys.argv[2])
sys.stderr.write("Connecting to %s:%d ..." % (host,port))
URL="http://%s:%d/wm/core/module/loaded/json" % (host,port)
deps = simple_json_get(URL)
serviceMap = {}
nodeMap = {}
nodeCount = 0
sys.stderr.write("Writing to %s.dot ..." % (host))
f = open("%s.dot" % host, 'w')
f.write( "digraph Deps {\n")
for mod, info in deps.iteritems():
# sys.stderr.write("Discovered module %s\n" % mod)
nodeMap[mod] = "n%d" % nodeCount
nodeCount += 1
label = shorten(mod) + "\\n"
for service, serviceImpl in info['provides'].iteritems():
# sys.stderr.write(" Discovered service %s implemented with %s\n" % (service,serviceImpl))
label += "\\nService=%s" % shorten(service)
serviceMap[serviceImpl] = mod
f.write(" %s [ label=\"%s\", color=\"blue\"];\n" % (nodeMap[mod], label))
f.write("\n") # for readability
for mod, info in deps.iteritems():
for dep, serviceImpl in info['depends'].iteritems():
f.write(" %s -> %s [ label=\"%s\"];\n" % (
nodeMap[mod],
shorten(nodeMap[serviceMap[serviceImpl]]),
shorten(dep)))
f.write("}\n")
f.close()
sys.stderr.write("Now type\ndot -Tpdf -o %s.pdf %s.dot\n" % (
host, host))
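# Sketch of the generated dot output (module and service names below are
# hypothetical):
#
#   digraph Deps {
#     n0 [ label="n.f.core.FloodlightProvider\n\nService=IFloodlightProviderService", color="blue"];
#     n1 -> n0 [ label="IFloodlightProviderService"];
#   }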
|
Omegaphora/external_chromium_org
|
refs/heads/lp5.1
|
tools/traceline/traceline/scripts/split.py
|
186
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Splits a single json file (read from stdin) into separate files of 40k
records, named split.X.
"""
import sys
def main():
filecount = 0
count = 0
f = open('split.0', 'wb')
for l in sys.stdin:
if l == "},\r\n":
count += 1
if count == 40000:
f.write("}]);\r\n")
count = 0
filecount += 1
f = open('split.%d' % filecount, 'wb')
f.write("parseEvents([\r\n")
continue
f.write(l)
if __name__ == '__main__':
main()
|
jrief/django-cms
|
refs/heads/master
|
cms/extensions/__init__.py
|
82
|
from .models import PageExtension # nopyflakes
from .models import TitleExtension # nopyflakes
from .extension_pool import extension_pool # nopyflakes
from .admin import PageExtensionAdmin # nopyflakes
from .admin import TitleExtensionAdmin # nopyflakes
|
nicolargo/intellij-community
|
refs/heads/master
|
python/testData/addImport/newThirdPartyImportInBetween/main.after.py
|
75
|
import sys
import third_party
import a
print(sys, third_party, a)
|
vprime/puuuu
|
refs/heads/master
|
env/lib/python2.7/site-packages/south/db/firebird.py
|
93
|
# firebird
from __future__ import print_function
import datetime
from django.db import connection, models
from django.core.management.color import no_style
from django.db.utils import DatabaseError
from south.db import generic
from south.utils.py3 import string_types
class DatabaseOperations(generic.DatabaseOperations):
backend_name = 'firebird'
alter_string_set_type = 'ALTER %(column)s TYPE %(type)s'
alter_string_set_default = 'ALTER %(column)s SET DEFAULT %(default)s;'
alter_string_drop_null = ''
add_column_string = 'ALTER TABLE %s ADD %s;'
delete_column_string = 'ALTER TABLE %s DROP %s;'
rename_table_sql = ''
# Features
allows_combined_alters = False
has_booleans = False
def _fill_constraint_cache(self, db_name, table_name):
self._constraint_cache.setdefault(db_name, {})
self._constraint_cache[db_name][table_name] = {}
rows = self.execute("""
SELECT
rc.RDB$CONSTRAINT_NAME,
rc.RDB$CONSTRAINT_TYPE,
cc.RDB$TRIGGER_NAME
FROM rdb$relation_constraints rc
JOIN rdb$check_constraints cc
ON rc.rdb$constraint_name = cc.rdb$constraint_name
WHERE rc.rdb$constraint_type = 'NOT NULL'
AND rc.rdb$relation_name = '%s'
""" % table_name)
for constraint, kind, column in rows:
self._constraint_cache[db_name][table_name].setdefault(column, set())
self._constraint_cache[db_name][table_name][column].add((kind, constraint))
return
def _alter_column_set_null(self, table_name, column_name, is_null):
sql = """
UPDATE RDB$RELATION_FIELDS SET RDB$NULL_FLAG = %(null_flag)s
WHERE RDB$FIELD_NAME = '%(column)s'
AND RDB$RELATION_NAME = '%(table_name)s'
"""
null_flag = 'NULL' if is_null else '1'
return sql % {
'null_flag': null_flag,
'column': column_name.upper(),
'table_name': table_name.upper()
}
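# Illustrative rendering (table and column names hypothetical): for
# table_name='books', column_name='title', is_null=True the template
# above yields:
#   UPDATE RDB$RELATION_FIELDS SET RDB$NULL_FLAG = NULL
#   WHERE RDB$FIELD_NAME = 'TITLE'
#   AND RDB$RELATION_NAME = 'BOOKS'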
def _column_has_default(self, params):
sql = """
SELECT a.RDB$DEFAULT_VALUE
FROM RDB$RELATION_FIELDS a
WHERE a.RDB$FIELD_NAME = '%(column)s'
AND a.RDB$RELATION_NAME = '%(table_name)s'
"""
value = self.execute(sql % params)
return True if value else False
def _alter_set_defaults(self, field, name, params, sqls):
"Subcommand of alter_column that sets default values (overrideable)"
# Historically, we used to set defaults here.
# But since South 0.8, we don't ever set defaults on alter-column -- we only
# use database-level defaults as scaffolding when adding columns.
# However, we still sometimes need to remove defaults in alter-column.
if self._column_has_default(params):
sqls.append(('ALTER COLUMN %s DROP DEFAULT' % (self.quote_name(name),), []))
@generic.invalidate_table_constraints
def create_table(self, table_name, fields):
columns = []
autoinc_sql = ''
for field_name, field in fields:
# avoid default values in CREATE TABLE statements (#925)
field._suppress_default = True
col = self.column_sql(table_name, field_name, field)
if not col:
continue
columns.append(col)
if isinstance(field, models.AutoField):
field_name = field.db_column or field.column
autoinc_sql = connection.ops.autoinc_sql(table_name, field_name)
self.execute(self.create_table_sql % {
"table": self.quote_name(table_name),
"columns": ', '.join([col for col in columns if col]),
})
if autoinc_sql:
self.execute(autoinc_sql[0])
self.execute(autoinc_sql[1])
def rename_table(self, old_table_name, table_name):
"""
Renaming tables is not supported by Firebird.
It would involve recreating all related objects (stored procedures, views, triggers, etc.)
"""
pass
@generic.invalidate_table_constraints
def delete_table(self, table_name, cascade=False):
"""
Deletes the table 'table_name'.
Firebird will also delete any triggers associated with the table.
"""
super(DatabaseOperations, self).delete_table(table_name, cascade=False)
# Also, drop sequence if exists
sql = connection.ops.drop_sequence_sql(table_name)
if sql:
try:
self.execute(sql)
except:
pass
def column_sql(self, table_name, field_name, field, tablespace='', with_name=True, field_prepared=False):
"""
Creates the SQL snippet for a column. Used by add_column and add_table.
"""
# If the field hasn't already been told its attribute name, do so.
if not field_prepared:
field.set_attributes_from_name(field_name)
# hook for the field to do any resolution prior to its attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
# Possible hook to fiddle with the fields (e.g. defaults & TEXT on MySQL)
field = self._field_sanity(field)
try:
sql = field.db_type(connection=self._get_connection())
except TypeError:
sql = field.db_type()
if sql:
# Some callers, like the sqlite stuff, just want the extended type.
if with_name:
field_output = [self.quote_name(field.column), sql]
else:
field_output = [sql]
if field.primary_key:
field_output.append('NOT NULL PRIMARY KEY')
elif field.unique:
# Just use UNIQUE (no indexes any more, we have delete_unique)
field_output.append('UNIQUE')
sql = ' '.join(field_output)
sqlparams = ()
# if the field is "NOT NULL" and a default value is provided, create the column with it
# this allows the addition of a NOT NULL field to a table with existing rows
if not getattr(field, '_suppress_default', False):
if field.has_default():
default = field.get_default()
# If the default is actually None, don't add a default term
if default is not None:
# If the default is a callable, then call it!
if callable(default):
default = default()
# Now do some very cheap quoting. TODO: Redesign return values to avoid this.
if isinstance(default, string_types):
default = "'%s'" % default.replace("'", "''")
elif isinstance(default, (datetime.date, datetime.time, datetime.datetime)):
default = "'%s'" % default
elif isinstance(default, bool):
default = int(default)
# Escape any % signs in the output (bug #317)
if isinstance(default, string_types):
default = default.replace("%", "%%")
# Add it in
sql += " DEFAULT %s"
                    sqlparams = (default,)
elif (not field.null and field.blank) or (field.get_default() == ''):
if field.empty_strings_allowed and self._get_connection().features.interprets_empty_strings_as_nulls:
sql += " DEFAULT ''"
# Error here would be nice, but doesn't seem to play fair.
#else:
# raise ValueError("Attempting to add a non null column that isn't character based without an explicit default value.")
        # Firebird requires NOT NULL to come after the DEFAULT clause
if not field.primary_key and not field.null:
sql += ' NOT NULL'
if field.rel and self.supports_foreign_keys:
self.add_deferred_sql(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
# Things like the contrib.gis module fields have this in 1.1 and below
if hasattr(field, 'post_create_sql'):
for stmt in field.post_create_sql(no_style(), table_name):
self.add_deferred_sql(stmt)
# Avoid double index creation (#1317)
        # Firebird creates an index implicitly for each foreign key field
# sql_indexes_for_field tries to create an index for that field too
if not field.rel:
# In 1.2 and above, you have to ask the DatabaseCreation stuff for it.
# This also creates normal indexes in 1.1.
if hasattr(self._get_connection().creation, "sql_indexes_for_field"):
# Make a fake model to pass in, with only db_table
model = self.mock_model("FakeModelForGISCreation", table_name)
for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()):
self.add_deferred_sql(stmt)
if sql:
return sql % sqlparams
else:
return None
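    # Illustrative outcomes of the "cheap quoting" above (example values, not
    # from the source): a CharField(default="it's") renders as DEFAULT 'it''s',
    # a date default as DEFAULT '2014-01-01', and default=True as DEFAULT 1.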
def _drop_constraints(self, table_name, name, field):
if self.has_check_constraints:
check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
for constraint in check_constraints:
self.execute(self.delete_check_sql % {
'table': self.quote_name(table_name),
'constraint': self.quote_name(constraint),
})
# Drop or add UNIQUE constraint
unique_constraint = list(self._constraints_affecting_columns(table_name, [name], "UNIQUE"))
if field.unique and not unique_constraint:
self.create_unique(table_name, [name])
elif not field.unique and unique_constraint:
self.delete_unique(table_name, [name])
# Drop all foreign key constraints
try:
self.delete_foreign_key(table_name, name)
except ValueError:
# There weren't any
pass
@generic.invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
"""
Alters the given column name so it will match the given field.
Note that conversion between the two by the database must be possible.
        Will not automatically add _id by default; to have this behaviour, pass
explicit_name=False.
@param table_name: The name of the table to add the column to
@param name: The name of the column to alter
@param field: The new field definition to use
"""
if self.dry_run:
if self.debug:
print(' - no dry run output for alter_column() due to dynamic DDL, sorry')
return
        # hook for the field to do any resolution prior to its attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
# Add _id or whatever if we need to
field.set_attributes_from_name(name)
if not explicit_name:
name = field.column
else:
field.column = name
if not ignore_constraints:
# Drop all check constraints. Note that constraints will be added back
# with self.alter_string_set_type and self.alter_string_drop_null.
self._drop_constraints(table_name, name, field)
# First, change the type
params = {
"column": self.quote_name(name),
"type": self._db_type_for_alter_column(field),
"table_name": table_name
}
# SQLs is a list of (SQL, values) pairs.
sqls = []
sqls_extra = []
# Only alter the column if it has a type (Geometry ones sometimes don't)
if params["type"] is not None:
sqls.append((self.alter_string_set_type % params, []))
# Add any field- and backend- specific modifications
self._alter_add_column_mods(field, name, params, sqls)
        # Next, nullity. Modified from the generic backend: Firebird has no
        # DROP NOT NULL, so the null flag is updated separately (see sqls_extra).
sqls_extra.append(self._alter_column_set_null(table_name, name, field.null))
# Next, set any default
self._alter_set_defaults(field, name, params, sqls)
# Finally, actually change the column
if self.allows_combined_alters:
sqls, values = list(zip(*sqls))
self.execute(
"ALTER TABLE %s %s;" % (self.quote_name(table_name), ", ".join(sqls)),
generic.flatten(values),
)
else:
            # Databases such as MySQL don't like more than one alter at once.
for sql, values in sqls:
try:
self.execute("ALTER TABLE %s %s;" % (self.quote_name(table_name), sql), values)
except DatabaseError as e:
print(e)
        # Execute the extra SQL, which doesn't need an ALTER TABLE statement
for sql in sqls_extra:
self.execute(sql)
if not ignore_constraints:
# Add back FK constraints if needed
if field.rel and self.supports_foreign_keys:
self.execute(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
@generic.copy_column_constraints
@generic.delete_column_constraints
def rename_column(self, table_name, old, new):
if old == new:
# Short-circuit out
return []
self.execute('ALTER TABLE %s ALTER %s TO %s;' % (
self.quote_name(table_name),
self.quote_name(old),
self.quote_name(new),
))
|
umago/kabukiman
|
refs/heads/master
|
kabukiman/core/shortcut/__init__.py
|
4
|
#!/usr/bin/python
# coding: utf-8
# Copyright (C) 2010 Lucas Alvares Gomes <lucasagomes@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
|
epantry/django-sql-explorer
|
refs/heads/master
|
explorer/tests/test_schema.py
|
2
|
# -*- coding: utf-8 -*-
from unittest.mock import patch
from django.core.cache import cache
from django.db import connection
from django.test import TestCase
from explorer.app_settings import EXPLORER_DEFAULT_CONNECTION as CONN
from explorer import schema
class TestSchemaInfo(TestCase):
def setUp(self):
cache.clear()
@patch('explorer.schema._get_includes')
@patch('explorer.schema._get_excludes')
def test_schema_info_returns_valid_data(self, mocked_excludes,
mocked_includes):
mocked_includes.return_value = None
mocked_excludes.return_value = []
res = schema.schema_info(CONN)
assert mocked_includes.called # sanity check: ensure patch worked
tables = [x[0] for x in res]
self.assertIn('explorer_query', tables)
@patch('explorer.schema._get_includes')
@patch('explorer.schema._get_excludes')
def test_table_exclusion_list(self, mocked_excludes, mocked_includes):
mocked_includes.return_value = None
mocked_excludes.return_value = ('explorer_',)
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertNotIn('explorer_query', tables)
@patch('explorer.schema._get_includes')
@patch('explorer.schema._get_excludes')
def test_app_inclusion_list(self, mocked_excludes, mocked_includes):
mocked_includes.return_value = ('auth_',)
mocked_excludes.return_value = []
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertNotIn('explorer_query', tables)
self.assertIn('auth_user', tables)
@patch('explorer.schema._get_includes')
@patch('explorer.schema._get_excludes')
def test_app_inclusion_list_excluded(self, mocked_excludes,
mocked_includes):
# Inclusion list "wins"
mocked_includes.return_value = ('explorer_',)
mocked_excludes.return_value = ('explorer_',)
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertIn('explorer_query', tables)
@patch('explorer.schema._include_views')
def test_app_include_views(self, mocked_include_views):
database_view = setup_sample_database_view()
mocked_include_views.return_value = True
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertIn(database_view, tables)
@patch('explorer.schema._include_views')
def test_app_exclude_views(self, mocked_include_views):
database_view = setup_sample_database_view()
mocked_include_views.return_value = False
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertNotIn(database_view, tables)
@patch('explorer.schema.do_async')
def test_builds_async(self, mocked_async_check):
mocked_async_check.return_value = True
self.assertIsNone(schema.schema_info(CONN))
res = schema.schema_info(CONN)
tables = [x[0] for x in res]
self.assertIn('explorer_query', tables)
def setup_sample_database_view():
with connection.cursor() as cursor:
cursor.execute(
"CREATE VIEW IF NOT EXISTS v_explorer_query AS SELECT title, "
"sql from explorer_query"
)
return 'v_explorer_query'
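# Note on the patching pattern above (illustrative): schema.schema_info() reads
# the include/exclude lists on every call (the tests assert the mocks were
# called), so patching explorer.schema._get_includes / _get_excludes per test
# method is sufficient; the only shared state is the cache cleared in setUp().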
|
cpina/science-cruise-data-management
|
refs/heads/master
|
ScienceCruiseDataManagement/main/views.py
|
1
|
import glob
import json
import datetime
import geojson
import os
from django.conf import settings
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render
from django.utils import timezone
from django.views.generic import TemplateView, View, ListView
from django.core.exceptions import ObjectDoesNotExist
import main.import_gpx_to_stations
import main.models
from main import import_gpx_to_stations
from main.forms import InputShipDateTime, InputCoordinates, InputShipTimes
from main.models import Event, EventAction, Country, FilesStorage, FilesStorageGeneral, Port, Station,\
Message, SamplingMethod, ProposedStation, Leg, Depth, Sample, Person, ContactDetails
from ctd.models import CtdSampleVolume
from main import utils
from ship_data.models import GpggaGpsFix, GpvtgVelocity
import main.find_locations as find_locations
import subprocess
import main.utils_coordinates as utils_coordinates
from django.views.static import serve
from django.db.models import Sum
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
def calculate_km_travelled():
fp = open(settings.TRACK_MAP_FILEPATH)
g = geojson.load(fp)
previous = None
distance = 0
for item in g.get('coordinates'):
if previous is not None:
distance += utils_coordinates.calculate_distance((previous[1], previous[0]), (item[1], item[0]))
previous = item
return distance
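# Illustrative call for the helper used above (assumed signature: two
# (lat, lon) pairs, hence the index swap from GeoJSON's (lon, lat) ordering):
#   utils_coordinates.calculate_distance((46.52, 6.63), (46.95, 7.45))
# The caller's name suggests the accumulated distance is in kilometres.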
def people_in_leg(number):
return Person.objects.filter(leg=Leg.objects.get(number=number)).count()
class StatsView(TemplateView):
template_name = "stats.html"
def get_context_data(self, **kwargs):
context = super(StatsView, self).get_context_data(**kwargs)
context['number_of_samples'] = Sample.objects.all().count()
context['number_of_events'] = Event.objects.filter(outcome="success").count()
context['litres_of_ctd_water'] = int(CtdSampleVolume.objects.all().aggregate(Sum('volume'))['volume__sum'])
context['km_travelled'] = int(calculate_km_travelled())
context['people_leg1'] = people_in_leg(1)
context['people_leg2'] = people_in_leg(2)
context['people_leg3'] = people_in_leg(3)
context['people_all_legs'] = Person.objects.\
filter(leg=Leg.objects.get(number=1)).\
filter(leg=Leg.objects.get(number=2)).\
filter(leg=Leg.objects.get(number=3)).count()
context['terrestial_sites'] = 13
context['most_southerly_point'] = "-74.009 -127.475"
return context
class MainMenuView(TemplateView):
template_name = "main_menu.html"
def get_context_data(self, **kwargs):
context = super(MainMenuView, self).get_context_data(**kwargs)
        last_message = Message.objects.order_by('-date_time').first()
        if last_message is None:
            message = "No message has been introduced yet, come back later"
            date_time = "N/A"
            person = "Data management team"
            subject = "No message"
        else:
            message = last_message.message
            date_time = last_message.date_time
            person = last_message.person
            subject = last_message.subject
now = utils.now_with_timezone()
location = utils.latest_ship_position()
context['message'] = message
context['date_time'] = date_time
context['person'] = person
context['subject'] = subject
context['date'] = now.strftime("%a %d %B %Y")
context['time'] = now.strftime("%H:%M:%S")
context['julian_day'] = now.strftime("%j")
if location.latitude is not None:
context['position_latitude'] = "{0:.4f}".format(location.latitude)
context['position_longitude'] = "{0:.4f}".format(location.longitude)
context['position_date_time'] = location.date_time
else:
context['position_latitude'] = "Unknown"
context['position_longitude'] = "Unknown"
context['position_date_time'] = "Unknown"
speed = latest_ship_speed()
if speed is not None:
context['speed_kts'] = speed
else:
context['speed_kts'] = "Unknown"
depths = Depth.objects.filter(depth__gt=0).order_by('-date_time')
if depths.exists():
depth = depths[0].depth
time1 = utils.set_utc(datetime.datetime.now())
time2 = utils.set_utc(depths[0].date_time)
depth_updated_seconds_ago = (time1-time2).seconds
else:
depth = "Unknown"
depth_updated_seconds_ago = "Unknown"
context['depth'] = depth
context['depth_updated_seconds_ago'] = depth_updated_seconds_ago
return context
class AccessingDataView(TemplateView):
template_name = "accessing_data.html"
def get_context_data(self, **kwargs):
context = super(AccessingDataView, self).get_context_data(**kwargs)
return context
class MainMapView(TemplateView):
template_name = "main_map.html"
def get_context_data(self, **kwargs):
context = super(MainMapView, self).get_context_data(**kwargs)
return context
class InteractiveMapView(TemplateView):
template_name = "interactive_map.html"
def get_context_data(self, **kwargs):
context = super(InteractiveMapView, self).get_context_data(**kwargs)
return context
class TrackJson(View):
def get(self, request_):
track = open(settings.TRACK_MAP_FILEPATH, "r")
return JsonResponse(json.load(track))
class PositionsJson(View):
def get(self, request_):
        # Possible colors: black, blue, green, grey, orange, red, violet, yellow
tbegins = main.models.EventAction.tbegin()
tinstant = main.models.EventAction.tinstant()
features = []
for eventAction in EventAction.objects.filter(Q(type=tbegins) | Q(type=tinstant)):
if eventAction.longitude is None or eventAction.latitude is None:
continue
point = geojson.Point((eventAction.longitude, eventAction.latitude))
link = '<a href="/admin/main/eventaction/{}/change/">{}</a>'.format(eventAction.id, eventAction.event.number)
id_text = "Event: {}".format(link)
date_time = eventAction.time.strftime("%Y-%m-%d %H:%M")
features.append(
geojson.Feature(geometry=point, properties={'id': id_text,
'text': "{}<br>{}<br>({:.2f}, {:.2f})".format(eventAction.event.sampling_method.name, date_time, eventAction.latitude, eventAction.longitude),
'marker_color': 'blue'}))
for port in Port.objects.all():
if port.longitude is None or port.latitude is None:
continue
point = geojson.Point((port.longitude, port.latitude))
features.append(
geojson.Feature(geometry=point, properties={'id': 'Port.{}'.format(port.id),
'text': port.name,
'marker_color': 'yellow'}))
for proposedstation in ProposedStation.objects.all():
if proposedstation.longitude is None or proposedstation.latitude is None:
continue
point = geojson.Point((proposedstation.longitude, proposedstation.latitude))
features.append(
geojson.Feature(geometry=point, properties={'id': 'Planned station',
'text': "{}<br>{}<br>({:.2f}, {:.2f})".format(proposedstation.name, proposedstation.comment, proposedstation.latitude, proposedstation.longitude),
'marker_color': 'red'}))
for station in Station.objects.all():
if station.longitude is None or station.latitude is None:
continue
link = '<a href="/admin/main/station/{}/change/">{}</a>'.format(station.name, station.name)
id_text = "Station: {}".format(link)
if station.arrival_time is not None:
date_time = station.arrival_time.strftime("%Y-%m-%d %H:%M")
else:
date_time = "Unknown arrival datetime"
point = geojson.Point((station.longitude, station.latitude))
features.append(
geojson.Feature(geometry=point, properties={'id': '{}'.format(id_text),
'text': "Type: {}<br>{}<br>({:.2f}, {:.2f})".format(station.type, date_time, station.latitude, station.longitude),
'marker_color': 'green'}))
location = utils.latest_ship_position()
point = geojson.Point((location.longitude, location.latitude))
features.append(
geojson.Feature(geometry=point, properties={'id': 'ship',
'text': 'You are here',
'marker_color': 'orange'}))
return JsonResponse(geojson.FeatureCollection(features))
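    # Minimal shape of the FeatureCollection returned above (illustrative
    # coordinates; note GeoJSON order is [longitude, latitude]):
    #   {"type": "FeatureCollection", "features": [
    #     {"type": "Feature",
    #      "geometry": {"type": "Point", "coordinates": [6.63, 46.52]},
    #      "properties": {"id": "ship", "text": "You are here",
    #                     "marker_color": "orange"}}]}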
class LatestShipPosition(View):
# simple view with only latitude, longitude and last ship position
def get(self, request_):
location = utils.latest_ship_position()
information = {}
information['latitude'] = location.latitude
information['longitude'] = location.longitude
information['date_time'] = location.date_time
return JsonResponse(information)
# class PositionsJson(View):
# def get(self, request):
# # print("-----------", request.GET['newer_than'])
# features = []
# for position in Position.objects.order_by('number'):
# point = geojson.Point((position.longitude, position.latitude))
#
# text = position.text
# if text is None:
# text = ""
#
# features.append(
# geojson.Feature(geometry=point, properties={'id': position.id,
# 'number': position.number,
# 'text': text,
# 'type': position.position_type.name
# }))
#
# return JsonResponse(geojson.FeatureCollection(features))
#
# def post(self, request):
# decoded_data = request.body.decode('utf-8')
# json_data = json.loads(decoded_data)
#
# # new POI to be inserted
# poi = Position()
# poi.latitude = json_data['latitude']
# poi.longitude = json_data['longitude']
# poi.position_type = PositionType.objects.get(name='Event')
# poi.save()
#
# print("POST",poi)
#
# return JsonResponse({'id': poi.id, 'text': poi.text})
#
# def put(self, request):
# decoded_data = request.body.decode('utf-8')
# json_data = json.loads(decoded_data)
#
# poi = Position.objects.get(id=json_data['id'])
#
# if 'latitude' in json_data:
# poi.latitude = json_data['latitude']
#
# if 'longitude' in json_data:
# poi.longitude = json_data['longitude']
#
# if 'text' in json_data:
# poi.text = json_data['text']
#
# poi.save()
# print("PUT ",poi)
# response = JsonResponse({'id': poi.id, 'text': poi.text})
#
# return response
class CountryListView(ListView):
model = Country
def get_context_data(self, **kwargs):
context = super(CountryListView, self).get_context_data(**kwargs)
context['now'] = timezone.now()
return context
class EventListView(ListView):
model = Event
def get_context_data(self, **kwargs):
context = super(EventListView, self).get_context_data(**kwargs)
context['event_list'] = Event.objects.all()
print(Event.objects.all()[0])
return context
class FileStorageView(TemplateView):
template_name = "file_storage.html"
units = "GB"
def format_space_number(self, number):
if self.units == "GB":
conversion_from_kb = 1 / (1024 * 1024) # How many context['units'] in one KB
number *= conversion_from_kb
return "{0:.2f}".format(number)
        else:
            raise NotImplementedError("Unsupported units: %s" % self.units)
def get_context_data(self, **kwargs):
context = super(FileStorageView, self).get_context_data(**kwargs)
context['file_storages'] = FilesStorage.objects.all()
context['units'] = "GB"
detailed_storage = []
for storage in context['file_storages']:
detailed_storage.append({'relative_path': str(storage.relative_path), context['units']: self.format_space_number(storage.kilobytes)})
context['detailed_storage_json'] = json.dumps(detailed_storage)
last_general_storage = FilesStorageGeneral.objects.latest('time')
context['general_storage_free'] = self.format_space_number(last_general_storage.free)
context['general_storage_used'] = self.format_space_number(last_general_storage.used)
context['general_storage_size'] = self.format_space_number(last_general_storage.free + last_general_storage.used)
context['general_storage_json'] = json.dumps({'used': self.format_space_number(last_general_storage.used), 'free': self.format_space_number(last_general_storage.free)})
return context
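    # Worked example for format_space_number above (illustrative numbers):
    # 3145728 KB * 1/(1024*1024) = 3.0, rendered as the string "3.00".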
class DocumentsView(TemplateView):
template_name = "documents.html"
def get_context_data(self, **kwargs):
context = super(DocumentsView, self).get_context_data(**kwargs)
documents = []
directories = []
# Prepares a dictionary with the directory names as keys
for file in glob.glob(os.path.join(settings.DOCUMENTS_DIRECTORY, "*")):
if os.path.isdir(file):
directories.append(os.path.basename(file))
for directory in directories:
            for file in glob.glob(os.path.join(settings.DOCUMENTS_DIRECTORY, directory, "*")):
if os.path.isfile(file):
file_name = os.path.basename(file)
if file_name == "Thumbs.db":
continue
document = {}
document['title'] = file_name.split(".")[0]
document['link'] = os.path.join('/documents_storage/{}/{}'.format(directory, file_name))
document['topic'] = directory
documents.append(document)
context['documents'] = documents
context['topics'] = sorted(directories)
return context
class ImportPortsFromGpx(View):
def get(self, request, *args, **kwargs):
return render(request, "import_ports_from_gpx_form.html")
def post(self, request, *args, **kwargs):
file = request.FILES['gpxfile']
file_name = file.name
file_content = file.read().decode('utf-8')
(created, modified, skipped, reports) = import_gpx_to_stations.import_gpx_to_stations(file_content)
template_information = {
'created': created,
'modified': modified,
'skipped': skipped,
'reports': reports,
'file_name': file_name
}
return render(request, "import_ports_from_gpx_exec.html", template_information)
class CoordinatesConversion(TemplateView):
def get(self, request, *args, **kwargs):
form = InputCoordinates()
return render(request, "coordinates_conversion.html", {"form": form})
def post(self, request, *args, **kwargs):
coordinates = request.POST['coordinates']
form = InputCoordinates(initial={'coordinates': coordinates})
template_information = {}
template_information['form'] = form
utils_coordinates.process(coordinates, template_information)
return render(request, "coordinates_conversion_exec.html", template_information)
class PositionFromDateTime(TemplateView):
def get(self, request, *args, **kwargs):
form = InputShipDateTime(initial={'ship_date_time': timezone.now})
return render(request, "position_from_date_time.html", {'form': form})
def post(self, request, *args, **kwargs):
ship_date_time = request.POST['ship_date_time']
ship_date_times = request.POST['ship_date_times']
form = InputShipDateTime(initial={'ship_date_time': ship_date_time,
'ship_date_times': ship_date_times})
template_information = find_locations.find_locations(ship_date_time, ship_date_times)
template_information['form'] = form
return render(request, "position_from_date_time_exec.html", template_information)
class ShipTimeToUtc(TemplateView):
def get(self, request, *args, **kwargs):
form = InputShipTimes(initial={'ship_date_times': datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")})
return render(request, "ship_time_to_utc.html", {'form': form})
def post(self, request, *args, **kwargs):
ship_date_times = request.POST['ship_date_times']
form = InputShipTimes(initial={'ship_date_times': ship_date_times})
template_information = {}
template_information['times'] = ship_date_times_to_utc(ship_date_times)
template_information['form'] = form
return render(request, "ship_time_to_utc_exec.html", template_information)
class MailState(TemplateView):
def get(self, request, *args, **kwargs):
s = subprocess.Popen("mailq", stdout=subprocess.PIPE)
mails = s.stdout.read()
return render(request, "mail_state.html", {'mails': mails})
class LatestImage(View):
def get(self, request):
filepath = settings.IMAGE_RELOAD_FILEPATH
return serve(request, os.path.basename(filepath), os.path.dirname(filepath))
class ImageReloaderView(TemplateView):
def get(self, request, *args, **kwargs):
return render(request, "image_reloader.html")
def latest_ship_speed():
try:
gps = SamplingMethod.objects.get(name=settings.MAIN_GPS)
except ObjectDoesNotExist:
return None
velocities = GpvtgVelocity.objects.filter(device=gps).order_by('-date_time')
if velocities.exists():
speed = velocities[0]
return speed.ground_speed_kts
else:
return None
def ship_date_times_to_utc(ship_date_times):
output = []
for ship_date_time in ship_date_times.split("\n"):
ship_date_time = ship_date_time.strip()
try:
date_time = datetime.datetime.strptime(ship_date_time, "%Y-%m-%d %H:%M:%S")
message = ""
if date_time.date() == settings.DATE_TWO_DAYS.date():
message = "We had two days with the same date, unknown UTC"
elif date_time > datetime.datetime.now() + datetime.timedelta(days=1):
message = "Don't ask about the future..."
elif utils.set_utc(date_time) < Leg.objects.all().order_by("start_time")[0].start_time:
# This is an approximation - due to the timezones
message = "Don't ask about before the beginning of the voyage"
if message != "":
output.append({'ship_date_time': ship_date_time,
'utc_date_time': message,
'utc_julian_day': message
})
continue
ship_ahead_of_utc = main.models.TimeChange.objects.filter(Q(date_changed_utc__lte=date_time)).order_by('-date_changed_utc')
if len(ship_ahead_of_utc) > 0:
ship_ahead_of_utc_hours = int(ship_ahead_of_utc[0].difference_to_utc_after_change)
ahead_of_utc = datetime.timedelta(hours=ship_ahead_of_utc_hours)
date_time_utc = date_time - ahead_of_utc
            else:
                date_time_utc = "Unknown"
            if date_time_utc == "Unknown":
                utc_julian_day = "Unknown"
            else:
                utc_julian_day = date_time_utc.strftime("%j")
except ValueError:
date_time_utc = '<p style="color:red"><b>Date in invalid format</b></p>'
utc_julian_day = '<p style="color:red"><b>Date in invalid format</b></p>'
output.append({'ship_date_time': ship_date_time,
'utc_date_time': date_time_utc,
'utc_julian_day': utc_julian_day
})
return output
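# Illustrative run of ship_date_times_to_utc (assumed TimeChange data): if the
# latest TimeChange before 2017-01-15 12:00:00 recorded the ship 7 hours ahead
# of UTC, that entry comes back with utc_date_time 2017-01-15 05:00:00 and
# utc_julian_day "015".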
class ContactDetailsListView(ListView):
model = ContactDetails
template_name = "list_of_contacts.html"
def get_context_data(self, **kwargs):
context = super(ContactDetailsListView, self).get_context_data(**kwargs)
return context
|
Tejal011089/digitales_erpnext
|
refs/heads/develop
|
erpnext/accounts/report/accounts_payable/accounts_payable.py
|
18
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, nowdate, flt, cstr
from frappe import msgprint, _
from erpnext.accounts.report.accounts_receivable.accounts_receivable import get_ageing_data
def execute(filters=None):
if not filters: filters = {}
supplier_naming_by = frappe.db.get_value("Buying Settings", None, "supp_master_name")
columns = get_columns(supplier_naming_by)
entries = get_gl_entries(filters)
account_map = dict(((r.name, r) for r in frappe.db.sql("""select acc.name,
supp.supplier_name, supp.name as supplier
from `tabAccount` acc, `tabSupplier` supp
where acc.master_type="Supplier" and supp.name=acc.master_name""", as_dict=1)))
entries_after_report_date = [[gle.voucher_type, gle.voucher_no]
for gle in get_gl_entries(filters, before_report_date=False)]
account_supplier_type_map = get_account_supplier_type_map()
voucher_detail_map = get_voucher_details()
# Age of the invoice on this date
age_on = getdate(filters.get("report_date")) > getdate(nowdate()) \
and nowdate() or filters.get("report_date")
data = []
for gle in entries:
if cstr(gle.against_voucher) == gle.voucher_no or not gle.against_voucher \
or [gle.against_voucher_type, gle.against_voucher] in entries_after_report_date \
or (gle.against_voucher_type == "Purchase Order"):
voucher_details = voucher_detail_map.get(gle.voucher_type, {}).get(gle.voucher_no, {})
invoiced_amount = gle.credit > 0 and gle.credit or 0
outstanding_amount = get_outstanding_amount(gle,
filters.get("report_date") or nowdate())
if abs(flt(outstanding_amount)) > 0.01:
paid_amount = invoiced_amount - outstanding_amount
row = [gle.posting_date, gle.account, gle.voucher_type, gle.voucher_no,
voucher_details.get("due_date", ""), voucher_details.get("bill_no", ""),
voucher_details.get("bill_date", ""), invoiced_amount,
paid_amount, outstanding_amount]
# Ageing
if filters.get("ageing_based_on") == "Due Date":
ageing_based_on_date = voucher_details.get("due_date", "")
else:
ageing_based_on_date = gle.posting_date
row += get_ageing_data(age_on, ageing_based_on_date, outstanding_amount) + \
[account_map.get(gle.account, {}).get("supplier") or ""]
if supplier_naming_by == "Naming Series":
row += [account_map.get(gle.account, {}).get("supplier_name") or ""]
row += [account_supplier_type_map.get(gle.account), gle.remarks]
data.append(row)
for i in range(0, len(data)):
data[i].insert(4, """<a href="%s"><i class="icon icon-share" style="cursor: pointer;"></i></a>""" \
% ("/".join(["#Form", data[i][2], data[i][3]]),))
return columns, data
def get_columns(supplier_naming_by):
columns = [
_("Posting Date") + ":Date:80", _("Account") + ":Link/Account:150", _("Voucher Type") + "::110",
_("Voucher No") + "::120", "::30", _("Due Date") + ":Date:80", _("Bill No") + "::80", _("Bill Date") + ":Date:80",
_("Invoiced Amount") + ":Currency:100", _("Paid Amount") + ":Currency:100",
_("Outstanding Amount") + ":Currency:100", _("Age") + ":Int:50", "0-30:Currency:100",
"30-60:Currency:100", "60-90:Currency:100", _("90-Above") + ":Currency:100",
_("Supplier") + ":Link/Supplier:150"
]
if supplier_naming_by == "Naming Series":
columns += ["Supplier Name::110"]
columns += ["Supplier Type:Link/Supplier Type:120", "Remarks::150"]
return columns
def get_gl_entries(filters, before_report_date=True):
conditions, supplier_accounts = get_conditions(filters, before_report_date)
gl_entries = []
gl_entries = frappe.db.sql("""select * from `tabGL Entry`
where docstatus < 2 %s order by posting_date, account""" %
(conditions), tuple(supplier_accounts), as_dict=1)
return gl_entries
def get_conditions(filters, before_report_date=True):
conditions = ""
if filters.get("company"):
conditions += " and company='%s'" % filters["company"].replace("'", "\'")
supplier_accounts = []
if filters.get("account"):
supplier_accounts = [filters["account"]]
else:
supplier_accounts = frappe.db.sql_list("""select name from `tabAccount`
where ifnull(master_type, '') = 'Supplier' and docstatus < 2 %s""" %
conditions, filters)
if supplier_accounts:
conditions += " and account in (%s)" % (", ".join(['%s']*len(supplier_accounts)))
else:
msgprint(_("No Supplier Accounts found. Supplier Accounts are identified based on 'Master Type' value in account record."), raise_exception=1)
if filters.get("report_date"):
if before_report_date:
conditions += " and posting_date<='%s'" % filters["report_date"]
else:
conditions += " and posting_date>'%s'" % filters["report_date"]
return conditions, supplier_accounts
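# Illustrative expansion of the IN-clause built above: three supplier accounts
# produce " and account in (%s, %s, %s)", and the account names themselves are
# passed separately as query parameters by get_gl_entries().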
def get_account_supplier_type_map():
account_supplier_type_map = {}
for each in frappe.db.sql("""select acc.name, supp.supplier_type from `tabSupplier` supp,
`tabAccount` acc where supp.name = acc.master_name group by acc.name"""):
account_supplier_type_map[each[0]] = each[1]
return account_supplier_type_map
def get_voucher_details():
voucher_details = {}
for dt in ["Purchase Invoice", "Journal Voucher"]:
voucher_details.setdefault(dt, frappe._dict())
for t in frappe.db.sql("""select name, due_date, bill_no, bill_date
from `tab%s`""" % dt, as_dict=1):
voucher_details[dt].setdefault(t.name, t)
return voucher_details
def get_outstanding_amount(gle, report_date):
payment_amount = frappe.db.sql("""
select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
from `tabGL Entry`
where account = %s and posting_date <= %s and against_voucher_type = %s
and against_voucher = %s and name != %s""",
(gle.account, report_date, gle.voucher_type, gle.voucher_no, gle.name))[0][0]
outstanding_amount = flt(gle.credit) - flt(gle.debit) - flt(payment_amount)
return outstanding_amount
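# Illustrative numbers for get_outstanding_amount (not from the source): a GL
# entry with credit=1000 and debit=0, against which 400 of payments have been
# booked by the report date, is outstanding for 1000 - 0 - 400 = 600.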
|
ajoaoff/django
|
refs/heads/master
|
tests/postgres_tests/test_ranges.py
|
98
|
import datetime
import json
import unittest
from django import forms
from django.core import exceptions, serializers
from django.db import connection
from django.db.models import F
from django.test import TestCase, override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import RangeLookupsModel, RangesModel
try:
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django.contrib.postgres import fields as pg_fields, forms as pg_forms
from django.contrib.postgres.validators import (
RangeMaxValueValidator, RangeMinValueValidator,
)
except ImportError:
pass
def skipUnlessPG92(test):
try:
PG_VERSION = connection.pg_version
except AttributeError:
PG_VERSION = 0
if PG_VERSION < 90200:
return unittest.skip('PostgreSQL >= 9.2 required')(test)
return test
@skipUnlessPG92
class TestSaveLoad(TestCase):
def test_all_fields(self):
now = timezone.now()
instance = RangesModel(
ints=NumericRange(0, 10),
bigints=NumericRange(10, 20),
floats=NumericRange(20, 30),
timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now),
dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()),
)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(instance.ints, loaded.ints)
self.assertEqual(instance.bigints, loaded.bigints)
self.assertEqual(instance.floats, loaded.floats)
self.assertEqual(instance.timestamps, loaded.timestamps)
self.assertEqual(instance.dates, loaded.dates)
def test_range_object(self):
r = NumericRange(0, 10)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_tuple(self):
instance = RangesModel(ints=(0, 10))
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(NumericRange(0, 10), loaded.ints)
def test_range_object_boundaries(self):
r = NumericRange(0, 10, '[]')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
self.assertTrue(10 in loaded.floats)
def test_unbounded(self):
r = NumericRange(None, None, '()')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
def test_empty(self):
r = NumericRange(empty=True)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_null(self):
instance = RangesModel(ints=None)
instance.save()
loaded = RangesModel.objects.get()
self.assertIsNone(loaded.ints)
@skipUnlessPG92
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
RangesModel.objects.create(ints=NumericRange(0, 10)),
RangesModel.objects.create(ints=NumericRange(5, 15)),
RangesModel.objects.create(ints=NumericRange(None, 0)),
RangesModel.objects.create(ints=NumericRange(empty=True)),
RangesModel.objects.create(ints=None),
]
def test_exact(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__exact=NumericRange(0, 10)),
[self.objs[0]],
)
def test_isnull(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isnull=True),
[self.objs[4]],
)
def test_isempty(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isempty=True),
[self.objs[3]],
)
def test_contains(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=8),
[self.objs[0], self.objs[1]],
)
def test_contains_range(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=NumericRange(3, 8)),
[self.objs[0]],
)
def test_contained_by(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)),
[self.objs[0], self.objs[1], self.objs[3]],
)
def test_overlap(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)),
[self.objs[0], self.objs[1]],
)
def test_fully_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)),
[self.objs[2]],
)
def test_fully_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)),
[],
)
def test_not_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)),
[self.objs[1]],
)
def test_not_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)),
[self.objs[0], self.objs[2]],
)
def test_adjacent_to(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)),
[self.objs[1], self.objs[2]],
)
def test_startswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith=0),
[self.objs[0]],
)
def test_endswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__endswith=0),
[self.objs[2]],
)
def test_startswith_chaining(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith__gte=0),
[self.objs[0], self.objs[1]],
)
@skipUnlessPG92
class TestQueryingWithRanges(TestCase):
def test_date_range(self):
objs = [
RangeLookupsModel.objects.create(date='2015-01-01'),
RangeLookupsModel.objects.create(date='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_date_range_datetime_field(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01'),
RangeLookupsModel.objects.create(timestamp='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(timestamp__date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_datetime_range(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01T09:00:00'),
RangeLookupsModel.objects.create(timestamp='2015-05-05T17:00:00'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
timestamp__contained_by=DateTimeTZRange('2015-01-01T09:00', '2015-05-04T23:55')
),
[objs[0]],
)
def test_integer_range(self):
objs = [
RangeLookupsModel.objects.create(integer=5),
RangeLookupsModel.objects.create(integer=99),
RangeLookupsModel.objects.create(integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_biginteger_range(self):
objs = [
RangeLookupsModel.objects.create(big_integer=5),
RangeLookupsModel.objects.create(big_integer=99),
RangeLookupsModel.objects.create(big_integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(big_integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_float_range(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_f_ranges(self):
parent = RangesModel.objects.create(floats=NumericRange(0, 10))
objs = [
RangeLookupsModel.objects.create(float=5, parent=parent),
RangeLookupsModel.objects.create(float=99, parent=parent),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=F('parent__floats')),
[objs[0]]
)
def test_exclude(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)),
[objs[2]]
)
@skipUnlessPG92
class TestSerialization(TestCase):
test_data = (
'[{"fields": {"ints": "{\\"upper\\": \\"10\\", \\"lower\\": \\"0\\", '
'\\"bounds\\": \\"[)\\"}", "floats": "{\\"empty\\": true}", '
'"bigints": null, "timestamps": "{\\"upper\\": \\"2014-02-02T12:12:12+00:00\\", '
'\\"lower\\": \\"2014-01-01T00:00:00+00:00\\", \\"bounds\\": \\"[)\\"}", '
'"dates": "{\\"upper\\": \\"2014-02-02\\", \\"lower\\": \\"2014-01-01\\", \\"bounds\\": \\"[)\\"}" }, '
'"model": "postgres_tests.rangesmodel", "pk": null}]'
)
lower_date = datetime.date(2014, 1, 1)
upper_date = datetime.date(2014, 2, 2)
lower_dt = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
upper_dt = datetime.datetime(2014, 2, 2, 12, 12, 12, tzinfo=timezone.utc)
def test_dumping(self):
instance = RangesModel(ints=NumericRange(0, 10), floats=NumericRange(empty=True),
timestamps=DateTimeTZRange(self.lower_dt, self.upper_dt),
dates=DateRange(self.lower_date, self.upper_date))
data = serializers.serialize('json', [instance])
dumped = json.loads(data)
for field in ('ints', 'dates', 'timestamps'):
dumped[0]['fields'][field] = json.loads(dumped[0]['fields'][field])
check = json.loads(self.test_data)
for field in ('ints', 'dates', 'timestamps'):
check[0]['fields'][field] = json.loads(check[0]['fields'][field])
self.assertEqual(dumped, check)
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.ints, NumericRange(0, 10))
self.assertEqual(instance.floats, NumericRange(empty=True))
self.assertEqual(instance.bigints, None)
class TestValidators(PostgreSQLTestCase):
def test_max(self):
validator = RangeMaxValueValidator(5)
validator(NumericRange(0, 5))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely less than or equal to 5.')
self.assertEqual(cm.exception.code, 'max_value')
def test_min(self):
validator = RangeMinValueValidator(5)
validator(NumericRange(10, 15))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely greater than or equal to 5.')
self.assertEqual(cm.exception.code, 'min_value')
class TestFormField(PostgreSQLTestCase):
def test_valid_integer(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['1', '2'])
self.assertEqual(value, NumericRange(1, 2))
def test_valid_floats(self):
field = pg_forms.FloatRangeField()
value = field.clean(['1.12345', '2.001'])
self.assertEqual(value, NumericRange(1.12345, 2.001))
def test_valid_timestamps(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12'])
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(value, DateTimeTZRange(lower, upper))
def test_valid_dates(self):
field = pg_forms.DateRangeField()
value = field.clean(['01/01/2014', '02/02/2014'])
lower = datetime.date(2014, 1, 1)
upper = datetime.date(2014, 2, 2)
self.assertEqual(value, DateRange(lower, upper))
def test_using_split_datetime_widget(self):
class SplitDateTimeRangeField(pg_forms.DateTimeRangeField):
base_field = forms.SplitDateTimeField
class SplitForm(forms.Form):
field = SplitDateTimeRangeField()
form = SplitForm()
self.assertHTMLEqual(str(form), '''
<tr>
<th>
<label for="id_field_0">Field:</label>
</th>
<td>
<input id="id_field_0_0" name="field_0_0" type="text" />
<input id="id_field_0_1" name="field_0_1" type="text" />
<input id="id_field_1_0" name="field_1_0" type="text" />
<input id="id_field_1_1" name="field_1_1" type="text" />
</td>
</tr>
''')
form = SplitForm({
'field_0_0': '01/01/2014',
'field_0_1': '00:00:00',
'field_1_0': '02/02/2014',
'field_1_1': '12:12:12',
})
self.assertTrue(form.is_valid())
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper))
def test_none(self):
field = pg_forms.IntegerRangeField(required=False)
value = field.clean(['', ''])
self.assertEqual(value, None)
def test_rendering(self):
class RangeForm(forms.Form):
ints = pg_forms.IntegerRangeField()
self.assertHTMLEqual(str(RangeForm()), '''
<tr>
<th><label for="id_ints_0">Ints:</label></th>
<td>
<input id="id_ints_0" name="ints_0" type="number" />
<input id="id_ints_1" name="ints_1" type="number" />
</td>
</tr>
''')
def test_integer_lower_bound_higher(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['10', '2'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_integer_open(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['', '0'])
self.assertEqual(value, NumericRange(None, 0))
def test_integer_incorrect_data_type(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_integer_invalid_lower(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_invalid_upper(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_required(self):
field = pg_forms.IntegerRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean([1, ''])
self.assertEqual(value, NumericRange(1, None))
def test_float_lower_bound_higher(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.8', '1.6'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_float_open(self):
field = pg_forms.FloatRangeField()
value = field.clean(['', '3.1415926'])
self.assertEqual(value, NumericRange(None, 3.1415926))
def test_float_incorrect_data_type(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1.6')
self.assertEqual(cm.exception.messages[0], 'Enter two numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_float_invalid_lower(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '3.1415926'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_invalid_upper(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.61803399', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_required(self):
field = pg_forms.FloatRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1.61803399', ''])
self.assertEqual(value, NumericRange(1.61803399, None))
def test_date_lower_bound_higher(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', '1976-04-16'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_date_open(self):
field = pg_forms.DateRangeField()
value = field.clean(['', '2013-04-09'])
self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9)))
def test_date_incorrect_data_type(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.')
self.assertEqual(cm.exception.code, 'invalid')
def test_date_invalid_lower(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2013-04-09'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_invalid_upper(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_required(self):
field = pg_forms.DateRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1976-04-16', ''])
self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None))
def test_datetime_lower_bound_higher(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2006-10-25 14:59', '2006-10-25 14:58'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_datetime_open(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['', '2013-04-09 11:45'])
self.assertEqual(value, DateTimeTZRange(None, datetime.datetime(2013, 4, 9, 11, 45)))
def test_datetime_incorrect_data_type(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('2013-04-09 11:45')
self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.')
self.assertEqual(cm.exception.code, 'invalid')
def test_datetime_invalid_lower(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['45', '2013-04-09 11:45'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_invalid_upper(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09 11:45', 'sweet pickles'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_required(self):
field = pg_forms.DateTimeRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['2013-04-09 11:45', ''])
self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None))
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Johannesburg')
def test_datetime_prepare_value(self):
field = pg_forms.DateTimeRangeField()
value = field.prepare_value(
DateTimeTZRange(datetime.datetime(2015, 5, 22, 16, 6, 33, tzinfo=timezone.utc), None)
)
self.assertEqual(value, [datetime.datetime(2015, 5, 22, 18, 6, 33), None])
def test_model_field_formfield_integer(self):
model_field = pg_fields.IntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_biginteger(self):
model_field = pg_fields.BigIntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_float(self):
model_field = pg_fields.FloatRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.FloatRangeField)
def test_model_field_formfield_date(self):
model_field = pg_fields.DateRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateRangeField)
def test_model_field_formfield_datetime(self):
model_field = pg_fields.DateTimeRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateTimeRangeField)
class TestWidget(PostgreSQLTestCase):
def test_range_widget(self):
f = pg_forms.ranges.DateTimeRangeField()
self.assertHTMLEqual(
f.widget.render('datetimerange', ''),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
self.assertHTMLEqual(
f.widget.render('datetimerange', None),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
dt_range = DateTimeTZRange(
datetime.datetime(2006, 1, 10, 7, 30),
datetime.datetime(2006, 2, 12, 9, 50)
)
self.assertHTMLEqual(
f.widget.render('datetimerange', dt_range),
'<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00" />'
'<input type="text" name="datetimerange_1" value="2006-02-12 09:50:00" />'
)
|
fayf/pyload
|
refs/heads/stable
|
module/plugins/hoster/MultishareCz.py
|
12
|
# -*- coding: utf-8 -*-
import random
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class MultishareCz(SimpleHoster):
__name__ = "MultishareCz"
__type__ = "hoster"
__version__ = "0.42"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?multishare\.cz/stahnout/(?P<ID>\d+)'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """MultiShare.cz hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
SIZE_REPLACEMENTS = [(' ', '')]
CHECK_TRAFFIC = True
LEECH_HOSTER = True
INFO_PATTERN = ur'(?:<li>Název|Soubor): <strong>(?P<N>[^<]+)</strong><(?:/li><li|br)>Velikost: <strong>(?P<S>[^<]+)</strong>'
OFFLINE_PATTERN = ur'<h1>Stáhnout soubor</h1><p><strong>Požadovaný soubor neexistuje.</strong></p>'
def handle_free(self, pyfile):
self.download("http://www.multishare.cz/html/download_free.php", get={'ID': self.info['pattern']['ID']})
def handle_premium(self, pyfile):
self.download("http://www.multishare.cz/html/download_premium.php", get={'ID': self.info['pattern']['ID']})
def handle_multi(self, pyfile):
self.html = self.load('http://www.multishare.cz/html/mms_ajax.php', post={'link': pyfile.url})
self.check_info()
if not self.check_traffic_left():
self.fail(_("Not enough credit left to download file"))
self.download("http://dl%d.mms.multishare.cz/html/mms_process.php" % round(random.random() * 10000 * random.random()),
get={'u_ID' : self.acc_info['u_ID'],
'u_hash': self.acc_info['u_hash'],
'link' : pyfile.url},
disposition=True)
getInfo = create_getInfo(MultishareCz)
|
x1ah/Daily_scripts
|
refs/heads/master
|
Newtouch/atouch.py
|
2
|
#!/usr/bin/env python
# coding:utf-8
import os
import argparse
header_msg = {
'py': ("#!/usr/bin/env python\n"
"# coding:utf-8\n"),
'c': ("#include <stdio.h>\n\n"
"int main (void)\n"
"{\n return 0;\n}\n"),
'scm': ";;;\n",
"html": "<!DOCTYPE HTML>",
'm': '',
'sh': "#!/bin/bash\n",
'java': ''
}
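# Example of the mapping above: "atouch foo.py" seeds the new file with the
# two-line Python header, while an extension missing from header_msg (say
# "notes.txt") still creates the file, just left empty.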
class ArgsParser:
"""
    Parse the command-line arguments.
"""
@staticmethod
def args_parser():
parser = argparse.ArgumentParser(
description="""
            A simple script that adds a header message when creating a new file.
"""
)
parser.add_argument('newfile')
args = parser.parse_args()
return args
class Atouch:
"""
    Usage examples:
$ chmod +x atouch.py
$ ./atouch.py -h
$ ./atouch.py foo.py
$ ./atouch.py bar.c
"""
    def is_file_exists(self, file_path):
        return os.path.exists(file_path)
    def write(self, newfile, mode='w'):
        newfile_attrs = newfile.split('.')  # the extension gives the file type
        with open(newfile, mode) as nf:
            if newfile_attrs[-1] in header_msg:
                nf.write(header_msg[newfile_attrs[-1]])
def main(self, args):
file_path = os.path.join(os.path.dirname(__file__), args.newfile)
if self.is_file_exists(file_path):
print('\t{0} already exists...'.format(args.newfile))
elif file_path.endswith("/"):
print("No such directory")
else:
self.write(args.newfile)
if __name__ == '__main__':
args = ArgsParser.args_parser()
atouch = Atouch()
atouch.main(args)
|
singlerider/cloudbrain
|
refs/heads/master
|
cloudbrain/publishers/PipePublisher.py
|
6
|
import json
import os
import os.path
import stat
import sys
from cloudbrain.publishers.PublisherInterface import Publisher
from threading import Lock
class PipePublisher(Publisher):
"""
Publisher implementation for writing data to pipe
"""
PIPE_WRITING_LOCKS = dict()
def __init__(self, device_name, device_id, metric_name, pipe_name=None):
super(PipePublisher, self).__init__(device_name, device_id, None)
self.metric_name = metric_name
self.pipe_name = pipe_name
    def get_lock(self):
        # dict.setdefault is effectively atomic under CPython, which avoids
        # the check-then-set race between threads creating the first
        # publisher for the same pipe.
        return PipePublisher.PIPE_WRITING_LOCKS.setdefault(self.pipe_name, Lock())
def lock(self):
lock = self.get_lock()
lock.acquire(True)
def unlock(self):
lock = self.get_lock()
lock.release()
def publish(self, buffer_content):
key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
out = {"key": key, 'body': buffer_content}
to_write = json.dumps(out)
self.lock()
self.pipe.write(to_write)
self.pipe.write("\n")
self.pipe.flush()
self.unlock()
def connect(self):
self.lock()
if self.pipe_name is None:
self.pipe = sys.stdout
else:
if os.path.exists(self.pipe_name) and not stat.S_ISFIFO(os.stat(self.pipe_name).st_mode):
raise Exception("File '%s' exists and is not a named pipe." % self.pipe_name)
elif not os.path.exists(self.pipe_name):
os.mkfifo(self.pipe_name)
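            # Note: opening a FIFO for writing blocks until a reader opens
            # the other end (POSIX blocking-open semantics).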
self.pipe = open(self.pipe_name, 'a')
self.unlock()
def disconnect(self):
if self.pipe_name is not None:
self.pipe.close()
os.remove(self.pipe_name)
|
maurofaccenda/ansible
|
refs/heads/devel
|
lib/ansible/modules/notification/twilio.py
|
36
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Makai <matthew.makai@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
version_added: "1.6"
module: twilio
short_description: Sends a text message to a mobile phone through Twilio.
description:
- Sends a text message to a phone number through the Twilio messaging API.
notes:
  - This module is non-idempotent because it sends a message through an
    external API. It is idempotent only in the case that the module fails.
- Like the other notification modules, this one requires an external
dependency to work. In this case, you'll need a Twilio account with
a purchased or verified phone number to send the text message.
options:
account_sid:
description:
      user's Twilio account SID found on the account page
required: true
auth_token:
description: user's Twilio authentication token
required: true
msg:
description:
the body of the text message
required: true
to_number:
description:
one or more phone numbers to send the text message to,
format +15551112222
required: true
from_number:
description:
the Twilio number to send the text message from, format +15551112222
required: true
media_url:
description:
a URL with a picture, video or sound clip to send with an MMS
(multimedia message) instead of a plain SMS
required: false
author: "Matt Makai (@makaimc)"
'''
EXAMPLES = '''
# send an SMS about the build status to (555) 303 5681
# note: replace account_sid and auth_token values with your credentials
# and make sure the 'from_number' is registered to your Twilio account
- twilio:
msg: All servers with webserver role are now configured.
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15552014545
to_number: +15553035681
delegate_to: localhost
# send an SMS to multiple phone numbers about the deployment
# note: replace account_sid and auth_token values with your credentials
# and make sure the 'from_number' is registered to your Twilio account
- twilio:
msg: This server configuration is now complete.
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15553258899
to_number:
- +15551113232
- +12025551235
- +19735559010
delegate_to: localhost
# send an MMS to a single recipient with an update on the deployment
# and an image of the results
# note: replace account_sid and auth_token values with your credentials
# and make sure the 'from_number' is registered to your Twilio account
- twilio:
msg: Deployment complete!
account_sid: ACXXXXXXXXXXXXXXXXX
auth_token: ACXXXXXXXXXXXXXXXXX
from_number: +15552014545
to_number: +15553035681
media_url: https://demo.twilio.com/logo.png
delegate_to: localhost
'''
# =======================================
# twilio module support methods
#
import urllib
def post_twilio_api(module, account_sid, auth_token, msg, from_number,
to_number, media_url=None):
URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
% (account_sid,)
AGENT = "Ansible"
data = {'From':from_number, 'To':to_number, 'Body':msg}
if media_url:
data['MediaUrl'] = media_url
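    # The Twilio Messages API expects a form-encoded POST body with 'From',
    # 'To' and 'Body' fields, plus 'MediaUrl' when sending an MMS.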
encoded_data = urllib.urlencode(data)
headers = {'User-Agent': AGENT,
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json',
}
# Hack module params to have the Basic auth params that fetch_url expects
module.params['url_username'] = account_sid.replace('\n', '')
module.params['url_password'] = auth_token.replace('\n', '')
return fetch_url(module, URI, data=encoded_data, headers=headers)
# =======================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_sid=dict(required=True),
auth_token=dict(required=True, no_log=True),
msg=dict(required=True),
from_number=dict(required=True),
to_number=dict(required=True),
media_url=dict(default=None, required=False),
),
supports_check_mode=True
)
account_sid = module.params['account_sid']
auth_token = module.params['auth_token']
msg = module.params['msg']
from_number = module.params['from_number']
to_number = module.params['to_number']
media_url = module.params['media_url']
if not isinstance(to_number, list):
to_number = [to_number]
for number in to_number:
r, info = post_twilio_api(module, account_sid, auth_token, msg,
from_number, number, media_url)
if info['status'] not in [200, 201]:
body_message = "unknown error"
if 'body' in info:
body = json.loads(info['body'])
body_message = body['message']
module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
module.exit_json(msg=msg, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
pedrobaeza/odoo
|
refs/heads/master
|
addons/product_extended/product_extended.py
|
59
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
class product_product(osv.osv):
_name = 'product.product'
_inherit = 'product.product'
def compute_price(self, cr, uid, ids, recursive=False, test=False, real_time_accounting = False, context=None):
'''
        Compute the BoM-based cost price of the given product ids.
        When test is True, nothing is written; instead testdict, mapping
        product ids to the computed prices, is returned to inform the user
        about the changes that would be made.
'''
testdict = {}
for prod_id in ids:
bom_obj = self.pool.get('mrp.bom')
bom_ids = bom_obj.search(cr, uid, [('product_id','=', prod_id), ('bom_line_ids', '!=', False)], context=context)
if bom_ids:
bom_id = bom_ids[0]
# In recursive mode, it will first compute the prices of child boms
if recursive:
#Search the products that are components of this bom of prod_id
boms = bom_obj.search(cr, uid, [('bom_id', '=', bom_id)], context=context)
#Call compute_price on these subproducts
prod_set = set([x.product_id.id for x in bom_obj.browse(cr, uid, boms, context=context)])
res = self.compute_price(cr, uid, list(prod_set), recursive=recursive, test=test, real_time_accounting = real_time_accounting, context=context)
if test:
testdict.update(res)
#Use calc price to calculate and put the price on the product of the BoM if necessary
price = self._calc_price(cr, uid, bom_obj.browse(cr, uid, bom_id, context=context), test=test, real_time_accounting = real_time_accounting, context=context)
if test:
testdict.update({prod_id : price})
if test:
return testdict
else:
return True
def _calc_price(self, cr, uid, bom, test = False, real_time_accounting=False, context=None):
if context is None:
context={}
price = 0
uom_obj = self.pool.get("product.uom")
if bom.bom_line_ids:
for sbom in bom.bom_line_ids:
my_qty = sbom.bom_line_ids and 1.0 or sbom.product_qty
price += uom_obj._compute_price(cr, uid, sbom.product_id.uom_id.id, sbom.product_id.standard_price, sbom.product_uom.id) * my_qty
if bom.routing_id:
for wline in bom.routing_id.workcenter_lines:
wc = wline.workcenter_id
cycle = wline.cycle_nbr
hour = (wc.time_start + wc.time_stop + cycle * wc.time_cycle) * (wc.time_efficiency or 1.0)
price += wc.costs_cycle * cycle + wc.costs_hour * hour
price = self.pool.get('product.uom')._compute_price(cr,uid,bom.product_uom.id, price, bom.product_id.uom_id.id)
#Convert on product UoM quantities
if price > 0:
price = uom_obj._compute_price(cr, uid, bom.product_uom.id, price / bom.product_qty, bom.product_id.uom_id.id)
product = self.pool.get("product.product").browse(cr, uid, bom.product_id.id, context=context)
if not test:
if (product.valuation != "real_time" or not real_time_accounting):
self.write(cr, uid, [bom.product_id.id], {'standard_price' : price}, context=context)
else:
#Call wizard function here
wizard_obj = self.pool.get("stock.change.standard.price")
ctx = context.copy()
ctx.update({'active_id': bom.product_id.id})
wiz_id = wizard_obj.create(cr, uid, {'new_price': price}, context=ctx)
wizard_obj.change_price(cr, uid, [wiz_id], context=ctx)
return price
product_product()
class product_bom(osv.osv):
_inherit = 'mrp.bom'
_columns = {
'standard_price': fields.related('product_tmpl_id','standard_price',type="float",relation="product.product",string="Standard Price",store=False)
}
product_bom()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
manaris/jythonMusic
|
refs/heads/master
|
24. oscOut.py
|
1
|
# oscOut.py
from osc import *
oscOut = OscOut("localhost", 57110)
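# Port 57110 is the default for SuperCollider's scsynth server; any
# OSC-aware receiver listening on localhost would work here.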
oscOut.sendMessage("/helloWorld")
oscOut.sendMessage("/test", 1, 2, 3)
oscOut.sendMessage("/itsFullOfStars", 1, 2.35, "wow!", True)
|
shanot/imp
|
refs/heads/develop
|
modules/domino/test/test_order.py
|
2
|
from __future__ import print_function
import IMP
import IMP.test
import IMP.domino
import IMP.core
class Tests(IMP.test.TestCase):
def _print_order(self, order, s):
for i in order:
print(s[order[i]].get_name(), end=' ')
def _test_global_min2(self):
"""Testing ordering"""
m = IMP.Model()
ps = []
pst = IMP.domino.ParticleStatesTable()
state = IMP.domino.IndexStates(10)
for i in range(0, 10):
ps.append(IMP.Particle(m))
pst.set_particle_states(ps[-1], state)
eqft = IMP.domino.EquivalenceSubsetFilterTable(pst)
ps.sort()
s = IMP.domino.Subset(ps)
order = IMP.domino.get_order(IMP.domino.Subset(ps),
[eqft])
print("order is", order)
print(s)
self._print_order(order, s)
print()
for i, oi in enumerate(order):
self.assertEqual(i, oi)
def test_global_min4(self):
"""Testing enumeration with ordering"""
m = IMP.Model()
ps = []
IMP.set_log_level(IMP.SILENT)
pst = IMP.domino.ParticleStatesTable()
state = IMP.domino.IndexStates(8)
for i in range(0, 5):
ps.append(IMP.Particle(m))
pst.set_particle_states(ps[-1], state)
eqft = IMP.domino.EquivalenceSubsetFilterTable(pst)
exft = IMP.domino.ExclusionSubsetFilterTable(pst)
ps.sort()
s = IMP.domino.Subset(ps)
sst = IMP.domino.BranchAndBoundAssignmentsTable(pst, [eqft, exft])
sst.set_log_level(IMP.VERBOSE)
pss = IMP.domino.PackedAssignmentContainer()
sst.load_assignments(s, pss)
ss = pss.get_assignments((0, pss.get_number_of_assignments()))
print(ss)
self.assertEqual(len(ss), 56)
def test_global_min5(self):
"""Testing enumeration with equiv excl ordering"""
m = IMP.Model()
ps = []
IMP.set_log_level(IMP.SILENT)
pst = IMP.domino.ParticleStatesTable()
state = IMP.domino.IndexStates(11)
for i in range(0, 10):
ps.append(IMP.Particle(m))
pst.set_particle_states(ps[-1], state)
eqft = IMP.domino.EquivalenceAndExclusionSubsetFilterTable(pst)
ps.sort()
s = IMP.domino.Subset(ps)
sst = IMP.domino.BranchAndBoundAssignmentsTable(pst, [eqft])
sst.set_log_level(IMP.VERBOSE)
pss = IMP.domino.PackedAssignmentContainer()
sst.load_assignments(s, pss)
ss = pss.get_assignments((0, pss.get_number_of_assignments()))
print(ss)
self.assertEqual(len(ss), 11)
def _test_global_min3(self):
"""Testing ordering with other node"""
m = IMP.Model()
ps = []
pst = IMP.domino.ParticleStatesTable()
state = IMP.domino.IndexStates(10)
for i in range(0, 4):
ps.append(IMP.Particle(m))
pst.set_particle_states(ps[-1], state)
statep = IMP.domino.IndexStates(8)
pp = IMP.Particle(m)
pst.set_particle_states(pp, statep)
eqft = IMP.domino.EquivalenceSubsetFilterTable(pst)
ps.sort()
s = IMP.domino.Subset(ps + [pp])
order = IMP.domino.get_order(s,
[eqft])
print("order is", order)
self._print_order(order, s)
for i, e in enumerate(order):
if i < 4:
self.assertEqual(s[order[i]], ps[i])
else:
pass
if __name__ == '__main__':
IMP.test.main()
|
BeATz-UnKNoWN/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/build/lib/gdata/base/service.py
|
166
|
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GBaseService extends the GDataService to streamline Google Base operations.
GBaseService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import urllib
import gdata
import atom.service
import gdata.service
import gdata.base
import atom
# URL to which all batch requests are sent.
BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch'
class Error(Exception):
pass
class RequestError(Error):
pass
class GBaseService(gdata.service.GDataService):
"""Client for the Google Base service."""
def __init__(self, email=None, password=None, source=None,
server='base.google.com', api_key=None, additional_headers=None,
handler=None, **kwargs):
"""Creates a client for the Google Base service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'base.google.com'.
api_key: string (optional) The Google Base API key to use.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='gbase', source=source,
server=server, additional_headers=additional_headers, handler=handler,
**kwargs)
self.api_key = api_key
def _SetAPIKey(self, api_key):
if not isinstance(self.additional_headers, dict):
self.additional_headers = {}
self.additional_headers['X-Google-Key'] = api_key
def __SetAPIKey(self, api_key):
self._SetAPIKey(api_key)
def _GetAPIKey(self):
if 'X-Google-Key' not in self.additional_headers:
return None
else:
return self.additional_headers['X-Google-Key']
def __GetAPIKey(self):
return self._GetAPIKey()
api_key = property(__GetAPIKey, __SetAPIKey,
doc="""Get or set the API key to be included in all requests.""")
def Query(self, uri, converter=None):
"""Performs a style query and returns a resulting feed or entry.
Args:
      uri: string The full URI which will be queried. Examples include
'/base/feeds/snippets?bq=digital+camera',
'http://www.google.com/base/feeds/snippets?bq=digital+camera'
'/base/feeds/items'
I recommend creating a URI using a query class.
converter: func (optional) A function which will be executed on the
server's response. Examples include GBaseItemFromString, etc.
Returns:
      If converter was specified, returns the results of calling converter on
      the server's response. If converter was not specified and the result
      was an Atom Entry, returns a GBaseItem. Otherwise, returns the result
      of calling gdata.service's Get method.
"""
result = self.Get(uri, converter=converter)
if converter:
return result
elif isinstance(result, atom.Entry):
return gdata.base.GBaseItemFromString(result.ToString())
return result
def QuerySnippetsFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseSnippetFeedFromString)
def QueryItemsFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemFeedFromString)
def QueryAttributesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseAttributesFeedFromString)
def QueryItemTypesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemTypesFeedFromString)
def QueryLocalesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseLocalesFeedFromString)
def GetItem(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemFromString)
def GetSnippet(self, uri):
return self.Get(uri, converter=gdata.base.GBaseSnippetFromString)
def GetAttribute(self, uri):
return self.Get(uri, converter=gdata.base.GBaseAttributeEntryFromString)
def GetItemType(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemTypeEntryFromString)
def GetLocale(self, uri):
return self.Get(uri, converter=gdata.base.GDataEntryFromString)
def InsertItem(self, new_item, url_params=None, escape_params=True,
converter=None):
"""Adds an item to Google Base.
Args:
new_item: atom.Entry or subclass A new item which is to be added to
Google Base.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
GBaseItemFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a GBaseItem.
"""
response = self.Post(new_item, '/base/feeds/items', url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return gdata.base.GBaseItemFromString(response.ToString())
return response
def DeleteItem(self, item_id, url_params=None, escape_params=True):
"""Removes an item with the specified ID from Google Base.
Args:
item_id: string The ID of the item to be deleted. Example:
'http://www.google.com/base/feeds/items/13185446517496042648'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
True if the delete succeeded.
"""
return self.Delete('%s' % (item_id[len('http://www.google.com'):],),
url_params=url_params, escape_params=escape_params)
def UpdateItem(self, item_id, updated_item, url_params=None,
escape_params=True,
converter=gdata.base.GBaseItemFromString):
"""Updates an existing item.
Args:
item_id: string The ID of the item to be updated. Example:
'http://www.google.com/base/feeds/items/13185446517496042648'
updated_item: atom.Entry, subclass, or string, containing
the Atom Entry which will replace the base item which is
stored at the item_id.
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
GBaseItemFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a GBaseItem.
"""
response = self.Put(updated_item,
item_id, url_params=url_params, escape_params=escape_params,
converter=converter)
if not converter and isinstance(response, atom.Entry):
return gdata.base.GBaseItemFromString(response.ToString())
return response
def ExecuteBatch(self, batch_feed,
converter=gdata.base.GBaseItemFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which
contain the desired CRUD operation and any necessary entry data.
converter: Function (optional) Function to be executed on the server's
response. This function should take one string as a parameter. The
default value is GBaseItemFeedFromString which will turn the result
into a gdata.base.GBaseItem object.
Returns:
A gdata.BatchFeed containing the results.
"""
return self.Post(batch_feed, BASE_BATCH_URL, converter=converter)
class BaseQuery(gdata.service.Query):
def _GetBaseQuery(self):
return self['bq']
def _SetBaseQuery(self, base_query):
self['bq'] = base_query
bq = property(_GetBaseQuery, _SetBaseQuery,
doc="""The bq query parameter""")
|
TalShafir/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_policy.py
|
8
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_policy import Parameters
from library.modules.bigip_policy import ModuleManager
from library.modules.bigip_policy import SimpleManager
from library.modules.bigip_policy import ComplexManager
from library.modules.bigip_policy import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_policy import Parameters
from ansible.modules.network.f5.bigip_policy import ModuleManager
from ansible.modules.network.f5.bigip_policy import SimpleManager
from ansible.modules.network.f5.bigip_policy import ComplexManager
from ansible.modules.network.f5.bigip_policy import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters_none_strategy(self):
args = dict(
name='foo',
description='asdf asdf asdf',
password='password',
server='localhost',
user='admin'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy is None
def test_module_parameters_with_strategy_no_partition(self):
args = dict(
name='foo',
description='asdf asdf asdf',
password='password',
server='localhost',
strategy='foo',
user='admin',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy == '/Common/foo'
def test_module_parameters_with_strategy_partition(self):
args = dict(
name='foo',
description='asdf asdf asdf',
password='password',
server='localhost',
strategy='/Common/foo',
user='admin',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy == '/Common/foo'
def test_module_parameters_with_strategy_different_partition(self):
args = dict(
name='foo',
description='asdf asdf asdf',
password='password',
server='localhost',
strategy='/Foo/bar',
user='admin',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy == '/Foo/bar'
def test_api_parameters(self):
args = dict(
name='foo',
description='asdf asdf asdf',
strategy='/Common/asdf'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy == '/Common/asdf'
class TestSimpleTrafficPolicyManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_policy(self, *args):
set_module_args(dict(
name="Policy-Foo",
state='present',
strategy='best',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = SimpleManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=True)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
|
ryano144/intellij-community
|
refs/heads/master
|
python/lib/Lib/distutils/tests/test_dist.py
|
88
|
"""Tests for distutils.dist."""
import distutils.cmd
import distutils.dist
import os
import shutil
import StringIO
import sys
import tempfile
import unittest
from test.test_support import TESTFN
class test_dist(distutils.cmd.Command):
"""Sample distutils extension command."""
user_options = [
("sample-option=", "S", "help text"),
]
def initialize_options(self):
self.sample_option = None
class TestDistribution(distutils.dist.Distribution):
"""Distribution subclasses that avoids the default search for
configuration files.
The ._config_files attribute must be set before
.parse_config_files() is called.
"""
def find_config_files(self):
return self._config_files
class DistributionTestCase(unittest.TestCase):
def setUp(self):
self.argv = sys.argv[:]
del sys.argv[1:]
def tearDown(self):
sys.argv[:] = self.argv
def create_distribution(self, configfiles=()):
d = TestDistribution()
d._config_files = configfiles
d.parse_config_files()
d.parse_command_line()
return d
def test_command_packages_unspecified(self):
sys.argv.append("build")
d = self.create_distribution()
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_command_packages_cmdline(self):
sys.argv.extend(["--command-packages",
"foo.bar,distutils.tests",
"test_dist",
"-Ssometext",
])
d = self.create_distribution()
# let's actually try to load our test command:
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "distutils.tests"])
cmd = d.get_command_obj("test_dist")
self.assert_(isinstance(cmd, test_dist))
self.assertEqual(cmd.sample_option, "sometext")
def test_command_packages_configfile(self):
sys.argv.append("build")
f = open(TESTFN, "w")
try:
print >>f, "[global]"
print >>f, "command_packages = foo.bar, splat"
f.close()
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "splat"])
# ensure command line overrides config:
sys.argv[1:] = ["--command-packages", "spork", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "spork"])
# Setting --command-packages to '' should cause the default to
# be used even if a config file specified something else:
sys.argv[1:] = ["--command-packages", "", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(), ["distutils.command"])
finally:
os.unlink(TESTFN)
class MetadataTestCase(unittest.TestCase):
def test_simple_metadata(self):
attrs = {"name": "package",
"version": "1.0"}
dist = distutils.dist.Distribution(attrs)
meta = self.format_metadata(dist)
self.assert_("Metadata-Version: 1.0" in meta)
self.assert_("provides:" not in meta.lower())
self.assert_("requires:" not in meta.lower())
self.assert_("obsoletes:" not in meta.lower())
def test_provides(self):
attrs = {"name": "package",
"version": "1.0",
"provides": ["package", "package.sub"]}
dist = distutils.dist.Distribution(attrs)
self.assertEqual(dist.metadata.get_provides(),
["package", "package.sub"])
self.assertEqual(dist.get_provides(),
["package", "package.sub"])
meta = self.format_metadata(dist)
self.assert_("Metadata-Version: 1.1" in meta)
self.assert_("requires:" not in meta.lower())
self.assert_("obsoletes:" not in meta.lower())
def test_provides_illegal(self):
self.assertRaises(ValueError,
distutils.dist.Distribution,
{"name": "package",
"version": "1.0",
"provides": ["my.pkg (splat)"]})
def test_requires(self):
attrs = {"name": "package",
"version": "1.0",
"requires": ["other", "another (==1.0)"]}
dist = distutils.dist.Distribution(attrs)
self.assertEqual(dist.metadata.get_requires(),
["other", "another (==1.0)"])
self.assertEqual(dist.get_requires(),
["other", "another (==1.0)"])
meta = self.format_metadata(dist)
self.assert_("Metadata-Version: 1.1" in meta)
self.assert_("provides:" not in meta.lower())
self.assert_("Requires: other" in meta)
self.assert_("Requires: another (==1.0)" in meta)
self.assert_("obsoletes:" not in meta.lower())
def test_requires_illegal(self):
self.assertRaises(ValueError,
distutils.dist.Distribution,
{"name": "package",
"version": "1.0",
"requires": ["my.pkg (splat)"]})
def test_obsoletes(self):
attrs = {"name": "package",
"version": "1.0",
"obsoletes": ["other", "another (<1.0)"]}
dist = distutils.dist.Distribution(attrs)
self.assertEqual(dist.metadata.get_obsoletes(),
["other", "another (<1.0)"])
self.assertEqual(dist.get_obsoletes(),
["other", "another (<1.0)"])
meta = self.format_metadata(dist)
self.assert_("Metadata-Version: 1.1" in meta)
self.assert_("provides:" not in meta.lower())
self.assert_("requires:" not in meta.lower())
self.assert_("Obsoletes: other" in meta)
self.assert_("Obsoletes: another (<1.0)" in meta)
def test_obsoletes_illegal(self):
self.assertRaises(ValueError,
distutils.dist.Distribution,
{"name": "package",
"version": "1.0",
"obsoletes": ["my.pkg (splat)"]})
def format_metadata(self, dist):
sio = StringIO.StringIO()
dist.metadata.write_pkg_file(sio)
return sio.getvalue()
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DistributionTestCase))
suite.addTest(unittest.makeSuite(MetadataTestCase))
return suite
|
araseyuta/Newsstand-analytics
|
refs/heads/master
|
gdata/tlslite/utils/Python_RSAKey.py
|
239
|
"""Pure-Python RSA implementation."""
from cryptomath import *
import xmltools
from ASN1Parser import ASN1Parser
from RSAKey import *
class Python_RSAKey(RSAKey):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def hasPrivateKey(self):
return self.d != 0
def hash(self):
s = self.writeXMLPublicKey('\t\t')
return hashAndBase64(s.strip())
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
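        # With r = unblinder, blinder = (r^-1)^e mod n, so the blinded input
        # is m * r^-e.  Raising it to d yields m^d * r^-1, and multiplying by
        # r afterwards recovers m^d without powMod's timing depending on m.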
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
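        # Garner's recombination: s1 = m^d mod p and s2 = m^d mod q (via the
        # reduced exponents dP, dQ), then
        # s2 + q * ((s1 - s2) * qInv mod p) equals m^d mod n.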
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self): return False
def write(self, indent=''):
if self.d:
s = indent+'<privateKey xmlns="http://trevp.net/rsa">\n'
else:
s = indent+'<publicKey xmlns="http://trevp.net/rsa">\n'
s += indent+'\t<n>%s</n>\n' % numberToBase64(self.n)
s += indent+'\t<e>%s</e>\n' % numberToBase64(self.e)
if self.d:
s += indent+'\t<d>%s</d>\n' % numberToBase64(self.d)
s += indent+'\t<p>%s</p>\n' % numberToBase64(self.p)
s += indent+'\t<q>%s</q>\n' % numberToBase64(self.q)
s += indent+'\t<dP>%s</dP>\n' % numberToBase64(self.dP)
s += indent+'\t<dQ>%s</dQ>\n' % numberToBase64(self.dQ)
s += indent+'\t<qInv>%s</qInv>\n' % numberToBase64(self.qInv)
s += indent+'</privateKey>'
else:
s += indent+'</publicKey>'
#Only add \n if part of a larger structure
if indent != '':
s += '\n'
return s
def writeXMLPublicKey(self, indent=''):
return Python_RSAKey(self.n, self.e).write(indent)
def generate(bits):
key = Python_RSAKey()
p = getRandomPrime(bits/2, False)
q = getRandomPrime(bits/2, False)
t = lcm(p-1, q-1)
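        # t is Carmichael's lambda(n); any d with e*d == 1 (mod t) is a
        # valid private exponent.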
key.n = p * q
key.e = 3L #Needed to be long, for Java
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
def parsePEM(s, passwordCallback=None):
"""Parse a string containing a <privateKey> or <publicKey>, or
PEM-encoded key."""
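        # "BEGIN PRIVATE KEY" marks a PKCS#8 container, while "BEGIN RSA
        # PRIVATE KEY" marks a bare PKCS#1 (SSLeay) RSAPrivateKey structure.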
start = s.find("-----BEGIN PRIVATE KEY-----")
if start != -1:
end = s.find("-----END PRIVATE KEY-----")
if end == -1:
raise SyntaxError("Missing PEM Postfix")
s = s[start+len("-----BEGIN PRIVATE KEY -----") : end]
bytes = base64ToBytes(s)
return Python_RSAKey._parsePKCS8(bytes)
else:
start = s.find("-----BEGIN RSA PRIVATE KEY-----")
if start != -1:
end = s.find("-----END RSA PRIVATE KEY-----")
if end == -1:
raise SyntaxError("Missing PEM Postfix")
s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end]
bytes = base64ToBytes(s)
return Python_RSAKey._parseSSLeay(bytes)
raise SyntaxError("Missing PEM Prefix")
parsePEM = staticmethod(parsePEM)
def parseXML(s):
element = xmltools.parseAndStripWhitespace(s)
return Python_RSAKey._parseXML(element)
parseXML = staticmethod(parseXML)
def _parsePKCS8(bytes):
p = ASN1Parser(bytes)
version = p.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID = p.getChild(1).value
if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
raise SyntaxError("Unrecognized AlgorithmIdentifier")
#Get the privateKey
privateKeyP = p.getChild(2)
#Adjust for OCTET STRING encapsulation
privateKeyP = ASN1Parser(privateKeyP.value)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parsePKCS8 = staticmethod(_parsePKCS8)
def _parseSSLeay(bytes):
privateKeyP = ASN1Parser(bytes)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parseSSLeay = staticmethod(_parseSSLeay)
def _parseASN1PrivateKey(privateKeyP):
version = privateKeyP.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = bytesToNumber(privateKeyP.getChild(1).value)
e = bytesToNumber(privateKeyP.getChild(2).value)
d = bytesToNumber(privateKeyP.getChild(3).value)
p = bytesToNumber(privateKeyP.getChild(4).value)
q = bytesToNumber(privateKeyP.getChild(5).value)
dP = bytesToNumber(privateKeyP.getChild(6).value)
dQ = bytesToNumber(privateKeyP.getChild(7).value)
qInv = bytesToNumber(privateKeyP.getChild(8).value)
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)
def _parseXML(element):
try:
xmltools.checkName(element, "privateKey")
except SyntaxError:
xmltools.checkName(element, "publicKey")
#Parse attributes
xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z")
xmltools.checkNoMoreAttributes(element)
#Parse public values (<n> and <e>)
n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx))
e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx))
d = 0
p = 0
q = 0
dP = 0
dQ = 0
qInv = 0
#Parse private values, if present
if element.childNodes.length>=3:
d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx))
p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx))
q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx))
dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx))
dQ = base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx))
qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx))
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseXML = staticmethod(_parseXML)
|
emedinaa/contentbox
|
refs/heads/master
|
third_party/modeltranslation/tests/test_settings.py
|
15
|
"""
Get test settings in dict format (for use with settings_override).
"""
from . import settings as _settings
TEST_SETTINGS = dict((k, getattr(_settings, k)) for k in dir(_settings) if k == k.upper())
|
madafoo/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/generator-output/gyptest-copies.py
|
66
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies with --generator-output using an explicit build
target of 'all'.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
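# Making the source tree read-only verifies that generation writes only
# under the --generator-output directory.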
test.writable(test.workpath('copies'), False)
test.run_gyp('copies.gyp',
'--generator-output=' + test.workpath('gypfiles'),
'-G', 'xcode_ninja_target_pattern=^(?!copies_null)',
chdir='copies')
test.writable(test.workpath('copies'), True)
test.relocate('copies', 'relocate/copies')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/copies'), False)
test.writable(test.workpath('relocate/copies/build'), True)
test.writable(test.workpath('relocate/copies/copies-out'), True)
test.writable(test.workpath('relocate/copies/subdir/build'), True)
test.writable(test.workpath('relocate/copies/subdir/copies-out'), True)
test.build('copies.gyp', test.ALL, chdir='relocate/gypfiles')
test.must_match(['relocate', 'copies', 'copies-out', 'file1'],
"file1 contents\n")
if test.format == 'xcode':
chdir = 'relocate/copies/build'
elif test.format in ['make', 'ninja', 'xcode-ninja', 'cmake']:
chdir = 'relocate/gypfiles/out'
else:
chdir = 'relocate/gypfiles'
test.must_match([chdir, 'Default', 'copies-out', 'file2'], "file2 contents\n")
test.must_match(['relocate', 'copies', 'subdir', 'copies-out', 'file3'],
"file3 contents\n")
if test.format == 'xcode':
chdir = 'relocate/copies/subdir/build'
elif test.format in ['make', 'ninja', 'xcode-ninja', 'cmake']:
chdir = 'relocate/gypfiles/out'
else:
chdir = 'relocate/gypfiles'
test.must_match([chdir, 'Default', 'copies-out', 'file4'], "file4 contents\n")
test.pass_test()
|
axbaretto/beam
|
refs/heads/master
|
sdks/python/apache_beam/io/gcp/datastore/v1new/helper_test.py
|
2
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for datastore helper."""
from __future__ import absolute_import
import unittest
import mock
# Protect against environments where apitools library is not available.
try:
from apache_beam.io.gcp.datastore.v1new import helper
from apache_beam.testing.test_utils import patch_retry
from google.api_core import exceptions
# TODO(BEAM-4543): Remove TypeError once googledatastore dependency is removed.
except (ImportError, TypeError):
helper = None
@unittest.skipIf(helper is None, 'GCP dependencies are not installed')
class HelperTest(unittest.TestCase):
def setUp(self):
self._mock_datastore = mock.MagicMock()
patch_retry(self, helper)
def test_write_mutations_no_errors(self):
mock_batch = mock.MagicMock()
mock_throttler = mock.MagicMock()
rpc_stats_callback = mock.MagicMock()
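    # An empty (falsy) return from throttle_request means the request is
    # not throttled.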
mock_throttler.throttle_request.return_value = []
helper.write_mutations(mock_batch, mock_throttler, rpc_stats_callback)
rpc_stats_callback.assert_has_calls([
mock.call(successes=1),
])
def test_write_mutations_throttle_delay_retryable_error(self):
mock_batch = mock.MagicMock()
mock_batch.commit.side_effect = [exceptions.DeadlineExceeded('retryable'),
None]
mock_throttler = mock.MagicMock()
rpc_stats_callback = mock.MagicMock()
# First try: throttle once [True, False]
# Second try: no throttle [False]
mock_throttler.throttle_request.side_effect = [True, False, False]
helper.write_mutations(mock_batch, mock_throttler, rpc_stats_callback,
throttle_delay=0)
rpc_stats_callback.assert_has_calls([
mock.call(successes=1),
mock.call(throttled_secs=mock.ANY),
mock.call(errors=1),
], any_order=True)
self.assertEqual(3, rpc_stats_callback.call_count)
def test_write_mutations_non_retryable_error(self):
mock_batch = mock.MagicMock()
mock_batch.commit.side_effect = [
exceptions.InvalidArgument('non-retryable'),
]
mock_throttler = mock.MagicMock()
rpc_stats_callback = mock.MagicMock()
mock_throttler.throttle_request.return_value = False
with self.assertRaises(exceptions.InvalidArgument):
helper.write_mutations(mock_batch, mock_throttler, rpc_stats_callback,
throttle_delay=0)
rpc_stats_callback.assert_called_once_with(errors=1)
if __name__ == '__main__':
unittest.main()
|
guiquanz/httpie
|
refs/heads/master
|
tests/test_ssl.py
|
46
|
import os
import pytest
import pytest_httpbin.certs
from requests.exceptions import SSLError
from httpie import ExitStatus
from utils import http, HTTP_OK, TESTS_ROOT
CLIENT_CERT = os.path.join(TESTS_ROOT, 'client_certs', 'client.crt')
CLIENT_KEY = os.path.join(TESTS_ROOT, 'client_certs', 'client.key')
CLIENT_PEM = os.path.join(TESTS_ROOT, 'client_certs', 'client.pem')
# We test against a local httpbin instance which uses a self-signed cert.
# Requests without --verify=<CA_BUNDLE> will fail with a verification error.
# See: https://github.com/kevin1024/pytest-httpbin#https-support
CA_BUNDLE = pytest_httpbin.certs.where()
class TestClientSSLCertHandling(object):
def test_cert_file_not_found(self, httpbin_secure):
r = http(httpbin_secure + '/get',
'--verify', CA_BUNDLE,
'--cert', '/__not_found__',
error_exit_ok=True)
assert r.exit_status == ExitStatus.ERROR
assert 'No such file or directory' in r.stderr
def test_cert_file_invalid(self, httpbin_secure):
with pytest.raises(SSLError):
http(httpbin_secure + '/get',
'--verify', CA_BUNDLE,
'--cert', __file__)
def test_cert_ok_but_missing_key(self, httpbin_secure):
with pytest.raises(SSLError):
http(httpbin_secure + '/get',
'--verify', CA_BUNDLE,
'--cert', CLIENT_CERT)
def test_cert_and_key(self, httpbin_secure):
r = http(httpbin_secure + '/get',
'--verify', CA_BUNDLE,
'--cert', CLIENT_CERT,
'--cert-key', CLIENT_KEY)
assert HTTP_OK in r
def test_cert_pem(self, httpbin_secure):
r = http(httpbin_secure + '/get',
'--verify', CA_BUNDLE,
'--cert', CLIENT_PEM)
assert HTTP_OK in r
class TestServerSSLCertHandling(object):
def test_self_signed_server_cert_by_default_raises_ssl_error(
self, httpbin_secure):
with pytest.raises(SSLError):
http(httpbin_secure.url + '/get')
def test_verify_no_OK(self, httpbin_secure):
r = http(httpbin_secure.url + '/get', '--verify=no')
assert HTTP_OK in r
def test_verify_custom_ca_bundle_path(
self, httpbin_secure):
r = http(httpbin_secure.url + '/get', '--verify', CA_BUNDLE)
assert HTTP_OK in r
def test_verify_custom_ca_bundle_invalid_path(self, httpbin_secure):
with pytest.raises(SSLError):
http(httpbin_secure.url + '/get', '--verify', '/__not_found__')
def test_verify_custom_ca_bundle_invalid_bundle(self, httpbin_secure):
with pytest.raises(SSLError):
http(httpbin_secure.url + '/get', '--verify', __file__)
|
marratj/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/junos/junos_banner.py
|
22
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_banner
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage multiline banners on Juniper JUNOS devices
description:
- This will configure both login and motd banners on network devices.
It allows playbooks to add or remote
banner text from the active running configuration.
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device. Value C(login) indicates
system login message prior to authenticating, C(motd) is login
announcement after successful authentication.
required: true
choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is
present in the current devices active running configuration.
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
choices: [True, False]
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
"""
EXAMPLES = """
- name: configure the login banner
junos_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
junos_banner:
banner: motd
state: absent
- name: deactivate the motd banner
junos_banner:
banner: motd
state: present
active: False
- name: activate the motd banner
junos_banner:
banner: motd
state: present
active: True
- name: Configure banner from file
junos_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit system login]
+ message \"this is my login banner\";
"""
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.junos import junos_argument_spec, check_args
from ansible.module_utils.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.junos import commit_configuration, discard_changes, locked_config
try:
from lxml.etree import tostring
except ImportError:
from xml.etree.ElementTree import tostring
USE_PERSISTENT_CONNECTION = True
def validate_param_values(module, obj):
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(module.params.get(key), module)
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['login', 'motd']),
text=dict(),
state=dict(default='present', choices=['present', 'absent']),
active=dict(default=True, type='bool')
)
argument_spec.update(junos_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
top = 'system/login'
param_to_xpath_map = collections.OrderedDict()
param_to_xpath_map.update([
('text', {'xpath': 'message' if module.params['banner'] == 'login' else 'announcement', 'leaf_only': True})
])
validate_param_values(module, param_to_xpath_map)
want = map_params_to_obj(module, param_to_xpath_map)
ele = map_obj_to_ele(module, want, top)
with locked_config(module):
diff = load_config(module, tostring(ele), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
|
adelcast/opkg
|
refs/heads/master
|
tests/core/11_virtual_conflicts_second.py
|
7
|
#! /usr/bin/env python
#
# Install a package 'x' which PROVIDES 'v'. Then try to install 'y' which
# PROVIDES and CONFLICTS 'v', indicating that no other provider of the virtual
# package 'v' should be installed at the same time as 'y'.
import os
import opk, cfg, opkgcl
opk.regress_init()
o = opk.OpkGroup()
o.add(Package="x", Provides="v")
o.add(Package="y", Provides="v", Conflicts="v")
o.write_opk()
o.write_list()
opkgcl.update()
opkgcl.install("x")
if not opkgcl.is_installed("x"):
opk.fail("Package 'x' installed but reports as not installed.")
# Now try to install "y", which should fail
opkgcl.install("y")
if opkgcl.is_installed("y"):
opk.xfail("Package 'y' installed despite conflict with 'v' provided by 'x'.")
|
ciex/motor
|
refs/heads/master
|
lib/werkzeug/testsuite/formparser.py
|
63
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.formparser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the form parsing facilities.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from StringIO import StringIO
from os.path import join, dirname
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import formparser
from werkzeug.test import create_environ, Client
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import RequestEntityTooLarge
@Request.application
def form_data_consumer(request):
result_object = request.args['object']
if result_object == 'text':
return Response(repr(request.form['text']))
f = request.files[result_object]
return Response('\n'.join((
repr(f.filename),
repr(f.name),
repr(f.content_type),
f.stream.read()
)))
def get_contents(filename):
f = file(filename, 'rb')
try:
return f.read()
finally:
f.close()
class FormParserTestCase(WerkzeugTestCase):
def test_limiting(self):
data = 'foo=Hello+World&bar=baz'
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_content_length = 400
self.assert_equal(req.form['foo'], 'Hello World')
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_form_memory_size = 7
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_form_memory_size = 400
self.assert_equal(req.form['foo'], 'Hello World')
data = ('--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\n'
'Hello World\r\n'
'--foo\r\nContent-Disposition: form-field; name=bar\r\n\r\n'
'bar=baz\r\n--foo--')
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_content_length = 4
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_content_length = 400
self.assert_equal(req.form['foo'], 'Hello World')
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_form_memory_size = 7
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_form_memory_size = 400
self.assert_equal(req.form['foo'], 'Hello World')
def test_parse_form_data_put_without_content(self):
# A PUT without a Content-Type header returns empty data
# Both rfc1945 and rfc2616 (1.0 and 1.1) say "Any HTTP/[1.0/1.1] message
# containing an entity-body SHOULD include a Content-Type header field
# defining the media type of that body." In the case where either
        # header is omitted, parse_form_data should still work.
env = create_environ('/foo', 'http://example.org/', method='PUT')
del env['CONTENT_TYPE']
del env['CONTENT_LENGTH']
stream, form, files = formparser.parse_form_data(env)
self.assert_equal(stream.read(), '')
self.assert_equal(len(form), 0)
self.assert_equal(len(files), 0)
def test_parse_form_data_get_without_content(self):
env = create_environ('/foo', 'http://example.org/', method='GET')
del env['CONTENT_TYPE']
del env['CONTENT_LENGTH']
stream, form, files = formparser.parse_form_data(env)
self.assert_equal(stream.read(), '')
self.assert_equal(len(form), 0)
self.assert_equal(len(files), 0)
def test_large_file(self):
data = 'x' * (1024 * 600)
req = Request.from_values(data={'foo': (StringIO(data), 'test.txt')},
method='POST')
# make sure we have a real file here, because we expect to be
# on the disk. > 1024 * 500
self.assert_(isinstance(req.files['foo'].stream, file))
class MultiPartTestCase(WerkzeugTestCase):
def test_basic(self):
resources = join(dirname(__file__), 'multipart')
client = Client(form_data_consumer, Response)
repository = [
('firefox3-2png1txt', '---------------------------186454651713519341951581030105', [
(u'anchor.png', 'file1', 'image/png', 'file1.png'),
(u'application_edit.png', 'file2', 'image/png', 'file2.png')
], u'example text'),
('firefox3-2pnglongtext', '---------------------------14904044739787191031754711748', [
(u'accept.png', 'file1', 'image/png', 'file1.png'),
(u'add.png', 'file2', 'image/png', 'file2.png')
], u'--long text\r\n--with boundary\r\n--lookalikes--'),
('opera8-2png1txt', '----------zEO9jQKmLc2Cq88c23Dx19', [
(u'arrow_branch.png', 'file1', 'image/png', 'file1.png'),
(u'award_star_bronze_1.png', 'file2', 'image/png', 'file2.png')
], u'blafasel öäü'),
('webkit3-2png1txt', '----WebKitFormBoundaryjdSFhcARk8fyGNy6', [
(u'gtk-apply.png', 'file1', 'image/png', 'file1.png'),
(u'gtk-no.png', 'file2', 'image/png', 'file2.png')
], u'this is another text with ümläüts'),
('ie6-2png1txt', '---------------------------7d91b03a20128', [
(u'file1.png', 'file1', 'image/x-png', 'file1.png'),
(u'file2.png', 'file2', 'image/x-png', 'file2.png')
], u'ie6 sucks :-/')
]
for name, boundary, files, text in repository:
folder = join(resources, name)
data = get_contents(join(folder, 'request.txt'))
for filename, field, content_type, fsname in files:
response = client.post('/?object=' + field, data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary,
content_length=len(data))
lines = response.data.split('\n', 3)
self.assert_equal(lines[0], repr(filename))
self.assert_equal(lines[1], repr(field))
self.assert_equal(lines[2], repr(content_type))
self.assert_equal(lines[3], get_contents(join(folder, fsname)))
response = client.post('/?object=text', data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary,
content_length=len(data))
self.assert_equal(response.data, repr(text))
def test_ie7_unc_path(self):
client = Client(form_data_consumer, Response)
data_file = join(dirname(__file__), 'multipart', 'ie7_full_path_request.txt')
data = get_contents(data_file)
boundary = '---------------------------7da36d1b4a0164'
response = client.post('/?object=cb_file_upload_multiple', data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary, content_length=len(data))
lines = response.data.split('\n', 3)
self.assert_equal(lines[0],
repr(u'Sellersburg Town Council Meeting 02-22-2010doc.doc'))
def test_end_of_file(self):
        # This test looks innocent but it was actually timing out in
# the Werkzeug 0.5 release version (#394)
data = (
'--foo\r\n'
'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
'Content-Type: text/plain\r\n\r\n'
'file contents and no end'
)
data = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_(not data.files)
self.assert_(not data.form)
def test_broken(self):
data = (
'--foo\r\n'
'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
'Content-Transfer-Encoding: base64\r\n'
'Content-Type: text/plain\r\n\r\n'
'broken base 64'
'--foo--'
)
_, form, files = formparser.parse_form_data(create_environ(data=data,
method='POST', content_type='multipart/form-data; boundary=foo'))
self.assert_(not files)
self.assert_(not form)
self.assert_raises(ValueError, formparser.parse_form_data,
create_environ(data=data, method='POST',
content_type='multipart/form-data; boundary=foo'),
silent=False)
def test_file_no_content_type(self):
data = (
'--foo\r\n'
'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n\r\n'
'file contents\r\n--foo--'
)
data = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_equal(data.files['test'].filename, 'test.txt')
self.assert_equal(data.files['test'].read(), 'file contents')
def test_extra_newline(self):
        # this test looks innocent but it was actually timing out in
# the Werkzeug 0.5 release version (#394)
data = (
'\r\n\r\n--foo\r\n'
'Content-Disposition: form-data; name="foo"\r\n\r\n'
'a string\r\n'
'--foo--'
)
data = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_(not data.files)
self.assert_equal(data.form['foo'], 'a string')
def test_headers(self):
data = ('--foo\r\n'
'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
'X-Custom-Header: blah\r\n'
'Content-Type: text/plain; charset=utf-8\r\n\r\n'
'file contents, just the contents\r\n'
'--foo--')
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
foo = req.files['foo']
self.assert_equal(foo.mimetype, 'text/plain')
self.assert_equal(foo.mimetype_params, {'charset': 'utf-8'})
self.assert_equal(foo.headers['content-type'], foo.content_type)
self.assert_equal(foo.content_type, 'text/plain; charset=utf-8')
self.assert_equal(foo.headers['x-custom-header'], 'blah')
def test_nonstandard_line_endings(self):
for nl in '\n', '\r', '\r\n':
data = nl.join((
'--foo',
'Content-Disposition: form-data; name=foo',
'',
'this is just bar',
'--foo',
'Content-Disposition: form-data; name=bar',
'',
'blafasel',
'--foo--'
))
req = Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; '
'boundary=foo', method='POST')
self.assert_equal(req.form['foo'], 'this is just bar')
self.assert_equal(req.form['bar'], 'blafasel')
def test_failures(self):
def parse_multipart(stream, boundary, content_length):
parser = formparser.MultiPartParser(content_length)
return parser.parse(stream, boundary, content_length)
self.assert_raises(ValueError, parse_multipart, StringIO(''), '', 0)
self.assert_raises(ValueError, parse_multipart, StringIO(''), 'broken ', 0)
data = '--foo\r\n\r\nHello World\r\n--foo--'
self.assert_raises(ValueError, parse_multipart, StringIO(data), 'foo', len(data))
data = '--foo\r\nContent-Disposition: form-field; name=foo\r\n' \
'Content-Transfer-Encoding: base64\r\n\r\nHello World\r\n--foo--'
self.assert_raises(ValueError, parse_multipart, StringIO(data), 'foo', len(data))
data = '--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\nHello World\r\n'
self.assert_raises(ValueError, parse_multipart, StringIO(data), 'foo', len(data))
x = formparser.parse_multipart_headers(['foo: bar\r\n', ' x test\r\n'])
self.assert_equal(x['foo'], 'bar\n x test')
self.assert_raises(ValueError, formparser.parse_multipart_headers,
['foo: bar\r\n', ' x test'])
    def test_bad_newline_assumption(self):
class ISORequest(Request):
charset = 'latin1'
contents = 'U2vlbmUgbORu'
data = '--foo\r\nContent-Disposition: form-data; name="test"\r\n' \
'Content-Transfer-Encoding: base64\r\n\r\n' + \
contents + '\r\n--foo--'
req = ISORequest.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_equal(req.form['test'], u'Sk\xe5ne l\xe4n')
class InternalFunctionsTestCase(WerkzeugTestCase):
    def test_line_parser(self):
assert formparser._line_parse('foo') == ('foo', False)
assert formparser._line_parse('foo\r\n') == ('foo', True)
assert formparser._line_parse('foo\r') == ('foo', True)
assert formparser._line_parse('foo\n') == ('foo', True)
def test_find_terminator(self):
lineiter = iter('\n\n\nfoo\nbar\nbaz'.splitlines(True))
find_terminator = formparser.MultiPartParser()._find_terminator
line = find_terminator(lineiter)
assert line == 'foo'
assert list(lineiter) == ['bar\n', 'baz']
assert find_terminator([]) == ''
assert find_terminator(['']) == ''
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FormParserTestCase))
suite.addTest(unittest.makeSuite(MultiPartTestCase))
suite.addTest(unittest.makeSuite(InternalFunctionsTestCase))
return suite
|
andrewsosa/hackfsu_com
|
refs/heads/master
|
api/api/views/hacker/get/__init__.py
|
2
|
from .profile import ProfileView
from . import csv
|
hooting/show-me-the-code-python
|
refs/heads/master
|
ddkangfu/0003/0003.py
|
40
|
#coding=utf-8
import uuid
import redis
"""
003, 将 0001 题生成的 200 个激活码(或者优惠券)保存到 **Redis** 非关系型数据库中.
"""
def get_redis_instance(host='localhost', port=6379):
return redis.StrictRedis(host=host, port=port)
def generate_activation_code(count):
    code_list = []
    for i in xrange(count):
        code = str(uuid.uuid4()).replace('-', '').upper()
        if code not in code_list:  # uuid4 collisions are practically impossible
            code_list.append(code)
    return code_list
def store_to_redis(codes):
    if codes:
        cache = get_redis_instance()
        try:
            cache.set('code:count', len(codes))
            for i in xrange(len(codes)):
                cache.set('code:{0}'.format(i), codes[i])
            cache.save()
            return True
        except redis.RedisError:
            print 'Cannot connect to redis server!'
    return False
def print_activation_code():
cache = get_redis_instance()
try:
count = cache.get('code:count')
count = 0 if count is None else int(count)
for i in xrange(count):
            print cache.get('code:%d' % i)
    except redis.RedisError:
        print 'Cannot connect to redis server!'
if __name__ == "__main__":
code_list = generate_activation_code(200)
    if store_to_redis(code_list):
print_activation_code()
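# Hedged verification sketch (assumes a redis server on localhost:6379 and
# the key scheme used above)::
#
#     cache = get_redis_instance()
#     count = int(cache.get('code:count') or 0)
#     assert all(cache.get('code:%d' % i) for i in xrange(count))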
|
pwarren/AGDeviceControl
|
refs/heads/master
|
agdevicecontrol/thirdparty/site-packages/linux2/twisted/cred/identity.py
|
19
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""DEPRECATED.
Base authentication mechanisms for Twisted.
Maintainer: U{Glyph Lefkowitz<mailto:glyph@twistedmatrix.com>}
Stability: semi-stable
Future Plans: There needs to be more pluggable support for different, disparate
authentication mechanisms being supported by the same Identity as long as it
supports the appropriate persistent data-storage fields. This will likely be
accomplished with Adapters and possibly Componentized, although it may just be
the addition of more methods in the base Identity.
"""
# System Imports
import md5, types, sys, warnings
# Twisted Imports
from twisted.python import failure
from twisted.internet import defer
# Sibling Imports
from util import respond
from util import challenge
from error import Unauthorized, KeyNotFound
class Identity:
"""An identity, with different methods for verification.
An identity represents a user's permissions with a particular
application. It is a username, a password, and a collection of
Perspective/Service name pairs, each of which is a perspective
that this identity is allowed to access.
"""
hashedPassword = None
def __init__(self, name, authorizer):
"""Create an identity.
I must have a name, and a backreference to the Application that the
Keys on my keyring make reference to.
"""
warnings.warn("Identities are deprecated, switch to credentialcheckers etc.",
category=DeprecationWarning, stacklevel=2)
if not isinstance(name, types.StringType):
raise TypeError
from twisted.internet import app
if isinstance(authorizer, app.Application):
authorizer = authorizer.authorizer
self.name = name
self.authorizer = authorizer
self.keyring = {}
def upgradeToVersion2(self):
self.authorizer = self.application.authorizer
del self.application
def addKeyForPerspective(self, perspective):
"""Add a key for the given perspective.
"""
perspectiveName = perspective.getPerspectiveName()
serviceName = perspective.service.getServiceName()
self.addKeyByString(serviceName, perspectiveName)
def addKeyByString(self, serviceName, perspectiveName):
"""Put a key on my keyring.
This key will give me a token to access to some service in the
future.
"""
self.keyring[(serviceName, perspectiveName)] = 1
def requestPerspectiveForService(self, serviceName):
"""Get the first available perspective for a given service.
"""
keys = self.keyring.keys()
keys.sort()
for serviceN, perspectiveN in keys:
if serviceN == serviceName:
return self.requestPerspectiveForKey(serviceName, perspectiveN)
return defer.fail("No such perspective.")
def requestPerspectiveForKey(self, serviceName, perspectiveName):
"""Get a perspective request (a Deferred) for the given key.
        If this identity does not have access to the given C{(serviceName,
        perspectiveName)} pair, the returned Deferred will fail with
        L{KeyNotFound<error.KeyNotFound>}.
"""
try:
check = self.keyring[(serviceName, perspectiveName)]
except KeyError:
e = KeyNotFound(serviceName, perspectiveName)
return defer.fail(failure.Failure(e, KeyNotFound,
sys.exc_info()[2]))
return self.authorizer.getServiceNamed(serviceName).getPerspectiveForIdentity(perspectiveName, self)
def getAllKeys(self):
"""Returns a list of all services and perspectives this identity can connect to.
This returns a sequence of keys.
"""
return self.keyring.keys()
def removeKey(self, serviceName, perspectiveName):
"""Remove a key from my keyring.
If this key is not present, raise KeyError.
"""
del self.keyring[(serviceName, perspectiveName)]
def save(self):
"""Persist this Identity to the authorizer.
"""
return self.authorizer.addIdentity(self)
### Authentication Mechanisms
def setPassword(self, plaintext):
if plaintext is None:
self.hashedPassword = None
else:
self.hashedPassword = md5.new(plaintext).digest()
def setAlreadyHashedPassword(self, cyphertext):
"""(legacy) Set a password for this identity, already md5 hashed.
"""
self.hashedPassword = cyphertext
def challenge(self):
"""I return some random data.
This is a method in addition to the module-level function
because it is anticipated that we will want to change this
to store salted passwords.
"""
return challenge()
def verifyPassword(self, challenge, hashedPassword):
"""Verify a challenge/response password.
"""
req = defer.Deferred()
if self.hashedPassword is None:
# no password was set, so we can't log in
req.errback(Unauthorized("account is disabled"))
return req
md = md5.new()
md.update(self.hashedPassword)
md.update(challenge)
correct = md.digest()
if hashedPassword == correct:
req.callback("password verified")
else:
req.errback(Unauthorized("incorrect password"))
return req
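    # A hedged sketch of the client side of this exchange (illustrative,
    # not part of this module): the peer hashes its stored md5(password)
    # digest together with the server-issued challenge and sends the
    # result back as ``hashedPassword``::
    #
    #     md = md5.new()
    #     md.update(md5.new(plaintext).digest())
    #     md.update(challenge)
    #     response = md.digest()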
def verifyPlainPassword(self, plaintext):
"""Verify plain text password.
This is insecure, but necessary to support legacy protocols such
as IRC, POP3, HTTP, etc.
"""
req = defer.Deferred()
if self.hashedPassword is None:
# no password was set, so we can't log in
req.errback(Unauthorized("account is disabled"))
return req
md = md5.new()
md.update(plaintext)
userPass = md.digest()
if userPass == self.hashedPassword:
req.callback("password verified")
else:
req.errback(Unauthorized("incorrect password"))
return req
def __repr__(self):
return "<%s %r at 0x%x>" % (self.__class__, self.name, id(self))
# TODO: service discovery through listing of self.keyring.
|
hungtt57/matchmaker
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/gis/geoip/base.py
|
106
|
import os
import re
from ctypes import c_char_p
from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
from django.contrib.gis.geoip.prototypes import (
GeoIP_country_code_by_addr, GeoIP_country_code_by_name,
GeoIP_country_name_by_addr, GeoIP_country_name_by_name,
GeoIP_database_info, GeoIP_delete, GeoIP_lib_version, GeoIP_open,
GeoIP_record_by_addr, GeoIP_record_by_name,
)
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.encoding import force_bytes
# Regular expressions for recognizing the GeoIP free database editions.
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
class GeoIPException(Exception):
pass
class GeoIP(object):
# The flags for GeoIP memory caching.
# GEOIP_STANDARD - read database from filesystem, uses least memory.
#
# GEOIP_MEMORY_CACHE - load database into memory, faster performance
# but uses more memory
#
# GEOIP_CHECK_CACHE - check for updated database. If database has been
# updated, reload filehandle and/or memory cache. This option
# is not thread safe.
#
# GEOIP_INDEX_CACHE - just cache the most frequently accessed index
# portion of the database, resulting in faster lookups than
# GEOIP_STANDARD, but less memory usage than GEOIP_MEMORY_CACHE -
# useful for larger databases such as GeoIP Organization and
# GeoIP City. Note, for GeoIP Country, Region and Netspeed
# databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
#
# GEOIP_MMAP_CACHE - load database into mmap shared memory ( not available
# on Windows).
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
GEOIP_CHECK_CACHE = 2
GEOIP_INDEX_CACHE = 4
GEOIP_MMAP_CACHE = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
        Initializes the GeoIP object; no parameters are required to use default
        settings. Keyword arguments may be passed in to customize the locations
of the GeoIP data sets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.dat) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH settings attribute.
* cache: The cache settings when opening up the GeoIP datasets,
and may be an integer in (0, 1, 2, 4, 8) corresponding to
the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
settings, respectively. Defaults to 0, meaning that the data is read
from the disk.
* country: The name of the GeoIP country data file. Defaults to
'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
* city: The name of the GeoIP city data file. Defaults to
'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIPException('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
if not path:
raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try and open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
if os.path.isfile(country_db):
self._country = GeoIP_open(force_bytes(country_db), cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
if os.path.isfile(city_db):
self._city = GeoIP_open(force_bytes(city_db), cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure
# out whether the given database path is for the GeoIP country
# or city databases.
ptr = GeoIP_open(force_bytes(path), cache)
info = GeoIP_database_info(ptr)
if lite_regex.match(info):
# GeoLite City database detected.
self._city = ptr
self._city_file = path
elif free_regex.match(info):
# GeoIP Country database detected.
self._country = ptr
self._country_file = path
else:
raise GeoIPException('Unable to recognize database edition: %s' % info)
else:
raise GeoIPException('GeoIP path must be a valid file or directory.')
def __del__(self):
# Cleaning any GeoIP file handles lying around.
if GeoIP_delete is None:
return
if self._country:
GeoIP_delete(self._country)
if self._city:
GeoIP_delete(self._city)
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIPException('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP only takes bytestrings.
return force_bytes(query)
def city(self, query):
"""
Returns a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
if ipv4_re.match(query):
# If an IP address was passed in
return GeoIP_record_by_addr(self._city, c_char_p(enc_query))
else:
# If a FQDN was passed in.
return GeoIP_record_by_name(self._city, c_char_p(enc_query))
def country_code(self, query):
"Returns the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_code_by_addr(self._country, enc_query)
else:
return GeoIP_country_code_by_name(self._country, enc_query)
else:
return self.city(query)['country_code']
def country_name(self, query):
"Returns the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_name_by_addr(self._country, enc_query)
else:
return GeoIP_country_name_by_name(self._country, enc_query)
else:
return self.city(query)['country_name']
def country(self, query):
"""
Returns a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
return {'country_code': self.country_code(query),
'country_name': self.country_name(query),
}
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Returns a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Returns a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Returns a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def country_info(self):
"Returns information about the GeoIP country database."
if self._country is None:
ci = 'No GeoIP Country data in "%s"' % self._country_file
else:
ci = GeoIP_database_info(self._country)
return ci
@property
def city_info(self):
"Returns information about the GeoIP city database."
if self._city is None:
ci = 'No GeoIP City data in "%s"' % self._city_file
else:
ci = GeoIP_database_info(self._city)
return ci
@property
def info(self):
"Returns information about the GeoIP library and databases in use."
info = ''
if GeoIP_lib_version:
info += 'GeoIP Library:\n\t%s\n' % GeoIP_lib_version()
return info + 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
# #### Methods for compatibility w/the GeoIP-Python API. ####
@classmethod
def open(cls, full_path, cache):
return GeoIP(full_path, cache)
def _rec_by_arg(self, arg):
if self._city:
return self.city(arg)
else:
return self.country(arg)
region_by_addr = city
region_by_name = city
record_by_addr = _rec_by_arg
record_by_name = _rec_by_arg
country_code_by_addr = country_code
country_code_by_name = country_code
country_name_by_addr = country_name
country_name_by_name = country_name
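# A minimal usage sketch (hedged; the data path and query values below are
# illustrative and assume the GeoIP.dat/GeoLiteCity.dat files exist)::
#
#     from django.contrib.gis.geoip import GeoIP
#     g = GeoIP(path='/usr/local/share/geoip', cache=GeoIP.GEOIP_MEMORY_CACHE)
#     g.country('djangoproject.com')  # {'country_code': ..., 'country_name': ...}
#     g.lat_lon('24.124.1.80')        # (latitude, longitude) or None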
|
C-Blu/npyscreen
|
refs/heads/master
|
npyscreen/wgtexttokens.py
|
15
|
import curses
import sys
from . import wgwidget
from . import wgtextbox
from . import wgtitlefield
class TextTokens(wgtextbox.Textfield,wgwidget.Widget):
"""This is an experiemental widget"""
# NB IT DOES NOT CURRENTLY SUPPORT THE HIGHLIGHTING COLORS
# OF THE TEXTFIELD CLASS.
def __init__(self, *args, **keywords):
super(TextTokens, self).__init__(*args, **keywords)
self.begin_at = 0 # which token to begin display with
self.maximum_string_length = self.width - 2
self.left_margin = 0
self.cursor_position = 0
self.important = False
self.highlight = False
self.show_bold = False
def find_cursor_offset_on_screen(self, position):
index = self.begin_at
offset = 0
while index < position:
offset += len(self.decode_token(self.value[index]))
index += 1
return offset - self.begin_at # I don't quite understand
# why the - self.begin_at is needed
# but without it the cursor and screen
# get out of sync
def decode_token(self, tk):
r = ''.join(tk)
if len(r) > 1:
r = ' [' + r + '] '
if isinstance(r, bytes):
r = r.decode(self.encoding, 'replace')
return r
# text and highlighting generator.
def get_literal_text_and_highlighting_generator(self, start_at=0,):
# could perform initialization here.
index = start_at
string_length = 0
output = ''
        while string_length <= self.maximum_string_length and len(self.value) > index:
            token_output = self.decode_token(self.value[index])
            if isinstance(token_output, bytes):
                token_output = token_output.decode(self.encoding, 'replace')
            # track the width consumed so far so the loop actually stops
            # once maximum_string_length has been filled
            string_length += len(token_output)
            highlighting = [curses.A_NORMAL for c in token_output]
            yield(token_output, highlighting)
            index += 1
def get_literal_text_to_display(self, start_at=0,):
g = self.get_literal_text_and_highlighting_generator(start_at=start_at)
txt = []
highlighting = []
for i in g:
txt += i[0]
highlighting += i[1]
return txt, highlighting
def update(self, clear=True, cursor=True):
if clear: self.clear()
if self.begin_at < 0: self.begin_at = 0
if self.left_margin >= self.maximum_string_length:
raise ValueError
if self.cursor_position < 0:
self.cursor_position = 0
if self.cursor_position > len(self.value):
self.cursor_position = len(self.value)
if self.cursor_position < self.begin_at:
self.begin_at = self.cursor_position
while self.find_cursor_offset_on_screen(self.cursor_position) > \
self.find_cursor_offset_on_screen(self.begin_at) + \
self.maximum_string_length - self.left_margin -1: # -1:
self.begin_at += 1
text, highlighting = self.get_literal_text_to_display(start_at=self.begin_at)
if self.do_colors():
if self.important:
color = self.parent.theme_manager.findPair(self, 'IMPORTANT') | curses.A_BOLD
else:
color = self.parent.theme_manager.findPair(self, self.color)
if self.show_bold:
color = color | curses.A_BOLD
if self.highlight:
if not self.editing:
color = color | curses.A_STANDOUT
else:
color = color | curses.A_UNDERLINE
            # map rather than filter so highlighting stays aligned with text
            highlighting = [color if c == curses.A_NORMAL else c
                            for c in highlighting]
else:
color = curses.A_NORMAL
if self.important or self.show_bold:
color = color | curses.A_BOLD
if self.important:
color = color | curses.A_UNDERLINE
if self.highlight:
if not self.editing:
color = color | curses.A_STANDOUT
else:
color = color | curses.A_UNDERLINE
            # map rather than filter so highlighting stays aligned with text
            highlighting = [color if c == curses.A_NORMAL else c
                            for c in highlighting]
self._print(text, highlighting)
if self.editing and cursor:
self.print_cursor()
def _print(self, text, highlighting):
self.add_line(self.rely,
self.relx + self.left_margin,
text,
highlighting,
self.maximum_string_length - self.left_margin
)
def print_cursor(self):
_cur_loc_x = self.cursor_position - self.begin_at + self.relx + self.left_margin
try:
char_under_cur = self.decode_token(self.value[self.cursor_position]) #use the real value
char_under_cur = self.safe_string(char_under_cur)
except IndexError:
char_under_cur = ' '
if isinstance(char_under_cur, bytes):
char_under_cur = char_under_cur.decode(self.encoding, 'replace')
offset = self.find_cursor_offset_on_screen(self.cursor_position)
if self.do_colors():
ATTR_LIST = self.parent.theme_manager.findPair(self) | curses.A_STANDOUT
else:
ATTR_LIST = curses.A_STANDOUT
self.add_line(self.rely,
self.begin_at + self.relx + self.left_margin + offset,
char_under_cur,
self.make_attributes_list(char_under_cur, ATTR_LIST),
# I don't understand why the "- self.begin_at" is needed in the following line
# but it is or the cursor can end up overrunning the end of the widget.
self.maximum_string_length+1 - self.left_margin - offset - self.begin_at,
)
def h_addch(self, inp):
if self.editable:
#self.value = self.value[:self.cursor_position] + curses.keyname(input) \
# + self.value[self.cursor_position:]
#self.cursor_position += len(curses.keyname(input))
# workaround for the metamode bug:
if self._last_get_ch_was_unicode == True and isinstance(self.value, bytes):
# probably dealing with python2.
ch_adding = inp
self.value = self.value.decode()
elif self._last_get_ch_was_unicode == True:
ch_adding = inp
else:
                try:
                    ch_adding = chr(inp)
                except TypeError:
                    ch_adding = inp
self.value = self.value[:self.cursor_position] + [ch_adding,] \
+ self.value[self.cursor_position:]
self.cursor_position += len(ch_adding)
def display_value(self, vl):
return vl
def calculate_area_needed(self):
"Need one line of screen, and any width going"
return 1,0
class TitleTextTokens(wgtitlefield.TitleText):
_entry_type = TextTokens
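# Hedged usage sketch (form and field names are illustrative; requires a
# running npyscreen application)::
#
#     import npyscreen
#
#     class TokenForm(npyscreen.Form):
#         def create(self):
#             self.tokens = self.add(npyscreen.TitleTextTokens, name="Tokens:")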
|
kennethgillen/ansible
|
refs/heads/devel
|
test/units/plugins/test_plugins.py
|
104
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.tests import unittest
from ansible.compat.tests import BUILTINS
from ansible.compat.tests.mock import mock_open, patch, MagicMock
from ansible.plugins import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, PluginLoader
class TestErrors(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch.object(PluginLoader, '_get_paths')
def test_print_paths(self, mock_method):
mock_method.return_value = ['/path/one', '/path/two', '/path/three']
pl = PluginLoader('foo', 'foo', '', 'test_plugins')
paths = pl.print_paths()
expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three'])
self.assertEqual(paths, expected_paths)
def test_plugins__get_package_paths_no_package(self):
pl = PluginLoader('test', '', 'test', 'test_plugin')
self.assertEqual(pl._get_package_paths(), [])
def test_plugins__get_package_paths_with_package(self):
        # the _get_package_paths() call uses __import__ to load a
        # python library, and then uses the __file__ attribute of
        # the resulting module to get the library path, so we mock
        # that here and patch the builtin to use our mocked result
foo = MagicMock()
bar = MagicMock()
bam = MagicMock()
bam.__file__ = '/path/to/my/foo/bar/bam/__init__.py'
bar.bam = bam
foo.return_value.bar = bar
pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin')
with patch('{0}.__import__'.format(BUILTINS), foo):
self.assertEqual(pl._get_package_paths(), ['/path/to/my/foo/bar/bam'])
def test_plugins__get_paths(self):
pl = PluginLoader('test', '', 'test', 'test_plugin')
pl._paths = ['/path/one', '/path/two']
self.assertEqual(pl._get_paths(), ['/path/one', '/path/two'])
# NOT YET WORKING
#def fake_glob(path):
# if path == 'test/*':
# return ['test/foo', 'test/bar', 'test/bam']
# elif path == 'test/*/*'
#m._paths = None
#mock_glob = MagicMock()
#mock_glob.return_value = []
#with patch('glob.glob', mock_glob):
# pass
def assertPluginLoaderConfigBecomes(self, arg, expected):
pl = PluginLoader('test', '', arg, 'test_plugin')
self.assertEqual(pl.config, expected)
def test_plugin__init_config_list(self):
config = ['/one', '/two']
self.assertPluginLoaderConfigBecomes(config, config)
def test_plugin__init_config_str(self):
self.assertPluginLoaderConfigBecomes('test', ['test'])
def test_plugin__init_config_none(self):
self.assertPluginLoaderConfigBecomes(None, [])
|
caktus/django-styleguide
|
refs/heads/develop
|
styleguide/management/commands/copypattern.py
|
1
|
import os
import shutil
from django.apps import apps
from django.core.management.base import LabelCommand, CommandError
from django import template
from styleguide.utils import get_styleguide_templates
class Command(LabelCommand):
help = 'Copy built-in pattern from the styleguide to project templates'
def handle_label(self, label, **options):
source_name, source_path = self.get_template_path(label)
try:
project_templates = template.engines.templates['django']['DIRS'][0]
except (KeyError, IndexError):
raise CommandError("Project template DIRS not found")
dest_dir = os.path.join(project_templates, "styleguide")
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_path = os.path.join(dest_dir, source_name)
if os.path.isfile(dest_path):
raise CommandError("{} already exists".format(dest_path))
shutil.copy(source_path, dest_path)
def get_template_path(self, slug):
app_config = apps.get_app_config('styleguide')
template_name = "styleguide-{}.html".format(slug)
template_path = os.path.join(app_config.path, "templates/styleguide",
template_name)
if not os.path.isfile(template_path):
raise CommandError("Pattern '{}' not found".format(slug))
return template_name, template_path
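# Illustrative invocation (the "navbar" pattern slug is hypothetical)::
#
#     python manage.py copypattern navbar
#
# This copies templates/styleguide/styleguide-navbar.html from this app into
# <first project template dir>/styleguide/, refusing to overwrite an
# existing file.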
|
daasbank/swift
|
refs/heads/master
|
swift/account/replicator.py
|
43
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.account.backend import AccountBroker, DATADIR
from swift.common import db_replicator
class AccountReplicator(db_replicator.Replicator):
server_type = 'account'
brokerclass = AccountBroker
datadir = DATADIR
default_port = 6002
|
pacificIT/mopidy
|
refs/heads/develop
|
mopidy/core/library.py
|
6
|
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import logging
import operator
import urlparse
from mopidy import compat, exceptions, models
from mopidy.internal import deprecation, validation
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def _backend_error_handling(backend, reraise=None):
try:
yield
except exceptions.ValidationError as e:
logger.error('%s backend returned bad data: %s',
backend.actor_ref.actor_class.__name__, e)
except Exception as e:
if reraise and isinstance(e, reraise):
raise
logger.exception('%s backend caused an exception.',
backend.actor_ref.actor_class.__name__)
class LibraryController(object):
pykka_traversable = True
def __init__(self, backends, core):
self.backends = backends
self.core = core
def _get_backend(self, uri):
uri_scheme = urlparse.urlparse(uri).scheme
return self.backends.with_library.get(uri_scheme, None)
def _get_backends_to_uris(self, uris):
if uris:
backends_to_uris = collections.defaultdict(list)
for uri in uris:
backend = self._get_backend(uri)
if backend is not None:
backends_to_uris[backend].append(uri)
else:
backends_to_uris = dict([
(b, None) for b in self.backends.with_library.values()])
return backends_to_uris
def browse(self, uri):
"""
Browse directories and tracks at the given ``uri``.
``uri`` is a string which represents some directory belonging to a
        backend. To get the initial root directories for backends pass
:class:`None` as the URI.
Returns a list of :class:`mopidy.models.Ref` objects for the
directories and tracks at the given ``uri``.
The :class:`~mopidy.models.Ref` objects representing tracks keep the
track's original URI. A matching pair of objects can look like this::
Track(uri='dummy:/foo.mp3', name='foo', artists=..., album=...)
Ref.track(uri='dummy:/foo.mp3', name='foo')
The :class:`~mopidy.models.Ref` objects representing directories have
        backend specific URIs. These are opaque values, so no one but the
        backend that created them should try to derive any meaning from them.
The only valid exception to this is checking the scheme, as it is used
to route browse requests to the correct backend.
For example, the dummy library's ``/bar`` directory could be returned
like this::
Ref.directory(uri='dummy:directory:/bar', name='bar')
:param string uri: URI to browse
:rtype: list of :class:`mopidy.models.Ref`
.. versionadded:: 0.18
"""
if uri is None:
return self._roots()
elif not uri.strip():
return []
validation.check_uri(uri)
return self._browse(uri)
def _roots(self):
directories = set()
backends = self.backends.with_library_browse.values()
futures = {b: b.library.root_directory for b in backends}
for backend, future in futures.items():
with _backend_error_handling(backend):
root = future.get()
validation.check_instance(root, models.Ref)
directories.add(root)
return sorted(directories, key=operator.attrgetter('name'))
def _browse(self, uri):
scheme = urlparse.urlparse(uri).scheme
backend = self.backends.with_library_browse.get(scheme)
if not backend:
return []
with _backend_error_handling(backend):
result = backend.library.browse(uri).get()
validation.check_instances(result, models.Ref)
return result
return []
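    # A hedged sketch of walking the browse tree from a frontend (the
    # ``core`` actor proxy and URIs are illustrative)::
    #
    #     roots = core.library.browse(None).get()         # backend root dirs
    #     children = core.library.browse(roots[0].uri).get()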
def get_distinct(self, field, query=None):
"""
List distinct values for a given field from the library.
        This has mainly been added to support the MPD protocol's list
        commands in a saner fashion. Other frontends are not
        recommended to use this method.
:param string field: One of ``track``, ``artist``, ``albumartist``,
``album``, ``composer``, ``performer``, ``date`` or ``genre``.
:param dict query: Query to use for limiting results, see
:meth:`search` for details about the query format.
:rtype: set of values corresponding to the requested field type.
.. versionadded:: 1.0
"""
validation.check_choice(field, validation.DISTINCT_FIELDS)
query is None or validation.check_query(query) # TODO: normalize?
result = set()
futures = {b: b.library.get_distinct(field, query)
for b in self.backends.with_library.values()}
for backend, future in futures.items():
with _backend_error_handling(backend):
values = future.get()
if values is not None:
validation.check_instances(values, compat.text_type)
result.update(values)
return result
def get_images(self, uris):
"""Lookup the images for the given URIs
        Backends can use this to return image URIs for any URI they know about,
be it tracks, albums, playlists... The lookup result is a dictionary
mapping the provided URIs to lists of images.
Unknown URIs or URIs the corresponding backend couldn't find anything
for will simply return an empty list for that URI.
:param uris: list of URIs to find images for
:type uris: list of string
:rtype: {uri: tuple of :class:`mopidy.models.Image`}
.. versionadded:: 1.0
"""
validation.check_uris(uris)
futures = {
backend: backend.library.get_images(backend_uris)
for (backend, backend_uris)
in self._get_backends_to_uris(uris).items() if backend_uris}
results = {uri: tuple() for uri in uris}
for backend, future in futures.items():
with _backend_error_handling(backend):
if future.get() is None:
continue
validation.check_instance(future.get(), collections.Mapping)
for uri, images in future.get().items():
if uri not in uris:
raise exceptions.ValidationError(
'Got unknown image URI: %s' % uri)
validation.check_instances(images, models.Image)
results[uri] += tuple(images)
return results
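    # Hedged example of consuming get_images from a frontend (the ``core``
    # proxy and URI are illustrative)::
    #
    #     images = core.library.get_images(['dummy:/foo.mp3']).get()
    #     for uri, imgs in images.items():
    #         print uri, [i.uri for i in imgs]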
def find_exact(self, query=None, uris=None, **kwargs):
"""Search the library for tracks where ``field`` is ``values``.
.. deprecated:: 1.0
Use :meth:`search` with ``exact`` set.
"""
deprecation.warn('core.library.find_exact')
return self.search(query=query, uris=uris, exact=True, **kwargs)
def lookup(self, uri=None, uris=None):
"""
Lookup the given URIs.
If the URI expands to multiple tracks, the returned list will contain
them all.
:param uri: track URI
:type uri: string or :class:`None`
:param uris: track URIs
:type uris: list of string or :class:`None`
:rtype: list of :class:`mopidy.models.Track` if uri was set or
{uri: list of :class:`mopidy.models.Track`} if uris was set.
.. versionadded:: 1.0
The ``uris`` argument.
.. deprecated:: 1.0
The ``uri`` argument. Use ``uris`` instead.
"""
if sum(o is not None for o in [uri, uris]) != 1:
raise ValueError('Exactly one of "uri" or "uris" must be set')
uris is None or validation.check_uris(uris)
uri is None or validation.check_uri(uri)
if uri:
deprecation.warn('core.library.lookup:uri_arg')
if uri is not None:
uris = [uri]
futures = {}
results = {u: [] for u in uris}
# TODO: lookup(uris) to backend APIs
for backend, backend_uris in self._get_backends_to_uris(uris).items():
for u in backend_uris:
futures[(backend, u)] = backend.library.lookup(u)
for (backend, u), future in futures.items():
with _backend_error_handling(backend):
result = future.get()
if result is not None:
validation.check_instances(result, models.Track)
results[u] = result
if uri:
return results[uri]
return results
def refresh(self, uri=None):
"""
Refresh library. Limit to URI and below if an URI is given.
:param uri: directory or track URI
:type uri: string
"""
uri is None or validation.check_uri(uri)
futures = {}
backends = {}
uri_scheme = urlparse.urlparse(uri).scheme if uri else None
for backend_scheme, backend in self.backends.with_library.items():
backends.setdefault(backend, set()).add(backend_scheme)
for backend, backend_schemes in backends.items():
if uri_scheme is None or uri_scheme in backend_schemes:
futures[backend] = backend.library.refresh(uri)
for backend, future in futures.items():
with _backend_error_handling(backend):
future.get()
def search(self, query=None, uris=None, exact=False, **kwargs):
"""
Search the library for tracks where ``field`` contains ``values``.
If ``uris`` is given, the search is limited to results from within the
URI roots. For example passing ``uris=['file:']`` will limit the search
to the local backend.
Examples::
# Returns results matching 'a' in any backend
search({'any': ['a']})
# Returns results matching artist 'xyz' in any backend
search({'artist': ['xyz']})
# Returns results matching 'a' and 'b' and artist 'xyz' in any
# backend
search({'any': ['a', 'b'], 'artist': ['xyz']})
# Returns results matching 'a' if within the given URI roots
# "file:///media/music" and "spotify:"
search({'any': ['a']}, uris=['file:///media/music', 'spotify:'])
# Returns results matching artist 'xyz' and 'abc' in any backend
search({'artist': ['xyz', 'abc']})
:param query: one or more queries to search for
:type query: dict
:param uris: zero or more URI roots to limit the search to
:type uris: list of string or :class:`None`
:param exact: if the search should use exact matching
:type exact: :class:`bool`
:rtype: list of :class:`mopidy.models.SearchResult`
.. versionadded:: 1.0
The ``exact`` keyword argument, which replaces :meth:`find_exact`.
.. deprecated:: 1.0
Previously, if the query was empty, and the backend could support
it, all available tracks were returned. This has not changed, but
it is strongly discouraged. No new code should rely on this
behavior.
.. deprecated:: 1.1
Providing the search query via ``kwargs`` is no longer supported.
"""
query = _normalize_query(query or kwargs)
uris is None or validation.check_uris(uris)
query is None or validation.check_query(query)
validation.check_boolean(exact)
if kwargs:
deprecation.warn('core.library.search:kwargs_query')
if not query:
deprecation.warn('core.library.search:empty_query')
futures = {}
for backend, backend_uris in self._get_backends_to_uris(uris).items():
futures[backend] = backend.library.search(
query=query, uris=backend_uris, exact=exact)
# Some of our tests check for LookupError to catch bad queries. This is
# silly and should be replaced with query validation before passing it
# to the backends.
reraise = (TypeError, LookupError)
results = []
for backend, future in futures.items():
try:
with _backend_error_handling(backend, reraise=reraise):
result = future.get()
if result is not None:
validation.check_instance(result, models.SearchResult)
results.append(result)
except TypeError:
backend_name = backend.actor_ref.actor_class.__name__
logger.warning(
'%s does not implement library.search() with "exact" '
'support. Please upgrade it.', backend_name)
return results
def _normalize_query(query):
broken_client = False
# TODO: this breaks if query is not a dictionary like object...
for (field, values) in query.items():
if isinstance(values, basestring):
broken_client = True
query[field] = [values]
if broken_client:
logger.warning(
'A client or frontend made a broken library search. Values in '
'queries must be lists of strings, not a string. Please check what'
' sent this query and file a bug. Query: %s', query)
if not query:
logger.warning(
'A client or frontend made a library search with an empty query. '
'This is strongly discouraged. Please check what sent this query '
'and file a bug.')
return query
|
ldtri0209/robotframework
|
refs/heads/master
|
src/robot/result/__init__.py
|
6
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements parsing execution results from XML output files.
The public API of this package is the :func:`~.ExecutionResult` factory
method, which returns :class:`~.Result` objects, and :class:`~.ResultVisitor`
abstract class to ease further processing the results. It is highly
recommended to use the public API via the :mod:`robot.api` package like in
the example below.
This package is considered stable.
Example
-------
.. literalinclude:: /../../doc/api/code_examples/check_test_times.py
"""
from .executionresult import Result
from .resultbuilder import ExecutionResult
from .testsuite import TestSuite
from .visitor import ResultVisitor
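# A short usage sketch via the public API (``output.xml`` is illustrative)::
#
#     from robot.api import ExecutionResult, ResultVisitor
#
#     class TimeChecker(ResultVisitor):
#         def visit_test(self, test):
#             print test.name, test.elapsedtime
#
#     result = ExecutionResult('output.xml')
#     result.visit(TimeChecker())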
|
kwlzn/rpyc
|
refs/heads/master
|
rpyc/utils/registry.py
|
8
|
"""
RPyC **registry server** implementation. The registry is much like
`Avahi <http://en.wikipedia.org/wiki/Avahi_(software)>`_ or
`Bonjour <http://en.wikipedia.org/wiki/Bonjour_(software)>`_, but tailored to
the needs of RPyC. Also, neither of them supports (or supported) Windows,
and Bonjour has a restrictive license. Moreover, they are too "powerful" for
what RPyC needed and required too complex a setup.
If anyone wants to implement the RPyC registry using Avahi, Bonjour, or any
other zeroconf implementation -- I'll be happy to include them.
Refer to :file:`rpyc/scripts/rpyc_registry.py` for more info.
"""
import sys
import socket
import time
import logging
from rpyc.core import brine
DEFAULT_PRUNING_TIMEOUT = 4 * 60
MAX_DGRAM_SIZE = 1500
REGISTRY_PORT = 18811
#------------------------------------------------------------------------------
# servers
#------------------------------------------------------------------------------
class RegistryServer(object):
"""Base registry server"""
def __init__(self, listenersock, pruning_timeout = None, logger = None):
self.sock = listenersock
self.port = self.sock.getsockname()[1]
self.active = False
self.services = {}
if pruning_timeout is None:
pruning_timeout = DEFAULT_PRUNING_TIMEOUT
self.pruning_timeout = pruning_timeout
if logger is None:
logger = self._get_logger()
self.logger = logger
def _get_logger(self):
raise NotImplementedError()
def on_service_added(self, name, addrinfo):
"""called when a new service joins the registry (but not on keepalives).
override this to add custom logic"""
def on_service_removed(self, name, addrinfo):
"""called when a service unregisters or is pruned.
override this to add custom logic"""
def _add_service(self, name, addrinfo):
"""updates the service's keep-alive time stamp"""
if name not in self.services:
self.services[name] = {}
is_new = addrinfo not in self.services[name]
self.services[name][addrinfo] = time.time()
if is_new:
try:
self.on_service_added(name, addrinfo)
except Exception:
self.logger.exception('error executing service add callback')
def _remove_service(self, name, addrinfo):
"""removes a single server of the given service"""
self.services[name].pop(addrinfo, None)
if not self.services[name]:
del self.services[name]
try:
self.on_service_removed(name, addrinfo)
except Exception:
self.logger.exception('error executing service remove callback')
def cmd_query(self, host, name):
"""implementation of the ``query`` command"""
name = name.upper()
self.logger.debug("querying for %r", name)
if name not in self.services:
self.logger.debug("no such service")
return ()
oldest = time.time() - self.pruning_timeout
all_servers = sorted(self.services[name].items(), key = lambda x: x[1])
servers = []
for addrinfo, t in all_servers:
if t < oldest:
self.logger.debug("discarding stale %s:%s", *addrinfo)
self._remove_service(name, addrinfo)
else:
servers.append(addrinfo)
self.logger.debug("replying with %r", servers)
return tuple(servers)
def cmd_register(self, host, names, port):
"""implementation of the ``register`` command"""
self.logger.debug("registering %s:%s as %s", host, port, ", ".join(names))
for name in names:
self._add_service(name.upper(), (host, port))
return "OK"
def cmd_unregister(self, host, port):
"""implementation of the ``unregister`` command"""
self.logger.debug("unregistering %s:%s", host, port)
for name in self.services.keys():
self._remove_service(name, (host, port))
return "OK"
def _recv(self):
raise NotImplementedError()
def _send(self, data, addrinfo):
raise NotImplementedError()
def _work(self):
while self.active:
try:
data, addrinfo = self._recv()
except (socket.error, socket.timeout):
continue
try:
magic, cmd, args = brine.load(data)
except Exception:
continue
if magic != "RPYC":
self.logger.warn("invalid magic: %r", magic)
continue
cmdfunc = getattr(self, "cmd_%s" % (cmd.lower(),), None)
if not cmdfunc:
self.logger.warn("unknown command: %r", cmd)
continue
try:
reply = cmdfunc(addrinfo[0], *args)
except Exception:
self.logger.exception('error executing function')
else:
self._send(brine.dump(reply), addrinfo)
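    # Wire-format sketch (illustrative): every request is a brine-dumped
    # ("RPYC", <cmd>, <args>) triple and every reply a brine-dumped value,
    # e.g. a raw query against a UDP registry::
    #
    #     payload = brine.dump(("RPYC", "QUERY", ("MYSERVICE",)))
    #     sock.sendto(payload, (registry_host, REGISTRY_PORT))
    #     servers = brine.load(sock.recvfrom(MAX_DGRAM_SIZE)[0])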
def start(self):
"""Starts the registry server (blocks)"""
if self.active:
raise ValueError("server is already running")
if self.sock is None:
raise ValueError("object disposed")
self.logger.debug("server started on %s:%s", *self.sock.getsockname()[:2])
try:
self.active = True
self._work()
except KeyboardInterrupt:
self.logger.warn("User interrupt!")
finally:
self.active = False
self.logger.debug("server closed")
self.sock.close()
self.sock = None
def close(self):
"""Closes (terminates) the registry server"""
if not self.active:
raise ValueError("server is not running")
self.logger.debug("stopping server...")
self.active = False
class UDPRegistryServer(RegistryServer):
"""UDP-based registry server. The server listens to UDP broadcasts and
    answers them. Useful in local networks, where broadcasts are allowed"""
TIMEOUT = 1.0
def __init__(self, host = "0.0.0.0", port = REGISTRY_PORT, pruning_timeout = None, logger = None):
family, socktype, proto, _, sockaddr = socket.getaddrinfo(host, port, 0,
socket.SOCK_DGRAM)[0]
sock = socket.socket(family, socktype, proto)
sock.bind(sockaddr)
sock.settimeout(self.TIMEOUT)
RegistryServer.__init__(self, sock, pruning_timeout = pruning_timeout,
logger = logger)
def _get_logger(self):
return logging.getLogger("REGSRV/UDP/%d" % (self.port,))
def _recv(self):
return self.sock.recvfrom(MAX_DGRAM_SIZE)
def _send(self, data, addrinfo):
try:
self.sock.sendto(data, addrinfo)
except (socket.error, socket.timeout):
pass
class TCPRegistryServer(RegistryServer):
"""TCP-based registry server. The server listens to a certain TCP port and
answers requests. Useful when you need to cross routers in the network, since
they block UDP broadcasts"""
TIMEOUT = 3.0
def __init__(self, host = "0.0.0.0", port = REGISTRY_PORT, pruning_timeout = None,
logger = None, reuse_addr = True):
family, socktype, proto, _, sockaddr = socket.getaddrinfo(host, port, 0,
socket.SOCK_STREAM)[0]
sock = socket.socket(family, socktype, proto)
if reuse_addr and sys.platform != "win32":
# warning: reuseaddr is not what you expect on windows!
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(sockaddr)
sock.listen(10)
sock.settimeout(self.TIMEOUT)
RegistryServer.__init__(self, sock, pruning_timeout = pruning_timeout,
logger = logger)
self._connected_sockets = {}
def _get_logger(self):
return logging.getLogger("REGSRV/TCP/%d" % (self.port,))
def _recv(self):
sock2, _ = self.sock.accept()
addrinfo = sock2.getpeername()
data = sock2.recv(MAX_DGRAM_SIZE)
self._connected_sockets[addrinfo] = sock2
return data, addrinfo
def _send(self, data, addrinfo):
sock2 = self._connected_sockets.pop(addrinfo)
try:
sock2.send(data)
except (socket.error, socket.timeout):
pass
finally:
sock2.close()
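# A minimal sketch of running a registry server (blocking; the host and port
# shown are the defaults)::
#
#     server = UDPRegistryServer(host="0.0.0.0", port=REGISTRY_PORT)
#     server.start()   # blocks until KeyboardInterrupt or close()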
#------------------------------------------------------------------------------
# clients (registrars)
#------------------------------------------------------------------------------
class RegistryClient(object):
"""Base registry client. Also known as **registrar**"""
REREGISTER_INTERVAL = 60
def __init__(self, ip, port, timeout, logger = None):
self.ip = ip
self.port = port
self.timeout = timeout
if logger is None:
logger = self._get_logger()
self.logger = logger
def _get_logger(self):
raise NotImplementedError()
def discover(self, name):
"""Sends a query for the specified service name.
:param name: the service name (or one of its aliases)
:returns: a list of ``(host, port)`` tuples
"""
raise NotImplementedError()
def register(self, aliases, port):
"""Registers the given service aliases with the given TCP port. This
API is intended to be called only by an RPyC server.
:param aliases: the :class:`service's <rpyc.core.service.Service>` aliases
:param port: the listening TCP port of the server
"""
raise NotImplementedError()
def unregister(self, port):
"""Unregisters the given RPyC server. This API is intended to be called
only by an RPyC server.
:param port: the listening TCP port of the RPyC server to unregister
"""
raise NotImplementedError()
class UDPRegistryClient(RegistryClient):
"""UDP-based registry clients. By default, it sends UDP broadcasts (requires
special user privileges on certain OS's) and collects the replies. You can
also specify the IP address to send to.
Example::
registrar = UDPRegistryClient()
list_of_servers = registrar.discover("foo")
.. note::
Consider using :func:`rpyc.utils.factory.discover` instead
"""
def __init__(self, ip = "255.255.255.255", port = REGISTRY_PORT, timeout = 2,
bcast = None, logger = None, ipv6 = False):
RegistryClient.__init__(self, ip = ip, port = port, timeout = timeout,
logger = logger)
if ipv6:
self.sock_family = socket.AF_INET6
self.bcast = False
else:
self.sock_family = socket.AF_INET
if bcast is None:
bcast = "255" in ip.split(".")
self.bcast = bcast
def _get_logger(self):
return logging.getLogger('REGCLNT/UDP')
def discover(self, name):
sock = socket.socket(self.sock_family, socket.SOCK_DGRAM)
try:
if self.bcast:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
data = brine.dump(("RPYC", "QUERY", (name,)))
sock.sendto(data, (self.ip, self.port))
sock.settimeout(self.timeout)
try:
data, _ = sock.recvfrom(MAX_DGRAM_SIZE)
except (socket.error, socket.timeout):
servers = ()
else:
servers = brine.load(data)
finally:
sock.close()
return servers
def register(self, aliases, port, interface = ""):
self.logger.info("registering on %s:%s", self.ip, self.port)
sock = socket.socket(self.sock_family, socket.SOCK_DGRAM)
sock.bind((interface, 0))
try:
if self.bcast:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
data = brine.dump(("RPYC", "REGISTER", (aliases, port)))
sock.sendto(data, (self.ip, self.port))
tmax = time.time() + self.timeout
while time.time() < tmax:
sock.settimeout(tmax - time.time())
try:
data, address = sock.recvfrom(MAX_DGRAM_SIZE)
rip, rport = address[:2]
except socket.timeout:
self.logger.warn("no registry acknowledged")
return False
if rport != self.port:
continue
try:
reply = brine.load(data)
except Exception:
continue
if reply == "OK":
self.logger.info("registry %s:%s acknowledged", rip, rport)
return True
else:
self.logger.warn("no registry acknowledged")
return False
finally:
sock.close()
def unregister(self, port):
self.logger.info("unregistering from %s:%s", self.ip, self.port)
sock = socket.socket(self.sock_family, socket.SOCK_DGRAM)
try:
if self.bcast:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
data = brine.dump(("RPYC", "UNREGISTER", (port,)))
sock.sendto(data, (self.ip, self.port))
finally:
sock.close()
class TCPRegistryClient(RegistryClient):
"""TCP-based registry client. You must specify the host (registry server)
to connect to.
Example::
registrar = TCPRegistryClient("localhost")
list_of_servers = registrar.discover("foo")
.. note::
Consider using :func:`rpyc.utils.factory.discover` instead
"""
def __init__(self, ip, port = REGISTRY_PORT, timeout = 2, logger = None):
RegistryClient.__init__(self, ip = ip, port = port, timeout = timeout,
logger = logger)
def _get_logger(self):
return logging.getLogger('REGCLNT/TCP')
def discover(self, name):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
try:
data = brine.dump(("RPYC", "QUERY", (name,)))
sock.connect((self.ip, self.port))
sock.send(data)
try:
data = sock.recv(MAX_DGRAM_SIZE)
except (socket.error, socket.timeout):
servers = ()
else:
servers = brine.load(data)
finally:
sock.close()
return servers
def register(self, aliases, port, interface = ""):
self.logger.info("registering on %s:%s", self.ip, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((interface, 0))
sock.settimeout(self.timeout)
data = brine.dump(("RPYC", "REGISTER", (aliases, port)))
try:
try:
sock.connect((self.ip, self.port))
sock.send(data)
except (socket.error, socket.timeout):
self.logger.warn("could not connect to registry")
return False
try:
data = sock.recv(MAX_DGRAM_SIZE)
except socket.timeout:
self.logger.warn("registry did not acknowledge")
return False
try:
reply = brine.load(data)
except Exception:
self.logger.warn("received corrupted data from registry")
return False
if reply == "OK":
self.logger.info("registry %s:%s acknowledged", self.ip, self.port)
return True
finally:
sock.close()
def unregister(self, port):
self.logger.info("unregistering from %s:%s", self.ip, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
try:
data = brine.dump(("RPYC", "UNREGISTER", (port,)))
try:
sock.connect((self.ip, self.port))
sock.send(data)
except (socket.error, socket.timeout):
self.logger.warn("could not connect to registry")
finally:
sock.close()
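# Illustrative sketch (not part of the original module): both clients speak
# the same brine-encoded wire format, a dumped tuple of
# ("RPYC", <command>, <args>), e.g.:
#
#   brine.dump(("RPYC", "QUERY", ("foo",)))              # discover by name
#   brine.dump(("RPYC", "REGISTER", (("foo",), 18861)))  # aliases and port
#   brine.dump(("RPYC", "UNREGISTER", (18861,)))         # port only
#
# As the code above suggests, a registry answers QUERY with a brine-dumped
# sequence of servers, and REGISTER with the string "OK".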
|
dariemp/odoo
|
refs/heads/8.0
|
addons/account/account_cash_statement.py
|
283
|
# encoding: utf-8
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 PC Solutions (<http://pcsol.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_cashbox_line(osv.osv):
""" Cash Box Details """
_name = 'account.cashbox.line'
_description = 'CashBox Line'
_rec_name = 'pieces'
def _sub_total(self, cr, uid, ids, name, arg, context=None):
""" Calculates Sub total
@param name: Names of fields.
@param arg: User defined arguments
@return: Dictionary of values.
"""
res = {}
for obj in self.browse(cr, uid, ids, context=context):
res[obj.id] = {
'subtotal_opening' : obj.pieces * obj.number_opening,
'subtotal_closing' : obj.pieces * obj.number_closing,
}
return res
def on_change_sub_opening(self, cr, uid, ids, pieces, number, *a):
""" Compute the subtotal for the opening """
return {'value' : {'subtotal_opening' : (pieces * number) or 0.0 }}
def on_change_sub_closing(self, cr, uid, ids, pieces, number, *a):
""" Compute the subtotal for the closing """
return {'value' : {'subtotal_closing' : (pieces * number) or 0.0 }}
_columns = {
'pieces': fields.float('Unit of Currency', digits_compute=dp.get_precision('Account')),
'number_opening' : fields.integer('Number of Units', help='Opening Unit Numbers'),
'number_closing' : fields.integer('Number of Units', help='Closing Unit Numbers'),
'subtotal_opening': fields.function(_sub_total, string='Opening Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
'subtotal_closing': fields.function(_sub_total, string='Closing Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
'bank_statement_id' : fields.many2one('account.bank.statement', ondelete='cascade'),
}
class account_cash_statement(osv.osv):
_inherit = 'account.bank.statement'
def _update_balances(self, cr, uid, ids, context=None):
"""
Set starting and ending balances according to pieces count
"""
res = {}
for statement in self.browse(cr, uid, ids, context=context):
if (statement.journal_id.type not in ('cash',)):
continue
if not statement.journal_id.cash_control:
prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
if float_compare(statement.balance_end_real, statement.balance_end, precision_digits=prec):
statement.write({'balance_end_real' : statement.balance_end})
continue
start = end = 0
for line in statement.details_ids:
start += line.subtotal_opening
end += line.subtotal_closing
data = {
'balance_start': start,
'balance_end_real': end,
}
res[statement.id] = data
super(account_cash_statement, self).write(cr, uid, [statement.id], data, context=context)
return res
def _get_sum_entry_encoding(self, cr, uid, ids, name, arg, context=None):
""" Find encoding total of statements "
@param name: Names of fields.
@param arg: User defined arguments
@return: Dictionary of values.
"""
res = {}
for statement in self.browse(cr, uid, ids, context=context):
res[statement.id] = sum((line.amount for line in statement.line_ids), 0.0)
return res
def _get_company(self, cr, uid, context=None):
user_pool = self.pool.get('res.users')
company_pool = self.pool.get('res.company')
user = user_pool.browse(cr, uid, uid, context=context)
company_id = user.company_id
if not company_id:
company_id = company_pool.search(cr, uid, [])
return company_id and company_id[0] or False
def _get_statement_from_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
result[line.statement_id.id] = True
return result.keys()
def _compute_difference(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0.0)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = obj.balance_end_real - obj.balance_end
return result
def _compute_last_closing_balance(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0.0)
for obj in self.browse(cr, uid, ids, context=context):
if obj.state == 'draft':
statement_ids = self.search(cr, uid,
[('journal_id', '=', obj.journal_id.id),('state', '=', 'confirm')],
order='create_date desc',
limit=1,
context=context
)
if not statement_ids:
continue
else:
st = self.browse(cr, uid, statement_ids[0], context=context)
result[obj.id] = st.balance_end_real
return result
def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
result = super(account_cash_statement, self).onchange_journal_id(cr, uid, ids, journal_id)
if not journal_id:
return result
statement_ids = self.search(cr, uid,
[('journal_id', '=', journal_id),('state', '=', 'confirm')],
order='create_date desc',
limit=1,
context=context
)
opening_details_ids = self._get_cash_open_box_lines(cr, uid, journal_id, context)
if opening_details_ids:
result['value']['opening_details_ids'] = opening_details_ids
if not statement_ids:
return result
st = self.browse(cr, uid, statement_ids[0], context=context)
result.setdefault('value', {}).update({'last_closing_balance' : st.balance_end_real})
return result
_columns = {
'total_entry_encoding': fields.function(_get_sum_entry_encoding, string="Total Transactions",
store = {
'account.bank.statement': (lambda self, cr, uid, ids, context=None: ids, ['line_ids','move_line_ids'], 10),
'account.bank.statement.line': (_get_statement_from_line, ['amount'], 10),
},
help="Total of cash transaction lines."),
'closing_date': fields.datetime("Closed On"),
'details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='CashBox Lines', copy=True),
'opening_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Opening Cashbox Lines'),
'closing_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Closing Cashbox Lines'),
'user_id': fields.many2one('res.users', 'Responsible', required=False),
'difference' : fields.function(_compute_difference, method=True, string="Difference", type="float", help="Difference between the theoretical closing balance and the real closing balance."),
'last_closing_balance' : fields.function(_compute_last_closing_balance, method=True, string='Last Closing Balance', type='float'),
}
_defaults = {
'state': 'draft',
'date': lambda self, cr, uid, context={}: context.get('date', time.strftime("%Y-%m-%d %H:%M:%S")),
'user_id': lambda self, cr, uid, context=None: uid,
}
def _get_cash_open_box_lines(self, cr, uid, journal_id, context):
details_ids = []
if not journal_id:
return details_ids
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal and (journal.type == 'cash'):
last_pieces = None
if journal.with_last_closing_balance == True:
domain = [('journal_id', '=', journal.id),
('state', '=', 'confirm')]
last_bank_statement_ids = self.search(cr, uid, domain, limit=1, order='create_date desc', context=context)
if last_bank_statement_ids:
last_bank_statement = self.browse(cr, uid, last_bank_statement_ids[0], context=context)
last_pieces = dict(
(line.pieces, line.number_closing) for line in last_bank_statement.details_ids
)
for value in journal.cashbox_line_ids:
nested_values = {
'number_closing' : 0,
'number_opening' : last_pieces.get(value.pieces, 0) if isinstance(last_pieces, dict) else 0,
'pieces' : value.pieces
}
details_ids.append([0, False, nested_values])
return details_ids
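    # Illustrative sketch (not part of the original module): each entry
    # appended above is a one2many "create" command triple [0, False, values],
    # so a cash journal with 0.50 and 1.00 coin lines and a last closing count
    # of four 0.50 coins would yield something like:
    #
    #   [[0, False, {'number_closing': 0, 'number_opening': 4, 'pieces': 0.5}],
    #    [0, False, {'number_closing': 0, 'number_opening': 0, 'pieces': 1.0}]]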
def create(self, cr, uid, vals, context=None):
journal_id = vals.get('journal_id')
if journal_id and not vals.get('opening_details_ids'):
vals['opening_details_ids'] = vals.get('opening_details_ids') or self._get_cash_open_box_lines(cr, uid, journal_id, context)
res_id = super(account_cash_statement, self).create(cr, uid, vals, context=context)
self._update_balances(cr, uid, [res_id], context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
"""
        Update record(s) in {ids} with the new values given in {vals},
        returning True on success and False otherwise.
        @param cr: cursor to database
        @param uid: id of current user
        @param ids: list of record ids to be updated
        @param vals: dict of new values to be set
        @param context: context arguments, like lang, time zone
        @return: True on success, False otherwise
"""
if vals.get('journal_id', False):
cashbox_line_obj = self.pool.get('account.cashbox.line')
cashbox_ids = cashbox_line_obj.search(cr, uid, [('bank_statement_id', 'in', ids)], context=context)
cashbox_line_obj.unlink(cr, uid, cashbox_ids, context)
res = super(account_cash_statement, self).write(cr, uid, ids, vals, context=context)
self._update_balances(cr, uid, ids, context)
return res
def _user_allow(self, cr, uid, statement_id, context=None):
return True
def button_open(self, cr, uid, ids, context=None):
""" Changes statement state to Running.
@return: True
"""
obj_seq = self.pool.get('ir.sequence')
if context is None:
context = {}
statement_pool = self.pool.get('account.bank.statement')
for statement in statement_pool.browse(cr, uid, ids, context=context):
vals = {}
if not self._user_allow(cr, uid, statement.id, context=context):
raise osv.except_osv(_('Error!'), (_('You do not have rights to open this %s journal!') % (statement.journal_id.name, )))
if statement.name and statement.name == '/':
c = {'fiscalyear_id': statement.period_id.fiscalyear_id.id}
if statement.journal_id.sequence_id:
st_number = obj_seq.next_by_id(cr, uid, statement.journal_id.sequence_id.id, context=c)
else:
st_number = obj_seq.next_by_code(cr, uid, 'account.cash.statement', context=c)
vals.update({
'name': st_number
})
vals.update({
'state': 'open',
})
self.write(cr, uid, [statement.id], vals, context=context)
return True
def statement_close(self, cr, uid, ids, journal_type='bank', context=None):
if journal_type == 'bank':
return super(account_cash_statement, self).statement_close(cr, uid, ids, journal_type, context)
vals = {
'state':'confirm',
'closing_date': time.strftime("%Y-%m-%d %H:%M:%S")
}
return self.write(cr, uid, ids, vals, context=context)
def check_status_condition(self, cr, uid, state, journal_type='bank'):
if journal_type == 'bank':
return super(account_cash_statement, self).check_status_condition(cr, uid, state, journal_type)
return state=='open'
def button_confirm_cash(self, cr, uid, ids, context=None):
absl_proxy = self.pool.get('account.bank.statement.line')
TABLES = ((_('Profit'), 'profit_account_id'), (_('Loss'), 'loss_account_id'),)
for obj in self.browse(cr, uid, ids, context=context):
if obj.difference == 0.0:
continue
elif obj.difference < 0.0:
account = obj.journal_id.loss_account_id
name = _('Loss')
if not obj.journal_id.loss_account_id:
raise osv.except_osv(_('Error!'), _('There is no Loss Account on the journal %s.') % (obj.journal_id.name,))
else: # obj.difference > 0.0
account = obj.journal_id.profit_account_id
name = _('Profit')
if not obj.journal_id.profit_account_id:
raise osv.except_osv(_('Error!'), _('There is no Profit Account on the journal %s.') % (obj.journal_id.name,))
values = {
'statement_id' : obj.id,
'journal_id' : obj.journal_id.id,
'account_id' : account.id,
'amount' : obj.difference,
'name' : name,
}
absl_proxy.create(cr, uid, values, context=context)
return super(account_cash_statement, self).button_confirm_bank(cr, uid, ids, context=context)
class account_journal(osv.osv):
_inherit = 'account.journal'
def _default_cashbox_line_ids(self, cr, uid, context=None):
# Return a list of coins in Euros.
result = [
dict(pieces=value) for value in [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500]
]
return result
_columns = {
'cashbox_line_ids' : fields.one2many('account.journal.cashbox.line', 'journal_id', 'CashBox', copy=True),
}
_defaults = {
'cashbox_line_ids' : _default_cashbox_line_ids,
}
class account_journal_cashbox_line(osv.osv):
_name = 'account.journal.cashbox.line'
_rec_name = 'pieces'
_columns = {
'pieces': fields.float('Values', digits_compute=dp.get_precision('Account')),
'journal_id' : fields.many2one('account.journal', 'Journal', required=True, select=1, ondelete="cascade"),
}
_order = 'pieces asc'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
podemos-info/odoo
|
refs/heads/6.1
|
addons/point_of_sale/__init__.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import point_of_sale
import account_bank_statement
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mjg/PyX
|
refs/heads/grid-with-axisat
|
pyx/pdfextra.py
|
2
|
# -*- encoding: utf-8 -*-
#
#
# Copyright (C) 2006 Michael Schindler <m-schindler@users.sourceforge.net>
#
# This file is part of PyX (http://pyx.sourceforge.net/).
#
# PyX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import io, math
from . import baseclasses, bbox, pdfwriter, color, unit
from .font.font import PDFHelvetica, PDFZapfDingbats
# TODO:
# - discuss behaviour under transformations with André and Jörg
# - what about fillstyles here?
# - where should e.g. a font be added to the registry:
# in processPDF or in __init__ of the PDF-item?
# - test for double occurrence of field names:
# this leads to wrong/no display
#
# TODO horizontal alignment in textfields
# flags for annotations:
PDFannotflags = [("invisible", 0), ("hidden", 1), ("printable", 2),
("nozoom", 3), ("norotate", 4), ("noview", 5), ("readonly", 6)]
# flags for form fields
PDFformflags = [("readonly", 0), ("required", 1), ("noexport", 2),
# flags for the button field
("notoggletooff", 14), ("radio", 15), ("pushbutton", 16),
# flags for the choice list field
("combo", 17), ("edit", 18), ("sort", 19), ("multiselect", 21),
# flags for the text field
("multiline", 12), ("password", 13), ("fileselect", 20), ("donotspellcheck", 22),
("donotscroll", 23)]
class flag: # <<<
"""A helper class for handling flags in pdf forms and annotations"""
def __init__(self, value=None):
self.value = value
def is_set(self, bit):
return self.value is not None and (self.value & 1<<bit) == 1<<bit
def set(self, bit):
if self.value is None:
self.value = 1<<bit
else:
self.value = self.value | 1<<bit
def unset(self, bit):
if self.value is not None:
self.value = self.value & ~(1<<bit)
def __int__(self):
return self.value
def __str__(self):
return self.value.__str__()
# >>>
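# Illustrative sketch (not part of the original module) of the flag helper:
#
#   f = flag()
#   f.set(2); f.set(6)                     # e.g. printable, readonly
#   assert int(f) == (1 << 2) | (1 << 6)
#   assert f.is_set(6) and not f.is_set(1)
#   f.unset(6)
#   assert not f.is_set(6)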
def _pdfflags(flags): # <<<
"""Splits flags into annotation/form part
the flag for the annotation dictionary
the flag for the form (field) dictionary
All flags are handled equally here, independent of their use
for the specific form field.
"""
# we initialize with 0 and set only those flags which are not 0
annotflag = flag(value=0)
formflag = flag(value=0)
for key, value in PDFannotflags:
if key in flags and flags[key]:
annotflag.set(value)
for key, value in PDFformflags:
if key in flags and flags[key]:
formflag.set(value)
return int(annotflag), int(formflag)
# >>>
def _pdfalignment(string): # <<<
alignflag = 0
if string == "c":
alignflag = 1
elif string == "r":
alignflag = 2
return alignflag
# >>>
def _topt(value, type="u", un="cm"): # <<<
if isinstance(value, unit.length):
return unit.topt(value)
else:
return unit.topt(unit.length(value, type, un))
# >>>
def _simplestring(text): # <<<
result = ""
for x in text:
if x.isalnum():
result += x
return result
# >>>
def _sizetrafo(s, tr): # <<<
x1, y1 = tr.apply_pt(s, s)
x0, y0 = tr.apply_pt(0, 0)
return math.hypot(x1 - x0, y1 - y0) * math.sqrt(0.5)
# >>>
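# Worked example (illustrative, not part of the original module): for a pure
# scaling trafo t with factor k, t.apply_pt(s, s) - t.apply_pt(0, 0) equals
# (k*s, k*s), so math.hypot(...) gives k*s*sqrt(2) and the final sqrt(0.5)
# factor recovers k*s. Rotations leave the result unchanged, since hypot only
# measures the length of the transformed diagonal:
#
#   from . import trafo                              # assuming pyx.trafo
#   assert abs(_sizetrafo(10, trafo.scale(2)) - 20) < 1e-9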
class formfield(baseclasses.canvasitem): # <<<
"""Base class for acroforms"""
defaultflags = dict()
def selectflags(self, flags):
newflags = dict(**self.defaultflags)
# overwrite the default flags with given values:
for key, value in list(flags.items()):
if key in newflags:
newflags[key] = value
else:
raise RuntimeError("unknown argument \"%s\" to formfield" % key)
return newflags
def bbox(self):
return bbox.bbox_pt(self.llx_pt, self.lly_pt, self.urx_pt, self.ury_pt)
def processPS(self, file, writer, context, registry, bbox):
raise RuntimeError("postscript output of forms is not supported")
# >>>
class textfield(formfield): # <<<
"""An interactive pdf form field for text input.
The "name" is used for the graphical user interface and for exporing the input data.
Note that the behaviour under rotations is undefined."""
defaultflags = dict(invisible=0, hidden=0, printable=1, nozoom=0, norotate=0, noview=0,
readonly=0, required=0, noexport=0, multiline=0, password=0, fileselect=0,
donotspellcheck=1, donotscroll=0)
def __init__(self, x, y, width, height, name, defaultvalue="", fontsize=10, font=PDFHelvetica,
fontrelleading=1.16, borderwidth=0, align="l", **flags):
self.llx_pt, self.lly_pt = _topt(x), _topt(y)
self.urx_pt, self.ury_pt = _topt(x+width), _topt(y+height)
self.name = name
self.defaultvalue = defaultvalue
self.fontsize_pt = _topt(fontsize, "x", "pt")
self.font = font
self.fontrelleading = fontrelleading
self.borderwidth_pt = _topt(borderwidth, "x", "pt")
self.align = align
self.flags = self.selectflags(flags)
def processPDF(self, file, writer, context, registry, bbox):
# the bounding box is transformed by the canvas
bbox += self.bbox()
# the annotation rectangle must be transformed separately:
llx_pt, lly_pt = context.trafo.apply_pt(self.llx_pt, self.lly_pt)
urx_pt, ury_pt = context.trafo.apply_pt(self.urx_pt, self.ury_pt)
fontsize_pt = _sizetrafo(self.fontsize_pt, context.trafo)
borderwidth_pt = _sizetrafo(self.borderwidth_pt, context.trafo)
# we create numbers from the flags given
annotflag, formflag = _pdfflags(self.flags)
alignflag = _pdfalignment(self.align)
registry.add(PDFtextfield((llx_pt, lly_pt, urx_pt, ury_pt), self.name, self.defaultvalue,
fontsize_pt, self.font, self.fontrelleading*fontsize_pt,
borderwidth_pt, (not self.flags["multiline"]),
alignflag, annotflag, formflag, context.fillstyles, writer, registry))
# >>>
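# Illustrative usage sketch (not part of the original module), assuming the
# usual PyX canvas API; the geometry and field name are hypothetical:
#
#   from pyx import canvas
#   c = canvas.canvas()
#   c.insert(textfield(0, 0, 6, 1, "comment", defaultvalue="type here",
#                      multiline=1))
#   c.writePDFfile("form")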
class PDFtextfield(pdfwriter.PDFobject): # <<<
def __init__(self, bb_pt, name, defaultvalue, fontsize, font, fontleading,
borderwidth, vcenter,
alignflag, annotflag, formflag, fillstyles, writer, registry):
pdfwriter.PDFobject.__init__(self, "formfield_text")
# append this formfield to the global document form
# and to the annotation list of the page:
self.PDFform = None
for object in registry.objects:
if object.type == "form":
object.append(self)
self.PDFform = object
elif object.type == "annotations":
object.append(self)
self.name = name
self.bb_pt = bb_pt
self.defaultvalue = defaultvalue
self.fontsize = fontsize
self.font = font
if self.font is None:
self.font = PDFHelvetica
self.fontleading = fontleading
self.borderwidth = borderwidth
self.alignflag = alignflag
self.formflag = formflag
self.annotflag = annotflag
self.registry = pdfwriter.PDFregistry()
self.registry.addresource("Font", self.font.name, self.font, procset="Text")
self.registry.add(self.font)
if self.defaultvalue:
text = self.defaultvalue.split("\n")
self.defaulttext = PDFdefaulttext(writer, registry, self.fontsize, self.font,
self.fontleading, text, self.bb_pt, self.borderwidth, vcenter)
self.registry.add(self.defaulttext)
else:
self.defaulttext = None
# process some fillstyles:
fillstring = io.StringIO()
for attr in fillstyles:
            if 1:  # isinstance(attr, color.color):
cont = pdfwriter.context()
cont.fillattr = 1
cont.strokeattr = 0
attr.processPDF(fillstring, writer, cont, self.registry, bbox)
self.fillstyles = fillstring.getvalue()
fillstring.close()
registry.mergeregistry(self.registry)
def write(self, file, writer, registry):
### the dictionary entries for the annotation
file.write("<</Type /Annot\n")
file.write("/P %d 0 R\n" % registry.getrefno(self.PDFform)) # reference to the page objects
file.write("/Rect [%f %f %f %f]\n" % self.bb_pt) # the annotation rectangle
#ile.write("/BS <</W 0 /S /S>>\n") # border style dictionary
file.write("/Border [0 0 %f]\n" % self.borderwidth) # border style
file.write("/F %d\n" % self.annotflag)
### the dictionary entries for the widget annotations
file.write("/Subtype /Widget\n")
file.write("/H /N\n") # highlight behaviour
if self.defaulttext:
file.write("/AP <</N %d 0 R >>\n" % registry.getrefno(self.defaulttext)) # appearance dictionary
### the dictionary entries for the form field
file.write("/FT /Tx\n") # type of the form field
file.write("/T (%s)\n" % self.name) # partial field name
file.write("/TU (%s)\n" % self.name) # field name for the user-interface
file.write("/TM (%s)\n" % self.name) # field name for exporting the data
file.write("/V (%s)\n" % self.defaultvalue) # starting value
file.write("/DV (%s)\n" % self.defaultvalue) # reset value
file.write("/Ff %d\n" % self.formflag) # flags for various purposes
### the dictionary entries for the text field
file.write("/DR ")
self.registry.writeresources(file) # default resources for appearance
file.write("/DA (%s /%s %f Tf %f TL)\n" % (self.fillstyles, self.font.name, self.fontsize, self.fontleading)) # default appearance string
file.write("/Q %d\n" % self.alignflag)
file.write(">>\n")
# >>>
class PDFdefaulttext(pdfwriter.PDFobject): # <<<
def __init__(self, writer, registry, fontsize, font, fontleading, texts, bb, borderwidth, vcenter):
pdfwriter.PDFobject.__init__(self, "defaulttext")
self.font = font
self.fontsize = fontsize
self.fontleading = fontleading
self.registry = pdfwriter.PDFregistry()
self.registry.addresource("Font", self.font.name, self.font, procset="Text")
self.registry.add(self.font)
self.bb = (0, 0, bb[2] - bb[0], bb[3] - bb[1])
self.texts = [t for t in texts if t]
self.borderwidth = borderwidth
# try to imitate the shifting of PDF:
# the font orientation point is on the baseline of the text
self.hshift = 2*self.borderwidth
if vcenter:
baselinevrel = 0.215
self.vshift = 0.5 * (bb[3] - bb[1]) + (len(self.texts) / 2.0 - 1)*self.fontleading + baselinevrel*self.fontsize
elif (bb[3] - bb[1]) < self.fontleading + 4*self.borderwidth:
baselinevrel = 0.215
self.vshift = 2*self.borderwidth + baselinevrel * self.fontsize
#self.vshift = 0.5 * (bb[3] - bb[1]) - (0.5 - baselinevrel - 0.5*addrelshift)*self.fontsize
else:
baselinevrel = 0.215
addrelshift = 0.215
self.vshift = (bb[3] - bb[1]) - 2*self.borderwidth - self.fontleading + (baselinevrel - addrelshift)*self.fontsize
registry.mergeregistry(self.registry)
def write(self, file, writer, registry):
content = "/Tx BMC q BT /%s %f Tf %f TL %f %f Td (%s) Tj" % (self.font.name, self.fontsize, self.fontleading, self.hshift, self.vshift, self.texts[0])
for text in self.texts[1:]:
content += " (%s)'" % (text)
content += " ET Q EMC\n"
if writer.compress:
import zlib
content = zlib.compress(content)
file.write("<<\n")
file.write("/Type /XObject\n")
file.write("/Subtype /Form\n")
file.write("/BBox [%f %f %f %f]\n" % self.bb)
#ile.write("/Matrix [0.98 0.17 -0.17 0.98 0 0]\n")
file.write("/Resources ")
self.registry.writeresources(file) # default resources for appearance
file.write("/Length %i\n" % len(content))
if writer.compress:
file.write("/Filter /FlateDecode\n")
file.write(">>\n"
"stream\n")
file.write(content)
file.write("endstream\n")
# >>>
class radiobuttons(formfield): # <<<
"""A set of related buttons that can each be on or off.
Typically, at most one radio button in a set may be on at any
given time, and selecting any one of the buttons
automatically deselects all the others.
Note that the behaviour under rotations is undefined."""
defaultflags = dict(invisible=0, hidden=0, printable=1, nozoom=0,
norotate=0, noview=0, readonly=0, required=0, noexport=0, notoggletooff=0)
def __init__(self, positions, name, values, defaultvalue=None, size=10, baselinerelpos=0.2, **flags):
self.name = name
self.size_pt = _topt(size, "x", "pt")
self.positions_pt = [(_topt(x), _topt(y) - baselinerelpos*self.size_pt) for x, y in positions]
self.flags = self.selectflags(flags)
self.flags["radio"] = 1
self.values = values
self.defaultvalue = defaultvalue
def bbox(self):
llx = min([x[0] for x in self.positions_pt])
lly = min([x[1] for x in self.positions_pt])
urx = max([x[0] for x in self.positions_pt]) + self.size_pt
ury = max([x[1] for x in self.positions_pt]) + self.size_pt
return bbox.bbox_pt(llx, lly, urx, ury)
def processPDF(self, file, writer, context, registry, bbox):
# the bbox is transformed by the canvas
bbox += self.bbox()
# the annotation rectangle must be transformed separately:
positions_pt = [context.trafo.apply_pt(x, y) for x, y in self.positions_pt]
size_pt = _sizetrafo(self.size_pt, context.trafo)
# we create numbers from the flags given
annotflag, formflag = _pdfflags(self.flags)
onstate = PDFButtonState(writer, registry,
10, PDFZapfDingbats, bgchar="m", fgchar="8",
bgscale=1.1, bgrelshift=(0, 0.18), fgrelshift=(0.12, 0.26))
offstate = PDFButtonState(writer, registry,
10, PDFZapfDingbats, bgchar="m", fgchar=None,
bgscale=1.1, bgrelshift=(0, 0.18))
registry.add(onstate)
registry.add(offstate)
registry.add(PDFbuttonlist(positions_pt, self.name, size_pt, self.values, self.defaultvalue,
annotflag, formflag, onstate, offstate, writer, registry))
# >>>
class checkbox(formfield): # <<<
"""Toggles between two states, on and off
Note that the behaviour under rotations is undefined."""
defaultflags = dict(invisible=0, hidden=0, printable=1, nozoom=0,
norotate=0, noview=0, readonly=0, required=0, noexport=0)
def __init__(self, x, y, name, defaulton=0, size=10, baselinerelpos=0.2, **flags):
self.name = name
self.size_pt = _topt(size, "x", "pt")
self.llx_pt, self.lly_pt = _topt(x), _topt(y) - baselinerelpos*self.size_pt
self.urx_pt, self.ury_pt = self.llx_pt + self.size_pt, self.lly_pt + self.size_pt
self.flags = self.selectflags(flags)
self.defaulton = defaulton
def processPDF(self, file, writer, context, registry, bbox):
# the bbox is transformed by the canvas
bbox += self.bbox()
# the annotation rectangle must be transformed separately:
positions_pt = [context.trafo.apply_pt(self.llx_pt, self.lly_pt)]
size_pt = _sizetrafo(self.size_pt, context.trafo)
# we create numbers from the flags given
annotflag, formflag = _pdfflags(self.flags)
onstate = PDFButtonState(writer, registry,
10, PDFZapfDingbats, bgchar="o", fgchar="4",
bgscale=1.2, bgrelshift=(0, 0.08), fgscale=0.9, fgrelshift=(0.15, 0.25))
offstate = PDFButtonState(writer, registry,
10, PDFZapfDingbats, bgchar="o", fgchar=None,
bgscale=1.2, bgrelshift=(0, 0.08))
registry.add(onstate)
registry.add(offstate)
if self.defaulton:
default = "Yes"
else:
default = "Off"
registry.add(PDFbuttonlist(positions_pt, self.name, size_pt, ["Yes"], default,
annotflag, formflag, onstate, offstate, writer, registry))
# >>>
class PDFbuttonlist(pdfwriter.PDFobject): # <<<
def __init__(self, positions_pt, name, size_pt, values, defaultvalue, annotflag, formflag,
onstate, offstate, writer, registry):
pdfwriter.PDFobject.__init__(self, "formfield_buttonlist")
# append this formfield to the global document form
# but we do not treat this as a fully valid annotation field
for object in registry.objects:
if object.type == "form":
object.append(self)
self.name = name
self.formflag = formflag
self.annotflag = annotflag
self.size_pt = size_pt
self.defaultvalue = defaultvalue
self.onstate = onstate
self.offstate = offstate
self.checkboxes = []
for i, pos_pt, value in zip(list(range(len(values))), positions_pt, values):
chbox = PDFcheckboxfield(pos_pt, value, size_pt, _simplestring(value), (value == defaultvalue),
self, self.onstate, self.offstate, self.annotflag, self.formflag, writer, registry)
self.checkboxes.append(chbox)
registry.add(chbox)
def write(self, file, writer, registry):
        ### implementation note: There are some (undocumented) PDF flaws which
        ### do not allow inheriting certain variables:
### * The parent button may not have /Ff (otherwise, notoggletooff fails)
### * The Kids of a radio button may not have a /T on their own (otherwise, they are not displayed)
### * The /BS and /Border do not draw anything.
### Nevertheless, the border width of /Border is used
### the dictionary entries for the annotation
file.write("<<\n")
### the dictionary entries for the form field
file.write("/FT /Btn\n") # type of the form field
file.write("/Kids [%s]\n" % " ".join(["%d 0 R" % registry.getrefno(x) for x in self.checkboxes]))
file.write("/T (%s)\n" % self.name) # partial field name
file.write("/TU (%s)\n" % self.name) # field name for the user-interface
file.write("/TM (%s)\n" % self.name) # field name for exporting the data
### the dictionary entries for the radiobuttons field
file.write("/V /%s\n" % self.defaultvalue)
file.write(">>\n")
# >>>
class PDFcheckboxfield(pdfwriter.PDFobject): # <<<
def __init__(self, pos_pt, name, size_pt, valuename, defaulton, parent, onstate, offstate, annotflag, formflag, writer, registry):
pdfwriter.PDFobject.__init__(self, "formfield_checkbox")
# we treat this as an annotation only, since the parent is
# already in the form field
self.PDFform = None
for object in registry.objects:
if object.type == "form":
assert self.PDFform is None
self.PDFform = object
if object.type == "annotations":
object.append(self)
self.bb_pt = (pos_pt[0], pos_pt[1], pos_pt[0] + size_pt, pos_pt[1] + size_pt)
self.name = name
self.size_pt = size_pt
self.valuename = valuename
if defaulton:
self.defaultvalue = self.valuename
else:
self.defaultvalue = "Off"
self.parent = parent
self.onstate = onstate
self.offstate = offstate
self.annotflag = annotflag
self.formflag = formflag
def write(self, file, writer, registry):
### the dictionary entries for the annotation
file.write("<<\n")
file.write("/Type /Annot\n")
file.write("/Subtype /Widget\n")
file.write("/P %d 0 R\n" % registry.getrefno(self.PDFform)) # reference to the page objects
file.write("/Rect [%f %f %f %f]\n" % self.bb_pt) # the annotation rectangle
file.write("/F %d\n" % self.annotflag) # flags
### the dictionary entries for the widget annotations
file.write("/H /N\n") # hightlight behaviour
### the dictionary entries for the form field
file.write("/FT /Btn\n") # type of the form field
file.write("/Parent %d 0 R\n" % registry.getrefno(self.parent)) # only for hierarchy
file.write("/AP << /N << /%s %d 0 R /Off %d 0 R >> >>\n" % (self.valuename, registry.getrefno(self.onstate), registry.getrefno(self.offstate)))
file.write("/AS /%s\n" % self.defaultvalue)
file.write("/Ff %d\n" % self.formflag) # Ff may not come from parent!
file.write(">>\n")
# >>>
class PDFButtonState(pdfwriter.PDFobject): # <<<
def __init__(self, writer, registry, fontsize, font, bgchar, fgchar,
bgscale=None, bgrelshift=None, fgscale=None, fgrelshift=None):
pdfwriter.PDFobject.__init__(self, "buttonstate", "buttonstate" + "_".join(map(str, list(map(id, [fontsize, font, bgchar, fgchar, bgscale, bgrelshift, fgscale, fgrelshift])))))
self.font = font
self.fontsize = fontsize
registry.addresource("Font", self.font.name, self.font, procset="Text")
registry.add(self.font)
self.bb = 0, 0, fontsize, fontsize
self.bgchar = bgchar
self.fgchar = fgchar
if bgscale is None and bgrelshift is not None:
bgscale = 1
if bgscale is not None and bgrelshift is None:
bgrelshift = 0, 0
if bgscale is not None:
self.bgtrafo = "%f 0 0 %f %f %f Tm" % (bgscale, bgscale, bgrelshift[0]*self.fontsize, bgrelshift[1]*self.fontsize)
else:
self.bgtrafo = ""
if fgscale is None and fgrelshift is not None:
fgscale = 1
if fgscale is not None and fgrelshift is None:
fgrelshift = 0, 0
if fgscale is not None:
self.fgtrafo = "%f 0 0 %f %f %f Tm" % (fgscale, fgscale, fgrelshift[0]*self.fontsize, fgrelshift[1]*self.fontsize)
else:
self.fgtrafo = ""
def write(self, file, writer, registry):
content = ""
if self.bgchar:
content += "q BT /%s %f Tf %s (%s) Tj ET Q\n" % (self.font.name, self.fontsize, self.bgtrafo, self.bgchar)
if self.fgchar:
content += "q BT /%s %f Tf %s (%s) Tj ET Q\n" % (self.font.name, self.fontsize, self.fgtrafo, self.fgchar)
if writer.compress:
import zlib
content = zlib.compress(content)
file.write("<<\n")
file.write("/Type /XObject\n")
file.write("/Subtype /Form\n")
file.write("/BBox [%f %f %f %f]\n" % self.bb)
#ile.write("/Matrix [0.98 0.17 -0.17 0.98 0 0]\n")
file.write("/Resources <</Font << /%s %d 0 R >> /ProcSet [/PDF /Text] >>\n" %
(self.font.name, registry.getrefno(self.font)))
file.write("/Length %i\n" % len(content))
if writer.compress:
file.write("/Filter /FlateDecode\n")
file.write(">>\n"
"stream\n")
file.write(content)
file.write("endstream\n")
## Zapf Dingbats symbols for further buttonstates:
# "3" = thin checkmark
# "4" = thick checkmark
# "5" = thin large cross
# "6" = thick large cross
# "7" = thin small cross
# "8" = thick small cross
# "l" = filled circle
# "m" = empty circle
# "n" = filled rectangle
# "o" = empty rectangle (shadow bottom right)
# "p" = empty rectangle (shadow top right)
# "q" = empty box (to bottom right)
# "r" = empty box (to top right)
# >>>
class choicefield(formfield): # <<<
"""An interactive pdf form field for text input.
The name is used for the graphical user interface and for exporing the input data.
Note that the behaviour under rotations is undefined."""
defaultflags = dict(invisible=0, hidden=0, printable=1, nozoom=0,
norotate=0, noview=0, readonly=0, required=0, noexport=0, combo=1,
edit=0, sort=0, multiselect=0, donotspellcheck=1)
def __init__(self, x, y, width, height, name, values, defaultvalue=None, fontsize=10, font=None,
borderwidth=0, align="l", **flags):
self.llx_pt, self.lly_pt = _topt(x), _topt(y)
self.urx_pt, self.ury_pt = _topt(x+width), _topt(y+height)
self.name = name
self.values = values
self.defaultvalue = defaultvalue
self.fontsize_pt = _topt(fontsize, "x", "pt")
self.font = font # TODO: add the generic fonts
self.borderwidth_pt = _topt(borderwidth, "x", "pt")
self.flags = self.selectflags(flags)
self.align = align
def processPDF(self, file, writer, context, registry, bbox):
# the bounding box is transformed by the canvas
bbox += self.bbox()
# the annotation rectangle must be transformed separately:
llx_pt, lly_pt = context.trafo.apply_pt(self.llx_pt, self.lly_pt)
urx_pt, ury_pt = context.trafo.apply_pt(self.urx_pt, self.ury_pt)
fontsize_pt = _sizetrafo(self.fontsize_pt, context.trafo)
borderwidth_pt = _sizetrafo(self.borderwidth_pt, context.trafo)
# we create numbers from the flags given
annotflag, formflag = _pdfflags(self.flags)
alignflag = _pdfalignment(self.align)
registry.add(PDFchoicefield((llx_pt, lly_pt, urx_pt, ury_pt),
self.name, self.values, self.defaultvalue, fontsize_pt, self.font,
borderwidth_pt, alignflag, annotflag, formflag, writer, registry))
# >>>
class PDFchoicefield(pdfwriter.PDFobject): # <<<
def __init__(self, bb_pt, name, values, defaultvalue, fontsize, font,
borderwidth_pt, alignflag, annotflag, formflag, writer, registry):
pdfwriter.PDFobject.__init__(self, "formfield_choice")
# append this formfield to the global document form
# and to the annotation list of the page:
self.PDFform = None
for object in registry.objects:
if object.type == "form":
object.append(self)
self.PDFform = object
elif object.type == "annotations":
object.append(self)
self.name = name
self.bb_pt = bb_pt
self.values = values
self.defaultvalue = defaultvalue
self.fontsize = fontsize
self.font = font
if self.font is None:
self.font = PDFHelvetica
registry.addresource("Font", self.font.name, self.font, procset="Text")
registry.add(self.font)
self.borderwidth_pt = borderwidth_pt
self.alignflag = alignflag
self.formflag = formflag
self.annotflag = annotflag
def write(self, file, writer, registry):
### the dictionary entries for the annotation
file.write("<</Type /Annot\n")
file.write("/P %d 0 R\n" % registry.getrefno(self.PDFform)) # reference to the page objects
file.write("/Rect [%f %f %f %f]\n" % self.bb_pt) # the annotation rectangle
#ile.write("/BS << ... >>\n" # border style dictionary
file.write("/Border [0 0 %f]\n" % self.borderwidth_pt) # border style
file.write("/F %d\n" % self.annotflag)
### the dictionary entries for the widget annotations
file.write("/Subtype /Widget\n")
file.write("/H /N\n") # highlight behaviour
#ile.write("/AP <</N >>\n") # appearance dictionary TODO
### the dictionary entries for the form field
file.write("/FT /Ch\n") # type of the form field
file.write("/T (%s)\n" % self.name) # partial field name
file.write("/TU (%s)\n" % self.name) # field name for the user-interface
file.write("/TM (%s)\n" % self.name) # field name for exporting the data
if self.defaultvalue in self.values:
file.write("/V (%s)\n" % self.defaultvalue) # starting value
file.write("/Ff %d\n" % self.formflag) # flags for various purposes
### the dictionary entries for the text field
file.write("/DR <</Font <</%s %d 0 R >> >>\n" % (self.font.name, registry.getrefno(self.font))) # default resources for appearance
file.write("/DA (/%s %f Tf)\n" % (self.font.name, self.fontsize)) # default appearance string
file.write("/Q %d\n" % self.alignflag)
file.write("/Opt [")
for value in self.values:
file.write(" (%s)" % value)
file.write(" ]\n")
file.write(">>\n")
# >>>
# vim:foldmethod=marker:foldmarker=<<<,>>>
|
quentinlautischer/291MiniProject2
|
refs/heads/master
|
lib/python3.5/site-packages/pip/_vendor/html5lib/treebuilders/_base.py
|
915
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
listElementsMap = {
None: (frozenset(scopingElements), False),
"button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
"list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")])), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
class Node(object):
def __init__(self, name):
"""Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
        value - The value of the current node (applies to text nodes and
            comments)
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
"""
self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
def __str__(self):
attributesStr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in
self.attributes.items()])
if attributesStr:
return "<%s %s>" % (self.name, attributesStr)
else:
return "<%s>" % (self.name)
def __repr__(self):
return "<%s>" % (self.name)
def appendChild(self, node):
"""Insert node as a child of the current node
"""
raise NotImplementedError
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
raise NotImplementedError
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
raise NotImplementedError
def removeChild(self, node):
"""Remove node from the children of the current node
"""
raise NotImplementedError
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
"""
# XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
raise NotImplementedError
class ActiveFormattingElements(list):
def append(self, node):
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
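# Illustrative sketch (not part of the original module): append() implements
# the HTML5 "Noah's Ark" clause -- at most three equal elements (same
# nameTuple and same attributes) may live between the last Marker and the end
# of the list; appending a fourth first removes the earliest of the three:
#
#   lst = ActiveFormattingElements()
#   for _ in range(4):
#       lst.append(make_bold_node())   # make_bold_node() is hypothetical
#   assert len(lst) == 3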
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
# Document class
documentClass = None
# The class to use for creating a node
elementClass = None
# The class to use for creating comments
commentClass = None
# The class to use for creating doctypes
doctypeClass = None
# Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
# XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
        # If we pass a node in, we match that. If we pass a string,
        # match any node with that name.
exactNode = hasattr(target, "nameTuple")
listElements, invert = listElementsMap[variant]
for node in reversed(self.openElements):
if (node.name == target and not exactNode or
node == target and exactNode):
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
# This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() # Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type": "StartTag",
"name": clone.name,
"namespace": clone.namespace,
"data": clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert isinstance(name, text_type), "Element %s not unicode" % name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
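    # Illustrative sketch (not part of the original module): with open
    # elements [html, body, table] and the <table> element attached under
    # <body>, getTableMisnestedNodePosition() returns (body, table), so a
    # misnested node is "foster parented" into <body> just before the table.
    # If the table has no parent yet, the element right below it on the open
    # elements stack becomes the foster parent instead.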
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
and name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
"""Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing"""
raise NotImplementedError
|
cedk/odoo
|
refs/heads/8.0
|
openerp/modules/module.py
|
199
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import functools
import imp
import importlib
import inspect
import itertools
import logging
import os
import re
import sys
import time
import unittest
import threading
from os.path import join as opj
import unittest2
import openerp
import openerp.tools as tools
import openerp.release as release
from openerp.tools.safe_eval import safe_eval as eval
MANIFEST = '__openerp__.py'
README = ['README.rst', 'README.md', 'README.txt']
_logger = logging.getLogger(__name__)
# addons path as a list
ad_paths = []
hooked = False
# Modules already loaded
loaded = []
class AddonsImportHook(object):
"""
Import hook to load OpenERP addons from multiple paths.
OpenERP implements its own import-hook to load its addons. OpenERP
addons are Python modules. Originally, they were each living in their
own top-level namespace, e.g. the sale module, or the hr module. For
backward compatibility, `import <module>` is still supported. Now they
are living in `openerp.addons`. The good way to import such modules is
thus `import openerp.addons.module`.
"""
def find_module(self, module_name, package_path):
module_parts = module_name.split('.')
if len(module_parts) == 3 and module_name.startswith('openerp.addons.'):
return self # We act as a loader too.
def load_module(self, module_name):
if module_name in sys.modules:
return sys.modules[module_name]
_1, _2, module_part = module_name.split('.')
# Note: we don't support circular import.
f, path, descr = imp.find_module(module_part, ad_paths)
mod = imp.load_module('openerp.addons.' + module_part, f, path, descr)
sys.modules['openerp.addons.' + module_part] = mod
return mod
def initialize_sys_path():
"""
Setup an import-hook to be able to import OpenERP addons from the different
addons paths.
This ensures something like ``import crm`` (or even
``import openerp.addons.crm``) works even if the addons are not in the
PYTHONPATH.
"""
global ad_paths
global hooked
dd = tools.config.addons_data_dir
if dd not in ad_paths:
ad_paths.append(dd)
for ad in tools.config['addons_path'].split(','):
ad = os.path.abspath(tools.ustr(ad.strip()))
if ad not in ad_paths:
ad_paths.append(ad)
# add base module path
base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'addons'))
if base_path not in ad_paths:
ad_paths.append(base_path)
if not hooked:
sys.meta_path.append(AddonsImportHook())
hooked = True
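# Illustrative sketch (not part of the original module): once
# initialize_sys_path() has installed the hook, addon imports resolve from
# the configured addons paths ("sale" is a hypothetical addon name):
#
#   initialize_sys_path()
#   import openerp.addons.sale    # resolved by AddonsImportHook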
def get_module_path(module, downloaded=False, display_warning=True):
"""Return the path of the given module.
Search the addons paths and return the first path where the given
module is found. If downloaded is True, return the default addons
path if nothing else is found.
"""
initialize_sys_path()
for adp in ad_paths:
if os.path.exists(opj(adp, module)) or os.path.exists(opj(adp, '%s.zip' % module)):
return opj(adp, module)
if downloaded:
return opj(tools.config.addons_data_dir, module)
if display_warning:
_logger.warning('module %s: module not found', module)
return False
def get_module_filetree(module, dir='.'):
path = get_module_path(module)
if not path:
return False
dir = os.path.normpath(dir)
if dir == '.':
dir = ''
if dir.startswith('..') or (dir and dir[0] == '/'):
raise Exception('Cannot access file outside the module')
files = openerp.tools.osutil.listdir(path, True)
tree = {}
for f in files:
if not f.startswith(dir):
continue
if dir:
f = f[len(dir)+int(not dir.endswith('/')):]
lst = f.split(os.sep)
current = tree
while len(lst) != 1:
current = current.setdefault(lst.pop(0), {})
current[lst.pop(0)] = None
return tree
def get_module_resource(module, *args):
"""Return the full path of a resource of the given module.
:param module: module name
:param list(str) args: resource path components within module
:rtype: str
:return: absolute path to the resource
TODO name it get_resource_path
TODO make it available inside on osv object (self.get_resource_path)
"""
mod_path = get_module_path(module)
if not mod_path: return False
resource_path = opj(mod_path, *args)
if os.path.isdir(mod_path):
# the module is a directory - ignore zip behavior
if os.path.exists(resource_path):
return resource_path
return False
def get_module_icon(module):
iconpath = ['static', 'description', 'icon.png']
if get_module_resource(module, *iconpath):
return ('/' + module + '/') + '/'.join(iconpath)
return '/base/' + '/'.join(iconpath)
def get_module_root(path):
"""
    Get the closest module's root, beginning from path
# Given:
# /foo/bar/module_dir/static/src/...
get_module_root('/foo/bar/module_dir/static/')
# returns '/foo/bar/module_dir'
get_module_root('/foo/bar/module_dir/')
# returns '/foo/bar/module_dir'
get_module_root('/foo/bar')
# returns None
@param path: Path from which the lookup should start
@return: Module root path or None if not found
"""
while not os.path.exists(os.path.join(path, MANIFEST)):
new_path = os.path.abspath(os.path.join(path, os.pardir))
if path == new_path:
return None
path = new_path
return path
def load_information_from_description_file(module, mod_path=None):
"""
:param module: The name of the module (sale, purchase, ...)
    :param mod_path: physical path of the module; if not provided, it is
                     looked up from the addons paths
"""
if not mod_path:
mod_path = get_module_path(module)
terp_file = mod_path and opj(mod_path, MANIFEST) or False
if terp_file:
info = {}
if os.path.isfile(terp_file):
# default values for descriptor
info = {
'application': False,
'author': '',
'auto_install': False,
'category': 'Uncategorized',
'depends': [],
'description': '',
'icon': get_module_icon(module),
'installable': True,
'license': 'AGPL-3',
'post_load': None,
'version': '1.0',
'web': False,
'website': '',
'sequence': 100,
'summary': '',
}
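            # default each of the manifest keys below to its own fresh empty
            # list: iter(list, None) calls list() indefinitely, and izip
            # stops once the seven key names run out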
info.update(itertools.izip(
'depends data demo test init_xml update_xml demo_xml'.split(),
iter(list, None)))
f = tools.file_open(terp_file)
try:
info.update(eval(f.read()))
finally:
f.close()
if not info.get('description'):
readme_path = [opj(mod_path, x) for x in README
if os.path.isfile(opj(mod_path, x))]
if readme_path:
readme_text = tools.file_open(readme_path[0]).read()
info['description'] = readme_text
if 'active' in info:
# 'active' has been renamed 'auto_install'
info['auto_install'] = info['active']
info['version'] = adapt_version(info['version'])
return info
#TODO: refactor the logger in this file to follow the logging guidelines
# for 6.0
_logger.debug('module %s: no %s file found.', module, MANIFEST)
return {}
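# A minimal sketch (illustrative, not taken from any real module) of the
# dict this function evals out of the MANIFEST file:
#
#     {
#         'name': 'My Module',
#         'version': '1.0',
#         'depends': ['base'],
#         'data': ['views.xml'],
#     }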
def init_module_models(cr, module_name, obj_list):
""" Initialize a list of models.
Call _auto_init and init on each model to create or update the
database tables supporting the models.
TODO better explanation of _auto_init and init.
"""
_logger.info('module %s: creating or updating database tables', module_name)
todo = []
for obj in obj_list:
result = obj._auto_init(cr, {'module': module_name})
if result:
todo += result
if hasattr(obj, 'init'):
obj.init(cr)
cr.commit()
for obj in obj_list:
obj._auto_end(cr, {'module': module_name})
cr.commit()
todo.sort(key=lambda x: x[0])
for t in todo:
t[1](cr, *t[2])
cr.commit()
def load_openerp_module(module_name):
""" Load an OpenERP module, if not already loaded.
    This loads the module and registers all of its models, thanks to either
    the MetaModel metaclass, or the explicit instantiation of the model.
    This is also used to load server-wide modules (i.e. it is also used
    when there is no model to register).
"""
global loaded
if module_name in loaded:
return
initialize_sys_path()
try:
mod_path = get_module_path(module_name)
__import__('openerp.addons.' + module_name)
        # Call the module's post-load hook. This can be done before any model or
# data has been initialized. This is ok as the post-load hook is for
# server-wide (instead of registry-specific) functionalities.
info = load_information_from_description_file(module_name)
if info['post_load']:
getattr(sys.modules['openerp.addons.' + module_name], info['post_load'])()
except Exception, e:
msg = "Couldn't load module %s" % (module_name)
_logger.critical(msg)
_logger.critical(e)
raise
else:
loaded.append(module_name)
def get_modules():
"""Returns the list of module names
"""
def listdir(dir):
def clean(name):
name = os.path.basename(name)
if name[-4:] == '.zip':
name = name[:-4]
return name
        def is_really_module(name):
            manifest_name = opj(dir, name, MANIFEST)
            return os.path.isfile(manifest_name)
return map(clean, filter(is_really_module, os.listdir(dir)))
plist = []
initialize_sys_path()
for ad in ad_paths:
plist.extend(listdir(ad))
return list(set(plist))
def get_modules_with_version():
modules = get_modules()
res = dict.fromkeys(modules, adapt_version('1.0'))
for module in modules:
try:
info = load_information_from_description_file(module)
res[module] = info['version']
except Exception:
continue
return res
def adapt_version(version):
serie = release.major_version
if version == serie or not version.startswith(serie + '.'):
version = '%s.%s' % (serie, version)
return version
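# Sketch: assuming release.major_version == '7.0' (illustrative), this turns
# '1.0' into '7.0.1.0' and leaves '7.0.1.0' untouched.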
def get_test_modules(module):
    """ Return the list of a module's ``test_*`` submodules, suitable for
    feeding unittest2.TestLoader.loadTestsFromModule(). """
# Try to import the module
modpath = 'openerp.addons.' + module
try:
mod = importlib.import_module('.tests', modpath)
except Exception, e:
# If module has no `tests` sub-module, no problem.
if str(e) != 'No module named tests':
_logger.exception('Can not `import %s`.', module)
return []
if hasattr(mod, 'fast_suite') or hasattr(mod, 'checks'):
_logger.warn(
"Found deprecated fast_suite or checks attribute in test module "
"%s. These have no effect in or after version 8.0.",
mod.__name__)
result = [mod_obj for name, mod_obj in inspect.getmembers(mod, inspect.ismodule)
if name.startswith('test_')]
return result
# Use a custom stream object to log the test executions.
class TestStream(object):
def __init__(self, logger_name='openerp.tests'):
self.logger = logging.getLogger(logger_name)
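        # filter out unittest runner noise: dashed separator lines, '...'
        # progress lines (the dots are unescaped, so any three-character
        # line matches as well) and bare 'ok' lines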
self.r = re.compile(r'^-*$|^ *... *$|^ok$')
def flush(self):
pass
def write(self, s):
if self.r.match(s):
return
first = True
level = logging.ERROR if s.startswith(('ERROR', 'FAIL', 'Traceback')) else logging.INFO
for c in s.splitlines():
if not first:
c = '` ' + c
first = False
self.logger.log(level, c)
current_test = None
def runs_at(test, hook, default):
# by default, tests do not run post install
test_runs = getattr(test, hook, default)
# for a test suite, we're done
if not isinstance(test, unittest.TestCase):
return test_runs
    # otherwise check the current test method to see if it's been set to a
    # different state
method = getattr(test, test._testMethodName)
return getattr(method, hook, test_runs)
runs_at_install = functools.partial(runs_at, hook='at_install', default=True)
runs_post_install = functools.partial(runs_at, hook='post_install', default=False)
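# Illustrative sketch (class name and values are hypothetical): a TestCase
# class, or an individual test method, selects its phase through plain
# attributes that runs_at() reads back:
#
#     class TestPostInstall(unittest.TestCase):
#         at_install = False     # skipped by runs_at_install
#         post_install = True    # picked up by runs_post_install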
def run_unit_tests(module_name, dbname, position=runs_at_install):
"""
:returns: ``True`` if all of ``module_name``'s tests succeeded, ``False``
if any of them failed.
:rtype: bool
"""
global current_test
current_test = module_name
mods = get_test_modules(module_name)
threading.currentThread().testing = True
r = True
for m in mods:
tests = unwrap_suite(unittest2.TestLoader().loadTestsFromModule(m))
suite = unittest2.TestSuite(itertools.ifilter(position, tests))
if suite.countTestCases():
t0 = time.time()
t0_sql = openerp.sql_db.sql_counter
_logger.info('%s running tests.', m.__name__)
result = unittest2.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite)
if time.time() - t0 > 5:
_logger.log(25, "%s tested in %.2fs, %s queries", m.__name__, time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
if not result.wasSuccessful():
r = False
_logger.error("Module %s: %d failures, %d errors", module_name, len(result.failures), len(result.errors))
current_test = None
threading.currentThread().testing = False
return r
def unwrap_suite(test):
"""
Attempts to unpack testsuites (holding suites or cases) in order to
generate a single stream of terminals (either test cases or customized
test suites). These can then be checked for run/skip attributes
individually.
An alternative would be to use a variant of @unittest2.skipIf with a state
flag of some sort e.g. @unittest2.skipIf(common.runstate != 'at_install'),
    but then things become weird with post_install, as tests should *not* run
    by default there.
"""
if isinstance(test, unittest.TestCase):
yield test
return
subtests = list(test)
# custom test suite (no test cases)
if not len(subtests):
yield test
return
for item in itertools.chain.from_iterable(
itertools.imap(unwrap_suite, subtests)):
yield item
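# Sketch: unwrap_suite(TestSuite([TestSuite([case_a]), case_b])) yields
# case_a then case_b, while an empty customized suite is yielded whole.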
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hutchison/bp_mgmt
|
refs/heads/public
|
bp_cupid/migrations/0018_praxis_related_names.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('bp_cupid', '0017_block_unique'),
]
operations = [
migrations.AlterField(
model_name='praxis',
name='landkreis',
field=models.ForeignKey(related_name='praxen', null=True, to='bp_cupid.Landkreis'),
),
migrations.AlterField(
model_name='praxis',
name='zeitraeume',
field=models.ManyToManyField(verbose_name='Zeiträume', db_table='praxis_zeitraum', related_name='praxen', blank=True, to='bp_cupid.Zeitraum'),
),
]
|
jcoady9/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/test_operator.py
|
67
|
import operator
import unittest
from test import support
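# Note: under Python 3, Seq1 and Seq2 below are equivalent; the explicit
# `object` base on Seq2 is a holdover from Python 2's old-style classes.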
class Seq1:
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
class Seq2(object):
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
class OperatorTestCase(unittest.TestCase):
def test_lt(self):
self.assertRaises(TypeError, operator.lt)
self.assertRaises(TypeError, operator.lt, 1j, 2j)
self.assertFalse(operator.lt(1, 0))
self.assertFalse(operator.lt(1, 0.0))
self.assertFalse(operator.lt(1, 1))
self.assertFalse(operator.lt(1, 1.0))
self.assertTrue(operator.lt(1, 2))
self.assertTrue(operator.lt(1, 2.0))
def test_le(self):
self.assertRaises(TypeError, operator.le)
self.assertRaises(TypeError, operator.le, 1j, 2j)
self.assertFalse(operator.le(1, 0))
self.assertFalse(operator.le(1, 0.0))
self.assertTrue(operator.le(1, 1))
self.assertTrue(operator.le(1, 1.0))
self.assertTrue(operator.le(1, 2))
self.assertTrue(operator.le(1, 2.0))
def test_eq(self):
class C(object):
def __eq__(self, other):
raise SyntaxError
self.assertRaises(TypeError, operator.eq)
self.assertRaises(SyntaxError, operator.eq, C(), C())
self.assertFalse(operator.eq(1, 0))
self.assertFalse(operator.eq(1, 0.0))
self.assertTrue(operator.eq(1, 1))
self.assertTrue(operator.eq(1, 1.0))
self.assertFalse(operator.eq(1, 2))
self.assertFalse(operator.eq(1, 2.0))
def test_ne(self):
class C(object):
def __ne__(self, other):
raise SyntaxError
self.assertRaises(TypeError, operator.ne)
self.assertRaises(SyntaxError, operator.ne, C(), C())
self.assertTrue(operator.ne(1, 0))
self.assertTrue(operator.ne(1, 0.0))
self.assertFalse(operator.ne(1, 1))
self.assertFalse(operator.ne(1, 1.0))
self.assertTrue(operator.ne(1, 2))
self.assertTrue(operator.ne(1, 2.0))
def test_ge(self):
self.assertRaises(TypeError, operator.ge)
self.assertRaises(TypeError, operator.ge, 1j, 2j)
self.assertTrue(operator.ge(1, 0))
self.assertTrue(operator.ge(1, 0.0))
self.assertTrue(operator.ge(1, 1))
self.assertTrue(operator.ge(1, 1.0))
self.assertFalse(operator.ge(1, 2))
self.assertFalse(operator.ge(1, 2.0))
def test_gt(self):
self.assertRaises(TypeError, operator.gt)
self.assertRaises(TypeError, operator.gt, 1j, 2j)
self.assertTrue(operator.gt(1, 0))
self.assertTrue(operator.gt(1, 0.0))
self.assertFalse(operator.gt(1, 1))
self.assertFalse(operator.gt(1, 1.0))
self.assertFalse(operator.gt(1, 2))
self.assertFalse(operator.gt(1, 2.0))
def test_abs(self):
self.assertRaises(TypeError, operator.abs)
self.assertRaises(TypeError, operator.abs, None)
self.assertEqual(operator.abs(-1), 1)
self.assertEqual(operator.abs(1), 1)
def test_add(self):
self.assertRaises(TypeError, operator.add)
self.assertRaises(TypeError, operator.add, None, None)
self.assertTrue(operator.add(3, 4) == 7)
def test_bitwise_and(self):
self.assertRaises(TypeError, operator.and_)
self.assertRaises(TypeError, operator.and_, None, None)
self.assertTrue(operator.and_(0xf, 0xa) == 0xa)
def test_concat(self):
self.assertRaises(TypeError, operator.concat)
self.assertRaises(TypeError, operator.concat, None, None)
self.assertTrue(operator.concat('py', 'thon') == 'python')
self.assertTrue(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
self.assertTrue(operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7])
self.assertTrue(operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7])
self.assertRaises(TypeError, operator.concat, 13, 29)
def test_countOf(self):
self.assertRaises(TypeError, operator.countOf)
self.assertRaises(TypeError, operator.countOf, None, None)
self.assertTrue(operator.countOf([1, 2, 1, 3, 1, 4], 3) == 1)
self.assertTrue(operator.countOf([1, 2, 1, 3, 1, 4], 5) == 0)
def test_delitem(self):
a = [4, 3, 2, 1]
self.assertRaises(TypeError, operator.delitem, a)
self.assertRaises(TypeError, operator.delitem, a, None)
self.assertTrue(operator.delitem(a, 1) is None)
self.assertTrue(a == [4, 2, 1])
def test_floordiv(self):
self.assertRaises(TypeError, operator.floordiv, 5)
self.assertRaises(TypeError, operator.floordiv, None, None)
self.assertTrue(operator.floordiv(5, 2) == 2)
def test_truediv(self):
self.assertRaises(TypeError, operator.truediv, 5)
self.assertRaises(TypeError, operator.truediv, None, None)
self.assertTrue(operator.truediv(5, 2) == 2.5)
def test_getitem(self):
a = range(10)
self.assertRaises(TypeError, operator.getitem)
self.assertRaises(TypeError, operator.getitem, a, None)
self.assertTrue(operator.getitem(a, 2) == 2)
def test_indexOf(self):
self.assertRaises(TypeError, operator.indexOf)
self.assertRaises(TypeError, operator.indexOf, None, None)
self.assertTrue(operator.indexOf([4, 3, 2, 1], 3) == 1)
self.assertRaises(ValueError, operator.indexOf, [4, 3, 2, 1], 0)
def test_invert(self):
self.assertRaises(TypeError, operator.invert)
self.assertRaises(TypeError, operator.invert, None)
self.assertEqual(operator.inv(4), -5)
def test_lshift(self):
self.assertRaises(TypeError, operator.lshift)
self.assertRaises(TypeError, operator.lshift, None, 42)
self.assertTrue(operator.lshift(5, 1) == 10)
self.assertTrue(operator.lshift(5, 0) == 5)
self.assertRaises(ValueError, operator.lshift, 2, -1)
def test_mod(self):
self.assertRaises(TypeError, operator.mod)
self.assertRaises(TypeError, operator.mod, None, 42)
self.assertTrue(operator.mod(5, 2) == 1)
def test_mul(self):
self.assertRaises(TypeError, operator.mul)
self.assertRaises(TypeError, operator.mul, None, None)
self.assertTrue(operator.mul(5, 2) == 10)
def test_neg(self):
self.assertRaises(TypeError, operator.neg)
self.assertRaises(TypeError, operator.neg, None)
self.assertEqual(operator.neg(5), -5)
self.assertEqual(operator.neg(-5), 5)
self.assertEqual(operator.neg(0), 0)
self.assertEqual(operator.neg(-0), 0)
def test_bitwise_or(self):
self.assertRaises(TypeError, operator.or_)
self.assertRaises(TypeError, operator.or_, None, None)
self.assertTrue(operator.or_(0xa, 0x5) == 0xf)
def test_pos(self):
self.assertRaises(TypeError, operator.pos)
self.assertRaises(TypeError, operator.pos, None)
self.assertEqual(operator.pos(5), 5)
self.assertEqual(operator.pos(-5), -5)
self.assertEqual(operator.pos(0), 0)
self.assertEqual(operator.pos(-0), 0)
def test_pow(self):
self.assertRaises(TypeError, operator.pow)
self.assertRaises(TypeError, operator.pow, None, None)
self.assertEqual(operator.pow(3,5), 3**5)
self.assertEqual(operator.__pow__(3,5), 3**5)
self.assertRaises(TypeError, operator.pow, 1)
self.assertRaises(TypeError, operator.pow, 1, 2, 3)
def test_rshift(self):
self.assertRaises(TypeError, operator.rshift)
self.assertRaises(TypeError, operator.rshift, None, 42)
self.assertTrue(operator.rshift(5, 1) == 2)
self.assertTrue(operator.rshift(5, 0) == 5)
self.assertRaises(ValueError, operator.rshift, 2, -1)
def test_contains(self):
self.assertRaises(TypeError, operator.contains)
self.assertRaises(TypeError, operator.contains, None, None)
self.assertTrue(operator.contains(range(4), 2))
self.assertFalse(operator.contains(range(4), 5))
def test_setitem(self):
a = list(range(3))
self.assertRaises(TypeError, operator.setitem, a)
self.assertRaises(TypeError, operator.setitem, a, None, None)
self.assertTrue(operator.setitem(a, 0, 2) is None)
self.assertTrue(a == [2, 1, 2])
self.assertRaises(IndexError, operator.setitem, a, 4, 2)
def test_sub(self):
self.assertRaises(TypeError, operator.sub)
self.assertRaises(TypeError, operator.sub, None, None)
self.assertTrue(operator.sub(5, 2) == 3)
def test_truth(self):
class C(object):
def __bool__(self):
raise SyntaxError
self.assertRaises(TypeError, operator.truth)
self.assertRaises(SyntaxError, operator.truth, C())
self.assertTrue(operator.truth(5))
self.assertTrue(operator.truth([0]))
self.assertFalse(operator.truth(0))
self.assertFalse(operator.truth([]))
def test_bitwise_xor(self):
self.assertRaises(TypeError, operator.xor)
self.assertRaises(TypeError, operator.xor, None, None)
self.assertTrue(operator.xor(0xb, 0xc) == 0x7)
def test_is(self):
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.assertRaises(TypeError, operator.is_)
self.assertTrue(operator.is_(a, b))
self.assertFalse(operator.is_(a,c))
def test_is_not(self):
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.assertRaises(TypeError, operator.is_not)
self.assertFalse(operator.is_not(a, b))
self.assertTrue(operator.is_not(a,c))
def test_attrgetter(self):
class A:
pass
a = A()
a.name = 'arthur'
f = operator.attrgetter('name')
self.assertEqual(f(a), 'arthur')
f = operator.attrgetter('rank')
self.assertRaises(AttributeError, f, a)
self.assertRaises(TypeError, operator.attrgetter, 2)
self.assertRaises(TypeError, operator.attrgetter)
# multiple gets
record = A()
record.x = 'X'
record.y = 'Y'
record.z = 'Z'
self.assertEqual(operator.attrgetter('x','z','y')(record), ('X', 'Z', 'Y'))
self.assertRaises(TypeError, operator.attrgetter, ('x', (), 'y'))
class C(object):
def __getattr__(self, name):
raise SyntaxError
self.assertRaises(SyntaxError, operator.attrgetter('foo'), C())
# recursive gets
a = A()
a.name = 'arthur'
a.child = A()
a.child.name = 'thomas'
f = operator.attrgetter('child.name')
self.assertEqual(f(a), 'thomas')
self.assertRaises(AttributeError, f, a.child)
f = operator.attrgetter('name', 'child.name')
self.assertEqual(f(a), ('arthur', 'thomas'))
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter('child.')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter('.child')
self.assertRaises(AttributeError, f, a)
a.child.child = A()
a.child.child.name = 'johnson'
f = operator.attrgetter('child.child.name')
self.assertEqual(f(a), 'johnson')
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertEqual(f(a), ('arthur', 'thomas', 'johnson'))
def test_itemgetter(self):
a = 'ABCDE'
f = operator.itemgetter(2)
self.assertEqual(f(a), 'C')
f = operator.itemgetter(10)
self.assertRaises(IndexError, f, a)
class C(object):
def __getitem__(self, name):
raise SyntaxError
self.assertRaises(SyntaxError, operator.itemgetter(42), C())
f = operator.itemgetter('name')
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, operator.itemgetter)
d = dict(key='val')
f = operator.itemgetter('key')
self.assertEqual(f(d), 'val')
f = operator.itemgetter('nonkey')
self.assertRaises(KeyError, f, d)
# example used in the docs
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
self.assertEqual(list(map(getcount, inventory)), [3, 2, 5, 1])
self.assertEqual(sorted(inventory, key=getcount),
[('orange', 1), ('banana', 2), ('apple', 3), ('pear', 5)])
# multiple gets
data = list(map(str, range(20)))
self.assertEqual(operator.itemgetter(2,10,5)(data), ('2', '10', '5'))
self.assertRaises(TypeError, operator.itemgetter(2, 'x', 5), data)
def test_methodcaller(self):
self.assertRaises(TypeError, operator.methodcaller)
class A:
def foo(self, *args, **kwds):
return args[0] + args[1]
def bar(self, f=42):
return f
a = A()
f = operator.methodcaller('foo')
self.assertRaises(IndexError, f, a)
f = operator.methodcaller('foo', 1, 2)
self.assertEqual(f(a), 3)
f = operator.methodcaller('bar')
self.assertEqual(f(a), 42)
self.assertRaises(TypeError, f, a, a)
f = operator.methodcaller('bar', f=5)
self.assertEqual(f(a), 5)
def test_inplace(self):
class C(object):
def __iadd__ (self, other): return "iadd"
def __iand__ (self, other): return "iand"
def __ifloordiv__(self, other): return "ifloordiv"
def __ilshift__ (self, other): return "ilshift"
def __imod__ (self, other): return "imod"
def __imul__ (self, other): return "imul"
def __ior__ (self, other): return "ior"
def __ipow__ (self, other): return "ipow"
def __irshift__ (self, other): return "irshift"
def __isub__ (self, other): return "isub"
def __itruediv__ (self, other): return "itruediv"
def __ixor__ (self, other): return "ixor"
def __getitem__(self, other): return 5 # so that C is a sequence
c = C()
self.assertEqual(operator.iadd (c, 5), "iadd")
self.assertEqual(operator.iand (c, 5), "iand")
self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
self.assertEqual(operator.ilshift (c, 5), "ilshift")
self.assertEqual(operator.imod (c, 5), "imod")
self.assertEqual(operator.imul (c, 5), "imul")
self.assertEqual(operator.ior (c, 5), "ior")
self.assertEqual(operator.ipow (c, 5), "ipow")
self.assertEqual(operator.irshift (c, 5), "irshift")
self.assertEqual(operator.isub (c, 5), "isub")
self.assertEqual(operator.itruediv (c, 5), "itruediv")
self.assertEqual(operator.ixor (c, 5), "ixor")
self.assertEqual(operator.iconcat (c, c), "iadd")
self.assertEqual(operator.__iadd__ (c, 5), "iadd")
self.assertEqual(operator.__iand__ (c, 5), "iand")
self.assertEqual(operator.__ifloordiv__(c, 5), "ifloordiv")
self.assertEqual(operator.__ilshift__ (c, 5), "ilshift")
self.assertEqual(operator.__imod__ (c, 5), "imod")
self.assertEqual(operator.__imul__ (c, 5), "imul")
self.assertEqual(operator.__ior__ (c, 5), "ior")
self.assertEqual(operator.__ipow__ (c, 5), "ipow")
self.assertEqual(operator.__irshift__ (c, 5), "irshift")
self.assertEqual(operator.__isub__ (c, 5), "isub")
self.assertEqual(operator.__itruediv__ (c, 5), "itruediv")
self.assertEqual(operator.__ixor__ (c, 5), "ixor")
self.assertEqual(operator.__iconcat__ (c, c), "iadd")
def test_main(verbose=None):
import sys
test_classes = (
OperatorTestCase,
)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
peralmq/olloapi
|
refs/heads/master
|
models.py
|
1
|
from google.appengine.ext import ndb
class Token(ndb.Model):
facebook_id = ndb.IntegerProperty()
facebook_access_token = ndb.StringProperty()
access_token = ndb.StringProperty()
class Location(ndb.Model):
longitude = ndb.FloatProperty()
latitude = ndb.FloatProperty()
message = ndb.TextProperty()
create_time = ndb.DateTimeProperty(auto_now_add=True)
class User(ndb.Model):
facebook_id = ndb.IntegerProperty()
name = ndb.StringProperty()
photo = ndb.StringProperty()
location = ndb.StructuredProperty(Location)
friends = ndb.JsonProperty()
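# Illustrative usage sketch (all values are hypothetical):
#
#     user = User(facebook_id=123, name='Jane',
#                 location=Location(longitude=18.06, latitude=59.33))
#     user.put()    # persists the entity in the App Engine datastore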
|
s0undt3ch/Deluge
|
refs/heads/master
|
deluge/plugins/AutoAdd/deluge/plugins/autoadd/webui.py
|
8
|
#
# webui.py
#
# Copyright (C) 2009 GazpachoKing <chase.sterling@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
# Copyright (C) 2009 Damien Churchill <damoxc@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
import logging
from deluge.ui.client import client
from deluge import component
from deluge.plugins.pluginbase import WebPluginBase
from common import get_resource
log = logging.getLogger(__name__)
class WebUI(WebPluginBase):
scripts = [get_resource("autoadd.js")]
def enable(self):
pass
def disable(self):
pass
|
pambros/CNN-2D-X-Ray-Catheter-Detection
|
refs/heads/master
|
python/common/System.py
|
1
|
import subprocess
import platform
def CallCommand(_command, _getOutput=False, _verbose=True):
    if _verbose:
        print(_command)
    if _getOutput:
ret = subprocess.Popen(_command, shell=True, stdout=subprocess.PIPE)
# tmp = ret.stdout.read()
out, err = ret.communicate()
# print(out)
# print(err)
# print(ret)
errcode = ret.returncode
assert errcode == 0
return out
# ret = os.system(_command)
ret = subprocess.call(_command, shell=True)
print(ret)
assert ret == 0
return
# If you launch an executable, do not put .exe at the end of _command; it
# will be appended automatically depending on the operating system.
def CallExecutable(_command, _args, _getOutput=False, _verbose=True):
if platform.system() == 'Windows':
# _command = SetWindowsPathNorm(_command)
# dir = os.path.dirname(_command)
# file = os.path.basename(_command)
# dir = os.path.abspath(dir)
# _command = '"' + dir + '\\' + file + ".exe" + '"'
_command = '"' + _command + ".exe" + '"'
# _command = "start /REALTIME \"\" \"" + _command + "\""
# _command = "start /HIGH \"\" \"" + _command + "\""
_command = _command + " " + _args
return CallCommand(_command, _getOutput, _verbose)
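# Example sketch (executable name is hypothetical): on Windows this runs
# '"myTool.exe" --input data.txt', elsewhere 'myTool --input data.txt':
#
#     output = CallExecutable("myTool", "--input data.txt", _getOutput=True)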
|
pataquets/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/__init__.py
|
6014
|
# Required for Python to search this directory for module files
|
huntxu/neutron
|
refs/heads/master
|
neutron/agent/l3/keepalived_state_change.py
|
3
|
# Copyright (c) 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import signal
import sys
import httplib2
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.agent.l3 import ha
from neutron.agent.linux import daemon
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ip_monitor
from neutron.agent.linux import utils as agent_utils
from neutron.common import config
from neutron.conf.agent.l3 import keepalived
LOG = logging.getLogger(__name__)
class KeepalivedUnixDomainConnection(agent_utils.UnixDomainHTTPConnection):
def __init__(self, *args, **kwargs):
# Old style super initialization is required!
agent_utils.UnixDomainHTTPConnection.__init__(
self, *args, **kwargs)
self.socket_path = (
ha.L3AgentKeepalivedStateChangeServer.
get_keepalived_state_change_socket_path(cfg.CONF))
class MonitorDaemon(daemon.Daemon):
def __init__(self, pidfile, router_id, user, group, namespace, conf_dir,
interface, cidr):
self.router_id = router_id
self.namespace = namespace
self.conf_dir = conf_dir
self.interface = interface
self.cidr = cidr
self.monitor = None
super(MonitorDaemon, self).__init__(pidfile, uuid=router_id,
user=user, group=group)
def run(self, run_as_root=False):
self.monitor = ip_monitor.IPMonitor(namespace=self.namespace,
run_as_root=run_as_root)
self.monitor.start()
# Only drop privileges if the process is currently running as root
# (The run_as_root variable name here is unfortunate - It means to
# use a root helper when the running process is NOT already running
        # as root.)
if not run_as_root:
super(MonitorDaemon, self).run()
for iterable in self.monitor:
self.parse_and_handle_event(iterable)
def parse_and_handle_event(self, iterable):
try:
event = ip_monitor.IPMonitorEvent.from_text(iterable)
if event.interface == self.interface and event.cidr == self.cidr:
new_state = 'master' if event.added else 'backup'
self.write_state_change(new_state)
self.notify_agent(new_state)
elif event.interface != self.interface and event.added:
# Send GARPs for all new router interfaces.
# REVISIT(jlibosva): keepalived versions 1.2.19 and below
# contain bug where gratuitous ARPs are not sent on receiving
# SIGHUP signal. This is a workaround to this bug. keepalived
# has this issue fixed since 1.2.20 but the version is not
# packaged in some distributions (RHEL/CentOS/Ubuntu Xenial).
# Remove this code once new keepalived versions are available.
self.send_garp(event)
except Exception:
LOG.exception('Failed to process or handle event for line %s',
iterable)
def write_state_change(self, state):
with open(os.path.join(
self.conf_dir, 'state'), 'w') as state_file:
state_file.write(state)
LOG.debug('Wrote router %s state %s', self.router_id, state)
def notify_agent(self, state):
resp, content = httplib2.Http().request(
# Note that the message is sent via a Unix domain socket so that
# the URL doesn't matter.
'http://127.0.0.1/',
headers={'X-Neutron-Router-Id': self.router_id,
'X-Neutron-State': state},
connection_type=KeepalivedUnixDomainConnection)
if resp.status != 200:
raise Exception(_('Unexpected response: %s') % resp)
LOG.debug('Notified agent router %s, state %s', self.router_id, state)
def send_garp(self, event):
"""Send gratuitous ARP for given event."""
ip_lib.send_ip_addr_adv_notif(
self.namespace,
event.interface,
str(netaddr.IPNetwork(event.cidr).ip),
log_exception=False
)
def _kill_monitor(self):
if self.monitor:
# Kill PID instead of calling self.monitor.stop() because the ip
# monitor is running as root while keepalived-state-change is not
# (dropped privileges after launching the ip monitor) and will fail
# with "Permission denied". Also, we can safely do this because the
# monitor was launched with respawn_interval=None so it won't be
# automatically respawned
agent_utils.kill_process(self.monitor.pid, signal.SIGKILL,
run_as_root=True)
def handle_sigterm(self, signum, frame):
self._kill_monitor()
super(MonitorDaemon, self).handle_sigterm(signum, frame)
def configure(conf):
config.init(sys.argv[1:])
conf.set_override('log_dir', cfg.CONF.conf_dir)
conf.set_override('debug', True)
conf.set_override('use_syslog', True)
config.setup_logging()
def main():
keepalived.register_cli_l3_agent_keepalived_opts()
keepalived.register_l3_agent_keepalived_opts()
configure(cfg.CONF)
MonitorDaemon(cfg.CONF.pid_file,
cfg.CONF.router_id,
cfg.CONF.user,
cfg.CONF.group,
cfg.CONF.namespace,
cfg.CONF.conf_dir,
cfg.CONF.monitor_interface,
cfg.CONF.monitor_cidr).start()
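# Illustrative invocation sketch (all option values are hypothetical): the
# L3 agent spawns this daemon roughly as
#
#     neutron-keepalived-state-change --router_id=<uuid> \
#         --namespace=qrouter-<uuid> --conf_dir=/var/lib/neutron/ha_confs/<uuid> \
#         --monitor_interface=ha-xxxx --monitor_cidr=169.254.0.1/24
#
# after which MonitorDaemon infers VRRP transitions from IP address events.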
|