code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/python
# Author: Rob Sanderson (azaroth@liv.ac.uk)
# Distributed and Usable under the GPL
# Version: 1.7
# Most Recent Changes: contexts, new modifier style for 1.1
#
# With thanks to Adam from IndexData and Mike Taylor for their valuable input
from shlex import shlex
from xml.sax.saxutils import escape
from xml.dom.minidom import Node, parseString
from PyZ3950.SRWDiagnostics import *
# Don't use cStringIO as it borks Unicode (apparently)
from StringIO import StringIO
import types
# Parsing strictness flags
errorOnEmptyTerm = 0 # index = "" (often meaningless)
errorOnQuotedIdentifier = 0 # "/foo/bar" = "" (unnecessary BNF restriction)
errorOnDuplicatePrefix = 0 # >a=b >a=c "" (impossible due to BNF)
fullResultSetNameCheck = 1 # srw.rsn=foo and srw.rsn=foo (mutant!!)

# Base values for CQL
serverChoiceRelation = "scr"                # relation used when the query gives none
serverChoiceIndex = "cql.serverchoice"      # index used when the query gives none
order = ['=', '>', '>=', '<', '<=', '<>']   # comparison symbols legal after a modifier name
modifierSeparator = "/"
booleans = ['and', 'or', 'not', 'prox']
# Context sets that resolve without an explicit >name="uri" prefix map
reservedPrefixes = {"srw" : "http://www.loc.gov/zing/cql/srw-indexes/v1.0/",
                    "cql" : "info:srw/cql-context-set/1/cql-v1.1"}
XCQLNamespace = "http://www.loc.gov/zing/cql/xcql/"
# End of 'configurable' stuff
class PrefixableObject:
    """Root object for triple and searchClause: any node that can carry a
    map of context-set prefixes (>name="uri")."""
    prefixes = {}
    parent = None
    config = None

    def __init__(self):
        self.prefixes = {}
        self.parent = None
        self.config = None

    def toXCQL(self, depth=0):
        """Serialise just this node's prefix map as an XCQL <prefixes> block."""
        space = " " * depth
        xml = ['%s<prefixes>\n' % (space)]
        for p in self.prefixes.keys():
            xml.append("%s <prefix>\n%s <name>%s</name>\n%s <identifier>%s</identifier>\n%s </prefix>\n" % (space, space, escape(p), space, escape(self.prefixes[p]), space))
        xml.append("%s</prefixes>\n" % (space))
        return ''.join(xml)

    def addPrefix(self, name, identifier):
        """Record a prefix->identifier map on this node.

        Raises Diagnostic45 when duplicate checking is enabled and the name
        is already defined here or is reserved.
        """
        # FIX: replaced Python2-only dict.has_key() with 'in'
        if errorOnDuplicatePrefix and (name in self.prefixes or name in reservedPrefixes):
            diag = Diagnostic45()
            diag.details = name
            raise diag
        self.prefixes[name] = identifier

    def resolvePrefix(self, name):
        """Resolve a prefix by climbing the parse tree; None when unknown."""
        if name in reservedPrefixes:
            return reservedPrefixes[name]
        elif name in self.prefixes:
            return self.prefixes[name]
        elif self.parent is not None:   # FIX: replaced removed '<>' operator
            return self.parent.resolvePrefix(name)
        elif self.config is not None:
            # Config is some sort of server config which specifies defaults
            return self.config.resolvePrefix(name)
        else:
            # Top of tree, no config, no resolution -> unknown indexset.
            # Returning None (instead of raising Diagnostic15) lets clients
            # build queries without declaring every prefix.
            return None
class PrefixedObject:
    """Root object for relation, relationModifier and index: a value that may
    carry a 'prefix.' context-set qualifier."""
    prefix = ""
    prefixURI = ""
    value = ""
    parent = None

    def __init__(self, val):
        # All prefixed things are case insensitive
        val = val.lower()
        if val and val[0] == '"' and val[-1] == '"':
            if errorOnQuotedIdentifier:
                diag = Diagnostic14()
                diag.details = val
                raise diag
            else:
                val = val[1:-1]
        self.value = val
        self.splitValue()

    def __str__(self):
        if self.prefix:
            return "%s.%s" % (self.prefix, self.value)
        else:
            return self.value

    def splitValue(self):
        """Split 'prefix.name' into self.prefix and self.value.

        Raises Diagnostic15 for multiple dots or an empty prefix part.
        """
        f = self.value.find(".")
        if self.value.count('.') > 1:
            diag = Diagnostic15()
            diag.details = "Multiple '.' characters: %s" % (self.value)
            raise diag
        elif f == 0:
            diag = Diagnostic15()
            # BUGFIX: original referenced undefined name 'irt.index' here,
            # which raised NameError instead of the intended diagnostic.
            diag.details = "Null indexset: %s" % (self.value)
            raise diag
        elif f >= 0:
            self.prefix = self.value[:f].lower()
            self.value = self.value[f+1:].lower()

    def resolvePrefix(self):
        """Resolve (and cache) this object's prefix via its parent node."""
        if not self.prefixURI:
            self.prefixURI = self.parent.resolvePrefix(self.prefix)
        return self.prefixURI
class ModifiableObject:
    """Mixin giving keyed access to a list of ModifierClause objects
    (treat modifiers as keys on boolean/relation)."""
    # NOTE: class-level default; subclasses assign their own instance list.
    modifiers = []

    def __getitem__(self, k):
        """Return a modifier by position (int key) or by type name (string
        key, matching either 'prefix.name' or the bare name); None if absent."""
        # FIX: replaced Python2-only 'type(k) == types.IntType' check
        if isinstance(k, int):
            try:
                return self.modifiers[k]
            except IndexError:   # FIX: was a bare except
                return None
        for m in self.modifiers:
            if (str(m.type) == k or m.type.value == k):
                return m
        return None
class Triple (PrefixableObject):
    "Object to represent a CQL triple (leftOperand boolean rightOperand)"
    leftOperand = None
    boolean = None
    rightOperand = None

    def toXCQL(self, depth=0):
        "Create the XCQL representation of the object"
        space = " " * depth
        if (depth == 0):
            # Root element carries the XCQL namespace
            xml = ['<triple xmlns="%s">\n' % (XCQLNamespace)]
        else:
            xml = ['%s<triple>\n' % (space)]
        if self.prefixes:
            xml.append(PrefixableObject.toXCQL(self, depth+1))
        xml.append(self.boolean.toXCQL(depth+1))
        xml.append("%s <leftOperand>\n" % (space))
        xml.append(self.leftOperand.toXCQL(depth+2))
        xml.append("%s </leftOperand>\n" % (space))
        xml.append("%s <rightOperand>\n" % (space))
        xml.append(self.rightOperand.toXCQL(depth+2))
        xml.append("%s </rightOperand>\n" % (space))
        xml.append("%s</triple>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        """Return the parenthesised CQL string form, including prefix maps."""
        txt = []
        if (self.prefixes):
            for p in self.prefixes.keys():
                if p != '':   # FIX: replaced removed '<>' operator
                    txt.append('>%s="%s"' % (p, self.prefixes[p]))
                else:
                    # Anonymous (default) prefix map
                    txt.append('>"%s"' % (self.prefixes[p]))
            prefs = ' '.join(txt)
            return "(%s %s %s %s)" % (prefs, self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())
        else:
            return "(%s %s %s)" % (self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())

    def getResultSetId(self, top=None):
        """If the whole tree is just the same cql.resultSetId clause AND-ed
        with itself, return that result set name; otherwise ''."""
        if fullResultSetNameCheck == 0 or self.boolean.value in ['not', 'prox']:
            return ""
        if top is None:   # FIX: identity comparison for None
            topLevel = 1
            top = self
        else:
            topLevel = 0
        # Iterate over operands and build a list of their result-set ids
        rsList = []
        if isinstance(self.leftOperand, Triple):
            rsList.extend(self.leftOperand.getResultSetId(top))
        else:
            rsList.append(self.leftOperand.getResultSetId(top))
        if isinstance(self.rightOperand, Triple):
            rsList.extend(self.rightOperand.getResultSetId(top))
        else:
            rsList.append(self.rightOperand.getResultSetId(top))
        if topLevel == 1:
            # Check all elements are the same, if so we're a fubar form of present
            if (len(rsList) == rsList.count(rsList[0])):
                return rsList[0]
            else:
                return ""
        else:
            return rsList
class SearchClause (PrefixableObject):
    "Object to represent a CQL searchClause (index relation term)"
    index = None
    relation = None
    term = None

    def __init__(self, ind, rel, t):
        PrefixableObject.__init__(self)
        self.index = ind
        self.relation = rel
        self.term = t
        # Children need a parent pointer for prefix resolution
        ind.parent = self
        rel.parent = self
        t.parent = self

    def toXCQL(self, depth=0):
        "Produce XCQL version of the object"
        space = " " * depth
        if (depth == 0):
            xml = ['<searchClause xmlns="%s">\n' % (XCQLNamespace)]
        else:
            xml = ['%s<searchClause>\n' % (space)]
        if self.prefixes:
            xml.append(PrefixableObject.toXCQL(self, depth+1))
        xml.append(self.index.toXCQL(depth+1))
        xml.append(self.relation.toXCQL(depth+1))
        xml.append(self.term.toXCQL(depth+1))
        xml.append("%s</searchClause>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        """Return the CQL string form, with prefix maps and a quoted term."""
        text = []
        for p in self.prefixes.keys():
            if p != '':   # FIX: replaced removed '<>' operator
                text.append('>%s="%s"' % (p, self.prefixes[p]))
            else:
                text.append('>"%s"' % (self.prefixes[p]))
        text.append('%s %s "%s"' % (self.index, self.relation.toCQL(), self.term))
        return ' '.join(text)

    def getResultSetId(self, top=None):
        """Return the term when this clause is cql.resultSetId = X, else ''."""
        idx = self.index
        idx.resolvePrefix()
        if (idx.prefixURI == reservedPrefixes['cql'] and idx.value.lower() == 'resultsetid'):
            return self.term.value
        else:
            return ""
class Index(PrefixedObject):
    "Object to represent a CQL index"

    def toXCQL(self, depth=0):
        """Render this index as an XCQL <index> element; the namespace
        declaration is emitted only at the document root (depth 0)."""
        pad = " " * depth
        ns = ' xmlns="%s"' % (XCQLNamespace) if depth == 0 else ""
        return "%s<index%s>%s</index>\n" % (pad, ns, escape(str(self)))

    def toCQL(self):
        """The CQL form is simply the (possibly prefixed) index name."""
        return str(self)
class Relation(PrefixedObject, ModifiableObject):
    "Object to represent a CQL relation (optionally with /modifiers)"

    def __init__(self, rel, mods=None):
        # BUGFIX: the default 'mods=[]' was a single mutable list shared by
        # every instance constructed without modifiers.
        if mods is None:
            mods = []
        # Bare relations default to the cql context set
        self.prefix = "cql"
        PrefixedObject.__init__(self, rel)
        self.modifiers = mods
        for m in mods:
            m.parent = self

    def toXCQL(self, depth=0):
        "Create XCQL representation of object"
        if (depth == 0):
            ns = ' xmlns="%s"' % (XCQLNamespace)
        else:
            ns = ""
        space = " " * depth
        xml = ["%s<relation%s>\n" % (space, ns)]
        xml.append("%s <value>%s</value>\n" % (space, escape(self.value)))
        if self.modifiers:
            xml.append("%s <modifiers>\n" % (space))
            for m in self.modifiers:
                xml.append(m.toXCQL(depth+2))
            xml.append("%s </modifiers>\n" % (space))
        xml.append("%s</relation>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        """Return 'relation/mod1/mod2...' string form."""
        txt = [self.value]
        txt.extend(map(str, self.modifiers))
        return '/'.join(txt)
class Term:
    """A CQL search term: validates, unquotes and unescapes the raw token."""
    value = ""

    def __init__(self, v):
        if v != "":
            # A bare relation symbol cannot be an unquoted term
            if v in ['>=', '<=', '>', '<', '<>', "/", '=']:
                diag = Diagnostic25()
                # BUGFIX: original set diag.details = self.value, but
                # self.value is not assigned until the end of __init__.
                diag.details = v
                raise diag
            # Check existence of meaningful term (not just ^ anchors)
            nonanchor = 0
            for c in v:
                if c != "^":
                    nonanchor = 1
                    break
            if not nonanchor:
                diag = Diagnostic32()
                diag.details = "Only anchoring charater(s) in term: " + v
                raise diag
            # Unquote and unescape embedded \" quotes
            if v[0] == '"' and v[-1] == '"':
                v = v[1:-1]
                v = v.replace('\\"', '"')
            if not v and errorOnEmptyTerm:
                diag = Diagnostic27()
                raise diag
            # Check for badly placed backslashes: only \? \\ \* \^ are legal.
            # BUGFIX: the original loop referenced an undefined name 'irt',
            # never advanced its search index (an infinite loop once 'irt'
            # existed), and indexed past the end for a trailing backslash.
            idx = v.find("\\")
            while idx > -1:
                if idx + 1 >= len(v) or v[idx+1] not in ['?', '\\', '*', '^']:
                    diag = Diagnostic26()
                    diag.details = v
                    raise diag
                # Resume the scan after the two-character escape sequence
                idx = v.find("\\", idx + 2)
        elif errorOnEmptyTerm:
            diag = Diagnostic27()
            raise diag
        self.value = v

    def __str__(self):
        return self.value

    def toXCQL(self, depth=0):
        """Render as an XCQL <term> element (namespace only at the root)."""
        if depth == 0:
            ns = ' xmlns="%s"' % (XCQLNamespace)
        else:
            ns = ""
        return "%s<term%s>%s</term>\n" % (" "*depth, ns, escape(self.value))
class Boolean(ModifiableObject):
    "Object to represent a CQL boolean (and/or/not/prox, with modifiers)"
    value = ""
    parent = None

    def __init__(self, bool, mods=None):
        # BUGFIX: the default 'mods=[]' was a single mutable list shared by
        # every instance constructed without modifiers.
        if mods is None:
            mods = []
        self.value = bool
        self.modifiers = mods
        self.parent = None

    def toXCQL(self, depth=0):
        "Create XCQL representation of object"
        space = " " * depth
        xml = ["%s<boolean>\n" % (space)]
        xml.append("%s <value>%s</value>\n" % (space, escape(self.value)))
        if self.modifiers:
            xml.append("%s <modifiers>\n" % (space))
            for m in self.modifiers:
                xml.append(m.toXCQL(depth+2))
            xml.append("%s </modifiers>\n" % (space))
        xml.append("%s</boolean>\n" % (space))
        return ''.join(xml)

    def toCQL(self):
        """Return 'bool/mod1/mod2...' string form."""
        txt = [self.value]
        for m in self.modifiers:
            txt.append(m.toCQL())
        return '/'.join(txt)

    def resolvePrefix(self, name):
        # Booleans are not prefixable; delegate to the parent triple
        return self.parent.resolvePrefix(name)
class ModifierType(PrefixedObject):
    # Same as index, but we'll XCQLify in ModifierClause
    # Bare modifier names default to the 'cql' context set
    parent = None
    prefix = "cql"
class ModifierClause:
    """A relation/boolean modifier: a type, plus an optional
    comparison symbol and value (e.g. distance<=3)."""
    parent = None
    type = None
    comparison = ""
    value = ""

    def __init__(self, type, comp="", val=""):
        self.type = ModifierType(type)
        self.type.parent = self
        self.comparison = comp
        self.value = val

    def __str__(self):
        """CQL form: 'type' alone, or 'type<comparison>value'."""
        if not self.value:
            return "%s" % (str(self.type))
        return "%s%s%s" % (str(self.type), self.comparison, self.value)

    def toXCQL(self, depth=0):
        """Render as an XCQL <modifier>, expanded only when a value exists."""
        outer = " " * depth
        inner = " " * (depth+1)
        if not self.value:
            return "%s<modifier><type>%s</type></modifier>\n" % (outer, escape(str(self.type)))
        parts = (outer, inner, escape(str(self.type)), inner,
                 escape(self.comparison), inner, escape(self.value), outer)
        return "%s<modifier>\n%s<type>%s</type>\n%s<comparison>%s</comparison>\n%s<value>%s</value>\n%s</modifier>\n" % parts

    def toCQL(self):
        return str(self)

    def resolvePrefix(self, name):
        # Need to skip parent, which has its own resolvePrefix
        # (e.g. boolean or relation, neither of which is prefixable)
        return self.parent.parent.resolvePrefix(name)
# Requires changes for: <= >= <>, and escaped \" in "
# From shlex.py (std library for 2.2+)
class CQLshlex(shlex):
    "shlex with additions for CQL parsing"
    # CQL uses only double quotes and has no comment syntax
    quotes = '"'
    commenters = ""
    # Single-token pushback used when a token boundary is discovered mid-read
    nextToken = ""

    def __init__(self, thing):
        shlex.__init__(self, thing)
        # Treat most punctuation (and all 8-bit characters) as word
        # characters so terms need not be quoted
        self.wordchars += "!@#$%^&*-+{}[];,.?|~`:\\"
        self.wordchars += ''.join(map(chr, range(128,254)))

    def read_token(self):
        "Read a token from the input stream (no pushback or inclusions)"
        # Character-driven state machine; self.state is one of:
        #   None (EOF), ' ' (between tokens), 'a' (in a word),
        #   '<' (after < or >), or a quote character (inside a string)
        while 1:
            if (self.nextToken != ""):
                # Emit the pushed-back token first
                self.token = self.nextToken
                self.nextToken = ""
                # Bah. SUPER ugly non portable
                if self.token == "/":
                    self.state = ' '
                    break
            nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno = self.lineno + 1
            if self.debug >= 3:
                print "shlex: in state ", repr(self.state), " I see character:", repr(nextchar)
            if self.state is None:
                self.token = ''  # past end of file
                break
            elif self.state == ' ':
                # Between tokens: classify the next character
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in whitespace state"
                    if self.token:
                        break  # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.quotes:
                    self.token = nextchar
                    self.state = nextchar
                elif nextchar in ['<', '>']:
                    # Possible start of a two-character relation (<=, >=, <>)
                    self.token = nextchar
                    self.state = '<'
                else:
                    self.token = nextchar
                    if self.token:
                        break  # emit current token
                    else:
                        continue
            elif self.state == '<':
                # Only accumulate <=, >= or <>
                if self.token == ">" and nextchar == "=":
                    self.token = self.token + nextchar
                    self.state = ' '
                    break
                elif self.token == "<" and nextchar in ['>', '=']:
                    self.token = self.token + nextchar
                    self.state = ' '
                    break
                elif not nextchar:
                    self.state = None
                    break
                elif nextchar == "/":
                    # Emit the relation; push back the modifier separator
                    self.state = "/"
                    self.nextToken = "/"
                    break
                elif nextchar in self.wordchars:
                    self.state='a'
                    self.nextToken = nextchar
                    break
                elif nextchar in self.quotes:
                    self.state=nextchar
                    self.nextToken = nextchar
                    break
                else:
                    self.state = ' '
                    break
            elif self.state in self.quotes:
                # Inside a quoted string: accumulate until the closing quote
                self.token = self.token + nextchar
                # Allow escaped quotes
                if nextchar == self.state and self.token[-2] != '\\':
                    self.state = ' '
                    break
                elif not nextchar:  # end of file
                    if self.debug >= 2:
                        print "shlex: I see EOF in quotes state"
                    # Override SHLEX's ValueError to throw diagnostic
                    diag = Diagnostic14()
                    diag.details = self.token[:-1]
                    raise diag
            elif self.state == 'a':
                # Accumulating a word token
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print "shlex: I see whitespace in word state"
                    self.state = ' '
                    if self.token:
                        break  # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno = self.lineno + 1
                elif nextchar in self.wordchars or nextchar in self.quotes:
                    self.token = self.token + nextchar
                elif nextchar in ['>', '<']:
                    # Relation symbol terminates the word; hand it to '<' state
                    self.nextToken = nextchar
                    self.state = '<'
                    break
                else:
                    self.pushback = [nextchar] + self.pushback
                    if self.debug >= 2:
                        print "shlex: I see punctuation in word state"
                    self.state = ' '
                    if self.token:
                        break  # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        if self.debug > 1:
            if result:
                print "shlex: raw token=" + `result`
            else:
                print "shlex: raw token=EOF"
        return result
class CQLParser:
    "Token parser to create object structure for CQL"
    # Two-token lookahead: currentToken is consumed, nextToken is peeked
    parser = ""
    currentToken = ""
    nextToken = ""

    def __init__(self, p):
        """ Initialise with shlex parser """
        self.parser = p
        self.fetch_token()  # Fetches to next
        self.fetch_token()  # Fetches to curr

    def is_boolean(self, token):
        "Is the token a boolean"
        token = token.lower()
        return token in booleans

    def fetch_token(self):
        """ Read ahead one token """
        tok = self.parser.get_token()
        self.currentToken = self.nextToken
        self.nextToken = tok

    def prefixes(self):
        "Create prefixes dictionary from leading >name=\"uri\" maps"
        prefs = {}
        while (self.currentToken == ">"):
            # Strip off maps
            self.fetch_token()
            if self.nextToken == "=":
                # Named map: >name = "identifier"
                name = self.currentToken
                self.fetch_token()  # = is current
                self.fetch_token()  # id is current
                identifier = self.currentToken
                self.fetch_token()
            else:
                # Anonymous map: >"identifier" (empty name)
                name = ""
                identifier = self.currentToken
                self.fetch_token()
            if (errorOnDuplicatePrefix and prefs.has_key(name)):
                # Error condition
                diag = Diagnostic45()
                diag.details = name
                raise diag;
            # Strip surrounding quotes from the identifier
            if len(identifier) > 1 and identifier[0] == '"' and identifier[-1] == '"':
                identifier = identifier[1:-1]
            prefs[name.lower()] = identifier
        return prefs

    def query(self):
        """ Parse query: subQuery (boolean subQuery)*, left associative """
        prefs = self.prefixes()
        left = self.subQuery()
        while 1:
            if not self.currentToken:
                break;
            bool = self.is_boolean(self.currentToken)
            if bool:
                boolobject = self.boolean()
                right = self.subQuery()
                # Setup Left Object: fold into a new triple
                trip = tripleType()
                trip.leftOperand = left
                trip.boolean = boolobject
                trip.rightOperand = right
                left.parent = trip
                right.parent = trip
                boolobject.parent = trip
                left = trip
            else:
                break;
        for p in prefs.keys():
            left.addPrefix(p, prefs[p])
        return left

    def subQuery(self):
        """ Find either query or clause """
        if self.currentToken == "(":
            self.fetch_token()  # Skip (
            object = self.query()
            if self.currentToken == ")":
                self.fetch_token()  # Skip )
            else:
                # Unbalanced parenthesis
                diag = Diagnostic13()
                diag.details = self.currentToken
                raise diag
        else:
            prefs = self.prefixes()
            if (prefs):
                object = self.query()
                for p in prefs.keys():
                    object.addPrefix(p, prefs[p])
            else:
                object = self.clause()
        return object

    def clause(self):
        """ Find searchClause """
        bool = self.is_boolean(self.nextToken)
        if not bool and not (self.nextToken in [')', '(', '']):
            # Full form: index relation term
            index = indexType(self.currentToken)
            self.fetch_token()  # Skip Index
            rel = self.relation()
            if (self.currentToken == ''):
                diag = Diagnostic10()
                diag.details = "Expected Term, got end of query."
                raise(diag)
            term = termType(self.currentToken)
            self.fetch_token()  # Skip Term
            irt = searchClauseType(index, rel, term)
        elif self.currentToken and (bool or self.nextToken in [')', '']):
            # Term only: fall back to server choice index/relation
            irt = searchClauseType(indexType(serverChoiceIndex), relationType(serverChoiceRelation), termType(self.currentToken))
            self.fetch_token()
        elif self.currentToken == ">":
            prefs = self.prefixes()
            # iterate to get object
            object = self.clause()
            for p in prefs.keys():
                object.addPrefix(p, prefs[p]);
            return object
        else:
            diag = Diagnostic10()
            diag.details = "Expected Boolean or Relation but got: " + self.currentToken
            raise diag
        return irt

    def modifiers(self):
        """Collect /name or /name<comp>value modifiers after a relation/boolean."""
        mods = []
        while (self.currentToken == modifierSeparator):
            self.fetch_token()
            mod = self.currentToken
            mod = mod.lower()
            if (mod == modifierSeparator):
                diag = Diagnostic20()
                diag.details = "Null modifier"
                raise diag
            self.fetch_token()
            comp = self.currentToken
            if (comp in order):
                # Modifier has a comparison and value
                self.fetch_token()
                value = self.currentToken
                self.fetch_token()
            else:
                comp = ""
                value = ""
            mods.append(ModifierClause(mod, comp, value))
        return mods

    def boolean(self):
        """ Find boolean """
        self.currentToken = self.currentToken.lower()
        if self.currentToken in booleans:
            bool = booleanType(self.currentToken)
            self.fetch_token()
            bool.modifiers = self.modifiers()
            for b in bool.modifiers:
                b.parent = bool
        else:
            diag = Diagnostic37()
            diag.details = self.currentToken
            raise diag
        return bool

    def relation(self):
        """ Find relation """
        self.currentToken = self.currentToken.lower()
        rel = relationType(self.currentToken)
        self.fetch_token()
        rel.modifiers = self.modifiers()
        for r in rel.modifiers:
            r.parent = rel
        return rel
class XCQLParser:
    """ Parser for XCQL using some very simple DOM """

    def firstChildElement(self, elem):
        """ Find first child which is an Element """
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                return c
        return None

    def firstChildData(self,elem):
        """ Find first child which is Data (a text node) """
        for c in elem.childNodes:
            if c.nodeType == Node.TEXT_NODE:
                return c
        return None

    def searchClause(self, elem):
        """ Process a <searchClause> """
        # NOTE(review): searchClauseType() is called with no arguments here,
        # but the Python SearchClause.__init__ requires three; this path
        # presumably relies on the CQLUtils C classes — confirm.
        sc = searchClauseType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "index":
                    sc.index = indexType(self.firstChildData(c).data.lower())
                elif c.localName == "term":
                    sc.term = termType(self.firstChildData(c).data)
                elif c.localName == "relation":
                    sc.relation = self.relation(c)
                elif c.localName == "prefixes":
                    sc.prefixes = self.prefixes(c)
                else:
                    raise(ValueError, c.localName)
        return sc

    def triple(self, elem):
        """ Process a <triple> """
        trip = tripleType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "boolean":
                    trip.boolean = self.boolean(c)
                elif c.localName == "prefixes":
                    trip.prefixes = self.prefixes(c)
                elif c.localName == "leftOperand":
                    c2 = self.firstChildElement(c)
                    if c2.localName == "searchClause":
                        trip.leftOperand = self.searchClause(c2)
                    else:
                        trip.leftOperand = self.triple(c2)
                else:
                    # Anything else is treated as the rightOperand
                    c2 = self.firstChildElement(c)
                    if c2.localName == "searchClause":
                        trip.rightOperand = self.searchClause(c2)
                    else:
                        trip.rightOperand = self.triple(c2)
        return trip

    def relation(self, elem):
        """ Process a <relation> """
        rel = relationType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "value":
                    rel.value = c.firstChild.data.lower()
                elif c.localName == "modifiers":
                    mods = []
                    for c2 in c.childNodes:
                        if c2.nodeType == Node.ELEMENT_NODE:
                            if c2.localName == "modifier":
                                for c3 in c2.childNodes:
                                    if c3.localName == "value":
                                        # NOTE(review): reads text of c2, not
                                        # c3 — looks intentional but verify
                                        val = self.firstChildData(c2).data.lower()
                                        mods.append(val)
                    rel.modifiers = mods
        return rel

    def boolean(self, elem):
        "Process a <boolean>"
        bool = booleanType()
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                if c.localName == "value":
                    bool.value = self.firstChildData(c).data.lower()
                else:
                    # Can be in any order, so we need to extract, then order
                    mods = {}
                    for c2 in c.childNodes:
                        if c2.nodeType == Node.ELEMENT_NODE:
                            if c2.localName == "modifier":
                                type = ""
                                value = ""
                                for c3 in c2.childNodes:
                                    if c3.nodeType == Node.ELEMENT_NODE:
                                        if c3.localName == "value":
                                            value = self.firstChildData(c3).data.lower()
                                        elif c3.localName == "type":
                                            type = self.firstChildData(c3).data
                                mods[type] = value
                    modlist = []
                    # NOTE(review): booleanModifierTypes is not defined
                    # anywhere in this module — this branch would raise
                    # NameError; confirm where it is meant to come from.
                    for t in booleanModifierTypes[1:]:
                        if mods.has_key(t):
                            modlist.append(mods[t])
                        else:
                            modlist.append('')
                    bool.modifiers = modlist
        return bool

    def prefixes(self, elem):
        "Process <prefixes> into a {name: identifier} dict"
        prefs = {}
        for c in elem.childNodes:
            if c.nodeType == Node.ELEMENT_NODE:
                # prefix
                name = ""
                identifier = ""
                for c2 in c.childNodes:
                    if c2.nodeType == Node.ELEMENT_NODE:
                        if c2.localName == "name":
                            name = self.firstChildData(c2).data.lower()
                        elif c2.localName == "identifier":
                            identifier = self.firstChildData(c2).data
                prefs[name] = identifier
        return prefs
def xmlparse(s):
    """ API. Return a searchClause/triple object from an XCQL XML string """
    document = parseString(s)
    return xcqlparse(document.firstChild)
def xcqlparse(query):
    """ API. Return a searchClause/triple object from XML DOM objects"""
    # Requires only properties of objects so we don't care how they're generated
    walker = XCQLParser()
    if query.localName == "searchClause":
        return walker.searchClause(query)
    return walker.triple(query)
def parse(query):
    """ API. Return a searchClause/triple object from a CQL string.

    Raises SRWDiagnostic subclasses for any lexical or syntactic error.
    """
    try:
        query = query.encode("utf-8")
    except (UnicodeError, AttributeError):
        # FIX: was a bare except that also hid unrelated failures;
        # AttributeError covers non-string input, UnicodeError bad bytes.
        diag = Diagnostic10()
        diag.details = "Cannot parse non utf-8 characters"
        raise diag
    q = StringIO(query)
    lexer = CQLshlex(q)
    parser = CQLParser(lexer)
    object = parser.query()
    # The whole input must have been consumed by the grammar
    if parser.currentToken != '':
        diag = Diagnostic10()
        diag.details = "Unprocessed tokens remain: " + repr(parser.currentToken)
        raise diag
    return object
# Assign our objects to generate: the parser builds instances through these
# factory aliases so implementations can be swapped wholesale.
tripleType = Triple
booleanType = Boolean
relationType = Relation
searchClauseType = SearchClause
modifierClauseType = ModifierClause
modifierTypeType = ModifierType
indexType = Index
termType = Term
# Prefer the optional C-accelerated classes from CQLUtils when available.
try:
    from CQLUtils import *
    tripleType = CTriple
    booleanType = CBoolean
    relationType = CRelation
    searchClauseType = CSearchClause
    modifierClauseType = CModifierClause
    modifierTypeType = CModifierType
    indexType = CIndex
    termType = CTerm
except:
    # Nested scopes. Utils needs our classes to parent
    # We need its classes to build (maybe)
    # NOTE(review): bare except deliberately swallows any failure here;
    # consider narrowing to ImportError.
    pass
if (__name__ == "__main__"):
    # Smoke test: read one CQL query from stdin, print its XCQL form,
    # or a formatted diagnostic on failure.
    import sys;
    s = sys.stdin.readline()
    try:
        q = parse(s);
    except SRWDiagnostic, diag:
        # Print a full version, not just str()
        print "Diagnostic Generated."
        print " Code: " + str(diag.code)
        print " Details: " + str(diag.details)
        print " Message: " + str(diag.message)
    else:
        # Trailing newline is stripped from the serialisation
        print q.toXCQL()[:-1];
| audaciouscode/Books-Mac-OS-X | Versions/Books_3.0b6/OPAC SBN.plugin/Contents/Resources/PyZ3950/CQLParser.py | Python | mit | 33,090 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ansible module metadata: maturity level and support channel
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_sip
short_description: BIG-IP ltm profile sip module
description:
- Configures a Session Initiation Protocol (SIP) profile.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
alg_enable:
description:
- Enables or disables the SIP ALG (Application Level Gateway) feature.
default: disabled
choices: ['disabled', 'enabled']
app_service:
description:
- Specifies the name of the application service to which the object belongs.
community:
description:
- Specifies the community to which you want to assign the virtual server that you associate with this
profile.
defaults_from:
description:
- Specifies the profile that you want to use as the parent profile.
default: sip
description:
description:
- User defined description.
dialog_aware:
description:
- Enables or disables the ability for the system to be aware of unauthorized use of the SIP dialog.
default: disabled
choices: ['disabled', 'enabled']
dialog_establishment_timeout:
description:
- Indicates the timeout value for dialog establishment in a sip session.
default: 10
enable_sip_firewall:
description:
- Indicates whether to enable SIP firewall functionality or not.
default: no
choices: ['no', 'yes']
insert_record_route_header:
description:
- Enables or disables the insertion of a Record-Route header, which indicates the next hop for the following
SIP request messages.
default: disabled
choices: ['disabled', 'enabled']
insert_via_header:
description:
- Enables or disables the insertion of a Via header, which indicates where the message originated.
default: disabled
choices: ['disabled', 'enabled']
log_profile:
description:
- Specify the name of the ALG log profile which controls the logging of ALG.
log_publisher:
description:
- Specify the name of the log publisher which logs translation events.
max_media_sessions:
description:
- Indicates the maximum number of SDP media sessions that the BIG-IP system accepts.
default: 6
max_registrations:
description:
- Indicates the maximum number of registrations (the maximum allowable REGISTER messages that can be
recorded) that the BIG-IP system accepts.
default: 100
max_sessions_per_registration:
description:
- Indicates the maximum number of calls or sessions can be made by a user for a single registration that the
BIG-IP system accepts.
default: 50
max_size:
description:
- Specifies the maximum SIP message size that the BIG-IP system accepts.
default: 65535
name:
description:
- Specifies a unique name for the component.
required: true
partition:
description:
- Displays the administrative partition within which the component resides.
registration_timeout:
description:
- Indicates the timeout value for a sip registration.
default: 3600
rtp_proxy_style:
description:
- Indicates the style in which the RTP will proxy the data.
default: symmetric
choices: ['symmetric', 'restricted-by-ip-address', 'any-location']
secure_via_header:
description:
- Enables or disables the insertion of a Secure Via header, which indicates where the message originated.
default: disabled
choices: ['disabled', 'enabled']
security:
description:
- Enables or disables security for the SIP profile.
default: disabled
choices: ['disabled', 'enabled']
sip_session_timeout:
description:
- Indicates the timeout value for a sip session.
default: 300
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
terminate_on_bye:
description:
- Enables or disables the termination of a connection when a BYE transaction finishes.
default: enabled
choices: ['disabled', 'enabled']
user_via_header:
description:
- Enables or disables the insertion of a Via header specified by a system administrator.
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Profile sip
f5bigip_ltm_profile_sip:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_sip_profile
partition: Common
description: My sip profile
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_POLAR_CHOICES
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
    """Builds the Ansible argument spec for the LTM SIP profile module."""

    @property
    def argument_spec(self):
        """Profile-specific options merged with the common F5 provider and
        named-object arguments."""
        spec = {
            'alg_enable': {'type': 'str', 'choices': F5_ACTIVATION_CHOICES},
            'app_service': {'type': 'str'},
            'community': {'type': 'str'},
            'defaults_from': {'type': 'str'},
            'description': {'type': 'str'},
            'dialog_aware': {'type': 'str', 'choices': F5_ACTIVATION_CHOICES},
            'dialog_establishment_timeout': {'type': 'int'},
            'enable_sip_firewall': {'type': 'str', 'choices': F5_POLAR_CHOICES},
            'insert_record_route_header': {'type': 'str', 'choices': F5_ACTIVATION_CHOICES},
            'insert_via_header': {'type': 'str', 'choices': F5_ACTIVATION_CHOICES},
            'log_profile': {'type': 'str'},
            'log_publisher': {'type': 'str'},
            'max_media_sessions': {'type': 'int'},
            'max_registrations': {'type': 'int'},
            'max_sessions_per_registration': {'type': 'int'},
            'max_size': {'type': 'int'},
            'registration_timeout': {'type': 'int'},
            'rtp_proxy_style': {'type': 'str',
                                'choices': ['symmetric', 'restricted-by-ip-address', 'any-location']},
            'secure_via_header': {'type': 'str', 'choices': F5_ACTIVATION_CHOICES},
            'security': {'type': 'str', 'choices': F5_ACTIVATION_CHOICES},
            'sip_session_timeout': {'type': 'int'},
            'terminate_on_bye': {'type': 'str', 'choices': F5_ACTIVATION_CHOICES},
            'user_via_header': {'type': 'str'},
        }
        spec.update(F5_PROVIDER_ARGS)
        spec.update(F5_NAMED_OBJ_ARGS)
        return spec

    @property
    def supports_check_mode(self):
        # Check mode is safe: the CRUD layer only reads in check mode
        return True
class F5BigIpLtmProfileSip(F5BigIpNamedObject):
    """CRUD wrapper for the BIG-IP ltm/profile/sip resource."""

    def _set_crud_methods(self):
        # Bind each CRUD verb to the matching f5-sdk sip profile call
        sip = self._api.tm.ltm.profile.sips.sip
        self._methods = {
            'create': sip.create,
            'read': sip.load,
            'update': sip.update,
            'delete': sip.delete,
            'exists': sip.exists,
        }
def main():
    """Module entry point: build the AnsibleModule and flush changes."""
    params = ModuleParams()
    module = AnsibleModule(
        argument_spec=params.argument_spec,
        supports_check_mode=params.supports_check_mode,
    )
    try:
        profile = F5BigIpLtmProfileSip(check_mode=module.check_mode, **module.params)
        result = profile.flush()
    except Exception as exc:
        module.fail_json(msg=str(exc))
    else:
        # exit_json raises SystemExit, which the except above never catches
        module.exit_json(**result)


if __name__ == '__main__':
    main()
| erjac77/ansible-module-f5bigip | library/f5bigip_ltm_profile_sip.py | Python | apache-2.0 | 8,676 |
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import re
import types
import copy
import inspect
import traceback
import json
from os.path import expanduser
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
try:
from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
except Exception:
ANSIBLE_VERSION = 'unknown'
from ansible.module_utils.six.moves import configparser
import ansible.module_utils.six.moves.urllib.parse as urlparse
# Argument-spec entries shared by every Azure module (auth selection + knobs).
AZURE_COMMON_ARGS = dict(
    auth_source=dict(
        type='str',
        choices=['auto', 'cli', 'env', 'credential_file', 'msi']
    ),
    profile=dict(type='str'),
    subscription_id=dict(type='str', no_log=True),
    client_id=dict(type='str', no_log=True),
    secret=dict(type='str', no_log=True),
    tenant=dict(type='str', no_log=True),
    ad_user=dict(type='str', no_log=True),
    password=dict(type='str', no_log=True),
    cloud_environment=dict(type='str', default='AzureCloud'),
    cert_validation_mode=dict(type='str', choices=['validate', 'ignore']),
    api_profile=dict(type='str', default='latest'),
    adfs_authority_url=dict(type='str', default=None)
)

# Environment variables consulted as fallbacks for each credential parameter.
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    profile='AZURE_PROFILE',
    subscription_id='AZURE_SUBSCRIPTION_ID',
    client_id='AZURE_CLIENT_ID',
    secret='AZURE_SECRET',
    tenant='AZURE_TENANT',
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD',
    cloud_environment='AZURE_CLOUD_ENVIRONMENT',
    cert_validation_mode='AZURE_CERT_VALIDATION_MODE',
    adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
)

# FUTURE: this should come from the SDK or an external location.
# For now, we have to copy from azure-cli
# Maps profile name -> {client class name: API version string, or a dict of
# per-operation versions carrying a 'default_api_version' key}.
AZURE_API_PROFILES = {
    'latest': {
        'ContainerInstanceManagementClient': '2018-02-01-preview',
        'ComputeManagementClient': dict(
            default_api_version='2018-10-01',
            resource_skus='2018-10-01',
            disks='2018-06-01',
            snapshots='2018-10-01',
            virtual_machine_run_commands='2018-10-01'
        ),
        'NetworkManagementClient': '2018-08-01',
        'ResourceManagementClient': '2017-05-10',
        'StorageManagementClient': '2017-10-01',
        'WebsiteManagementClient': '2016-08-01',
        'PostgreSQLManagementClient': '2017-12-01',
        'MySQLManagementClient': '2017-12-01'
    },
    '2017-03-09-profile': {
        'ComputeManagementClient': '2016-03-30',
        'NetworkManagementClient': '2015-06-15',
        'ResourceManagementClient': '2016-02-01',
        'StorageManagementClient': '2016-01-01'
    }
}

# Tag-related arguments, merged in only for modules that support tagging.
AZURE_TAG_ARGS = dict(
    tags=dict(type='dict'),
    append_tags=dict(type='bool', default=True),
)

AZURE_COMMON_REQUIRED_IF = [
    ('log_mode', 'file', ['log_path'])
]

# User-agent strings reported to Azure for telemetry.
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'
VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'

# IPv4 CIDR matcher: dotted quad plus /0-32 prefix length.
CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
                          r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))")

# Terminal provisioning states returned by Azure.
AZURE_SUCCESS_STATE = "Succeeded"
AZURE_FAILED_STATE = "Failed"

# Availability flags for optional SDK dependencies; the guarded imports
# below flip these to False and record the traceback for error reporting.
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
HAS_AZURE_CLI_CORE_EXC = None

HAS_MSRESTAZURE = True
HAS_MSRESTAZURE_EXC = None
try:
import importlib
except ImportError:
# This passes the sanity import test, but does not provide a user friendly error message.
# Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.
importlib = None
try:
from packaging.version import Version
HAS_PACKAGING_VERSION = True
HAS_PACKAGING_VERSION_EXC = None
except ImportError:
Version = None
HAS_PACKAGING_VERSION = False
HAS_PACKAGING_VERSION_EXC = traceback.format_exc()
# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately
try:
from msrest.serialization import Serializer
except ImportError:
HAS_MSRESTAZURE_EXC = traceback.format_exc()
HAS_MSRESTAZURE = False
try:
from enum import Enum
from msrestazure.azure_active_directory import AADTokenCredentials
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_active_directory import MSIAuthentication
from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
from msrestazure import azure_cloud
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.monitor.version import VERSION as monitor_client_version
from azure.mgmt.network.version import VERSION as network_client_version
from azure.mgmt.storage.version import VERSION as storage_client_version
from azure.mgmt.compute.version import VERSION as compute_client_version
from azure.mgmt.resource.version import VERSION as resource_client_version
from azure.mgmt.dns.version import VERSION as dns_client_version
from azure.mgmt.web.version import VERSION as web_client_version
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.subscriptions import SubscriptionClient
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.dns import DnsManagementClient
from azure.mgmt.monitor import MonitorManagementClient
from azure.mgmt.web import WebSiteManagementClient
from azure.mgmt.containerservice import ContainerServiceClient
from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.storage.cloudstorageaccount import CloudStorageAccount
from azure.storage.blob import PageBlobService, BlockBlobService
from adal.authentication_context import AuthenticationContext
from azure.mgmt.sql import SqlManagementClient
from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
except ImportError as exc:
HAS_AZURE_EXC = traceback.format_exc()
HAS_AZURE = False
try:
from azure.cli.core.util import CLIError
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
from azure.common.cloud import get_cli_active_cloud
except ImportError:
HAS_AZURE_CLI_CORE = False
HAS_AZURE_CLI_CORE_EXC = None
CLIError = Exception
def azure_id_to_dict(id):
    """Break an Azure resource id into a dict of consecutive path segments.

    Each path segment maps to the segment that follows it, e.g.
    '/subscriptions/X/resourceGroups/Y' yields
    {'subscriptions': 'X', 'X': 'resourceGroups', 'resourceGroups': 'Y'}.
    Lookups are expected to use the well-known key names; the extra
    value->key entries are harmless by-products of the pairing.
    """
    pieces = re.sub(r'^\/', '', id).split('/')
    # Sliding window of width 2: each piece keyed to its successor.
    return dict(zip(pieces, pieces[1:]))
def format_resource_id(val, subscription_id, namespace, types, resource_group):
    """Return *val* unchanged when it already is a full resource id;
    otherwise assemble one from the supplied components."""
    if is_valid_resource_id(val):
        return val
    return resource_id(name=val,
                       resource_group=resource_group,
                       namespace=namespace,
                       type=types,
                       subscription=subscription_id)
def normalize_location_name(name):
    """Canonicalize an Azure location name ('East US' -> 'eastus')."""
    return ''.join(name.split(' ')).lower()
# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime)
# or generate the requirements files from this so we only have one source of truth to maintain...
# Minimum tested azure-mgmt-* package versions, keyed by client class name.
# Left empty when the azure SDK is absent so lookups stay safe.
AZURE_PKG_VERSIONS = {
    'StorageManagementClient': {
        'package_name': 'storage',
        'expected_version': '3.1.0'
    },
    'ComputeManagementClient': {
        'package_name': 'compute',
        'expected_version': '4.4.0'
    },
    'ContainerInstanceManagementClient': {
        'package_name': 'containerinstance',
        'expected_version': '0.4.0'
    },
    'NetworkManagementClient': {
        'package_name': 'network',
        'expected_version': '2.3.0'
    },
    'ResourceManagementClient': {
        'package_name': 'resource',
        'expected_version': '1.2.2'
    },
    'DnsManagementClient': {
        'package_name': 'dns',
        'expected_version': '2.1.0'
    },
    'WebSiteManagementClient': {
        'package_name': 'web',
        'expected_version': '0.32.0'
    },
    'TrafficManagerManagementClient': {
        'package_name': 'trafficmanager',
        'expected_version': '0.50.0'
    },
} if HAS_AZURE else {}

AZURE_MIN_RELEASE = '2.0.0'
class AzureRMModuleBase(object):
    """Base class for Azure resource modules.

    Merges the common/tag/derived argument specs, constructs the
    AnsibleModule, verifies the required Azure SDKs are importable,
    delegates authentication to AzureRMAuth, and (unless skip_exec)
    immediately runs exec_module and exits.
    """

    def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None, supports_tags=True, facts_module=False, skip_exec=False):

        # Merge common args, optional tag args, then the module-specific spec
        # (later updates win on key collisions).
        merged_arg_spec = dict()
        merged_arg_spec.update(AZURE_COMMON_ARGS)
        if supports_tags:
            merged_arg_spec.update(AZURE_TAG_ARGS)
        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)

        merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
        if required_if:
            merged_required_if += required_if

        self.module = AnsibleModule(argument_spec=merged_arg_spec,
                                    bypass_checks=bypass_checks,
                                    no_log=no_log,
                                    check_invalid_arguments=check_invalid_arguments,
                                    mutually_exclusive=mutually_exclusive,
                                    required_together=required_together,
                                    required_one_of=required_one_of,
                                    add_file_common_args=add_file_common_args,
                                    supports_check_mode=supports_check_mode,
                                    required_if=merged_required_if)

        # Fail fast, with the captured import traceback, when a required SDK
        # dependency is missing.
        if not HAS_PACKAGING_VERSION:
            self.fail(msg=missing_required_lib('packaging'),
                      exception=HAS_PACKAGING_VERSION_EXC)

        if not HAS_MSRESTAZURE:
            self.fail(msg=missing_required_lib('msrestazure'),
                      exception=HAS_MSRESTAZURE_EXC)

        if not HAS_AZURE:
            self.fail(msg=missing_required_lib('ansible[azure] (azure >= {0})'.format(AZURE_MIN_RELEASE)),
                      exception=HAS_AZURE_EXC)

        # Lazily-instantiated management clients (see the matching properties).
        self._network_client = None
        self._storage_client = None
        self._resource_client = None
        self._compute_client = None
        self._dns_client = None
        self._web_client = None
        self._marketplace_client = None
        self._sql_client = None
        self._mysql_client = None
        self._postgresql_client = None
        self._containerregistry_client = None
        self._containerinstance_client = None
        self._containerservice_client = None
        self._traffic_manager_management_client = None
        self._monitor_client = None
        self._resource = None

        self.check_mode = self.module.check_mode
        self.api_profile = self.module.params.get('api_profile')
        self.facts_module = facts_module
        # self.debug = self.module.params.get('debug')

        # delegate auth to AzureRMAuth class (shared with all plugin types)
        self.azure_auth = AzureRMAuth(fail_impl=self.fail, **self.module.params)

        # common parameter validation
        if self.module.params.get('tags'):
            self.validate_tags(self.module.params['tags'])

        if not skip_exec:
            res = self.exec_module(**self.module.params)
            self.module.exit_json(**res)
    def check_client_version(self, client_type):
        """Fail when the azure-mgmt package backing *client_type* is older
        than the tested minimum; warn when it differs from the expected
        version. No-op for client types without a registered minimum.
        """
        # Ensure Azure modules are at least 2.0.0rc5.
        package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)
        if package_version is not None:
            client_name = package_version.get('package_name')
            try:
                client_module = importlib.import_module(client_type.__module__)
                client_version = client_module.VERSION
            except RuntimeError:
                # can't get at the module version for some reason, just fail silently...
                # NOTE(review): an AttributeError (module without VERSION) is NOT
                # caught here and would propagate — confirm that is intended.
                return
            expected_version = package_version.get('expected_version')
            # self.fail() exits the process, so the warn below only runs for
            # versions that are newer (or equal-but-unequal strings).
            if Version(client_version) < Version(expected_version):
                self.fail("Installed azure-mgmt-{0} client version is {1}. The minimum supported version is {2}. Try "
                          "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
            if Version(client_version) != Version(expected_version):
                self.module.warn("Installed azure-mgmt-{0} client version is {1}. The expected version is {2}. Try "
                                 "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
def exec_module(self, **kwargs):
self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
    def fail(self, msg, **kwargs):
        '''
        Shortcut for calling module.fail_json (exits the module run).

        :param msg: Error message text.
        :param kwargs: Any key=value pairs passed through to fail_json
        :return: None
        '''
        self.module.fail_json(msg=msg, **kwargs)
    def deprecate(self, msg, version=None):
        """Issue an Ansible deprecation warning for this module."""
        self.module.deprecate(msg, version)
def log(self, msg, pretty_print=False):
if pretty_print:
self.module.debug(json.dumps(msg, indent=4, sort_keys=True))
else:
self.module.debug(msg)
def validate_tags(self, tags):
'''
Check if tags dictionary contains string:string pairs.
:param tags: dictionary of string:string pairs
:return: None
'''
if not self.facts_module:
if not isinstance(tags, dict):
self.fail("Tags must be a dictionary of string:string values.")
for key, value in tags.items():
if not isinstance(value, str):
self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
def update_tags(self, tags):
'''
Call from the module to update metadata tags. Returns tuple
with bool indicating if there was a change and dict of new
tags to assign to the object.
:param tags: metadata tags from the object
:return: bool, dict
'''
tags = tags or dict()
new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
param_tags = self.module.params.get('tags') if isinstance(self.module.params.get('tags'), dict) else dict()
append_tags = self.module.params.get('append_tags') if self.module.params.get('append_tags') is not None else True
changed = False
# check add or update
for key, value in param_tags.items():
if not new_tags.get(key) or new_tags[key] != value:
changed = True
new_tags[key] = value
# check remove
if not append_tags:
for key, value in tags.items():
if not param_tags.get(key):
new_tags.pop(key)
changed = True
return changed, new_tags
def has_tags(self, obj_tags, tag_list):
'''
Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags
exists in object tags.
:param obj_tags: dictionary of tags from an Azure object.
:param tag_list: list of tag keys or tag key:value pairs
:return: bool
'''
if not obj_tags and tag_list:
return False
if not tag_list:
return True
matches = 0
result = False
for tag in tag_list:
tag_key = tag
tag_value = None
if ':' in tag:
tag_key, tag_value = tag.split(':')
if tag_value and obj_tags.get(tag_key) == tag_value:
matches += 1
elif not tag_value and obj_tags.get(tag_key):
matches += 1
if matches == len(tag_list):
result = True
return result
    def get_resource_group(self, resource_group):
        '''
        Fetch a resource group.

        :param resource_group: name of a resource group
        :return: resource group object
        '''
        try:
            return self.rm_client.resource_groups.get(resource_group)
        except CloudError as cloud_error:
            # Azure-specific failures carry service detail in .message
            self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message))
        except Exception as exc:
            self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
def parse_resource_to_dict(self, resource):
'''
Return a dict of the give resource, which contains name and resource group.
:param resource: It can be a resource name, id or a dict contains name and resource group.
'''
resource_dict = parse_resource_id(resource) if not isinstance(resource, dict) else resource
resource_dict['resource_group'] = resource_dict.get('resource_group', self.resource_group)
resource_dict['subscription_id'] = resource_dict.get('subscription_id', self.subscription_id)
return resource_dict
    def serialize_obj(self, obj, class_name, enum_modules=None):
        '''
        Return a JSON representation of an Azure object.

        :param obj: Azure object
        :param class_name: Name of the object's class
        :param enum_modules: List of module names to build enum dependencies from.
        :return: serialized result
        '''
        enum_modules = [] if enum_modules is None else enum_modules

        dependencies = dict()
        if enum_modules:
            # Collect every class from the listed modules so the serializer
            # can resolve enum/model references.
            for module_name in enum_modules:
                mod = importlib.import_module(module_name)
                for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
                    dependencies[mod_class_name] = mod_class_obj
            self.log("dependencies: ")
            self.log(str(dependencies))
        serializer = Serializer(classes=dependencies)
        return serializer.body(obj, class_name, keep_readonly=True)
    def get_poller_result(self, poller, wait=5):
        '''
        Consistent method of waiting on and retrieving results from Azure's long poller.

        :param poller: Azure poller object
        :param wait: seconds to wait between polls
        :return: object resulting from the original request
        '''
        try:
            delay = wait
            while not poller.done():
                self.log("Waiting for {0} sec".format(delay))
                poller.wait(timeout=delay)
            return poller.result()
        except Exception as exc:
            # log and re-raise so callers can report the failure themselves
            self.log(str(exc))
            raise
    def check_provisioning_state(self, azure_object, requested_state='present'):
        '''
        Check an Azure object's provisioning state. If something did not complete the provisioning
        process, then we cannot operate on it. Fails the module unless the
        state is "Succeeded" or the requested state is 'absent'.

        :param azure_object: An object such as a subnet, storageaccount, etc. Must have provisioning_state
                             and name attributes.
        :param requested_state: desired module state; 'absent' skips the check
        :return: None
        '''

        # Shape 1: objects that nest the state under .properties (e.g. resource groups).
        if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
           hasattr(azure_object, 'name'):
            # resource group object fits this model
            if isinstance(azure_object.properties.provisioning_state, Enum):
                if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
                   requested_state != 'absent':
                    self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                        azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
                return
            if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
               requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
            return

        # Shape 2: objects that expose provisioning_state directly.
        # NOTE(review): the `or not hasattr(azure_object, 'name')` clause lets
        # name-less objects fall into this branch even without a
        # provisioning_state attribute — confirm this guard is intended.
        if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
            if isinstance(azure_object.provisioning_state, Enum):
                if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
                    self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                        azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
                return
            if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
    def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
        """Return a blob service client for the given storage account,
        authenticated with a key fetched from the storage resource provider.

        :param resource_group_name: resource group containing the account
        :param storage_account_name: storage account name
        :param storage_blob_type: 'block' or 'page'
        :return: BlockBlobService or PageBlobService instance
        """
        keys = dict()  # NOTE(review): unused — kept for byte-compatibility
        try:
            # Get keys from the storage account
            self.log('Getting keys')
            account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
        except Exception as exc:
            self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))

        try:
            self.log('Create blob service')
            if storage_blob_type == 'page':
                return PageBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
                                       account_name=storage_account_name,
                                       account_key=account_keys.keys[0].value)
            elif storage_blob_type == 'block':
                return BlockBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
                                        account_name=storage_account_name,
                                        account_key=account_keys.keys[0].value)
            else:
                raise Exception("Invalid storage blob type defined.")
        except Exception as exc:
            self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
                                                                                                str(exc)))
    def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic', sku=None):
        '''
        Create a default public IP address <public_ip_name> to associate with a network interface.
        If a PIP address matching <public_ip_name> exists, return it. Otherwise, create one.

        :param resource_group: name of an existing resource group
        :param location: a valid azure location
        :param public_ip_name: base name to assign the public IP address
        :param allocation_method: one of 'Static' or 'Dynamic'
        :param sku: sku
        :return: PIP object
        '''
        pip = None

        self.log("Starting create_default_pip {0}".format(public_ip_name))
        self.log("Check to see if public IP {0} exists".format(public_ip_name))
        try:
            pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
        except CloudError:
            # not found -> fall through and create it
            pass

        if pip:
            self.log("Public ip {0} found.".format(public_ip_name))
            self.check_provisioning_state(pip)
            return pip

        params = self.network_models.PublicIPAddress(
            location=location,
            public_ip_allocation_method=allocation_method,
            sku=sku
        )
        self.log('Creating default public IP {0}'.format(public_ip_name))
        try:
            poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
        except Exception as exc:
            self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))

        return self.get_poller_result(poller)
    def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
        '''
        Create a default security group <security_group_name> to associate with a network interface. If a security group matching
        <security_group_name> exists, return it. Otherwise, create one.

        :param resource_group: Resource group name
        :param location: azure location name
        :param security_group_name: base name to use for the security group
        :param os_type: one of 'Windows' or 'Linux'. Determines any default rules added to the security group.
        :param open_ports: optional list of ports to open instead of the OS-type
                           defaults (SSH 22 for Linux; RDP 3389 + WinRM 5986 for Windows)
        :return: security_group object
        '''
        group = None

        self.log("Create security group {0}".format(security_group_name))
        self.log("Check to see if security group {0} exists".format(security_group_name))
        try:
            group = self.network_client.network_security_groups.get(resource_group, security_group_name)
        except CloudError:
            # not found -> fall through and create it
            pass

        if group:
            self.log("Security group {0} found.".format(security_group_name))
            self.check_provisioning_state(group)
            return group

        parameters = self.network_models.NetworkSecurityGroup()
        parameters.location = location

        if not open_ports:
            # Open default ports based on OS type
            if os_type == 'Linux':
                # add an inbound SSH rule
                parameters.security_rules = [
                    self.network_models.SecurityRule(protocol='Tcp',
                                                     source_address_prefix='*',
                                                     destination_address_prefix='*',
                                                     access='Allow',
                                                     direction='Inbound',
                                                     description='Allow SSH Access',
                                                     source_port_range='*',
                                                     destination_port_range='22',
                                                     priority=100,
                                                     name='SSH')
                ]
                parameters.location = location
            else:
                # for windows add inbound RDP and WinRM rules
                parameters.security_rules = [
                    self.network_models.SecurityRule(protocol='Tcp',
                                                     source_address_prefix='*',
                                                     destination_address_prefix='*',
                                                     access='Allow',
                                                     direction='Inbound',
                                                     description='Allow RDP port 3389',
                                                     source_port_range='*',
                                                     destination_port_range='3389',
                                                     priority=100,
                                                     name='RDP01'),
                    self.network_models.SecurityRule(protocol='Tcp',
                                                     source_address_prefix='*',
                                                     destination_address_prefix='*',
                                                     access='Allow',
                                                     direction='Inbound',
                                                     description='Allow WinRM HTTPS port 5986',
                                                     source_port_range='*',
                                                     destination_port_range='5986',
                                                     priority=101,
                                                     name='WinRM01'),
                ]
        else:
            # Open custom ports; priorities assigned sequentially from 101
            parameters.security_rules = []
            priority = 100
            for port in open_ports:
                priority += 1
                rule_name = "Rule_{0}".format(priority)
                parameters.security_rules.append(
                    self.network_models.SecurityRule(protocol='Tcp',
                                                     source_address_prefix='*',
                                                     destination_address_prefix='*',
                                                     access='Allow',
                                                     direction='Inbound',
                                                     source_port_range='*',
                                                     destination_port_range=str(port),
                                                     priority=priority,
                                                     name=rule_name)
                )

        self.log('Creating default security group {0}'.format(security_group_name))
        try:
            poller = self.network_client.network_security_groups.create_or_update(resource_group,
                                                                                  security_group_name,
                                                                                  parameters)
        except Exception as exc:
            self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))

        return self.get_poller_result(poller)
    @staticmethod
    def _validation_ignore_callback(session, global_config, local_config, **kwargs):
        """Session hook used when cert_validation_mode == 'ignore':
        disables TLS certificate verification on the requests session."""
        session.verify = False
    def get_api_profile(self, client_type_name, api_profile_name):
        """Resolve the API-version profile entry for a client type.

        :param client_type_name: class name of the management client
        :param api_profile_name: profile key from AZURE_API_PROFILES
        :raises KeyError: unknown profile name, or dict entry missing
            'default_api_version'
        :return: dict with at least a 'default_api_version' key (value may be
            None when the profile has no entry for this client)
        """
        profile_all_clients = AZURE_API_PROFILES.get(api_profile_name)

        if not profile_all_clients:
            raise KeyError("unknown Azure API profile: {0}".format(api_profile_name))

        profile_raw = profile_all_clients.get(client_type_name, None)

        if not profile_raw:
            self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name))

        if isinstance(profile_raw, dict):
            if not profile_raw.get('default_api_version'):
                raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name))
            return profile_raw

        # wrap basic strings in a dict that just defines the default
        return dict(default_api_version=profile_raw)
    def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):
        """Construct and configure an Azure management client.

        :param client_type: management client class to instantiate
        :param base_url: service endpoint (defaults to resource manager)
        :param api_version: explicit API version (overrides the profile)
        :return: configured client instance
        """
        self.log('Getting management service client {0}'.format(client_type.__name__))
        self.check_client_version(client_type)

        # NOTE(review): inspect.getargspec is deprecated and removed in
        # Python 3.11; inspect.getfullargspec is the Python-3 replacement —
        # confirm the supported interpreter range before switching.
        client_argspec = inspect.getargspec(client_type.__init__)

        if not base_url:
            # most things are resource_manager, don't make everyone specify
            base_url = self.azure_auth._cloud_environment.endpoints.resource_manager

        client_kwargs = dict(credentials=self.azure_auth.azure_credentials, subscription_id=self.azure_auth.subscription_id, base_url=base_url)

        api_profile_dict = {}

        if self.api_profile:
            api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile)

        # unversioned clients won't accept profile; only send it if necessary
        # clients without a version specified in the profile will use the default
        if api_profile_dict and 'profile' in client_argspec.args:
            client_kwargs['profile'] = api_profile_dict

        # If the client doesn't accept api_version, it's unversioned.
        # If it does, favor explicitly-specified api_version, fall back to api_profile
        if 'api_version' in client_argspec.args:
            profile_default_version = api_profile_dict.get('default_api_version', None)
            if api_version or profile_default_version:
                client_kwargs['api_version'] = api_version or profile_default_version
                if 'profile' in client_kwargs:
                    # remove profile; only pass API version if specified
                    client_kwargs.pop('profile')

        client = client_type(**client_kwargs)

        # FUTURE: remove this once everything exposes models directly (eg, containerinstance)
        try:
            getattr(client, "models")
        except AttributeError:
            # graft a models() accessor onto clients that lack one
            def _ansible_get_models(self, *arg, **kwarg):
                return self._ansible_models

            setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
            client.models = types.MethodType(_ansible_get_models, client)

        # Add user agent for Ansible
        client.config.add_user_agent(ANSIBLE_USER_AGENT)

        # Add user agent when running from Cloud Shell
        if CLOUDSHELL_USER_AGENT_KEY in os.environ:
            client.config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])
        # Add user agent when running from VSCode extension
        if VSCODEEXT_USER_AGENT_KEY in os.environ:
            client.config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])

        if self.azure_auth._cert_validation_mode == 'ignore':
            client.config.session_configuration_callback = self._validation_ignore_callback

        return client
# passthru methods to AzureAuth instance for backcompat
    @property
    def credentials(self):
        """Credential dict resolved by AzureRMAuth (backcompat passthrough)."""
        return self.azure_auth.credentials
    @property
    def _cloud_environment(self):
        """Cloud environment object resolved by AzureRMAuth (backcompat passthrough)."""
        return self.azure_auth._cloud_environment
    @property
    def subscription_id(self):
        """Subscription id resolved by AzureRMAuth (backcompat passthrough)."""
        return self.azure_auth.subscription_id
    @property
    def storage_client(self):
        """Lazily created StorageManagementClient (API 2018-07-01), cached per run."""
        self.log('Getting storage client...')
        if not self._storage_client:
            self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,
                                                            base_url=self._cloud_environment.endpoints.resource_manager,
                                                            api_version='2018-07-01')
        return self._storage_client
    @property
    def storage_models(self):
        """Storage SDK model namespace pinned to API version 2018-07-01."""
        return StorageManagementClient.models("2018-07-01")
    @property
    def network_client(self):
        """Lazily created NetworkManagementClient (API 2018-08-01), cached per run."""
        self.log('Getting network client')
        if not self._network_client:
            self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
                                                            base_url=self._cloud_environment.endpoints.resource_manager,
                                                            api_version='2018-08-01')
        return self._network_client
    @property
    def network_models(self):
        """Network SDK model namespace pinned to API version 2018-08-01."""
        self.log("Getting network models...")
        return NetworkManagementClient.models("2018-08-01")
    @property
    def rm_client(self):
        """Lazily created ResourceManagementClient (API 2017-05-10), cached per run."""
        self.log('Getting resource manager client')
        if not self._resource_client:
            self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
                                                             base_url=self._cloud_environment.endpoints.resource_manager,
                                                             api_version='2017-05-10')
        return self._resource_client
    @property
    def rm_models(self):
        """Resource-manager SDK model namespace pinned to API version 2017-05-10."""
        self.log("Getting resource manager models")
        return ResourceManagementClient.models("2017-05-10")
    @property
    def compute_client(self):
        """Lazily created ComputeManagementClient (API 2018-06-01), cached per run."""
        self.log('Getting compute client')
        if not self._compute_client:
            self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
                                                            base_url=self._cloud_environment.endpoints.resource_manager,
                                                            api_version='2018-06-01')
        return self._compute_client
    @property
    def compute_models(self):
        """Compute SDK model namespace pinned to API version 2018-06-01."""
        self.log("Getting compute models")
        return ComputeManagementClient.models("2018-06-01")
    @property
    def dns_client(self):
        """Lazily created DnsManagementClient (API 2018-05-01), cached per run."""
        self.log('Getting dns client')
        if not self._dns_client:
            self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
                                                        base_url=self._cloud_environment.endpoints.resource_manager,
                                                        api_version='2018-05-01')
        return self._dns_client
    @property
    def dns_models(self):
        """DNS SDK model namespace pinned to API version 2018-05-01."""
        self.log("Getting dns models...")
        return DnsManagementClient.models('2018-05-01')
    @property
    def web_client(self):
        """Lazily created WebSiteManagementClient (API 2016-08-01), cached per run."""
        self.log('Getting web client')
        if not self._web_client:
            self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,
                                                        base_url=self._cloud_environment.endpoints.resource_manager,
                                                        api_version='2016-08-01')
        return self._web_client
    @property
    def containerservice_client(self):
        """Lazily created ContainerServiceClient, cached per run."""
        self.log('Getting container service client')
        if not self._containerservice_client:
            self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,
                                                                     base_url=self._cloud_environment.endpoints.resource_manager)
        return self._containerservice_client
    @property
    def sql_client(self):
        """Lazily created SqlManagementClient, cached per run."""
        self.log('Getting SQL client')
        if not self._sql_client:
            self._sql_client = self.get_mgmt_svc_client(SqlManagementClient,
                                                        base_url=self._cloud_environment.endpoints.resource_manager)
        return self._sql_client
    @property
    def postgresql_client(self):
        """Lazily created PostgreSQLManagementClient, cached per run."""
        self.log('Getting PostgreSQL client')
        if not self._postgresql_client:
            self._postgresql_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
                                                               base_url=self._cloud_environment.endpoints.resource_manager)
        return self._postgresql_client
    @property
    def mysql_client(self):
        """Lazily created MySQLManagementClient, cached per run."""
        self.log('Getting MySQL client')
        if not self._mysql_client:
            self._mysql_client = self.get_mgmt_svc_client(MySQLManagementClient,
                                                          base_url=self._cloud_environment.endpoints.resource_manager)
        return self._mysql_client
    @property
    def sql_client(self):
        # NOTE(review): duplicate definition — an identical sql_client property
        # is declared earlier in this class; because Python class bodies execute
        # top-to-bottom, this later definition silently overrides the first.
        # One of the two should be removed.
        self.log('Getting SQL client')
        if not self._sql_client:
            self._sql_client = self.get_mgmt_svc_client(SqlManagementClient,
                                                        base_url=self._cloud_environment.endpoints.resource_manager)
        return self._sql_client
    @property
    def containerregistry_client(self):
        """Lazily created ContainerRegistryManagementClient (API 2017-10-01), cached per run."""
        self.log('Getting container registry mgmt client')
        if not self._containerregistry_client:
            self._containerregistry_client = self.get_mgmt_svc_client(ContainerRegistryManagementClient,
                                                                      base_url=self._cloud_environment.endpoints.resource_manager,
                                                                      api_version='2017-10-01')
        return self._containerregistry_client
    @property
    def containerinstance_client(self):
        """Lazily created ContainerInstanceManagementClient (API 2018-06-01), cached per run."""
        self.log('Getting container instance mgmt client')
        if not self._containerinstance_client:
            self._containerinstance_client = self.get_mgmt_svc_client(ContainerInstanceManagementClient,
                                                                      base_url=self._cloud_environment.endpoints.resource_manager,
                                                                      api_version='2018-06-01')
        return self._containerinstance_client
    @property
    def marketplace_client(self):
        """Lazily created MarketplaceOrderingAgreements client, cached per run."""
        self.log('Getting marketplace agreement client')
        if not self._marketplace_client:
            self._marketplace_client = self.get_mgmt_svc_client(MarketplaceOrderingAgreements,
                                                                base_url=self._cloud_environment.endpoints.resource_manager)
        return self._marketplace_client
    @property
    def traffic_manager_management_client(self):
        """Lazily created TrafficManagerManagementClient, cached per run."""
        self.log('Getting traffic manager client')
        if not self._traffic_manager_management_client:
            self._traffic_manager_management_client = self.get_mgmt_svc_client(TrafficManagerManagementClient,
                                                                               base_url=self._cloud_environment.endpoints.resource_manager)
        return self._traffic_manager_management_client
    @property
    def monitor_client(self):
        """Lazily created MonitorManagementClient, cached per run."""
        self.log('Getting monitor client')
        if not self._monitor_client:
            self._monitor_client = self.get_mgmt_svc_client(MonitorManagementClient,
                                                            base_url=self._cloud_environment.endpoints.resource_manager)
        return self._monitor_client
class AzureRMAuthException(Exception):
    """Error raised during Azure authentication setup."""
    pass
class AzureRMAuth(object):
    """Resolve Azure credentials from a configured source and build the
    matching SDK credential object.

    Credential precedence for auth_source='auto': explicit parameters ->
    environment variables -> profile in ~/.azure/credentials -> Azure CLI.
    On success, ``self.azure_credentials`` and ``self.subscription_id`` are
    populated; on failure, ``fail_impl`` (default: raise
    AzureRMAuthException) is invoked.
    """

    def __init__(self, auth_source='auto', profile=None, subscription_id=None, client_id=None, secret=None,
                 tenant=None, ad_user=None, password=None, cloud_environment='AzureCloud', cert_validation_mode='validate',
                 api_profile='latest', adfs_authority_url=None, fail_impl=None, **kwargs):
        # Allow the caller (e.g. an Ansible module) to supply its own
        # failure handler; otherwise raise AzureRMAuthException.
        if fail_impl:
            self._fail_impl = fail_impl
        else:
            self._fail_impl = self._default_fail_impl
        self._cloud_environment = None
        self._adfs_authority_url = None

        # authenticate
        self.credentials = self._get_credentials(
            dict(auth_source=auth_source, profile=profile, subscription_id=subscription_id, client_id=client_id, secret=secret,
                 tenant=tenant, ad_user=ad_user, password=password, cloud_environment=cloud_environment,
                 cert_validation_mode=cert_validation_mode, api_profile=api_profile, adfs_authority_url=adfs_authority_url))

        if not self.credentials:
            if HAS_AZURE_CLI_CORE:
                self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                          "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
            else:
                self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                          "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")

        # cert validation mode precedence: module-arg, credential profile, env, "validate"
        self._cert_validation_mode = cert_validation_mode or self.credentials.get('cert_validation_mode') or \
            os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
        if self._cert_validation_mode not in ['validate', 'ignore']:
            self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))

        # if cloud_environment specified, look up/build Cloud object
        raw_cloud_env = self.credentials.get('cloud_environment')
        if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
            # Azure CLI credentials already carry a resolved Cloud object.
            self._cloud_environment = raw_cloud_env
        elif not raw_cloud_env:
            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
        else:
            # try to look up "well-known" values via the name attribute on azure_cloud members
            all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
            if len(matched_clouds) == 1:
                self._cloud_environment = matched_clouds[0]
            elif len(matched_clouds) > 1:
                self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
            else:
                # Not a well-known name: treat the value as a metadata
                # discovery endpoint URL.
                if not urlparse.urlparse(raw_cloud_env).scheme:
                    self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
                try:
                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
                except Exception as e:
                    # str(e) rather than e.message: BaseException.message
                    # does not exist on Python 3.
                    self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, str(e)), exception=traceback.format_exc())

        if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        # get authentication authority
        # for adfs, user could pass in authority or not.
        # for others, use default authority from cloud environment
        if self.credentials.get('adfs_authority_url') is None:
            self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
        else:
            self._adfs_authority_url = self.credentials.get('adfs_authority_url')

        # get resource from cloud environment
        self._resource = self._cloud_environment.endpoints.active_directory_resource_id

        if self.credentials.get('credentials') is not None:
            # AzureCLI credentials
            self.azure_credentials = self.credentials['credentials']
        elif self.credentials.get('client_id') is not None and \
                self.credentials.get('secret') is not None and \
                self.credentials.get('tenant') is not None:
            # Service principal (client_id + secret + tenant).
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'],
                                                                 cloud_environment=self._cloud_environment,
                                                                 verify=self._cert_validation_mode == 'validate')
        elif self.credentials.get('ad_user') is not None and \
                self.credentials.get('password') is not None and \
                self.credentials.get('client_id') is not None and \
                self.credentials.get('tenant') is not None:
            # ADFS username/password flow (requires client_id and tenant).
            self.azure_credentials = self.acquire_token_with_username_password(
                self._adfs_authority_url,
                self._resource,
                self.credentials['ad_user'],
                self.credentials['password'],
                self.credentials['client_id'],
                self.credentials['tenant'])
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            # Plain AD username/password flow.
            tenant = self.credentials.get('tenant')
            if not tenant:
                tenant = 'common'  # SDK default
            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
                                                         self.credentials['password'],
                                                         tenant=tenant,
                                                         cloud_environment=self._cloud_environment,
                                                         verify=self._cert_validation_mode == 'validate')
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password, or "
                      "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
                      "be logged in using AzureCLI.")

    def fail(self, msg, exception=None, **kwargs):
        """Report a fatal error through the configured failure handler."""
        self._fail_impl(msg)

    def _default_fail_impl(self, msg, exception=None, **kwargs):
        """Default failure handler: raise AzureRMAuthException."""
        raise AzureRMAuthException(msg)

    def _get_profile(self, profile="default"):
        """Read one [profile] section from ~/.azure/credentials.

        :returns: dict of credential values when the section yields a
            subscription_id, otherwise None.
        """
        path = expanduser("~/.azure/credentials")
        try:
            config = configparser.ConfigParser()
            config.read(path)
        except Exception as exc:
            self.fail("Failed to access {0}. Check that the file exists and you have read "
                      "access. {1}".format(path, str(exc)))
        credentials = dict()
        for key in AZURE_CREDENTIAL_ENV_MAPPING:
            try:
                credentials[key] = config.get(profile, key, raw=True)
            except Exception:
                # Missing keys are fine; only subscription_id is mandatory.
                pass
        if credentials.get('subscription_id'):
            return credentials
        return None

    def _get_msi_credentials(self, subscription_id_param=None):
        """Build credentials from the VM's Managed Service Identity.

        Falls back to the first subscription visible to the MSI when no
        subscription id is supplied or found in the environment.
        """
        credentials = MSIAuthentication()
        subscription_id = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
        if not subscription_id:
            try:
                # use the first subscription of the MSI
                subscription_client = SubscriptionClient(credentials)
                subscription = next(subscription_client.subscriptions.list())
                subscription_id = str(subscription.subscription_id)
            except Exception as exc:
                self.fail("Failed to get MSI token: {0}. "
                          "Please check whether your machine enabled MSI or grant access to any subscription.".format(str(exc)))
        return {
            'credentials': credentials,
            'subscription_id': subscription_id
        }

    def _get_azure_cli_credentials(self):
        """Build credentials from the active Azure CLI profile."""
        credentials, subscription_id = get_azure_cli_credentials()
        cloud_environment = get_cli_active_cloud()
        cli_credentials = {
            'credentials': credentials,
            'subscription_id': subscription_id,
            'cloud_environment': cloud_environment
        }
        return cli_credentials

    def _get_env_credentials(self):
        """Build credentials from AZURE_* environment variables.

        If AZURE_PROFILE is set, defer to that profile in
        ~/.azure/credentials instead.
        """
        env_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
            env_credentials[attribute] = os.environ.get(env_variable, None)
        if env_credentials['profile']:
            credentials = self._get_profile(env_credentials['profile'])
            return credentials
        if env_credentials.get('subscription_id') is not None:
            return env_credentials
        return None

    # TODO: use explicit kwargs instead of intermediate dict
    def _get_credentials(self, params):
        """Resolve credentials according to ``auth_source``.

        Explicit sources ('msi', 'cli', 'env', 'credential_file') short
        circuit; 'auto' walks the documented precedence chain.
        """
        self.log('Getting credentials')
        arg_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
            arg_credentials[attribute] = params.get(attribute, None)
        auth_source = params.get('auth_source', None)
        if not auth_source:
            auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
        if auth_source == 'msi':
            self.log('Retrieving credentials from MSI')
            return self._get_msi_credentials(arg_credentials['subscription_id'])
        if auth_source == 'cli':
            if not HAS_AZURE_CLI_CORE:
                self.fail(msg=missing_required_lib('azure-cli', reason='for `cli` auth_source'),
                          exception=HAS_AZURE_CLI_CORE_EXC)
            try:
                self.log('Retrieving credentials from Azure CLI profile')
                cli_credentials = self._get_azure_cli_credentials()
                return cli_credentials
            except CLIError as err:
                self.fail("Azure CLI profile cannot be loaded - {0}".format(err))
        if auth_source == 'env':
            self.log('Retrieving credentials from environment')
            env_credentials = self._get_env_credentials()
            return env_credentials
        if auth_source == 'credential_file':
            self.log("Retrieving credentials from credential file")
            profile = params.get('profile', 'default')
            default_credentials = self._get_profile(profile)
            return default_credentials
        # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials
        # try module params
        if arg_credentials['profile'] is not None:
            self.log('Retrieving credentials with profile parameter.')
            credentials = self._get_profile(arg_credentials['profile'])
            return credentials
        if arg_credentials['subscription_id']:
            self.log('Received credentials from parameters.')
            return arg_credentials
        # try environment
        env_credentials = self._get_env_credentials()
        if env_credentials:
            self.log('Received credentials from env.')
            return env_credentials
        # try default profile from ~./azure/credentials
        default_credentials = self._get_profile()
        if default_credentials:
            self.log('Retrieved default profile credentials from ~/.azure/credentials.')
            return default_credentials
        try:
            if HAS_AZURE_CLI_CORE:
                self.log('Retrieving credentials from AzureCLI profile')
                cli_credentials = self._get_azure_cli_credentials()
                return cli_credentials
        except CLIError as ce:
            self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
        return None

    def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
        """Acquire an AAD token via the (ADFS-capable) username/password flow.

        :returns: AADTokenCredentials wrapping the raw token response.
        """
        authority_uri = authority
        if tenant is not None:
            authority_uri = authority + '/' + tenant
        context = AuthenticationContext(authority_uri)
        token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
        return AADTokenCredentials(token_response)

    def log(self, msg, pretty_print=False):
        """No-op logging hook; see commented body for development use."""
        pass
        # Use only during module development
        # if self.debug:
        #     log_file = open('azure_rm.log', 'a')
        #     if pretty_print:
        #         log_file.write(json.dumps(msg, indent=4, sort_keys=True))
        #     else:
        #         log_file.write(msg + u'\n')
| ujenmr/ansible | lib/ansible/module_utils/azure_rm_common.py | Python | gpl-3.0 | 55,428 |
from app import db
from app.models.base_model import BaseEntity
# Association table linking pages to the categories they belong to
# (many-to-many).
category_page = db.Table(
    'category_page',
    db.Column('category_id', db.Integer, db.ForeignKey('category.id')),
    db.Column('page_id', db.Integer, db.ForeignKey('page.id'))
)
# relationship required for adjacency list (self referential many to many
# relationship): ``super_id`` is the parent category, ``sub_id`` the child.
category_category = db.Table(
    'category_category',
    db.Column('super_id', db.Integer, db.ForeignKey('category.id')),
    db.Column('sub_id', db.Integer, db.ForeignKey('category.id'))
)
class Category(db.Model, BaseEntity):
    """
    Categories for pages similar to mediawiki categories.
    https://www.mediawiki.org/wiki/Help:Categories
    """
    __tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True)
    # Category names are globally unique.
    name = db.Column(db.String(200), unique=True)
    # Pages tagged with this category (many-to-many via category_page);
    # the backref exposes ``page.categories``.
    pages = db.relationship('Page', secondary=category_page,
                            backref=db.backref('categories'))
    # Child categories; self-referential many-to-many through
    # category_category. The backref exposes parents as super_categories.
    sub_categories = db.relationship(
        'Category', secondary=category_category,
        primaryjoin=id == category_category.c.super_id,
        secondaryjoin=id == category_category.c.sub_id,
        backref='super_categories')
    def __init__(self, name):
        """Create a category with the given (unique) name."""
        self.name = name
    def has_parent_category(self):
        """Return True when at least one category lists this one as a child."""
        return len(self.super_categories) > 0
    def __str__(self):
        return "Category: %s" % self.name
| viaict/viaduct | app/models/category.py | Python | mit | 1,395 |
# -*- coding: utf-8 -*-
s = raw_input("--> ")
print (s, type(s))
name = input("what's your name? Please include your name into quotes: ")
print ("nice to meet you " + name + "!")
age = raw_input("ur age?")
print ("so you are already " + str(age) + " years old, " + name + "!")
ur_diary = raw_input("Plase input your diary: ")
print type(ur_diary)
print ("here is your diary:" + ur_diary) | JeremiahZhang/pybeginner | _src/om2py0w/0wex0/input_test.py | Python | mit | 394 |
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from uai.api.base_api import BaseUaiServiceApiOp
class CheckUAIBaseImgExistApiOp(BaseUaiServiceApiOp):
    """API operation asking UAI whether a matching base image exists."""
    ACTION_NAME = "CheckUAIBaseImgExist"

    def __init__(self, public_key, private_key, os_version, python_version, ai_frame_version, os_deps='', pip='', project_id='', region='', zone=''):
        super(CheckUAIBaseImgExistApiOp, self).__init__(self.ACTION_NAME, public_key, private_key, project_id, region, zone)
        # Fall back to the class-level default region for an empty string.
        if region != '':
            self.cmd_params['Region'] = region
        else:
            self.cmd_params['Region'] = super(CheckUAIBaseImgExistApiOp, self).PARAMS_DEFAULT_REGION
        self.cmd_params['OSVersion'] = os_version
        self.cmd_params['PythonVersion'] = python_version
        self.cmd_params['AIFrameVersion'] = ai_frame_version
        self._get_pkgs(os_deps, 'AptGetPKGID')
        self._get_pkgs(pip, 'PipPKGID')

    def _check_args(self, params):
        # All three version fields are mandatory and must be non-empty.
        required = ('OSVersion', 'PythonVersion', 'AIFrameVersion')
        return all(params[key] != '' for key in required)

    def call_api(self):
        # Delegate to the base implementation and remember the response.
        succ, self.rsp = super(CheckUAIBaseImgExistApiOp, self).call_api()
        return succ, self.rsp
| ucloud/uai-sdk | uai/api/check_uai_base_img_exist.py | Python | apache-2.0 | 1,875 |
from questionnaire.models import Questionnaire, Section
_DESCRIPTION = "From dropbox as given by Rouslan"
_SECTION_TITLE = ("Reported Cases of Selected Vaccine Preventable "
                  "Diseases (VPDs)")

# Create the four JRF core questionnaires in the original order
# (2011, 2010, 2009, 2012), keeping the historical variable names.
questionnaire1, questionnaire2, questionnaire3, questionnaire4 = [
    Questionnaire.objects.create(name="JRF %d Core English" % year,
                                 description=_DESCRIPTION,
                                 year=year, finalized=True)
    for year in (2011, 2010, 2009, 2012)
]

# Each questionnaire gets one identical "Reported Cases" section.
for _questionnaire in (questionnaire1, questionnaire2,
                       questionnaire3, questionnaire4):
    Section.objects.create(title=_SECTION_TITLE, order=1,
                           questionnaire=_questionnaire,
                           name="Reported Cases")
from belief_propagation import \
tree_sum_product, tree_max_product, \
tree_max_sum, tree_network_map_assignment
from mplp import mplp
from mrf import Factor, Network
import uai
from ve import condition_eliminate, eliminate, \
greedy_ordering, min_fill
| blr246/mrf | mrf/__init__.py | Python | mit | 265 |
"""Methods that support running tests"""
import time
import collections
import multiprocessing
from alarmageddon.config import Config
from alarmageddon.reporter import Reporter
from alarmageddon.publishing import hipchat, pagerduty, graphite, junit
from alarmageddon.validations.validation import Priority
from alarmageddon.result import Success, Failure
from alarmageddon import banner
def load_config(config_path, environment_name):
    """Helper method for loading a :py:class:`~alarmageddon.config.Config`
    :param config_path: Path to the JSON configuration file.
    :param environment_name: The config environment to run Alarmageddon in.
    :returns: A :py:class:`~alarmageddon.config.Config` built from the file.
    """
    return Config.from_file(config_path, environment_name)
def run_tests(validations, publishers=None, config_path=None,
              environment_name=None, config=None, dry_run=False,
              processes=1, print_banner=True):
    """Main entry point into Alarmageddon.
    Run the given validations and report them to given publishers.
    Either both `config_path` and `environment_name` should not be None,
    or `config` should not be None.
    :param validations: List of :py:class:`~.validation.Validation` objects
      that Alarmageddon will perform.
    :param publishers: List of :py:class:`~.publisher.Publisher`
      objects that Alarmageddon will publish validation results to.
    :param config_path: Path to a JSON config file.
    :param environment_name: The config environment that Alarmageddon will
      run in.
    :param config: A :py:class:`~config.Config` object that Alarmageddon
      will use to configure itself.
    :param dry_run: When True, will prevent Alarmageddon from performing
      validations or publishing results, and instead will print which
      validations will be published by which publishers upon failure.
    :param processes: The number of worker processes to spawn. Does not
      spawn additional processes if set to 1.
    :param print_banner: When True, print the Alarmageddon banner.
    """
    # Copy rather than mutate: appending the JUnit publisher directly to
    # the caller's list would leak an extra publisher into any list the
    # caller reuses across invocations.
    publishers = list(publishers) if publishers else []
    publishers.append(junit.JUnitPublisher("results.xml"))
    # We assume that if one is calling run_tests one actually wanted
    # to run some tests, not just fail silently
    if not validations:
        raise ValueError("run_tests expected non-empty list of validations, " +
                         "got {} instead".format(validations))
    # Either use the config that was passed in or attempt to load the config
    # ourselves.
    if not config:
        if config_path and environment_name:
            config = load_config(config_path, environment_name)
        else:
            raise ValueError("Please specify either a Config object or " +
                             "both a config_path and an environment_name")
    if print_banner:
        banner.print_banner(True)
    #always dry run. this will catch weird issues with enrichment
    do_dry_run(validations, publishers)
    if not dry_run:
        # run all of the tests
        _run_validations(validations, Reporter(publishers), processes)
def _run_validations(validations, reporter, processes):
    """ Run the given validations and publish the results
    Sort validations by order and then run them. All results are logged
    to the given reporter. Once everything has been run, the reporter
    will publish.
    :param validations: List of :py:class:`~.validation.Validation` objects
      that Alarmageddon will perform.
    :param reporter: :py:class:`~.reporter.Reporter` object that will
      collect validation results and then report those results to its
      publishers.
    :param processes: The number of worker processes to spawn. Does not
      spawn additional processes if set to 1.
    """
    if processes > 1:
        pool = multiprocessing.Pool(processes)
    else:
        pool = None
    # Bucket validations by their 'order' stage; stages run sequentially.
    order_dict = collections.defaultdict(list)
    for validation in validations:
        order_dict[validation.order].append(validation)
    ordered_validations = [l for _, l in sorted(order_dict.items())]
    # Track failures per validation group so later stages can inspect
    # what already failed.
    group_failures = {}
    for validation in validations:
        if (validation.group is not None and
                validation.group not in group_failures):
            group_failures[validation.group] = []
    for order_set in ordered_validations:
        # Snapshot of failures so far; workers must not mutate the shared dict.
        immutable_group_failures = dict(group_failures)
        wrapped = [(valid, immutable_group_failures) for valid in order_set]
        if pool is None:
            results = map(_parallel_perform, wrapped)
        else:
            #map can't handle unpickleable validations
            #asynch with get is so we can handle ctrl-c
            results = pool.map_async(_parallel_perform, wrapped).get(999999)
        for result in results:
            # Record new group failures before the next stage runs.
            if result.is_failure() and result.validation.group is not None:
                group_failures[result.validation.group].append(result.description())
            reporter.collect(result)
    reporter.report()
def _parallel_perform(wrapped_info):
    """Unpack a (validation, group_failures) tuple and run it.

    Exists because multiprocessing's map passes a single argument.
    """
    return _perform(*wrapped_info)
def _perform(validation, immutable_group_failures):
    """Run a single validation and wrap the outcome in a result object.

    :param validation: The :py:class:`~.validation.Validation` to perform.
    :param immutable_group_failures: Snapshot of failures recorded for each
      validation group in earlier order stages.
    :returns: A :py:class:`~.result.Success` or :py:class:`~.result.Failure`
      carrying the elapsed time.
    """
    start = time.time()
    try:
        validation.perform(immutable_group_failures)
        result = Success(validation.name, validation,
                         time=time.time() - start)
    # 'except ... as' (rather than the old 'except ..., e' form) keeps this
    # module importable on Python 3 and is valid back to Python 2.6.
    except Exception as e:
        result = Failure(validation.name, validation, str(e),
                         time=time.time() - start)
    # Prefer the validation's own timing when it provides one.
    try:
        result.time = validation.get_elapsed_time()
    except NotImplementedError:
        pass
    return result
def do_dry_run(validations, publishers):
    """Print which validations will be published by which publishers.

    Assume all validations fail and list the messages that would have
    been published.

    :param validations: List of :py:class:`~.validation.Validation` objects
      that Alarmageddon would perform.
    :param publishers: List of :py:class:`~.publisher.Publisher`
      objects that Alarmageddon would publish validation results to.
    """
    associations = _compute_dry_run(validations, publishers)
    ranked = sorted(associations.keys(), reverse=True,
                    key=lambda pub: pub.priority_threshold)
    for pub in ranked:
        print("Publisher: %s (threshold: %s)" % (
            pub.name(), Priority.string(pub.priority_threshold)))
        for validation in associations[pub]:
            print("    %s (priority: %s)" % (
                validation.name, Priority.string(validation.priority)))
def _compute_dry_run(validations, publishers):
    """Map each publisher to the validations it would publish on failure.

    Provides programmatic access to the association between publishers
    and validations. Return is of the form {publisher:[validation,...],...}.
    """
    # Highest-priority validations first, mirroring the dry-run printout.
    ordered = sorted(validations, reverse=True, key=lambda v: v.priority)
    associations = {}
    for publisher in publishers:
        matches = []
        for validation in ordered:
            # Probe with a synthetic failure; publishers decide based on it.
            probe = Failure(validation.name, validation, "failure")
            if publisher.will_publish(probe):
                matches.append(validation)
        associations[publisher] = matches
    return associations
def construct_publishers(config):
    """Construct the built-in publishers.

    Each publisher is created only when all of its config keys are
    present; a missing key (KeyError) silently skips that publisher.

    :param config: Config object to construct the publishers from.
    :returns: List of the successfully constructed publishers.
    """
    publishers = []
    try:
        publishers.append(hipchat.HipChatPublisher(
            config["hipchat_host"],
            config["hipchat_token"],
            config.environment_name(),
            config["hipchat_room"],
            Priority.NORMAL))
    except KeyError:
        pass
    try:
        publishers.append(pagerduty.PagerDutyPublisher(
            config["pagerduty_host"],
            config["pagerduty_token"],
            Priority.CRITICAL))
    except KeyError:
        pass
    try:
        publishers.append(graphite.GraphitePublisher(
            config["graphite_host"],
            config.get("graphite_port"),
            Priority.LOW))
    except KeyError:
        pass
    return publishers
| curtisallen/Alarmageddon | alarmageddon/run.py | Python | apache-2.0 | 8,227 |
#! /usr/bin/env python3
"""Tool for measuring execution time of small code snippets.
This module avoids a number of common traps for measuring execution
times. See also Tim Peters' introduction to the Algorithms chapter in
the Python Cookbook, published by O'Reilly.
Library usage: see the Timer class.
Command line usage:
python timeit.py [-n N] [-r N] [-s S] [-p] [-h] [--] [statement]
Options:
-n/--number N: how many times to execute 'statement' (default: see below)
-r/--repeat N: how many times to repeat the timer (default 5)
-s/--setup S: statement to be executed once initially (default 'pass').
Execution time of this setup statement is NOT timed.
-p/--process: use time.process_time() (default is time.perf_counter())
-v/--verbose: print raw timing results; repeat for more digits precision
-u/--unit: set the output time unit (nsec, usec, msec, or sec)
-h/--help: print this usage message and exit
--: separate options from statement, use when statement starts with -
statement: statement to be timed (default 'pass')
A multi-line statement may be given by specifying each line as a
separate argument; indented lines are possible by enclosing an
argument in quotes and using leading spaces. Multiple -s options are
treated similarly.
If -n is not given, a suitable number of loops is calculated by trying
increasing numbers from the sequence 1, 2, 5, 10, 20, 50, ... until the
total time is at least 0.2 seconds.
Note: there is a certain baseline overhead associated with executing a
pass statement. It differs between versions. The code here doesn't try
to hide it, but you should be aware of it. The baseline overhead can be
measured by invoking the program without arguments.
Classes:
Timer
Functions:
timeit(string, string) -> float
repeat(string, string) -> list
default_timer() -> float
"""
import gc
import sys
import time
import itertools
__all__ = ["Timer", "timeit", "repeat", "default_timer"]
dummy_src_name = "<timeit-src>"
default_number = 1000000
default_repeat = 5
default_timer = time.perf_counter
_globals = globals
# Don't change the indentation of the template; the reindent() calls
# in Timer.__init__() depend on setup being indented 4 spaces and stmt
# being indented 8 spaces.
template = """
def inner(_it, _timer{init}):
{setup}
_t0 = _timer()
for _i in _it:
{stmt}
pass
_t1 = _timer()
return _t1 - _t0
"""
def reindent(src, indent):
    """Indent every line of *src* after the first by *indent* extra spaces."""
    pad = "\n" + " " * indent
    return pad.join(src.split("\n"))
class Timer:
    """Class for timing execution speed of small code snippets.
    The constructor takes a statement to be timed, an additional
    statement used for setup, and a timer function. Both statements
    default to 'pass'; the timer function is platform-dependent (see
    module doc string). If 'globals' is specified, the code will be
    executed within that namespace (as opposed to inside timeit's
    namespace).
    To measure the execution time of the first statement, use the
    timeit() method. The repeat() method is a convenience to call
    timeit() multiple times and return a list of results.
    The statements may contain newlines, as long as they don't contain
    multi-line string literals.
    """
    def __init__(self, stmt="pass", setup="pass", timer=default_timer,
                 globals=None):
        """Constructor.  See class doc string."""
        self.timer = timer
        local_ns = {}
        global_ns = _globals() if globals is None else globals
        init = ''
        if isinstance(setup, str):
            # Check that the code can be compiled outside a function
            compile(setup, dummy_src_name, "exec")
            stmtprefix = setup + '\n'
            setup = reindent(setup, 4)
        elif callable(setup):
            # Callable setup is injected via a keyword arg on inner().
            local_ns['_setup'] = setup
            init += ', _setup=_setup'
            stmtprefix = ''
            setup = '_setup()'
        else:
            raise ValueError("setup is neither a string nor callable")
        if isinstance(stmt, str):
            # Check that the code can be compiled outside a function
            compile(stmtprefix + stmt, dummy_src_name, "exec")
            stmt = reindent(stmt, 8)
        elif callable(stmt):
            local_ns['_stmt'] = stmt
            init += ', _stmt=_stmt'
            stmt = '_stmt()'
        else:
            raise ValueError("stmt is neither a string nor callable")
        # Render the timing function source and compile it; inner() is
        # fetched back out of the exec'd namespace.
        src = template.format(stmt=stmt, setup=setup, init=init)
        self.src = src  # Save for traceback display
        code = compile(src, dummy_src_name, "exec")
        exec(code, global_ns, local_ns)
        self.inner = local_ns["inner"]
    def print_exc(self, file=None):
        """Helper to print a traceback from the timed code.
        Typical use:
            t = Timer(...)       # outside the try/except
            try:
                t.timeit(...)    # or t.repeat(...)
            except:
                t.print_exc()
        The advantage over the standard traceback is that source lines
        in the compiled template will be displayed.
        The optional file argument directs where the traceback is
        sent; it defaults to sys.stderr.
        """
        import linecache, traceback
        if self.src is not None:
            # Seed linecache so the traceback can show the generated source.
            linecache.cache[dummy_src_name] = (len(self.src),
                                               None,
                                               self.src.split("\n"),
                                               dummy_src_name)
        # else the source is already stored somewhere else
        traceback.print_exc(file=file)
    def timeit(self, number=default_number):
        """Time 'number' executions of the main statement.
        To be precise, this executes the setup statement once, and
        then returns the time it takes to execute the main statement
        a number of times, as a float measured in seconds. The
        argument is the number of times through the loop, defaulting
        to one million. The main statement, the setup statement and
        the timer function to be used are passed to the constructor.
        """
        it = itertools.repeat(None, number)
        # Disable GC during the run: collection pauses would distort timing.
        gcold = gc.isenabled()
        gc.disable()
        try:
            timing = self.inner(it, self.timer)
        finally:
            if gcold:
                gc.enable()
        return timing
    def repeat(self, repeat=default_repeat, number=default_number):
        """Call timeit() a few times.
        This is a convenience function that calls the timeit()
        repeatedly, returning a list of results. The first argument
        specifies how many times to call timeit(), defaulting to 5;
        the second argument specifies the timer argument, defaulting
        to one million.
        Note: it's tempting to calculate mean and standard deviation
        from the result vector and report these. However, this is not
        very useful. In a typical case, the lowest value gives a
        lower bound for how fast your machine can run the given code
        snippet; higher values in the result vector are typically not
        caused by variability in Python's speed, but by other
        processes interfering with your timing accuracy. So the min()
        of the result is probably the only number you should be
        interested in. After that, you should look at the entire
        vector and apply common sense rather than statistics.
        """
        r = []
        for i in range(repeat):
            t = self.timeit(number)
            r.append(t)
        return r
    def autorange(self, callback=None):
        """Return the number of loops and time taken so that total time >= 0.2.
        Calls the timeit method with increasing numbers from the sequence
        1, 2, 5, 10, 20, 50, ... until the time taken is at least 0.2
        second. Returns (number, time_taken).
        If *callback* is given and is not None, it will be called after
        each trial with two arguments: ``callback(number, time_taken)``.
        """
        i = 1
        while True:
            # 1-2-5 progression per decade.
            for j in 1, 2, 5:
                number = i * j
                time_taken = self.timeit(number)
                if callback:
                    callback(number, time_taken)
                if time_taken >= 0.2:
                    return (number, time_taken)
            i *= 10
def timeit(stmt="pass", setup="pass", timer=default_timer,
           number=default_number, globals=None):
    """Convenience function: build a Timer and run its timeit() once."""
    t = Timer(stmt, setup, timer, globals)
    return t.timeit(number)
def repeat(stmt="pass", setup="pass", timer=default_timer,
           repeat=default_repeat, number=default_number, globals=None):
    """Convenience function: build a Timer and run its repeat() once."""
    t = Timer(stmt, setup, timer, globals)
    return t.repeat(repeat, number)
def main(args=None, *, _wrap_timer=None):
    """Main program, used when run as a script.
    The optional 'args' argument specifies the command line to be parsed,
    defaulting to sys.argv[1:].
    The return value is an exit code to be passed to sys.exit(); it
    may be None to indicate success.
    When an exception happens during timing, a traceback is printed to
    stderr and the return value is 1.  Exceptions at other times
    (including the template compilation) are not caught.
    '_wrap_timer' is an internal interface used for unit testing.  If it
    is not None, it must be a callable that accepts a timer function
    and returns another timer function (used for unit testing).
    """
    if args is None:
        args = sys.argv[1:]
    import getopt
    try:
        opts, args = getopt.getopt(args, "n:u:s:r:tcpvh",
                                   ["number=", "setup=", "repeat=",
                                    "time", "clock", "process",
                                    "verbose", "unit=", "help"])
    except getopt.error as err:
        print(err)
        print("use -h/--help for command line help")
        return 2
    timer = default_timer
    stmt = "\n".join(args) or "pass"
    number = 0  # auto-determine
    setup = []
    repeat = default_repeat
    verbose = 0
    time_unit = None
    units = {"nsec": 1e-9, "usec": 1e-6, "msec": 1e-3, "sec": 1.0}
    precision = 3
    for o, a in opts:
        if o in ("-n", "--number"):
            number = int(a)
        if o in ("-s", "--setup"):
            setup.append(a)
        if o in ("-u", "--unit"):
            if a in units:
                time_unit = a
            else:
                print("Unrecognized unit. Please select nsec, usec, msec, or sec.",
                      file=sys.stderr)
                return 2
        if o in ("-r", "--repeat"):
            repeat = int(a)
            if repeat <= 0:
                repeat = 1
        if o in ("-p", "--process"):
            timer = time.process_time
        if o in ("-v", "--verbose"):
            # Each extra -v adds one digit of precision to the output.
            if verbose:
                precision += 1
            verbose += 1
        if o in ("-h", "--help"):
            print(__doc__, end=' ')
            return 0
    setup = "\n".join(setup) or "pass"
    # Include the current directory, so that local imports work (sys.path
    # contains the directory of this script, rather than the current
    # directory)
    import os
    sys.path.insert(0, os.curdir)
    if _wrap_timer is not None:
        timer = _wrap_timer(timer)
    t = Timer(stmt, setup, timer)
    if number == 0:
        # determine number so that 0.2 <= total time < 2.0
        callback = None
        if verbose:
            def callback(number, time_taken):
                msg = "{num} loop{s} -> {secs:.{prec}g} secs"
                plural = (number != 1)
                print(msg.format(num=number, s='s' if plural else '',
                                 secs=time_taken, prec=precision))
        try:
            number, _ = t.autorange(callback)
        except:
            t.print_exc()
            return 1
        if verbose:
            print()
    try:
        raw_timings = t.repeat(repeat, number)
    except:
        t.print_exc()
        return 1
    def format_time(dt):
        """Render a duration using the requested or best-fitting unit."""
        unit = time_unit
        if unit is not None:
            scale = units[unit]
        else:
            # Pick the largest unit that keeps the value >= 1.
            scales = [(scale, unit) for unit, scale in units.items()]
            scales.sort(reverse=True)
            for scale, unit in scales:
                if dt >= scale:
                    break
        return "%.*g %s" % (precision, dt / scale, unit)
    if verbose:
        print("raw times: %s" % ", ".join(map(format_time, raw_timings)))
        print()
    timings = [dt / number for dt in raw_timings]
    # Compute best once; it is reused for both the report and the
    # reliability warning (the original computed min(timings) twice).
    best = min(timings)
    print("%d loop%s, best of %d: %s per loop"
          % (number, 's' if number != 1 else '',
             repeat, format_time(best)))
    worst = max(timings)
    if worst >= best * 4:
        import warnings
        warnings.warn_explicit("The test results are likely unreliable. "
                               "The worst time (%s) was more than four times "
                               "slower than the best time (%s)."
                               % (format_time(worst), format_time(best)),
                               UserWarning, '', 0)
    return None
# Allow the module to be run directly (``python timeit.py`` / ``python -m timeit``).
if __name__ == "__main__":
    sys.exit(main())
| brython-dev/brython | www/src/Lib/timeit.py | Python | bsd-3-clause | 13,495 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image-to-text model and training configurations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class ModelConfig(object):
  """Container for the image-to-text model hyperparameters.

  All values are plain attributes, so callers may adjust them after
  construction; the defaults below are the published configuration.
  """

  def __init__(self):
    """Populates every model hyperparameter with its default value."""
    # Sharded TFRecord input of SequenceExample protos; must be supplied
    # for training and evaluation.
    self.input_file_pattern = None
    # Encoding of the input images ("jpeg" or "png").
    self.image_format = "jpeg"
    # Approximate number of values per input shard; used to guarantee
    # sufficient mixing between shards during training.
    self.values_per_input_shard = 2300
    # Minimum number of shards kept in the input queue.
    self.input_queue_capacity_factor = 2
    # Threads prefetching SequenceExample protos.
    self.num_input_reader_threads = 1
    # Context feature holding the encoded image bytes.
    self.image_feature_name = "image/encoded"
    # Feature holding the caption strings.
    self.caption_feature_name = "image/caption"
    # Vocabulary size plus one (for <UNK>).  Deliberately larger than the
    # expected vocab so tokenizer drift cannot cause an out-of-range error;
    # an overestimate is harmless, an underestimate is not.
    self.vocab_size = 10002
    # Image preprocessing threads; should be a multiple of 2.
    self.num_preprocess_threads = 8
    # Examples per batch.
    self.batch_size = 32
    # Inception v3 checkpoint used to initialize the image encoder;
    # required the first time training starts.
    self.inception_checkpoint_file = None
    # Input resolution expected by Inception v3.
    self.image_height = 299
    self.image_width = 299
    # Scale of the initializer for model variables.
    self.initializer_scale = 0.08
    # LSTM input (embedding) and hidden dimensionality.
    self.embedding_size = 512
    self.num_lstm_units = 512
    # Dropout keep probability applied to the LSTM when < 1.0.
    self.lstm_dropout_keep_prob = 0.7
class TrainingConfig(object):
  """Container for the optimization hyperparameters."""

  def __init__(self):
    """Populates every training hyperparameter with its default value."""
    # Size of one epoch of training data, in examples.
    self.num_examples_per_epoch = 586363
    # Name of the optimizer used to train the model.
    self.optimizer = "SGD"
    # Learning-rate schedule for the initial phase of training.
    self.initial_learning_rate = 2.0
    self.learning_rate_decay_factor = 0.5
    self.num_epochs_per_decay = 8.0
    # Learning rate while fine-tuning the Inception v3 weights.
    self.train_inception_learning_rate = 0.0005
    # Gradient clipping threshold; None disables clipping.
    self.clip_gradients = 5.0
    # Number of recent model checkpoints to retain.
    self.max_checkpoints_to_keep = 5
| mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/tpu/models/experimental/show_and_tell/configuration.py | Python | apache-2.0 | 3,771 |
"""
Support for Xeoma Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.xeoma/
"""
import logging
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME)
from homeassistant.helpers import config_validation as cv
REQUIREMENTS = ['pyxeoma==1.4.1']
_LOGGER = logging.getLogger(__name__)
CONF_CAMERAS = 'cameras'
CONF_HIDE = 'hide'
CONF_IMAGE_NAME = 'image_name'
CONF_NEW_VERSION = 'new_version'
CONF_VIEWER_PASSWORD = 'viewer_password'
CONF_VIEWER_USERNAME = 'viewer_username'
CAMERAS_SCHEMA = vol.Schema({
vol.Required(CONF_IMAGE_NAME): cv.string,
vol.Optional(CONF_HIDE, default=False): cv.boolean,
vol.Optional(CONF_NAME): cv.string,
}, required=False)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_CAMERAS):
vol.Schema(vol.All(cv.ensure_list, [CAMERAS_SCHEMA])),
vol.Optional(CONF_NEW_VERSION, default=True): cv.boolean,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Discover and setup Xeoma Cameras."""
    from pyxeoma.xeoma import Xeoma, XeomaError

    host = config[CONF_HOST]
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    xeoma = Xeoma(host, username, password)

    try:
        await xeoma.async_test_connection()
        image_names = await xeoma.async_get_image_names()

        # One camera record per image advertised by the Xeoma server.
        discovered = [
            {
                CONF_IMAGE_NAME: image,
                CONF_HIDE: False,
                CONF_NAME: image,
                CONF_VIEWER_USERNAME: viewer_user,
                CONF_VIEWER_PASSWORD: viewer_pw,
            }
            for image, viewer_user, viewer_pw in image_names
        ]

        # Index by image name (first occurrence wins) so user-supplied
        # overrides can be applied in one lookup each.
        by_image = {}
        for cam in discovered:
            by_image.setdefault(cam[CONF_IMAGE_NAME], cam)
        for override in config.get(CONF_CAMERAS, []):
            target = by_image.get(override[CONF_IMAGE_NAME])
            if target is None:
                continue
            if CONF_NAME in override:
                target[CONF_NAME] = override[CONF_NAME]
            if CONF_HIDE in override:
                target[CONF_HIDE] = override[CONF_HIDE]

        # Entities are only created for cameras that are not hidden.
        visible = [cam for cam in discovered if not cam[CONF_HIDE]]
        async_add_entities(
            [XeomaCamera(xeoma, cam[CONF_IMAGE_NAME], cam[CONF_NAME],
                         cam[CONF_VIEWER_USERNAME],
                         cam[CONF_VIEWER_PASSWORD]) for cam in visible])
    except XeomaError as err:
        _LOGGER.error("Error: %s", err.message)
        return
class XeomaCamera(Camera):
    """Representation of a single Xeoma camera feed."""

    def __init__(self, xeoma, image, name, username, password):
        """Store the server handle and per-camera viewer credentials."""
        super().__init__()
        self._xeoma = xeoma
        self._name = name
        self._image = image
        self._username = username
        self._password = password
        # Most recent frame; served again if a later fetch fails.
        self._last_image = None

    async def async_camera_image(self):
        """Return a still image response from the camera."""
        from pyxeoma.xeoma import XeomaError

        try:
            frame = await self._xeoma.async_get_camera_image(
                self._image, self._username, self._password)
        except XeomaError as err:
            _LOGGER.error("Error fetching image: %s", err.message)
        else:
            self._last_image = frame
        return self._last_image

    @property
    def name(self):
        """Return the name of this device."""
        return self._name
| jamespcole/home-assistant | homeassistant/components/xeoma/camera.py | Python | apache-2.0 | 3,817 |
# Minimal trainer configuration exercising the hsigmoid output layer.
from paddle.trainer_config_helpers import *
# Global optimization settings for this test configuration.
settings(learning_rate=1e-4, batch_size=1000)
# 100-dimensional dense input and a 10-class integer label.
din = data_layer(name='data', size=100)
label = data_layer(name='label', size=10)
# Hierarchical sigmoid classifier over the 10 classes.
outputs(hsigmoid(input=din, label=label, num_classes=10))
| emailweixu/Paddle | python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py | Python | apache-2.0 | 233 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring, long, unicode
import functools
from collections import Mapping
from datetime import datetime
from sqlalchemy import extract, func
from sqlalchemy.orm import synonym
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from flexget.manager import Session
from flexget.utils import qualities, json
from flexget.entry import Entry
def with_session(*args, **kwargs):
    """
    A decorator which creates a new session if one was not passed via keyword argument to the function.

    Automatically commits and closes the session if one was created, caller is responsible for commit if passed in.

    If arguments are given when used as a decorator, they will automatically be passed to the created Session when
    one is not supplied.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            # A caller-supplied session is used as-is; the caller manages
            # its lifetime and commit.
            if kwargs.get('session'):
                return func(*args, **kwargs)
            # Otherwise open our own session; the context manager commits
            # and closes it when the wrapped call returns.
            with _Session() as session:
                kwargs['session'] = session
                return func(*args, **kwargs)
        return wrapper
    if len(args) == 1 and not kwargs and callable(args[0]):
        # Used without arguments, e.g. @with_session
        # We default to expire_on_commit being false, in case the decorated function returns db instances
        _Session = functools.partial(Session, expire_on_commit=False)
        return decorator(args[0])
    else:
        # Arguments were specified, turn them into arguments for Session creation e.g. @with_session(autocommit=True)
        _Session = functools.partial(Session, *args, **kwargs)
        return decorator
def pipe_list_synonym(name):
    """Map a pipe-delimited text column onto a Python list attribute."""
    def _get(self):
        raw = getattr(self, name)
        # Falsy (None/empty) columns yield None implicitly, as before.
        if raw:
            return raw.strip('|').split('|')
    def _set(self, value):
        if isinstance(value, str):
            setattr(self, name, value)
        else:
            setattr(self, name, '|'.join(value))
    return synonym(name, descriptor=property(_get, _set))
def text_date_synonym(name):
    """Map 'YYYY-MM-DD' strings onto a datetime column."""
    def _get(self):
        return getattr(self, name)
    def _set(self, value):
        if not isinstance(value, basestring):
            # Non-string values (datetime, None, ...) are stored unchanged.
            setattr(self, name, value)
            return
        try:
            parsed = datetime.strptime(value, '%Y-%m-%d')
        except ValueError:
            # Invalid date string given, store None instead.
            parsed = None
        setattr(self, name, parsed)
    return synonym(name, descriptor=property(_get, _set))
def entry_synonym(name):
    """Use json to serialize python objects for db storage.

    The underlying column stores JSON text; the mapped attribute accepts an
    Entry (or plain dict) on assignment and returns an Entry on access.
    """

    def only_builtins(item):
        """Recursively reduce `item` to builtin types, dropping members that
        cannot be converted.

        Raises TypeError for a value that is not (a subclass of) a supported
        type.
        """
        supported_types = (str, unicode, int, float, long, bool, datetime)
        # dict, list, tuple and set are also supported, but handled separately
        if isinstance(item, supported_types):
            return item
        elif isinstance(item, Mapping):
            result = {}
            for key, value in item.items():
                try:
                    result[key] = only_builtins(value)
                except TypeError:
                    # Unconvertible values are silently dropped.
                    continue
            return result
        elif isinstance(item, (list, tuple, set)):
            result = []
            for value in item:
                try:
                    result.append(only_builtins(value))
                except TypeError:
                    # Bug fix: only_builtins raises TypeError (not ValueError)
                    # for unsupported values; catching TypeError makes this
                    # branch skip them, consistent with the Mapping branch.
                    continue
            if isinstance(item, list):
                return result
            elif isinstance(item, tuple):
                return tuple(result)
            else:
                return set(result)
        elif isinstance(item, qualities.Quality):
            return item.name
        else:
            # Instances of subclasses of builtins are converted to the base type.
            for s_type in supported_types:
                if isinstance(item, s_type):
                    return s_type(item)
        # If item isn't a subclass of a builtin python type, raise TypeError.
        raise TypeError('%r is not of type Entry.' % type(item))

    def getter(self):
        return Entry(json.loads(getattr(self, name), decode_datetime=True))

    def setter(self, entry):
        if isinstance(entry, Entry) or isinstance(entry, dict):
            setattr(
                self, name, unicode(json.dumps(only_builtins(dict(entry)), encode_datetime=True))
            )
        else:
            raise TypeError('%r is not of type Entry or dict.' % type(entry))

    return synonym(name, descriptor=property(getter, setter))
def json_synonym(name):
    """Expose a JSON-text column as a plain Python object."""
    def _get(self):
        return json.loads(getattr(self, name), decode_datetime=True)
    def _set(self, value):
        serialized = json.dumps(value, encode_datetime=True)
        setattr(self, name, unicode(serialized))
    return synonym(name, descriptor=property(_get, _set))
class CaseInsensitiveWord(Comparator):
    """Hybrid value representing a string that compares case insensitively."""

    def __init__(self, word):
        # Unwrap nested instances so self.word is always the raw value.
        self.word = word.word if isinstance(word, CaseInsensitiveWord) else word

    def lower(self):
        # Plain strings lower-case in Python; SQL expressions go through
        # the database's LOWER() via func.lower.
        if isinstance(self.word, str):
            return self.word.lower()
        return func.lower(self.word)

    def operate(self, op, other):
        # Normalize both operands before applying the comparison operator.
        if not isinstance(other, CaseInsensitiveWord):
            other = CaseInsensitiveWord(other)
        return op(self.lower(), other.lower())

    def __clause_element__(self):
        return self.lower()

    def __str__(self):
        return self.word

    def __getattr__(self, item):
        """Expose string methods to be called directly on this object."""
        return getattr(self.word, item)
def quality_property(text_attr):
    """Hybrid property exposing a text column as a qualities.Quality."""
    def _get(self):
        return qualities.Quality(getattr(self, text_attr))
    def _set(self, value):
        # Quality instances are stored by their canonical name.
        text = value if isinstance(value, str) else value.name
        setattr(self, text_attr, text)
    class QualComparator(Comparator):
        def operate(self, op, other):
            # Compare against Quality instances via their name.
            if isinstance(other, qualities.Quality):
                other = other.name
            return op(self.__clause_element__(), other)
    def _comparator(self):
        return QualComparator(getattr(self, text_attr))
    return hybrid_property(_get, _set).comparator(_comparator)
def quality_requirement_property(text_attr):
    """Hybrid property exposing a text column as qualities.Requirements."""
    def _get(self):
        return qualities.Requirements(getattr(self, text_attr))
    def _set(self, value):
        # Requirements instances are stored via their text form.
        text = value if isinstance(value, str) else value.text
        setattr(self, text_attr, text)
    return hybrid_property(_get, _set)
def ignore_case_property(text_attr):
    """Hybrid property wrapping a text column in CaseInsensitiveWord."""
    def _get(self):
        return CaseInsensitiveWord(getattr(self, text_attr))
    def _set(self, value):
        setattr(self, text_attr, value)
    return hybrid_property(_get, _set)
def year_property(date_attr):
    """Hybrid property yielding the year of a datetime column."""
    def _get(self):
        value = getattr(self, date_attr)
        # None propagates; otherwise return the year component.
        return value and value.year
    def _expr(cls):
        return extract('year', getattr(cls, date_attr))
    return hybrid_property(_get, expr=_expr)
| gazpachoking/Flexget | flexget/utils/database.py | Python | mit | 7,455 |
''' Classes for read / write of matlab (TM) 5 files
The matfile specification last found here:
http://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf
(as of December 5 2008)
'''
from __future__ import division, print_function, absolute_import
'''
=================================
Note on functions and mat files
=================================
The document above does not give any hints as to the storage of matlab
function handles, or anonymous function handles. I had therefore to
guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
``mxOPAQUE_CLASS`` by looking at example mat files.
``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
contain a struct matrix with a set pattern of fields. For anonymous
functions, a sub-fields of one of these fields seems to contain the
well-named ``mxOPAQUE_CLASS``. This seems to contain:
* array flags as for any matlab matrix
* 3 int8 strings
* a matrix
It seems that, whenever the mat file contains a ``mxOPAQUE_CLASS``
instance, there is also an un-named matrix (name == '') at the end of
the mat file. I'll call this the ``__function_workspace__`` matrix.
When I saved two anonymous functions in a mat file, or appended another
anonymous function to the mat file, there was still only one
``__function_workspace__`` un-named matrix at the end, but larger than
that for a mat file with a single anonymous function, suggesting that
the workspaces for the two functions had been merged.
The ``__function_workspace__`` matrix appears to be of double class
(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in
the format of a mini .mat file, without the first 124 bytes of the file
header (the description and the subsystem_offset), but with the version
U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
presumably for 8 byte padding, and then a series of ``miMATRIX``
entries, as in a standard mat file. The ``miMATRIX`` entries appear to
be series of un-named (name == '') matrices, and may also contain arrays
of this same mini-mat format.
I guess that:
* saving an anonymous function back to a mat file will need the
associated ``__function_workspace__`` matrix saved as well for the
anonymous function to work correctly.
* appending to a mat file that has a ``__function_workspace__`` would
involve first pulling off this workspace, appending, checking whether
there were any more anonymous functions appended, and then somehow
merging the relevant workspaces, and saving at the end of the mat
file.
The mat files I was playing with are in ``tests/data``:
* sqr.mat
* parabola.mat
* some_functions.mat
See ``tests/test_mio.py:test_mio_funcs.py`` for a debugging
script I was working with.
'''
# Small fragments of current code adapted from matfile.py by Heiko
# Henkelmann
import os
import time
import sys
import zlib
from io import BytesIO
import warnings
import numpy as np
from numpy.compat import asbytes, asstr
import scipy.sparse
from scipy.lib.six import string_types
from .byteordercodes import native_code, swapped_code
from .miobase import (MatFileReader, docfiller, matdims, read_dtype,
arr_to_chars, arr_dtype_number, MatWriteError,
MatReadError, MatReadWarning)
# Reader object for matlab 5 format variables
from .mio5_utils import VarReader5
# Constants and helper objects
from .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8, miUTF8,
miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
mxDOUBLE_CLASS, mclass_info)
from .streams import ZlibInputStream
class MatFile5Reader(MatFileReader):
    ''' Reader for Mat 5 mat files

    Adds the following attribute to base class

    uint16_codec - char codec to use for uint16 char arrays
        (defaults to system default codec)

    Uses variable reader that has the following standard interface (see
    abstract class in ``miobase``)::

       __init__(self, file_reader)
       read_header(self)
       array_from_header(self)

    and added interface::

       set_stream(self, stream)
       read_full_tag(self)
    '''
    @docfiller
    def __init__(self,
                 mat_stream,
                 byte_order=None,
                 mat_dtype=False,
                 squeeze_me=False,
                 chars_as_strings=True,
                 matlab_compatible=False,
                 struct_as_record=True,
                 uint16_codec=None
                 ):
        '''Initializer for matlab 5 file format reader

        %(matstream_arg)s
        %(load_args)s
        %(struct_arg)s
        uint16_codec : {None, string}
            Set codec to use for uint16 char arrays (e.g. 'utf-8').
            Use system default codec if None
        '''
        super(MatFile5Reader, self).__init__(
            mat_stream,
            byte_order,
            mat_dtype,
            squeeze_me,
            chars_as_strings,
            matlab_compatible,
            struct_as_record
            )
        # Set uint16 codec
        if not uint16_codec:
            uint16_codec = sys.getdefaultencoding()
        self.uint16_codec = uint16_codec
        # placeholders for readers - see initialize_read method
        self._file_reader = None
        self._matrix_reader = None
    def guess_byte_order(self):
        ''' Guess byte order.
        Sets stream pointer to 0 '''
        # Bytes 126-127 of the header hold the endian indicator; b'IM'
        # selects little-endian ('<'), anything else is read as big-endian.
        self.mat_stream.seek(126)
        mi = self.mat_stream.read(2)
        self.mat_stream.seek(0)
        return mi == b'IM' and '<' or '>'
    def read_file_header(self):
        ''' Read in mat 5 file header

        Returns a dict with '__header__' (description bytes, stripped) and
        '__version__' ("major.minor") entries.
        '''
        hdict = {}
        hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
        hdr = read_dtype(self.mat_stream, hdr_dtype)
        hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
        # Version word stores major in the high byte, minor in the low byte.
        v_major = hdr['version'] >> 8
        v_minor = hdr['version'] & 0xFF
        hdict['__version__'] = '%d.%d' % (v_major, v_minor)
        return hdict
    def initialize_read(self):
        ''' Run when beginning read of variables

        Sets up readers from parameters in `self`
        '''
        # reader for top level stream. We need this extra top-level
        # reader because we use the matrix_reader object to contain
        # compressed matrices (so they have their own stream)
        self._file_reader = VarReader5(self)
        # reader for matrix streams
        self._matrix_reader = VarReader5(self)
    def read_var_header(self):
        ''' Read header, return header, next position

        Header has to define at least .name and .is_global

        Parameters
        ----------
        None

        Returns
        -------
        header : object
           object that can be passed to self.read_var_array, and that
           has attributes .name and .is_global
        next_position : int
           position in stream of next variable
        '''
        mdtype, byte_count = self._file_reader.read_full_tag()
        if not byte_count > 0:
            raise ValueError("Did not read any bytes")
        next_pos = self.mat_stream.tell() + byte_count
        if mdtype == miCOMPRESSED:
            # Make new stream from compressed data
            stream = ZlibInputStream(self.mat_stream, byte_count)
            self._matrix_reader.set_stream(stream)
            # Re-read the tag from the decompressed stream.
            mdtype, byte_count = self._matrix_reader.read_full_tag()
        else:
            self._matrix_reader.set_stream(self.mat_stream)
        if not mdtype == miMATRIX:
            raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
        header = self._matrix_reader.read_header()
        return header, next_pos
    def read_var_array(self, header, process=True):
        ''' Read array, given `header`

        Parameters
        ----------
        header : header object
           object with fields defining variable header
        process : {True, False} bool, optional
           If True, apply recursive post-processing during loading of
           array.

        Returns
        -------
        arr : array
           array with post-processing applied or not according to
           `process`.
        '''
        return self._matrix_reader.array_from_header(header, process)
    def get_variables(self, variable_names=None):
        ''' get variables from stream as dictionary

        variable_names - optional list of variable names to get

        If variable_names is None, then get all variables in file
        '''
        if isinstance(variable_names, string_types):
            variable_names = [variable_names]
        elif variable_names is not None:
            # Copy so we can remove names as they are found.
            variable_names = list(variable_names)
        self.mat_stream.seek(0)
        # Here we pass all the parameters in self to the reading objects
        self.initialize_read()
        mdict = self.read_file_header()
        mdict['__globals__'] = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = asstr(hdr.name)
            if name in mdict:
                warnings.warn('Duplicate variable name "%s" in stream'
                              ' - replacing previous with new\n'
                              'Consider mio5.varmats_from_mat to split '
                              'file into single variable files' % name,
                              MatReadWarning, stacklevel=2)
            if name == '':
                # can only be a matlab 7 function workspace
                name = '__function_workspace__'
                # We want to keep this raw because mat_dtype processing
                # will break the format (uint8 as mxDOUBLE_CLASS)
                process = False
            else:
                process = True
            if variable_names and name not in variable_names:
                # Not requested; skip to the next variable.
                self.mat_stream.seek(next_position)
                continue
            try:
                res = self.read_var_array(hdr, process)
            except MatReadError as err:
                # Unreadable variables are reported and replaced by an
                # error string rather than aborting the whole load.
                warnings.warn(
                    'Unreadable variable "%s", because "%s"' %
                    (name, err),
                    Warning, stacklevel=2)
                res = "Read error: %s" % err
            self.mat_stream.seek(next_position)
            mdict[name] = res
            if hdr.is_global:
                mdict['__globals__'].append(name)
            if variable_names:
                variable_names.remove(name)
                if len(variable_names) == 0:
                    # All requested variables found; stop early.
                    break
        return mdict
    def list_variables(self):
        ''' list variables from stream

        Returns a list of (name, shape, class-info) tuples without reading
        the variable data itself.
        '''
        self.mat_stream.seek(0)
        # Here we pass all the parameters in self to the reading objects
        self.initialize_read()
        self.read_file_header()
        vars = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = asstr(hdr.name)
            if name == '':
                # can only be a matlab 7 function workspace
                name = '__function_workspace__'
            shape = self._matrix_reader.shape_from_header(hdr)
            if hdr.is_logical:
                info = 'logical'
            else:
                info = mclass_info.get(hdr.mclass, 'unknown')
            vars.append((name, shape, info))
            # Skip over the data to the next variable header.
            self.mat_stream.seek(next_position)
        return vars
def varmats_from_mat(file_obj):
    """ Pull variables out of mat 5 file as a sequence of mat file objects

    This can be useful with a difficult mat file, containing unreadable
    variables.  This routine pulls the variables out in raw form and puts them,
    unread, back into a file stream for saving or reading.  Another use is the
    pathological case where there is more than one variable of the same name in
    the file; this routine returns the duplicates, whereas the standard reader
    will overwrite duplicates in the returned dictionary.

    The file pointer in `file_obj` will be undefined.  File pointers for the
    returned file-like objects are set at 0.

    Parameters
    ----------
    file_obj : file-like
        file object containing mat file

    Returns
    -------
    named_mats : list
        list contains tuples of (name, BytesIO) where BytesIO is a file-like
        object containing mat file contents as for a single variable.  The
        BytesIO contains a string with the original header and a single var. If
        ``var_file_obj`` is an individual BytesIO instance, then save as a mat
        file with something like ``open('test.mat',
        'wb').write(var_file_obj.read())``

    Examples
    --------
    >>> import scipy.io

    BytesIO is from the ``io`` module in python 3, and is ``cStringIO`` for
    python < 3.

    >>> mat_fileobj = BytesIO()
    >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})
    >>> varmats = varmats_from_mat(mat_fileobj)
    >>> sorted([name for name, str_obj in varmats])
    ['a', 'b']
    """
    rdr = MatFile5Reader(file_obj)
    file_obj.seek(0)
    # Raw read of top-level file header
    hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
    raw_hdr = file_obj.read(hdr_len)
    # Initialize variable reading
    file_obj.seek(0)
    rdr.initialize_read()
    mdict = rdr.read_file_header()
    next_position = file_obj.tell()
    named_mats = []
    while not rdr.end_of_stream():
        start_position = next_position
        hdr, next_position = rdr.read_var_header()
        name = asstr(hdr.name)
        # Read raw variable string: rewind to the variable's tag and take
        # the bytes through to the start of the next variable, unparsed.
        file_obj.seek(start_position)
        byte_count = next_position - start_position
        var_str = file_obj.read(byte_count)
        # write to stringio object: original file header followed by the
        # single raw variable record, making a valid one-variable mat file.
        out_obj = BytesIO()
        out_obj.write(raw_hdr)
        out_obj.write(var_str)
        out_obj.seek(0)
        named_mats.append((name, out_obj))
    return named_mats
def to_writeable(source):
    ''' Convert input object ``source`` to something we can write

    Parameters
    ----------
    source : object

    Returns
    -------
    arr : ndarray

    Examples
    --------
    >>> to_writeable(np.array([1])) # pass through ndarrays
    array([1])
    >>> expected = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')])
    >>> np.all(to_writeable({'a':1,'b':2}) == expected)
    True
    >>> np.all(to_writeable({'a':1,'b':2, '_c':3}) == expected)
    True
    >>> np.all(to_writeable({'a':1,'b':2, 100:3}) == expected)
    True
    >>> np.all(to_writeable({'a':1,'b':2, '99':3}) == expected)
    True
    >>> class klass(object): pass
    >>> c = klass
    >>> c.a = 1
    >>> c.b = 2
    >>> np.all(to_writeable({'a':1,'b':2}) == expected)
    True
    >>> to_writeable([])
    array([], dtype=float64)
    >>> to_writeable(())
    array([], dtype=float64)
    >>> to_writeable(None)
    >>> to_writeable('a string').dtype.type == np.str_
    True
    >>> to_writeable(1)
    array(1)
    >>> to_writeable([1])
    array([1])
    >>> to_writeable([1])
    array([1])
    >>> to_writeable(object()) # not convertable

    dict keys with legal characters are convertible

    >>> to_writeable({'a':1})['a']
    array([1], dtype=object)

    but not with illegal characters

    >>> to_writeable({'1':1}) is None
    True
    >>> to_writeable({'_a':1}) is None
    True
    '''
    if isinstance(source, np.ndarray):
        return source
    if source is None:
        return None
    # Objects that implement mappings
    is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and
                  hasattr(source, 'items'))
    # Objects that don't implement mappings, but do have dicts
    if not is_mapping and hasattr(source, '__dict__'):
        # Treat a plain object's public attributes as a mapping.
        source = dict((key, value) for key, value in source.__dict__.items()
                      if not key.startswith('_'))
        is_mapping = True
    if is_mapping:
        dtype = []
        values = []
        for field, value in source.items():
            # Only string keys that do not start with '_' or a digit become
            # struct fields; other keys are silently dropped.
            if (isinstance(field, string_types) and
                    not field[0] in '_0123456789'):
                dtype.append((field,object))
                values.append(value)
        if dtype:
            # 1-element record array -> matlab struct on write.
            return np.array([tuple(values)],dtype)
        else:
            return None
    # Next try and convert to an array
    narr = np.asanyarray(source)
    if narr.dtype.type in (np.object, np.object_) and \
       narr.shape == () and narr == source:
        # No interesting conversion possible
        return None
    return narr
# Native byte ordered dtypes for convenience for writers
NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']  # whole-file header record
NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']  # full element tag
NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']  # tag with embedded data
NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']  # array flags sub-element
class VarWriter5(object):
    ''' Generic matlab matrix writing class '''
    # Template miMATRIX tag; its byte count is back-patched once a variable
    # has been fully written (see update_matrix_tag).
    mat_tag = np.zeros((), NDT_TAG_FULL)
    mat_tag['mdtype'] = miMATRIX
    def __init__(self, file_writer):
        # Share the owning file writer's stream and formatting options.
        self.file_stream = file_writer.file_stream
        self.unicode_strings = file_writer.unicode_strings
        self.long_field_names = file_writer.long_field_names
        self.oned_as = file_writer.oned_as
        # These are used for top level writes, and unset after
        self._var_name = None
        self._var_is_global = False
def write_bytes(self, arr):
self.file_stream.write(arr.tostring(order='F'))
    def write_string(self, s):
        # Write an already-encoded byte string as-is (no tag, no padding).
        self.file_stream.write(s)
def write_element(self, arr, mdtype=None):
''' write tag and data '''
if mdtype is None:
mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
# Array needs to be in native byte order
if arr.dtype.byteorder == swapped_code:
arr = arr.byteswap().newbyteorder()
byte_count = arr.size*arr.itemsize
if byte_count <= 4:
self.write_smalldata_element(arr, mdtype, byte_count)
else:
self.write_regular_element(arr, mdtype, byte_count)
    def write_smalldata_element(self, arr, mdtype, byte_count):
        # write tag with embedded data
        tag = np.zeros((), NDT_TAG_SMALL)
        # Pack byte count (high 16 bits) and mdtype (low 16 bits) into the
        # single combined word of the small-data tag.
        tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
        # if arr.tostring is < 4, the element will be zero-padded as needed.
        tag['data'] = arr.tostring(order='F')
        self.write_bytes(tag)
def write_regular_element(self, arr, mdtype, byte_count):
# write tag, data
tag = np.zeros((), NDT_TAG_FULL)
tag['mdtype'] = mdtype
tag['byte_count'] = byte_count
self.write_bytes(tag)
self.write_bytes(arr)
# pad to next 64-bit boundary
bc_mod_8 = byte_count % 8
if bc_mod_8:
self.file_stream.write(b'\x00' * (8-bc_mod_8))
    def write_header(self,
                     shape,
                     mclass,
                     is_complex=False,
                     is_logical=False,
                     nzmax=0):
        ''' Write header for given data options

        shape : sequence
           array shape
        mclass - mat5 matrix class
        is_complex - True if matrix is complex
        is_logical - True if matrix is logical
        nzmax - max non zero elements for sparse arrays

        We get the name and the global flag from the object, and reset
        them to defaults after we've used them
        '''
        # get name and is_global from one-shot object store
        name = self._var_name
        is_global = self._var_is_global
        # initialize the top-level matrix tag, store position
        # (the tag's byte count is back-patched later by update_matrix_tag)
        self._mat_tag_pos = self.file_stream.tell()
        self.write_bytes(self.mat_tag)
        # write array flags (complex, global, logical, class, nzmax)
        af = np.zeros((), NDT_ARRAY_FLAGS)
        af['data_type'] = miUINT32
        af['byte_count'] = 8
        # Flag bits: complex=bit 3, global=bit 2, logical=bit 1; packed into
        # the byte above the class code.
        flags = is_complex << 3 | is_global << 2 | is_logical << 1
        af['flags_class'] = mclass | flags << 8
        af['nzmax'] = nzmax
        self.write_bytes(af)
        # shape
        self.write_element(np.array(shape, dtype='i4'))
        # write name
        name = np.asarray(name)
        if name == '':  # empty string zero-terminated
            self.write_smalldata_element(name, miINT8, 0)
        else:
            self.write_element(name, miINT8)
        # reset the one-shot store to defaults
        self._var_name = ''
        self._var_is_global = False
def update_matrix_tag(self, start_pos):
curr_pos = self.file_stream.tell()
self.file_stream.seek(start_pos)
byte_count = curr_pos - start_pos - 8
if byte_count >= 2**32:
raise MatWriteError("Matrix too large to save with Matlab "
"5 format")
self.mat_tag['byte_count'] = byte_count
self.write_bytes(self.mat_tag)
self.file_stream.seek(curr_pos)
def write_top(self, arr, name, is_global):
""" Write variable at top level of mat file
Parameters
----------
arr : array-like
array-like object to create writer for
name : str, optional
name as it will appear in matlab workspace
default is empty string
is_global : {False, True}, optional
whether variable will be global on load into matlab
"""
# these are set before the top-level header write, and unset at
# the end of the same write, because they do not apply for lower levels
self._var_is_global = is_global
self._var_name = name
# write the header and data
self.write(arr)
    def write(self, arr):
        ''' Write `arr` to stream at top and sub levels

        Dispatches on the (converted) array's dtype to the appropriate
        specialised writer, then back-patches the matrix tag with the
        final byte count.

        Parameters
        ----------
        arr : array-like
            array-like object to create writer for
        '''
        # store position, so we can update the matrix tag
        mat_tag_pos = self.file_stream.tell()
        # First check if these are sparse
        if scipy.sparse.issparse(arr):
            self.write_sparse(arr)
            self.update_matrix_tag(mat_tag_pos)
            return
        # Try to convert things that aren't arrays
        narr = to_writeable(arr)
        if narr is None:
            raise TypeError('Could not convert %s (type %s) to array'
                            % (arr, type(arr)))
        if isinstance(narr, MatlabObject):
            self.write_object(narr)
        elif isinstance(narr, MatlabFunction):
            # MATLAB function handles cannot be round-tripped
            raise MatWriteError('Cannot write matlab functions')
        elif narr.dtype.fields: # struct array
            self.write_struct(narr)
        elif narr.dtype.hasobject: # cell array
            self.write_cells(narr)
        elif narr.dtype.kind in ('U', 'S'):
            # string data; unicode needs UTF-8 recoding on write
            if self.unicode_strings:
                codec = 'UTF8'
            else:
                codec = 'ascii'
            self.write_char(narr, codec)
        else:
            self.write_numeric(narr)
        self.update_matrix_tag(mat_tag_pos)
    def write_numeric(self, arr):
        """Write a numeric, boolean or complex ndarray as a MAT-5 matrix."""
        # complex / logical flags feed into the array-flags header element
        imagf = arr.dtype.kind == 'c'
        logif = arr.dtype.kind == 'b'
        try:
            mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
        except KeyError:
            # No matching matlab type, probably complex256 / float128 / float96
            # Cast data to complex128 / float64.
            if imagf:
                arr = arr.astype('c128')
            elif logif:
                arr = arr.astype('i1') # Should only contain 0/1
            else:
                arr = arr.astype('f8')
            mclass = mxDOUBLE_CLASS
        self.write_header(matdims(arr, self.oned_as),
                          mclass,
                          is_complex=imagf,
                          is_logical=logif)
        if imagf:
            # complex data is stored as separate real and imaginary parts
            self.write_element(arr.real)
            self.write_element(arr.imag)
        else:
            self.write_element(arr)
def write_char(self, arr, codec='ascii'):
''' Write string array `arr` with given `codec`
'''
if arr.size == 0 or np.all(arr == ''):
# This an empty string array or a string array containing
# only empty strings. Matlab cannot distiguish between a
# string array that is empty, and a string array containing
# only empty strings, because it stores strings as arrays of
# char. There is no way of having an array of char that is
# not empty, but contains an empty string. We have to
# special-case the array-with-empty-strings because even
# empty strings have zero padding, which would otherwise
# appear in matlab as a string with a space.
shape = (0,) * np.max([arr.ndim, 2])
self.write_header(shape, mxCHAR_CLASS)
self.write_smalldata_element(arr, miUTF8, 0)
return
# non-empty string.
#
# Convert to char array
arr = arr_to_chars(arr)
# We have to write the shape directly, because we are going
# recode the characters, and the resulting stream of chars
# may have a different length
shape = arr.shape
self.write_header(shape, mxCHAR_CLASS)
if arr.dtype.kind == 'U' and arr.size:
# Make one long string from all the characters. We need to
# transpose here, because we're flattening the array, before
# we write the bytes. The bytes have to be written in
# Fortran order.
n_chars = np.product(shape)
st_arr = np.ndarray(shape=(),
dtype=arr_dtype_number(arr, n_chars),
buffer=arr.T.copy()) # Fortran order
# Recode with codec to give byte string
st = st_arr.item().encode(codec)
# Reconstruct as one-dimensional byte array
arr = np.ndarray(shape=(len(st),),
dtype='S1',
buffer=st)
self.write_element(arr, mdtype=miUTF8)
    def write_sparse(self, arr):
        ''' Sparse matrices are 2D

        Writes the matrix in MATLAB's CSC layout: row indices, column
        pointers, real data, then (if complex) imaginary data.
        '''
        A = arr.tocsc() # convert to sparse CSC format
        A.sort_indices()     # MATLAB expects sorted row indices
        is_complex = (A.dtype.kind == 'c')
        is_logical = (A.dtype.kind == 'b')
        nz = A.nnz
        self.write_header(matdims(arr, self.oned_as),
                          mxSPARSE_CLASS,
                          is_complex=is_complex,
                          is_logical=is_logical,
                          nzmax=nz)
        # row indices and column pointers must be 32-bit integers
        self.write_element(A.indices.astype('i4'))
        self.write_element(A.indptr.astype('i4'))
        self.write_element(A.data.real)
        if is_complex:
            self.write_element(A.data.imag)
def write_cells(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxCELL_CLASS)
# loop over data, column major
A = np.atleast_2d(arr).flatten('F')
for el in A:
self.write(el)
def write_struct(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxSTRUCT_CLASS)
self._write_items(arr)
    def _write_items(self, arr):
        """Write the fieldnames element plus every field of every record.

        Shared by the struct and object writers.  Raises ValueError when a
        field name exceeds the MATLAB limit (31 chars, or 63 with
        long_field_names).
        """
        # write fieldnames
        fieldnames = [f[0] for f in arr.dtype.descr]
        # +1 for the terminating NUL in the fixed-width name records
        length = max([len(fieldname) for fieldname in fieldnames])+1
        max_length = (self.long_field_names and 64) or 32
        if length > max_length:
            raise ValueError(
                "Field names are restricted to %d characters"
                 % (max_length-1))
        self.write_element(np.array([length], dtype='i4'))
        self.write_element(
            np.array(fieldnames, dtype='S%d' % (length)),
            mdtype=miINT8)
        # records are written in Fortran (column-major) order, one
        # sub-matrix per field per record
        A = np.atleast_2d(arr).flatten('F')
        for el in A:
            for f in fieldnames:
                self.write(el[f])
def write_object(self, arr):
'''Same as writing structs, except different mx class, and extra
classname element after header
'''
self.write_header(matdims(arr, self.oned_as),
mxOBJECT_CLASS)
self.write_element(np.array(arr.classname, dtype='S'),
mdtype=miINT8)
self._write_items(arr)
class MatFile5Writer(object):
    ''' Class for writing mat5 files '''

    @docfiller
    def __init__(self, file_stream,
                 do_compression=False,
                 unicode_strings=False,
                 global_vars=None,
                 long_field_names=False,
                 oned_as='row'):
        ''' Initialize writer for matlab 5 format files

        Parameters
        ----------
        %(do_compression)s
        %(unicode_strings)s
        global_vars : None or sequence of strings, optional
            Names of variables to be marked as global for matlab
        %(long_fields)s
        %(oned_as)s
        '''
        self.file_stream = file_stream
        self.do_compression = do_compression
        self.unicode_strings = unicode_strings
        if global_vars:
            self.global_vars = global_vars
        else:
            self.global_vars = []
        self.long_field_names = long_field_names
        self.oned_as = oned_as
        # created lazily in put_variables
        self._matrix_writer = None

    def write_file_header(self):
        # write the 128-byte MAT-5 file header: free-text description,
        # version word, and the 'IM' endian-test string
        hdr = np.zeros((), NDT_FILE_HDR)
        hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
            % (os.name,time.asctime())
        hdr['version'] = 0x0100
        hdr['endian_test'] = np.ndarray(shape=(),
                                        dtype='S2',
                                        buffer=np.uint16(0x4d49))
        # ndarray.tostring() is a deprecated alias removed in numpy 2.0;
        # tobytes() produces byte-identical output.
        self.file_stream.write(hdr.tobytes())

    def put_variables(self, mdict, write_header=None):
        ''' Write variables in `mdict` to stream

        Parameters
        ----------
        mdict : mapping
          mapping with method ``items`` returns name, contents pairs where
          ``name`` which will appear in the matlab workspace in file load, and
          ``contents`` is something writeable to a matlab file, such as a numpy
          array.
        write_header : {None, True, False}
           If True, then write the matlab file header before writing the
           variables.  If None (the default) then write the file header
           if we are at position 0 in the stream.  By setting False
           here, and setting the stream position to the end of the file,
           you can append variables to a matlab file
        '''
        # write header if requested, or None and start of file
        if write_header is None:
            write_header = self.file_stream.tell() == 0
        if write_header:
            self.write_file_header()
        self._matrix_writer = VarWriter5(self)
        for name, var in mdict.items():
            # names starting with underscore are private and not saved
            if name[0] == '_':
                continue
            is_global = name in self.global_vars
            if self.do_compression:
                # write the variable into a scratch stream, then emit it
                # as a single zlib-compressed miCOMPRESSED element
                stream = BytesIO()
                self._matrix_writer.file_stream = stream
                self._matrix_writer.write_top(var, asbytes(name), is_global)
                out_str = zlib.compress(stream.getvalue())
                tag = np.empty((), NDT_TAG_FULL)
                tag['mdtype'] = miCOMPRESSED
                tag['byte_count'] = len(out_str)
                # tobytes() replaces the removed tostring() alias
                self.file_stream.write(tag.tobytes())
                self.file_stream.write(out_str)
            else: # not compressing
                self._matrix_writer.write_top(var, asbytes(name), is_global)
| kmspriyatham/symath | scipy/scipy/io/matlab/mio5.py | Python | apache-2.0 | 31,810 |
# -*- coding: utf-8 -*-
from struct import unpack, calcsize
import datetime
import zlib
import tempfile
import os
def read_string(file):
    """Read one length-prefixed UTF-16 string from an undeflated EFD file.

    The stream holds a 32-bit character count followed by the characters
    encoded as UTF-16 (two bytes per code unit).

    :param file: file being processed
    :type file: BufferedReader
    :return: the decoded string
    :rtype: string
    """
    n_chars = unpack('I', file.read(4))[0]
    n_bytes = 2 * n_chars  # each UTF-16 code unit occupies two bytes
    raw = unpack('%ds' % n_bytes, file.read(n_bytes))[0]
    return raw.decode('utf-16')
def read_supply_info(file):
    """Read the distribution-kit description record.

    :param file: undeflated EFD file
    :type file: BufferedReader
    :return: language, kit name, vendor name, path to the description file
    :rtype: tuple
    """
    file.read(4)  # purpose of these four bytes is unknown
    language = read_string(file)
    kit_name = read_string(file)
    vendor_name = read_string(file)
    descr_path = read_string(file)
    return language, kit_name, vendor_name, descr_path
def read_included_file_info(file):
    """Read the metadata record of an embedded file.

    :param file: undeflated EFD file
    :type file: BufferedReader
    :return: file name, creation time, file size in bytes
    :rtype: tuple
    """
    file.read(4)  # purpose of these four bytes is unknown
    name = read_string(file)
    raw_filetime = unpack('Q', file.read(8))[0]
    # FILETIME is a 64-bit count of 100-nanosecond intervals since 1601-01-01
    created = datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=raw_filetime/10)
    file.read(4)  # purpose of these four bytes is unknown
    size = unpack('I', file.read(4))[0]
    return name, created, size
class SupplyReader(object):
    """
    Reader for 1C supply (distribution kit) files.

    :param file: supply file (EFD)
    :type file: BufferedReader
    """
    # Size of one read block, bytes
    CHUNK_SIZE = 10*1024*1024
    def __init__(self, file):
        self.file = file
        # description: {language: (kit name, vendor, description path)}
        self.description = {}
        # list of (name, mtime, size) tuples for the embedded files
        self.included_files = []
    def unpack(self, output_dir):
        """
        Unpack the supply file.

        :param output_dir: extraction directory
        :type output_dir: string
        """
        with tempfile.TemporaryFile() as f:
            # -15: raw deflate stream, no zlib header or checksum
            decompressor = zlib.decompressobj(-15)
            while True:
                chunk = self.file.read(self.CHUNK_SIZE)
                if not chunk:
                    break
                f.write(decompressor.decompress(chunk))
            f.seek(0)
            header, supply_info_count = unpack('II', f.read(8))
            # In every supply file examined so far the header equalled 1.
            # Possibly this is the format version?
            assert header == 1
            for i in range(supply_info_count):
                lang, supply_name, provider_name, description_path = read_supply_info(f)
                self.description[lang] = supply_name, provider_name, description_path
            included_files_count = unpack('I', f.read(4))[0]
            for i in range(included_files_count):
                self.included_files.append(read_included_file_info(f))
            for included_file in self.included_files:
                src_path, mtime, size = included_file
                # The stored path always uses the \ separator (Windows style)
                path = os.path.join(
                    os.path.abspath(output_dir),
                    *src_path.split('\\')
                )
                dir_name = os.path.dirname(path)
                if not os.path.exists(dir_name):
                    os.makedirs(dir_name)
                with open(path, 'wb') as out_file:
                    # copy `size` bytes in CHUNK_SIZE blocks plus remainder
                    for i in range(size // self.CHUNK_SIZE):
                        out_file.write(f.read(self.CHUNK_SIZE))
                    out_file.write(f.read(size % self.CHUNK_SIZE))
                # restore the original modification time on the extracted file
                timestamp = mtime.timestamp()
                os.utime(path, (timestamp, timestamp))
import logging
import subprocess
log = logging.getLogger(__name__)
def run(command, stdin=None, cwd=None):
    """Run `command` through the shell, optionally feeding it `stdin`.

    Logs the command, its stdout (info) and stderr (error), and returns
    the (stdout, stderr) pair.
    """
    log.info('running: %s' % command)
    proc = subprocess.Popen(
        command,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=cwd)
    out, err = proc.communicate(stdin)
    if out:
        log.info(out)
    if err:
        log.error(err)
    return out, err
def sudo(command, stdin=None, cwd=None):
    # Escape embedded double quotes, then run the command as root via
    # "sudo su root -c ...".  Returns (stdout, stderr) like run().
    return run('sudo su root -c "%s"' % command.replace('"', '\\"'), stdin=stdin, cwd=cwd)
def remote(host, command, trust=False):
    """Run `command` on `host` over ssh as the ubuntu user.

    When `trust` is true, host key verification is disabled so first-time
    connections do not prompt.  Returns (stdout, stderr) from run().
    """
    base = "export PYTHONPATH=~/git/borkbork:~/git/msgme-api"
    base += " && export PATH=$PATH:~/git/borkbork/bin"
    # BUG FIX: the ssh option is case-sensitive and was misspelled
    # "StrictHostKeychecking", which ssh rejects; the correct spelling is
    # StrictHostKeyChecking.
    return run('ssh %subuntu@%s "%s && %s"' % (
        trust and '-o StrictHostKeyChecking=no ' or '', host, base, command))
def put(data, dest, sudo=False, mode=None):
    """Write `data` to the local file `dest` by piping it into ``cat``.

    Parameters
    ----------
    data : str
        content fed to ``cat`` on stdin
    dest : str
        destination path
    sudo : bool
        when true, write via the module-level sudo() helper and chown
        the file to root
    mode : str or None
        when given, chmod `dest` to this mode
    """
    # BUG FIX: the `sudo` parameter shadows the module-level sudo()
    # function, so the original `sh = sudo` bound the *boolean* True and
    # crashed ("bool is not callable") when invoked.  Fetch the helper
    # from the module namespace instead.
    sh = globals()['sudo'] if sudo else run
    sh('cat > %s' % dest, stdin=data)
    if sudo:
        sh('chown root %s' % dest)
    if mode is not None:
        sh('chmod %s %s' % (mode, dest))
| cablehead/bork | bork/shell.py | Python | mit | 1,142 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.buildslave.protocols import base
from twisted.internet import defer
class FakeConnection(base.Connection):
    """In-memory stand-in for a slave connection used in tests.

    Every ``remote*`` method appends a tuple describing the call to
    ``self.remoteCalls`` and returns an already-fired Deferred, so tests
    can assert on the exact sequence of protocol traffic without a real
    slave process.
    """

    def __init__(self, master, buildslave):
        base.Connection.__init__(self, master, buildslave)
        self._connected = True
        self.remoteCalls = []
        self.builders = {}  # { name : isBusy }

        # users of the fake can add to this as desired
        self.info = {
            'slave_commands': [],
            'version': '0.8.2',
            'basedir': '/sl',
            'system': 'nt',
        }

    def remotePrint(self, message):
        self.remoteCalls.append(('remotePrint', message))
        return defer.succeed(None)

    def remoteGetSlaveInfo(self):
        self.remoteCalls.append(('remoteGetSlaveInfo',))
        # BUG FIX: previously returned self.slaveInfo, an attribute that is
        # never set (__init__ stores the dict as self.info), so the first
        # call raised AttributeError.
        return defer.succeed(self.info)

    def remoteSetBuilderList(self, builders):
        self.remoteCalls.append(('remoteSetBuilderList', builders[:]))
        self.builders = dict((b, False) for b in builders)
        return defer.succeed(None)

    def remoteStartCommand(self, remoteCommand, builderName, commandId, commandName, args):
        self.remoteCalls.append(('remoteStartCommand', remoteCommand, builderName,
                                 commandId, commandName, args))
        return defer.succeed(None)

    def remoteShutdown(self):
        self.remoteCalls.append(('remoteShutdown',))
        return defer.succeed(None)

    def remoteStartBuild(self, builderName):
        self.remoteCalls.append(('remoteStartBuild', builderName))
        return defer.succeed(None)

    def remoteInterruptCommand(self, commandId, why):
        self.remoteCalls.append(('remoteInterruptCommand', commandId, why))
        return defer.succeed(None)
| zozo123/buildbot | master/buildbot/test/fake/fakeprotocol.py | Python | gpl-3.0 | 2,441 |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Finite-element second-order ODE solver'''
from scipy.sparse.linalg import spsolve
from scipy.sparse import csc_matrix
from horton.grid.cext import CubicSpline, build_ode2
__all__ = ['solve_ode2']
def solve_ode2(b, a, f, bcs, extrapolation=None):
    '''Solve a second order ODE.

       **Arguments:**

       b, a, f
            Cubic splines for the given functions in the second order ODE. (See
            build_neumann for details.) These cubic splines must have identical
            RTransform objects.

       bcs
            The boundary conditions (See build_neumann for details.)

       **Optional arguments:**

       extrapolation
            The extrapolation object for the returned cubic spline.

       **Returns:** a cubic spline object with the solution that uses the same
       RTransform object as the input functions a, b and f.
    '''
    # Parse args.
    rtf = b.rtransform
    # RTransform objects carry no __eq__; compare their serialized form
    if rtf.to_string() != a.rtransform.to_string():
        raise ValueError('The RTransform objects of b and a do not match.')
    if rtf.to_string() != f.rtransform.to_string():
        raise ValueError('The RTransform objects of b and f do not match.')
    # Transform the given functions to the linear coordinate, using the
    # first three derivatives of the radial transform (chain rule for a
    # change of variable in a second-order ODE).
    j1 = rtf.get_deriv()
    j2 = rtf.get_deriv2()
    j3 = rtf.get_deriv3()
    j1sq = j1*j1
    by_new = j1*b.y - j2/j1
    bd_new = j2*b.y + j1sq*b.dx + (j2*j2 - j1*j3)/j1sq
    ay_new = a.y*j1sq
    ad_new = (a.dx*j1sq + 2*a.y*j2)*j1
    fy_new = f.y*j1sq
    fd_new = (f.dx*j1sq + 2*f.y*j2)*j1
    # Transform the boundary conditions: derivative conditions pick up the
    # Jacobian at the corresponding end point
    new_bcs = (
        bcs[0], None if bcs[1] is None else bcs[1]*j1[0],
        bcs[2], None if bcs[3] is None else bcs[3]*j1[-1],
    )
    # Call the equation builder.
    coeffs, rhs = build_ode2(by_new, bd_new, ay_new, ad_new, fy_new, fd_new, new_bcs)
    # Solve the sparse linear system; even/odd entries interleave function
    # values and derivatives on the grid
    solution = spsolve(csc_matrix(coeffs), rhs)
    uy_new = solution[::2]
    ud_new = solution[1::2]
    # Transform solution back to the original coordinate.
    uy_orig = uy_new.copy() # A copy of is needed to obtain contiguous arrays.
    ud_orig = ud_new/j1
    return CubicSpline(uy_orig, ud_orig, rtf, extrapolation)
| theochem/horton | horton/grid/ode2.py | Python | gpl-3.0 | 2,964 |
from .models import QueueItem
def enqueue(queue_type='s'):
    """Append a new item of `queue_type` to the queue and return its id."""
    return QueueItem.objects.create(item_type=queue_type).id
def peek(queue_type, queue_id, upto_first_n=1):
    """Return True if `queue_id` is among the first `upto_first_n` items
    (ordered by primary key) of the queue of the given type."""
    head = QueueItem.objects.filter(item_type=queue_type).order_by('pk')[:upto_first_n]
    return any(item.id == queue_id for item in head)
def dequeue(queue_id):
    # Remove the item with the given primary key; raises
    # QueueItem.DoesNotExist if it has already been removed.
    QueueItem.objects.get(pk=queue_id).delete()
| acil-bwh/SpearmintServer | SpearmintServer/api/queue.py | Python | mit | 523 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsdelimitedtextprovider_wanted.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Chris Crook
Email : ccrook at linz dot govt dot nz
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Chris Crook'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Chris Crook'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def test_002_load_csv_file():
    """Expected provider output for loading test.csv as a plain CSV layer."""
    wanted = {}
    wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
    wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
    wanted['geometryType'] = 4
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Basic unquoted record',
            'data': 'Some data',
            'info': 'Some info',
            'field_5': 'NULL',
            '#fid': 2,
            '#geometry': 'None',
        },
        3: {
            'id': '2',
            'description': 'Quoted field',
            'data': 'Quoted data',
            'info': 'Unquoted',
            'field_5': 'NULL',
            '#fid': 3,
            '#geometry': 'None',
        },
        4: {
            'id': '3',
            'description': 'Escaped quotes',
            'data': 'Quoted "citation" data',
            'info': 'Unquoted',
            'field_5': 'NULL',
            '#fid': 4,
            '#geometry': 'None',
        },
        5: {
            'id': '4',
            'description': 'Quoted newlines',
            'data': 'Line 1\nLine 2\n\nLine 4',
            'info': 'No data',
            'field_5': 'NULL',
            '#fid': 5,
            '#geometry': 'None',
        },
        9: {
            'id': '5',
            'description': 'Extra fields',
            'data': 'data',
            'info': 'info',
            'field_5': 'message',
            '#fid': 9,
            '#geometry': 'None',
        },
        10: {
            'id': '6',
            'description': 'Missing fields',
            'data': 'NULL',
            'info': 'NULL',
            'field_5': 'NULL',
            '#fid': 10,
            '#geometry': 'None',
        },
    }
    wanted['log'] = []
    return wanted
def test_003_field_naming():
    """Expected field-name generation for duplicate/invalid CSV headers."""
    wanted = {}
    wanted['uri'] = 'file://testfields.csv?geomType=none&type=csv'
    wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
    wanted['geometryType'] = 4
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Generation of field names',
            'data': 'Some data',
            'field_4': 'Some info',
            'data_2': 'NULL',
            '28_1': 'NULL',
            '24.5': 'NULL',
            'field_3_1': 'NULL',
            'data_1': 'NULL',
            'field_10': 'NULL',
            'field_11': 'NULL',
            'field_12': 'last data',
            '#fid': 2,
            '#geometry': 'None',
        },
    }
    wanted['log'] = []
    return wanted
def test_004_max_fields():
    """Expected output when the field count is capped with maxFields=7."""
    wanted = {}
    wanted['uri'] = 'file://testfields.csv?geomType=none&maxFields=7&type=csv'
    wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text']
    wanted['geometryType'] = 4
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Generation of field names',
            'data': 'Some data',
            'field_4': 'Some info',
            'data_1': 'NULL',
            '28_1': 'NULL',
            '24.5': 'NULL',
            '#fid': 2,
            '#geometry': 'None',
        },
    }
    wanted['log'] = []
    return wanted
def test_005_load_whitespace():
    """Expected output for a whitespace-delimited file (test.space)."""
    wanted = {}
    wanted['uri'] = 'file://test.space?geomType=none&type=whitespace'
    wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text']
    wanted['geometryType'] = 4
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Simple_whitespace_file',
            'data': 'data1',
            'info': 'info1',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 2,
            '#geometry': 'None',
        },
        3: {
            'id': '2',
            'description': 'Whitespace_at_start_of_line',
            'data': 'data2',
            'info': 'info2',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 3,
            '#geometry': 'None',
        },
        4: {
            'id': '3',
            'description': 'Tab_whitespace',
            'data': 'data3',
            'info': 'info3',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 4,
            '#geometry': 'None',
        },
        5: {
            'id': '4',
            'description': 'Multiple_whitespace_characters',
            'data': 'data4',
            'info': 'info4',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 5,
            '#geometry': 'None',
        },
        6: {
            'id': '5',
            'description': 'Extra_fields',
            'data': 'data5',
            'info': 'info5',
            'field_5': 'message5',
            'field_6': 'rubbish5',
            '#fid': 6,
            '#geometry': 'None',
        },
        7: {
            'id': '6',
            'description': 'Missing_fields',
            'data': 'NULL',
            'info': 'NULL',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 7,
            '#geometry': 'None',
        },
    }
    wanted['log'] = []
    return wanted
def test_006_quote_escape():
    """Expected output for a pipe-delimited file with backslash escapes."""
    wanted = {}
    wanted['uri'] = 'file://test.pipe?geomType=none"e="&delimiter=|&escape=\\'
    wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text']
    wanted['geometryType'] = 4
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Using pipe delimiter',
            'data': 'data 1',
            'info': 'info 1',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 2,
            '#geometry': 'None',
        },
        3: {
            'id': '2',
            'description': 'Using backslash escape on pipe',
            'data': 'data 2 | piped',
            'info': 'info2',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 3,
            '#geometry': 'None',
        },
        4: {
            'id': '3',
            'description': 'Backslash escaped newline',
            'data': 'data3 \nline2 \nline3',
            'info': 'info3',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 4,
            '#geometry': 'None',
        },
        7: {
            'id': '4',
            'description': 'Empty field',
            'data': 'NULL',
            'info': 'info4',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 7,
            '#geometry': 'None',
        },
        8: {
            'id': '5',
            'description': 'Quoted field',
            'data': 'More | piped data',
            'info': 'info5',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 8,
            '#geometry': 'None',
        },
        9: {
            'id': '6',
            'description': 'Escaped quote',
            'data': 'Field "citation" ',
            'info': 'info6',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 9,
            '#geometry': 'None',
        },
        10: {
            'id': '7',
            'description': 'Missing fields',
            'data': 'NULL',
            'info': 'NULL',
            'field_5': 'NULL',
            'field_6': 'NULL',
            '#fid': 10,
            '#geometry': 'None',
        },
        11: {
            'id': '8',
            'description': 'Extra fields',
            'data': 'data8',
            'info': 'info8',
            'field_5': 'message8',
            'field_6': 'more',
            '#fid': 11,
            '#geometry': 'None',
        },
    }
    wanted['log'] = []
    return wanted
def test_007_multiple_quote():
    """Expected output (and discard log) for multiple quote/escape chars."""
    wanted = {}
    wanted['uri'] = 'file://test.quote?geomType=none"e=\'"&type=csv&escape="\''
    wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
    wanted['geometryType'] = 4
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Multiple quotes 1',
            'data': 'Quoted,data1',
            'info': 'info1',
            '#fid': 2,
            '#geometry': 'None',
        },
        3: {
            'id': '2',
            'description': 'Multiple quotes 2',
            'data': 'Quoted,data2',
            'info': 'info2',
            '#fid': 3,
            '#geometry': 'None',
        },
        4: {
            'id': '3',
            'description': 'Leading and following whitespace',
            'data': 'Quoted, data3',
            'info': 'info3',
            '#fid': 4,
            '#geometry': 'None',
        },
        5: {
            'id': '4',
            'description': 'Embedded quotes 1',
            'data': 'Quoted \'\'"\'\' data4',
            'info': 'info4',
            '#fid': 5,
            '#geometry': 'None',
        },
        6: {
            'id': '5',
            'description': 'Embedded quotes 2',
            'data': 'Quoted \'""\' data5',
            'info': 'info5',
            '#fid': 6,
            '#geometry': 'None',
        },
        10: {
            'id': '9',
            'description': 'Final record',
            'data': 'date9',
            'info': 'info9',
            '#fid': 10,
            '#geometry': 'None',
        },
    }
    wanted['log'] = [
        'Errors in file test.quote',
        '3 records discarded due to invalid format',
        'The following lines were not loaded into QGIS due to errors:',
        'Invalid record format at line 7',
        'Invalid record format at line 8',
        'Invalid record format at line 9',
    ]
    return wanted
def test_008_badly_formed_quotes():
    """Expected recovery behaviour for a file with unbalanced quotes."""
    wanted = {}
    wanted['uri'] = 'file://test.badquote?geomType=none"e="&type=csv&escape="'
    wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
    wanted['geometryType'] = 4
    wanted['data'] = {
        4: {
            'id': '3',
            'description': 'Recovered after unclosed quore',
            'data': 'Data ok',
            'info': 'inf3',
            '#fid': 4,
            '#geometry': 'None',
        },
    }
    wanted['log'] = [
        'Errors in file test.badquote',
        '2 records discarded due to invalid format',
        'The following lines were not loaded into QGIS due to errors:',
        'Invalid record format at line 2',
        'Invalid record format at line 5',
    ]
    return wanted
def test_009_skip_lines():
    """Expected output with skipLines=2 and no header row."""
    wanted = {}
    wanted['uri'] = 'file://test2.csv?geomType=none&skipLines=2&type=csv&useHeader=no'
    wanted['fieldTypes'] = ['integer', 'text', 'text']
    wanted['geometryType'] = 4
    wanted['data'] = {
        3: {
            'id': '3',
            'description': 'Less data',
            'field_1': '3',
            'field_2': 'Less data',
            'field_3': 'data3',
            '#fid': 3,
            '#geometry': 'None',
        },
    }
    wanted['log'] = []
    return wanted
def test_010_read_coordinates():
    """Expected point geometries built from x/y coordinate columns."""
    wanted = {}
    wanted['uri'] = 'file://testpt.csv?yField=geom_y&xField=geom_x&type=csv'
    wanted['fieldTypes'] = ['integer', 'text', 'double', 'double']
    wanted['geometryType'] = 0
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Basic point',
            'geom_x': '10.5',
            'geom_y': '20.82',
            '#fid': 2,
            '#geometry': 'Point (10.5 20.82)',
        },
        3: {
            'id': '2',
            'description': 'Integer point',
            'geom_x': '11.0',
            'geom_y': '22.0',
            '#fid': 3,
            '#geometry': 'Point (11 22)',
        },
        5: {
            'id': '4',
            'description': 'Final point',
            'geom_x': '13.0',
            'geom_y': '23.0',
            '#fid': 5,
            '#geometry': 'Point (13 23)',
        },
    }
    wanted['log'] = [
        'Errors in file testpt.csv',
        '1 records discarded due to invalid geometry definitions',
        'The following lines were not loaded into QGIS due to errors:',
        'Invalid X or Y fields at line 4',
    ]
    return wanted
def test_011_read_wkt():
    """Expected geometries when reading WKT with no geometry-type filter."""
    wanted = {}
    wanted['uri'] = 'file://testwkt.csv?delimiter=|&type=csv&wktField=geom_wkt'
    wanted['fieldTypes'] = ['integer', 'text']
    wanted['geometryType'] = 0
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Point wkt',
            '#fid': 2,
            '#geometry': 'Point (10 20)',
        },
        3: {
            'id': '2',
            'description': 'Multipoint wkt',
            '#fid': 3,
            '#geometry': 'MultiPoint ((10 20),(11 21))',
        },
        9: {
            'id': '8',
            'description': 'EWKT prefix',
            '#fid': 9,
            '#geometry': 'Point (10 10)',
        },
        10: {
            'id': '9',
            'description': 'Informix prefix',
            '#fid': 10,
            '#geometry': 'Point (10 10)',
        },
        11: {
            'id': '10',
            'description': 'Measure in point',
            '#fid': 11,
            '#geometry': 'PointM (10 20 30)',
        },
    }
    wanted['log'] = [
        'Errors in file testwkt.csv',
        '1 records discarded due to invalid geometry definitions',
        '10 records discarded due to incompatible geometry types',
        'The following lines were not loaded into QGIS due to errors:',
        'Invalid WKT at line 8',
    ]
    return wanted
def test_012_read_wkt_point():
    """Expected geometries when reading WKT restricted to point types."""
    wanted = {}
    wanted['uri'] = 'file://testwkt.csv?geomType=point&delimiter=|&type=csv&wktField=geom_wkt'
    wanted['fieldTypes'] = ['integer', 'text']
    wanted['geometryType'] = 0
    wanted['data'] = {
        2: {
            'id': '1',
            'description': 'Point wkt',
            '#fid': 2,
            '#geometry': 'Point (10 20)',
        },
        3: {
            'id': '2',
            'description': 'Multipoint wkt',
            '#fid': 3,
            '#geometry': 'MultiPoint ((10 20),(11 21))',
        },
        9: {
            'id': '8',
            'description': 'EWKT prefix',
            '#fid': 9,
            '#geometry': 'Point (10 10)',
        },
        10: {
            'id': '9',
            'description': 'Informix prefix',
            '#fid': 10,
            '#geometry': 'Point (10 10)',
        },
        11: {
            'id': '10',
            'description': 'Measure in point',
            '#fid': 11,
            '#geometry': 'PointM (10 20 30)',
        },
    }
    wanted['log'] = [
        'Errors in file testwkt.csv',
        '1 records discarded due to invalid geometry definitions',
        '10 records discarded due to incompatible geometry types',
        'The following lines were not loaded into QGIS due to errors:',
        'Invalid WKT at line 8',
    ]
    return wanted
def test_013_read_wkt_line():
    """Expected geometries when reading WKT restricted to line types."""
    wanted = {}
    wanted['uri'] = 'file://testwkt.csv?geomType=line&delimiter=|&type=csv&wktField=geom_wkt'
    wanted['fieldTypes'] = ['integer', 'text']
    wanted['geometryType'] = 1
    wanted['data'] = {
        4: {
            'id': '3',
            'description': 'Linestring wkt',
            '#fid': 4,
            '#geometry': 'LineString (10 20, 11 21)',
        },
        5: {
            'id': '4',
            'description': 'Multiline string wkt',
            '#fid': 5,
            '#geometry': 'MultiLineString ((10 20, 11 21), (20 30, 21 31))',
        },
        12: {
            'id': '11',
            'description': 'Measure in line',
            '#fid': 12,
            '#geometry': 'LineStringM (10 20 30, 11 21 31)',
        },
        13: {
            'id': '12',
            'description': 'Z in line',
            '#fid': 13,
            '#geometry': 'LineStringZ (10 20 30, 11 21 31)',
        },
        14: {
            'id': '13',
            'description': 'Measure and Z in line',
            '#fid': 14,
            '#geometry': 'LineStringZM (10 20 30 40, 11 21 31 41)',
        },
        15: {
            'id': '14',
            'description': 'CircularString',
            '#fid': 15,
            '#geometry': 'CircularString (268 415, 227 505, 227 406)',
        },
        17: {
            'id': '16',
            'description': 'CompoundCurve',
            '#fid': 17,
            '#geometry': 'CompoundCurve ((5 3, 5 13), CircularString(5 13, 7 15, 9 13), (9 13, 9 3), CircularString(9 3, 7 1, 5 3))',
        },
    }
    wanted['log'] = [
        'Errors in file testwkt.csv',
        '1 records discarded due to invalid geometry definitions',
        '8 records discarded due to incompatible geometry types',
        'The following lines were not loaded into QGIS due to errors:',
        'Invalid WKT at line 8',
    ]
    return wanted
def test_014_read_wkt_polygon():
    """Expected geometries when reading WKT restricted to polygon types."""
    wanted = {}
    wanted['uri'] = 'file://testwkt.csv?geomType=polygon&delimiter=|&type=csv&wktField=geom_wkt'
    wanted['fieldTypes'] = ['integer', 'text']
    wanted['geometryType'] = 2
    wanted['data'] = {
        6: {
            'id': '5',
            'description': 'Polygon wkt',
            '#fid': 6,
            '#geometry': 'Polygon ((10 10,10 20,20 20,20 10,10 10),(14 14,14 16,16 16,14 14))',
        },
        7: {
            'id': '6',
            'description': 'MultiPolygon wkt',
            '#fid': 7,
            '#geometry': 'MultiPolygon (((10 10,10 20,20 20,20 10,10 10),(14 14,14 16,16 16,14 14)),((30 30,30 35,35 35,30 30)))',
        },
        16: {
            'id': '15',
            'description': 'CurvePolygon',
            '#fid': 16,
            '#geometry': 'CurvePolygon (CircularString (1 3, 3 5, 4 7, 7 3, 1 3))',
        },
    }
    wanted['log'] = [
        'Errors in file testwkt.csv',
        '1 records discarded due to invalid geometry definitions',
        '12 records discarded due to incompatible geometry types',
        'The following lines were not loaded into QGIS due to errors:',
        'Invalid WKT at line 8',
    ]
    return wanted
def test_015_read_dms_xy():
wanted = {}
wanted['uri'] = 'file://testdms.csv?yField=lat&xField=lon&type=csv&xyDms=yes'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
3: {
'id': '1',
'description': 'Basic DMS string',
'lon': '1 5 30.6',
'lat': '35 51 20',
'#fid': 3,
'#geometry': 'Point (1.09183333 35.85555556)',
},
4: {
'id': '2',
'description': 'Basic DMS string 2',
'lon': '1 05 30.6005',
'lat': '035 51 20',
'#fid': 4,
'#geometry': 'Point (1.09183347 35.85555556)',
},
5: {
'id': '3',
'description': 'Basic DMS string 3',
'lon': '1 05 30.6',
'lat': '35 59 9.99',
'#fid': 5,
'#geometry': 'Point (1.09183333 35.98610833)',
},
7: {
'id': '4',
'description': 'Prefix sign 1',
'lon': 'n1 05 30.6',
'lat': 'e035 51 20',
'#fid': 7,
'#geometry': 'Point (1.09183333 35.85555556)',
},
8: {
'id': '5',
'description': 'Prefix sign 2',
'lon': 'N1 05 30.6',
'lat': 'E035 51 20',
'#fid': 8,
'#geometry': 'Point (1.09183333 35.85555556)',
},
9: {
'id': '6',
'description': 'Prefix sign 3',
'lon': 'N 1 05 30.6',
'lat': 'E 035 51 20',
'#fid': 9,
'#geometry': 'Point (1.09183333 35.85555556)',
},
10: {
'id': '7',
'description': 'Prefix sign 4',
'lon': 'S1 05 30.6',
'lat': 'W035 51 20',
'#fid': 10,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
11: {
'id': '8',
'description': 'Prefix sign 5',
'lon': '+1 05 30.6',
'lat': '+035 51 20',
'#fid': 11,
'#geometry': 'Point (1.09183333 35.85555556)',
},
12: {
'id': '9',
'description': 'Prefix sign 6',
'lon': '-1 05 30.6',
'lat': '-035 51 20',
'#fid': 12,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
14: {
'id': '10',
'description': 'Postfix sign 1',
'lon': '1 05 30.6n',
'lat': '035 51 20e',
'#fid': 14,
'#geometry': 'Point (1.09183333 35.85555556)',
},
15: {
'id': '11',
'description': 'Postfix sign 2',
'lon': '1 05 30.6N',
'lat': '035 51 20E',
'#fid': 15,
'#geometry': 'Point (1.09183333 35.85555556)',
},
16: {
'id': '12',
'description': 'Postfix sign 3',
'lon': '1 05 30.6 N',
'lat': '035 51 20 E',
'#fid': 16,
'#geometry': 'Point (1.09183333 35.85555556)',
},
17: {
'id': '13',
'description': 'Postfix sign 4',
'lon': '1 05 30.6S',
'lat': '035 51 20W',
'#fid': 17,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
18: {
'id': '14',
'description': 'Postfix sign 5',
'lon': '1 05 30.6+',
'lat': '035 51 20+',
'#fid': 18,
'#geometry': 'Point (1.09183333 35.85555556)',
},
19: {
'id': '15',
'description': 'Postfix sign 6',
'lon': '1 05 30.6-',
'lat': '035 51 20-',
'#fid': 19,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
21: {
'id': '16',
'description': 'Leading and trailing blanks 1',
'lon': ' 1 05 30.6',
'lat': '035 51 20 ',
'#fid': 21,
'#geometry': 'Point (1.09183333 35.85555556)',
},
22: {
'id': '17',
'description': 'Leading and trailing blanks 2',
'lon': ' N 1 05 30.6',
'lat': '035 51 20 E ',
'#fid': 22,
'#geometry': 'Point (1.09183333 35.85555556)',
},
24: {
'id': '18',
'description': 'Alternative characters for D,M,S',
'lon': '1d05m30.6s S',
'lat': "35d51'20",
'#fid': 24,
'#geometry': 'Point (-1.09183333 35.85555556)',
},
25: {
'id': '19',
'description': 'Degrees/minutes format',
'lon': '1 05.23',
'lat': '4 55.03',
'#fid': 25,
'#geometry': 'Point (1.08716667 4.91716667)',
},
}
wanted['log'] = [
'Errors in file testdms.csv',
'5 records discarded due to invalid geometry definitions',
'The following lines were not loaded into QGIS due to errors:',
'Invalid X or Y fields at line 27',
'Invalid X or Y fields at line 28',
'Invalid X or Y fields at line 29',
'Invalid X or Y fields at line 30',
'Invalid X or Y fields at line 31',
]
return wanted
def test_016_decimal_point():
wanted = {}
wanted['uri'] = 'file://testdp.csv?yField=geom_y&xField=geom_x&type=csv&delimiter=;&decimalPoint=,'
wanted['fieldTypes'] = ['integer', 'text', 'double', 'double', 'double', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Comma as decimal point 1',
'geom_x': '10.0',
'geom_y': '20.0',
'other': '30.0',
'text field': 'Field with , in it',
'#fid': 2,
'#geometry': 'Point (10 20)',
},
3: {
'id': '2',
'description': 'Comma as decimal point 2',
'geom_x': '12.0',
'geom_y': '25.003',
'other': '-38.55',
'text field': 'Plain text field',
'#fid': 3,
'#geometry': 'Point (12 25.003)',
},
}
wanted['log'] = []
return wanted
def test_017_regular_expression_1():
wanted = {}
wanted['uri'] = 'file://testre.txt?geomType=none&trimFields=Y&delimiter=RE(?:GEXP)?&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic regular expression test',
'data': 'data1',
'info': 'info',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Basic regular expression test 2',
'data': 'data2',
'info': 'info2',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_018_regular_expression_2():
wanted = {}
wanted['uri'] = 'file://testre.txt?geomType=none&trimFields=Y&delimiter=(RE)(GEXP)?&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'RE': 'RE',
'GEXP': 'GEXP',
'description': 'RE',
'RE_1': 'RE',
'GEXP_1': 'GEXP',
'data': 'data1',
'RE_2': 'RE',
'GEXP_2': 'GEXP',
'info': 'info',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'RE': 'RE',
'GEXP': 'GEXP',
'description': 'RE',
'RE_1': 'RE',
'GEXP_1': 'NULL',
'data': 'data2',
'RE_2': 'RE',
'GEXP_2': 'NULL',
'info': 'info2',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_019_regular_expression_3():
wanted = {}
wanted['uri'] = 'file://testre2.txt?geomType=none&trimFields=Y&delimiter=^(.{5})(.{30})(.{5,})&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Anchored regexp',
'information': 'Some data',
'#fid': 2,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Anchored regexp recovered',
'information': 'Some data',
'#fid': 4,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file testre2.txt',
'1 records discarded due to invalid format',
'The following lines were not loaded into QGIS due to errors:',
'Invalid record format at line 3',
]
return wanted
def test_020_regular_expression_4():
wanted = {}
wanted['uri'] = 'file://testre3.txt?geomType=none&delimiter=x?&type=regexp'
wanted['fieldTypes'] = ['text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'f',
'description': 'i',
's': 'f',
'm': 'i',
'a': '.',
'l': '.',
'l_1': 'i',
'field_6': 'l',
'field_7': 'e',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_021_regular_expression_5():
wanted = {}
wanted['uri'] = 'file://testre3.txt?geomType=none&delimiter=\\b&type=regexp'
wanted['fieldTypes'] = ['text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'fi',
'description': '..',
'small': 'fi',
'field_2': '..',
'field_3': 'ile',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_022_utf8_encoded_file():
wanted = {}
wanted['uri'] = 'file://testutf8.csv?geomType=none&delimiter=|&type=csv&encoding=utf-8'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Correctly read UTF8 encoding',
'name': 'Field has \u0101cc\xe8nt\xe9d text',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_023_latin1_encoded_file():
wanted = {}
wanted['uri'] = 'file://testlatin1.csv?geomType=none&delimiter=|&type=csv&encoding=latin1'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Correctly read latin1 encoding',
'name': 'This test is \xa9',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_024_filter_rect_xy():
wanted = {}
wanted['uri'] = 'file://testextpt.txt?yField=y&delimiter=|&type=csv&xField=x'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'integer']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
10: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
1002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
1010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_025_filter_rect_wkt():
wanted = {}
wanted['uri'] = 'file://testextw.txt?delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_026_filter_fid():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
3: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
1009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
3003: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_027_filter_attributes():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'None',
'description': 'Basic unquoted record',
'data': 'None',
'info': 'Some info',
'field_5': 'None',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': 'None',
'description': 'Quoted field',
'data': 'None',
'info': 'Unquoted',
'field_5': 'None',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': 'None',
'description': 'Escaped quotes',
'data': 'None',
'info': 'Unquoted',
'field_5': 'None',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': 'None',
'description': 'Quoted newlines',
'data': 'None',
'info': 'No data',
'field_5': 'None',
'#fid': 5,
'#geometry': 'None',
},
9: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': 'None',
'description': 'Missing fields',
'data': 'None',
'info': 'NULL',
'field_5': 'None',
'#fid': 10,
'#geometry': 'None',
},
1009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
2009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
3009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
4009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
5009: {
'id': 'None',
'description': 'None',
'data': 'None',
'info': 'None',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_028_substring_test():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv&subset=id%20%25%202%20%3D%201'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
9: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_029_file_watcher():
wanted = {}
wanted['uri'] = 'file://file?geomType=none&type=csv&watchFile=yes'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
3: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
1002: {
'id': '1',
'description': 'rabbit',
'name': 'rabbit',
'#fid': 2,
'#geometry': 'None',
},
1003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
4003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
5004: {
'id': '3',
'description': 'tiger',
'name': 'tiger',
'#fid': 4,
'#geometry': 'None',
},
6002: {
'id': '1',
'description': 'rabbit',
'name': 'rabbit',
'#fid': 2,
'#geometry': 'None',
},
6003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
6004: {
'id': '3',
'description': 'tiger',
'name': 'tiger',
'#fid': 4,
'#geometry': 'None',
},
9002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
10002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
10003: {
'id': '6',
'description': 'mole',
'name': 'mole',
'#fid': 3,
'#geometry': 'None',
},
10004: {
'id': '7',
'description': 'badger',
'name': 'badger',
'#fid': 4,
'#geometry': 'None',
},
16002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = [
'Request 2 did not return any data',
'Request 7 did not return any data',
'Request 11 did not return any data',
'Request 13 did not return any data',
'Request 14 did not return any data',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
]
return wanted
def test_030_filter_rect_xy_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextpt.txt?spatialIndex=Y&yField=y&delimiter=|&type=csv&xField=x'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'integer']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
10: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
1002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
1010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
3002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
3003: {
'id': '2',
'description': 'Outside 1',
'x': '5',
'y': '35',
'#fid': 3,
'#geometry': 'Point (5 35)',
},
3004: {
'id': '3',
'description': 'Outside 2',
'x': '5',
'y': '55',
'#fid': 4,
'#geometry': 'Point (5 55)',
},
3005: {
'id': '4',
'description': 'Outside 3',
'x': '15',
'y': '55',
'#fid': 5,
'#geometry': 'Point (15 55)',
},
3006: {
'id': '5',
'description': 'Outside 4',
'x': '35',
'y': '55',
'#fid': 6,
'#geometry': 'Point (35 55)',
},
3007: {
'id': '6',
'description': 'Outside 5',
'x': '35',
'y': '45',
'#fid': 7,
'#geometry': 'Point (35 45)',
},
3008: {
'id': '7',
'description': 'Outside 7',
'x': '35',
'y': '25',
'#fid': 8,
'#geometry': 'Point (35 25)',
},
3009: {
'id': '8',
'description': 'Outside 8',
'x': '15',
'y': '25',
'#fid': 9,
'#geometry': 'Point (15 25)',
},
3010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
4002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
4003: {
'id': '2',
'description': 'Outside 1',
'x': '5',
'y': '35',
'#fid': 3,
'#geometry': 'Point (5 35)',
},
4004: {
'id': '3',
'description': 'Outside 2',
'x': '5',
'y': '55',
'#fid': 4,
'#geometry': 'Point (5 55)',
},
4005: {
'id': '4',
'description': 'Outside 3',
'x': '15',
'y': '55',
'#fid': 5,
'#geometry': 'Point (15 55)',
},
4006: {
'id': '5',
'description': 'Outside 4',
'x': '35',
'y': '55',
'#fid': 6,
'#geometry': 'Point (35 55)',
},
4007: {
'id': '6',
'description': 'Outside 5',
'x': '35',
'y': '45',
'#fid': 7,
'#geometry': 'Point (35 45)',
},
4008: {
'id': '7',
'description': 'Outside 7',
'x': '35',
'y': '25',
'#fid': 8,
'#geometry': 'Point (35 25)',
},
4009: {
'id': '8',
'description': 'Outside 8',
'x': '15',
'y': '25',
'#fid': 9,
'#geometry': 'Point (15 25)',
},
4010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_031_filter_rect_wkt_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextw.txt?spatialIndex=Y&delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
3003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
3004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
3005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
3006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
4002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
4004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
4005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
4006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
4007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_032_filter_rect_wkt_create_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextw.txt?delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
1007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
3002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
3004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
3005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
3006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
4002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
4006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
6002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
6003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
6004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
6005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
6007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
7002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
7003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
7004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
7005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
7006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
}
wanted['log'] = [
'Request 5 did not return any data',
]
return wanted
def test_033_reset_subset_string():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Quoted newlines',
'data': 'Line 1\nLine 2\n\nLine 4',
'info': 'No data',
'field_5': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
9: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
2002: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
2004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
2009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
4010: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
6004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
8002: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
8004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
8009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
10003: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
10005: {
'id': '4',
'description': 'Quoted newlines',
'data': 'Line 1\nLine 2\n\nLine 4',
'info': 'No data',
'field_5': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
10010: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_034_csvt_file():
wanted = {}
wanted['uri'] = 'file://testcsvt.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'text', 'text', 'text', 'text', 'text', 'text', 'longlong', 'longlong']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'fint': '1',
'freal': '1.2',
'fstr': '1',
'fstr_1': 'text',
'fdatetime': '2015-03-02T12:30:00',
'fdate': '2014-12-30',
'ftime': '23:55',
'flong': '-456',
'flonglong': '-678',
'field_12': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'fint': '3',
'freal': '1.5',
'fstr': '99',
'fstr_1': '23.5',
'fdatetime': '80',
'fdate': '2015-03-28',
'ftime': '2014-12-30',
'flong': '01:55',
'flonglong': '9189304972279762602',
'field_12': '-3123724580211819352',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_035_csvt_file2():
wanted = {}
wanted['uri'] = 'file://testcsvt2.txt?geomType=none&delimiter=|&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'integer']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_036_csvt_file_invalid_types():
wanted = {}
wanted['uri'] = 'file://testcsvt3.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'times',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file testcsvt3.csv',
'File type string in testcsvt3.csvt is not correctly formatted',
]
return wanted
def test_037_csvt_file_invalid_file():
wanted = {}
wanted['uri'] = 'file://testcsvt4.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'times',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_038_type_inference():
wanted = {}
wanted['uri'] = 'file://testtypes.csv?yField=lat&xField=lon&type=csv'
wanted['fieldTypes'] = ['text', 'double', 'double', 'text', 'text', 'integer', 'longlong', 'double', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': 'line1',
'description': '1.0',
'lon': '1.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': '0',
'longlong': '0',
'real': 'NULL',
'text2': '1',
'#fid': 2,
'#geometry': 'Point (1 1)',
},
3: {
'id': 'line2',
'description': '1.0',
'lon': '1.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1',
'int': 'NULL',
'longlong': '9189304972279762602',
'real': '1.3',
'text2': '-4',
'#fid': 3,
'#geometry': 'Point (1 5)',
},
4: {
'id': 'line3',
'description': '5.0',
'lon': '5.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1xx',
'int': '2',
'longlong': '345',
'real': '2.0',
'text2': '1x',
'#fid': 4,
'#geometry': 'Point (5 5)',
},
5: {
'id': 'line4',
'description': '5.0',
'lon': '5.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'A string',
'int': '-3456',
'longlong': '-3123724580211819352',
'real': '-123.56',
'text2': 'NULL',
'#fid': 5,
'#geometry': 'Point (5 1)',
},
6: {
'id': 'line5',
'description': '3.0',
'lon': '3.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': 'NULL',
'longlong': 'NULL',
'real': '0.00023',
'text2': '23',
'#fid': 6,
'#geometry': 'Point (3 1)',
},
7: {
'id': 'line6',
'description': '1.0',
'lon': '1.0',
'lat': '3.0',
'empty': 'NULL',
'text': '1.5',
'int': '9',
'longlong': '42',
'real': '99.0',
'text2': '0',
'#fid': 7,
'#geometry': 'Point (1 3)',
},
}
wanted['log'] = []
return wanted
def test_039_issue_13749():
wanted = {}
wanted['uri'] = 'file://test13749.csv?yField=geom_y&xField=geom_x&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'double', 'double']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'No geom',
'geom_x': 'NULL',
'geom_y': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Point1',
'geom_x': '11.0',
'geom_y': '22.0',
'#fid': 3,
'#geometry': 'Point (11 22)',
},
4: {
'id': '3',
'description': 'Point2',
'geom_x': '15.0',
'geom_y': '23.0',
'#fid': 4,
'#geometry': 'Point (15 23)',
},
5: {
'id': '4',
'description': 'Point3',
'geom_x': '13.0',
'geom_y': '23.0',
'#fid': 5,
'#geometry': 'Point (13 23)',
},
}
wanted['log'] = [
'Errors in file test13749.csv',
'1 records have missing geometry definitions',
]
return wanted
def test_040_issue_14666():
    """Expected provider state for issue #14666: a tab-delimited CSV where
    two records lack x/y values and therefore have no geometry."""
    records = {
        2: {
            'id': '1',
            'description': '7.15417',
            'x': '7.15417',
            'y': '50.680622',
            '#fid': 2,
            '#geometry': 'Point (7.1541699999999997 50.68062199999999962)',
        },
        3: {
            'id': '2',
            'description': '7.119219',
            'x': '7.119219',
            'y': '50.739814',
            '#fid': 3,
            '#geometry': 'Point (7.11921900000000019 50.73981400000000264)',
        },
        4: {
            'id': '3',
            'description': 'NULL',
            'x': 'NULL',
            'y': 'NULL',
            '#fid': 4,
            '#geometry': 'None',
        },
        5: {
            'id': '4',
            'description': 'NULL',
            'x': 'NULL',
            'y': 'NULL',
            '#fid': 5,
            '#geometry': 'None',
        },
        6: {
            'id': '5',
            'description': '7.129229',
            'x': '7.129229',
            'y': '50.703692',
            '#fid': 6,
            '#geometry': 'Point (7.12922899999999959 50.70369199999999665)',
        },
    }
    return {
        'uri': 'file://test14666.csv?yField=y&xField=x&type=csv&delimiter=\\t',
        'fieldTypes': ['integer', 'double', 'double'],
        'geometryType': 0,
        'data': records,
        'log': [
            'Errors in file test14666.csv',
            '2 records have missing geometry definitions',
        ],
    }
def test_041_no_detect_type():
    """Expected provider state when type detection is disabled
    (detectTypes=no): every field is reported as 'text' and numeric values
    keep their raw source representation (e.g. '23e-5', '99')."""
    wanted = {}
    wanted['uri'] = 'file://testtypes.csv?yField=lat&xField=lon&type=csv&detectTypes=no'
    # All attributes degrade to text when detection is off.
    wanted['fieldTypes'] = ['text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
    wanted['geometryType'] = 0
    wanted['data'] = {
        2: {
            'id': 'line1',
            'description': '1.0',
            'lon': '1.0',
            'lat': '1.0',
            'empty': 'NULL',
            'text': 'NULL',
            'int': '0',
            'longlong': '0',
            'real': 'NULL',
            'text2': '1',
            '#fid': 2,
            '#geometry': 'Point (1 1)',
        },
        3: {
            'id': 'line2',
            'description': '1.0',
            'lon': '1.0',
            'lat': '5.0',
            'empty': 'NULL',
            'text': '1',
            'int': 'NULL',
            'longlong': '9189304972279762602',
            'real': '1.3',
            'text2': '-4',
            '#fid': 3,
            '#geometry': 'Point (1 5)',
        },
        4: {
            'id': 'line3',
            'description': '5.0',
            'lon': '5.0',
            'lat': '5.0',
            'empty': 'NULL',
            'text': '1xx',
            'int': '2',
            'longlong': '345',
            'real': '2',
            'text2': '1x',
            '#fid': 4,
            '#geometry': 'Point (5 5)',
        },
        5: {
            'id': 'line4',
            'description': '5.0',
            'lon': '5.0',
            'lat': '1.0',
            'empty': 'NULL',
            'text': 'A string',
            'int': '-3456',
            'longlong': '-3123724580211819352',
            'real': '-123.56',
            'text2': 'NULL',
            '#fid': 5,
            '#geometry': 'Point (5 1)',
        },
        6: {
            'id': 'line5',
            'description': '3.0',
            'lon': '3.0',
            'lat': '1.0',
            'empty': 'NULL',
            'text': 'NULL',
            'int': 'NULL',
            'longlong': 'NULL',
            'real': '23e-5',
            'text2': '23',
            '#fid': 6,
            '#geometry': 'Point (3 1)',
        },
        7: {
            'id': 'line6',
            'description': '1.0',
            'lon': '1.0',
            'lat': '3.0',
            'empty': 'NULL',
            'text': '1.5',
            'int': '9',
            'longlong': '42',
            'real': '99',
            'text2': '0',
            '#fid': 7,
            '#geometry': 'Point (1 3)',
        },
    }
    wanted['log'] = [
    ]
    return wanted
def test_042_no_detect_types_csvt():
    """Expected provider state for a CSV with a .csvt sidecar while type
    detection is disabled: the .csvt declared types are honoured and the
    layer has no geometry (geomType=none)."""
    records = {
        2: {
            'id': '1',
            'description': 'Test csvt 1',
            'fint': '1',
            'freal': '1.2',
            'fstr': '1',
            'fstr_1': 'text',
            'fdatetime': '2015-03-02T12:30:00',
            'fdate': '2014-12-30',
            'ftime': '23:55',
            'flong': '-456',
            'flonglong': '-678',
            'field_12': 'NULL',
            '#fid': 2,
            '#geometry': 'None',
        },
        3: {
            'id': '2',
            'description': 'Test csvt 2',
            'fint': '3',
            'freal': '1.5',
            'fstr': '99',
            'fstr_1': '23.5',
            'fdatetime': '80',
            'fdate': '2015-03-28',
            'ftime': '2014-12-30',
            'flong': '01:55',
            'flonglong': '9189304972279762602',
            'field_12': '-3123724580211819352',
            '#fid': 3,
            '#geometry': 'None',
        },
    }
    return {
        'uri': 'file://testcsvt.csv?geomType=none&type=csv&detectTypes=no',
        'fieldTypes': ['integer', 'text', 'integer', 'double', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text'],
        'geometryType': 4,
        'data': records,
        'log': [
        ],
    }
| dgoedkoop/QGIS | tests/src/python/test_qgsdelimitedtextprovider_wanted.py | Python | gpl-2.0 | 73,128 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Matmoz (<http://www.matmoz.si/>)
# <info@matmoz.si>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp import models, api, _
class formView(models.Model):
    """Adds form-view helper actions to project.project.

    `button_save_data` is a no-op save hook; `action_open_view_project_form`
    opens the current project in a form view with edit buttons enabled.
    """
    _inherit = 'project.project'

    @api.multi
    def button_save_data(self):
        """Placeholder save button; the framework persists the record."""
        return True

    @api.multi
    def action_open_view_project_form(self):
        """Return an act_window action showing this project's detail form,
        with `view_buttons` set in the context so the view renders them."""
        ctx = dict(self.env.context)
        ctx['view_buttons'] = True
        return {
            'name': _('Details'),
            'view_type': 'form',
            'view_mode': 'form,tree,kanban,gantt',
            'res_model': 'project.project',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'target': 'current',
            'res_id': self.id,
            'context': ctx
        }
| xpansa/pmis | project_wbs/model/form_button.py | Python | agpl-3.0 | 1,573 |
import turtle
# Minimal turtle-graphics demo: create a turtle, set its pen colour,
# and move it forward 99 pixels (drawing a line).
t = turtle.Turtle()
t.color('purple')
t.forward(99)
| regnart-tech-club/programming-concepts | course-1:basic-building-blocks/subject-4:turtle/lesson-1:`import` statement.py | Python | apache-2.0 | 67 |
import json
import random
class Board:
    """Undirected graph of a game map: nodes (sites), edges (rivers) and a
    set of mine nodes, with optional 2-D layout coordinates per node.

    Nodes are identified internally by dense integer ids (0..n-1); external
    site ids are kept in `siteids` with the reverse mapping in `s2n`.
    """
    def __init__(self):
        self.n = 0 # number of nodes
        # node data
        self.siteids = []
        self.s2n = {} # mapping from siteid to nodeid
        self.xy = [] # list of (x, y) pairs
        self.ismine = [] # list of True/False
        self.mines = [] # list of nodeids
        # edge data
        self.edges = [] # adjacency lists, indexed by nodeid
        self.num_edges = 0
    def add_node(self, siteid = None, ismine = False, x = None, y = None):
        """Append one node; records its siteid mapping, mine flag, and
        position (only when both x and y are given)."""
        nodeid = self.n
        self.siteids.append(siteid)
        if siteid is not None:
            self.s2n[siteid] = nodeid
        self.ismine.append(ismine)
        if ismine:
            self.mines.append(nodeid)
        if x is None or y is None:
            self.xy.append(None)
        else:
            self.xy.append((x, y))
        self.edges.append([])
        self.n += 1
    # source, target are given as nodeids
    def add_edge(self, source, target):
        """Add an undirected edge between two nodeids."""
        self.edges[source].append(target)
        self.edges[target].append(source)
        self.num_edges += 1
    @staticmethod
    def from_json(j):
        """Build a Board from a parsed JSON map dict with 'sites',
        'rivers' and 'mines' keys.

        Declared @staticmethod so it also works when called on an
        instance, not just as Board.from_json(...).
        """
        self = Board()
        for s in j['sites']:
            if 'x' in s:
                self.add_node(s['id'], False, s['x'], s['y'])
            else:
                self.add_node(s['id'], False)
        for r in j['rivers']:
            self.add_edge(self.s2n[r['source']], self.s2n[r['target']])
        for m in j['mines']:
            a = self.s2n[m]
            self.ismine[a] = True
            self.mines.append(a)
        return self
    @staticmethod
    def from_json_file(mapfile):
        """Load a JSON map file from disk and build a Board from it."""
        with open(mapfile, 'r') as f:
            j = json.load(f)
        return Board.from_json(j)
    def summary(self):
        """One-line human-readable size summary of the board."""
        return '{} nodes, {} edges, {} mines'.format(self.n,
                self.num_edges, len(self.mines))
    def layout_initial(self):
        """Assign every node a uniformly random position in the unit square."""
        for i in range(self.n):
            self.xy[i] = (random.random(), random.random())
    def layout_relax(self, numsteps = 10):
        """Improve the layout with a force-directed relaxation:
        connected nodes attract (log distance), all pairs repel (1/dist).
        Positions are rescaled to (0, 1) afterwards."""
        import numpy as np
        n = self.n
        x = np.array([xy[0] for xy in self.xy])
        y = np.array([xy[1] for xy in self.xy])
        connected = np.zeros((n, n), dtype = bool)
        diag = np.identity(n, dtype = bool)
        for i in range(n):
            for j in self.edges[i]:
                connected[i, j] = True
        # Pre-scale so typical inter-node distance is O(1).
        xlow = np.min(x)
        xhigh = np.max(x)
        ylow = np.min(y)
        yhigh = np.max(y)
        x = np.sqrt(n) * (x - xlow) / (xhigh - xlow)
        y = np.sqrt(n) * (y - ylow) / (yhigh - ylow)
        for i in range(numsteps):
            dx = np.zeros((n,))
            dy = np.zeros((n,))
            dd = (x[:, None] - x[None, :]) ** 2 + (y[:, None] - y[None, :]) ** 2
            dd[diag] = 1        # avoid log(0)/div-by-zero on the diagonal
            force_a = np.log(dd)
            # force_b = -1 / dd
            force_b = -1 / np.sqrt(dd)
            force_a[~connected] = 0
            force_b[connected] = 0
            force = force_a + force_b
            force[diag] = 0
            dx = np.sum(force * (x[None, :] - x[:, None]) / np.sqrt(dd), axis = 1)
            dy = np.sum(force * (y[None, :] - y[:, None]) / np.sqrt(dd), axis = 1)
            x += 0.1 * dx
            y += 0.1 * dy
        # Normalize positions to (0, 1)
        xlow = np.min(x)
        xhigh = np.max(x)
        ylow = np.min(y)
        yhigh = np.max(y)
        x = (x - xlow) / (xhigh - xlow)
        y = (y - ylow) / (yhigh - ylow)
        for i in range(n):
            self.xy[i] = (x[i], y[i])
    def layout_normalize(self):
        """Rescale all stored positions into the unit square."""
        xs = [xy[0] for xy in self.xy]
        ys = [xy[1] for xy in self.xy]
        xlow = min(xs)
        xhigh = max(xs)
        ylow = min(ys)
        yhigh = max(ys)
        for i in range(self.n):
            self.xy[i] = ((self.xy[i][0] - xlow) / (xhigh - xlow),
                    (self.xy[i][1] - ylow) / (yhigh - ylow))
| estansifer/icfpc2017 | src/board.py | Python | mit | 3,855 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
# Generated test driver: run the generic classification SQL code-generation
# test for an LGBMClassifier on the FourClass_100 dataset targeting MSSQL.
class_gen.test_model("LGBMClassifier" , "FourClass_100" , "mssql")
| antoinecarme/sklearn2sql_heroku | tests/classification/FourClass_100/ws_FourClass_100_LGBMClassifier_mssql_code_gen.py | Python | bsd-3-clause | 142 |
# This file is part of Medieer.
#
# Medieer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medieer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
from sqlobject import connectionForURI, sqlhub
from models import Series, NSID, Media, Genre, Settings, Person, get_setting
# Convenience aliases re-exported at package level so callers can use
# core.TV / core.MOVIES instead of reaching into the Media model.
TV = Media.TV
MOVIES = Media.MOVIES
media_types = Media.media_types
| toddself/Medieer | src/core/__init__.py | Python | gpl-3.0 | 891 |
# -*- test-case-name: twisted.conch.test.test_recvline -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.recvline} and fixtures for testing related
functionality.
"""
import sys, os
from twisted.conch.insults import insults
from twisted.conch import recvline
from twisted.python import reflect, components
from twisted.internet import defer, error
from twisted.trial import unittest
from twisted.cred import portal
from twisted.test.proto_helpers import StringTransport
class Arrows(unittest.TestCase):
    """
    Unit tests for L{recvline.HistoricRecvLine} keystroke handling:
    printable input, cursor movement, history navigation, and the
    insert/typeover editing modes.
    """
    def setUp(self):
        # Wire a HistoricRecvLine to an insults ServerProtocol over an
        # in-memory transport so keystrokes can be fed in directly.
        self.underlyingTransport = StringTransport()
        self.pt = insults.ServerProtocol()
        self.p = recvline.HistoricRecvLine()
        self.pt.protocolFactory = lambda: self.p
        self.pt.factory = self
        self.pt.makeConnection(self.underlyingTransport)
        # self.p.makeConnection(self.pt)
    def testPrintableCharacters(self):
        """Typed characters accumulate to the left of the cursor."""
        self.p.keystrokeReceived('x', None)
        self.p.keystrokeReceived('y', None)
        self.p.keystrokeReceived('z', None)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
    def testHorizontalArrows(self):
        """Left/right arrows move the cursor within the line and stop at
        either end of the buffer."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'xyz':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
        kR(self.pt.LEFT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xy', 'z'))
        kR(self.pt.LEFT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('x', 'yz'))
        kR(self.pt.LEFT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'xyz'))
        kR(self.pt.LEFT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'xyz'))
        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('x', 'yz'))
        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xy', 'z'))
        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
        kR(self.pt.RIGHT_ARROW)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
    def testNewline(self):
        """Newline commits the current line to the history buffer."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'xyz\nabc\n123\n':
            kR(ch)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123'), ()))
        kR('c')
        kR('b')
        kR('a')
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123'), ()))
        kR('\n')
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123', 'cba'), ()))
    def testVerticalArrows(self):
        """Up/down arrows walk backwards/forwards through the history,
        clamping at either end."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'xyz\nabc\n123\n':
            kR(ch)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123'), ()))
        self.assertEquals(self.p.currentLineBuffer(), ('', ''))
        kR(self.pt.UP_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc'), ('123',)))
        self.assertEquals(self.p.currentLineBuffer(), ('123', ''))
        kR(self.pt.UP_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz',), ('abc', '123')))
        self.assertEquals(self.p.currentLineBuffer(), ('abc', ''))
        kR(self.pt.UP_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          ((), ('xyz', 'abc', '123')))
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
        kR(self.pt.UP_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          ((), ('xyz', 'abc', '123')))
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
        for i in range(4):
            kR(self.pt.DOWN_ARROW)
        self.assertEquals(self.p.currentHistoryBuffer(),
                          (('xyz', 'abc', '123'), ()))
    def testHome(self):
        """HOME moves the cursor to the start of the line."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'hello, world':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))
        kR(self.pt.HOME)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'hello, world'))
    def testEnd(self):
        """END moves the cursor to the end of the line."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'hello, world':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))
        kR(self.pt.HOME)
        kR(self.pt.END)
        self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))
    def testBackspace(self):
        """Backspace deletes the character left of the cursor and is a
        no-op at the start of the line."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'xyz':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
        kR(self.pt.BACKSPACE)
        self.assertEquals(self.p.currentLineBuffer(), ('xy', ''))
        kR(self.pt.LEFT_ARROW)
        kR(self.pt.BACKSPACE)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'y'))
        kR(self.pt.BACKSPACE)
        self.assertEquals(self.p.currentLineBuffer(), ('', 'y'))
    def testDelete(self):
        """DELETE removes the character under the cursor and is a no-op
        at the end of the line."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'xyz':
            kR(ch)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
        kR(self.pt.LEFT_ARROW)
        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('xy', ''))
        kR(self.pt.LEFT_ARROW)
        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('x', ''))
        kR(self.pt.LEFT_ARROW)
        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('', ''))
        kR(self.pt.DELETE)
        self.assertEquals(self.p.currentLineBuffer(), ('', ''))
    def testInsert(self):
        """In the default insert mode, typing mid-line pushes later
        characters to the right."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'xyz':
            kR(ch)
        # kR(self.pt.INSERT)
        kR(self.pt.LEFT_ARROW)
        kR('A')
        self.assertEquals(self.p.currentLineBuffer(), ('xyA', 'z'))
        kR(self.pt.LEFT_ARROW)
        kR('B')
        self.assertEquals(self.p.currentLineBuffer(), ('xyB', 'Az'))
    def testTypeover(self):
        """After INSERT toggles typeover mode, typing mid-line overwrites
        the character under the cursor."""
        kR = lambda ch: self.p.keystrokeReceived(ch, None)
        for ch in 'xyz':
            kR(ch)
        kR(self.pt.INSERT)
        kR(self.pt.LEFT_ARROW)
        kR('A')
        self.assertEquals(self.p.currentLineBuffer(), ('xyA', ''))
        kR(self.pt.LEFT_ARROW)
        kR('B')
        self.assertEquals(self.p.currentLineBuffer(), ('xyB', ''))
from twisted.conch import telnet
from twisted.conch.insults import helper
from twisted.protocols import loopback
class EchoServer(recvline.HistoricRecvLine):
    """Test fixture: echoes each received line back followed by the
    current prompt."""
    def lineReceived(self, line):
        self.terminal.write(line + '\n' + self.ps[self.pn])
# An insults API for this would be nice.
# Raw VT102/xterm escape sequences used to drive the server-side editor
# in the loopback tests below.
left = "\x1b[D"        # cursor left
right = "\x1b[C"       # cursor right
up = "\x1b[A"          # cursor up (history back)
down = "\x1b[B"        # cursor down (history forward)
insert = "\x1b[2~"     # toggle insert/typeover
home = "\x1b[1~"       # start of line
delete = "\x1b[3~"     # delete under cursor
end = "\x1b[4~"        # end of line
backspace = "\x7f"     # delete left of cursor
from twisted.cred import checkers
try:
from twisted.conch.ssh import userauth, transport, channel, connection, session
from twisted.conch.manhole_ssh import TerminalUser, TerminalSession, TerminalRealm, TerminalSessionTransport, ConchFactory
except ImportError:
ssh = False
else:
ssh = True
class SessionChannel(channel.SSHChannel):
    """Client-side SSH 'session' channel which requests a vt102 pty and a
    shell, then connects an instance built by C{protocolFactory} to the
    channel's data stream."""
    name = 'session'
    def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
        channel.SSHChannel.__init__(self, *a, **kw)
        self.protocolFactory = protocolFactory
        self.protocolArgs = protocolArgs
        self.protocolKwArgs = protocolKwArgs
        self.width = width
        self.height = height
    def channelOpen(self, data):
        # Negotiate a pty of the requested size and start a shell, then
        # hand the channel to the wrapped protocol.
        term = session.packRequest_pty_req("vt102", (self.height, self.width, 0, 0), '')
        self.conn.sendRequest(self, 'pty-req', term)
        self.conn.sendRequest(self, 'shell', '')
        self._protocolInstance = self.protocolFactory(*self.protocolArgs, **self.protocolKwArgs)
        self._protocolInstance.factory = self
        self._protocolInstance.makeConnection(self)
    def closed(self):
        self._protocolInstance.connectionLost(error.ConnectionDone())
    def dataReceived(self, data):
        self._protocolInstance.dataReceived(data)
class TestConnection(connection.SSHConnection):
    """SSH connection service that opens a single L{SessionChannel} as soon
    as the service starts, and proxies writes to it."""
    def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
        connection.SSHConnection.__init__(self, *a, **kw)
        self.protocolFactory = protocolFactory
        self.protocolArgs = protocolArgs
        self.protocolKwArgs = protocolKwArgs
        self.width = width
        self.height = height
    def serviceStarted(self):
        self.__channel = SessionChannel(self.protocolFactory, self.protocolArgs, self.protocolKwArgs, self.width, self.height)
        self.openChannel(self.__channel)
    def write(self, bytes):
        # Forward writes to the session channel.
        return self.__channel.write(bytes)
class TestAuth(userauth.SSHUserAuthClient):
    """Userauth client that answers password prompts with a fixed password."""
    def __init__(self, username, password, *a, **kw):
        userauth.SSHUserAuthClient.__init__(self, username, *a, **kw)
        self.password = password
    def getPassword(self):
        return defer.succeed(self.password)
class TestTransport(transport.SSHClientTransport):
    """SSH client transport that accepts any host key, authenticates with
    the given username/password, and opens a pty session of the given
    size running C{protocolFactory}."""
    def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, username, password, width, height, *a, **kw):
        # transport.SSHClientTransport.__init__(self, *a, **kw)
        self.protocolFactory = protocolFactory
        self.protocolArgs = protocolArgs
        self.protocolKwArgs = protocolKwArgs
        self.username = username
        self.password = password
        self.width = width
        self.height = height
    def verifyHostKey(self, hostKey, fingerprint):
        # Tests don't care about host identity.
        return defer.succeed(True)
    def connectionSecure(self):
        self.__connection = TestConnection(self.protocolFactory, self.protocolArgs, self.protocolKwArgs, self.width, self.height)
        self.requestService(
            TestAuth(self.username, self.password, self.__connection))
    def write(self, bytes):
        # Forward writes through the connection to the session channel.
        return self.__connection.write(bytes)
class TestSessionTransport(TerminalSessionTransport):
    """Session transport that builds the server protocol from the Conch
    factory's C{serverProtocol} attribute."""
    def protocolFactory(self):
        return self.avatar.conn.transport.factory.serverProtocol()
class TestSession(TerminalSession):
    # Use the test transport above instead of the default.
    transportFactory = TestSessionTransport
class TestUser(TerminalUser):
    pass
# Adapt TestUser avatars to ISession via TestSession.
components.registerAdapter(TestSession, TestUser, session.ISession)
class LoopbackRelay(loopback.LoopbackRelay):
    """Loopback relay that schedules a buffer flush on the reactor after
    every write, so data flows without manual clearBuffer() calls."""
    clearCall = None
    def logPrefix(self):
        return "LoopbackRelay(%r)" % (self.target.__class__.__name__,)
    def write(self, bytes):
        loopback.LoopbackRelay.write(self, bytes)
        # Coalesce multiple writes into a single deferred flush.
        if self.clearCall is not None:
            self.clearCall.cancel()
        from twisted.internet import reactor
        self.clearCall = reactor.callLater(0, self._clearBuffer)
    def _clearBuffer(self):
        self.clearCall = None
        loopback.LoopbackRelay.clearBuffer(self)
class NotifyingExpectableBuffer(helper.ExpectableBuffer):
    """ExpectableBuffer that exposes Deferreds fired on connection and
    disconnection, so tests can wait for either event."""
    def __init__(self):
        self.onConnection = defer.Deferred()
        self.onDisconnection = defer.Deferred()
    def connectionMade(self):
        helper.ExpectableBuffer.connectionMade(self)
        self.onConnection.callback(self)
    def connectionLost(self, reason):
        # Disconnection is delivered as a failure.
        self.onDisconnection.errback(reason)
class _BaseMixin:
    """Shared assertions for the loopback test mixins: compares the emulated
    client screen against an expected list of lines."""
    WIDTH = 80
    HEIGHT = 24
    def _assertBuffer(self, lines):
        # Pad the expectation with blank lines to fill the whole screen.
        receivedLines = str(self.recvlineClient).splitlines()
        expectedLines = lines + ([''] * (self.HEIGHT - len(lines) - 1))
        self.assertEquals(len(receivedLines), len(expectedLines))
        for i in range(len(receivedLines)):
            self.assertEquals(
                receivedLines[i], expectedLines[i],
                str(receivedLines[max(0, i-1):i+1]) +
                " != " +
                str(expectedLines[max(0, i-1):i+1]))
    def _trivialTest(self, input, output):
        # Send input, wait for the literal "done" marker, then verify the
        # resulting screen contents.
        done = self.recvlineClient.expect("done")
        self._testwrite(input)
        def finished(ign):
            self._assertBuffer(output)
        return done.addCallback(finished)
class _SSHMixin(_BaseMixin):
    """Run the recvline tests over an in-memory SSH connection: a Conch
    server and the test SSH client wired together with LoopbackRelays."""
    def setUp(self):
        if not ssh:
            raise unittest.SkipTest("Crypto requirements missing, can't run historic recvline tests over ssh")
        u, p = 'testuser', 'testpass'
        rlm = TerminalRealm()
        rlm.userFactory = TestUser
        rlm.chainedProtocolFactory = lambda: insultsServer
        ptl = portal.Portal(
            rlm,
            [checkers.InMemoryUsernamePasswordDatabaseDontUse(**{u: p})])
        sshFactory = ConchFactory(ptl)
        sshFactory.serverProtocol = self.serverProtocol
        sshFactory.startFactory()
        recvlineServer = self.serverProtocol()
        insultsServer = insults.ServerProtocol(lambda: recvlineServer)
        sshServer = sshFactory.buildProtocol(None)
        clientTransport = LoopbackRelay(sshServer)
        recvlineClient = NotifyingExpectableBuffer()
        insultsClient = insults.ClientProtocol(lambda: recvlineClient)
        sshClient = TestTransport(lambda: insultsClient, (), {}, u, p, self.WIDTH, self.HEIGHT)
        serverTransport = LoopbackRelay(sshClient)
        sshClient.makeConnection(clientTransport)
        sshServer.makeConnection(serverTransport)
        self.recvlineClient = recvlineClient
        self.sshClient = sshClient
        self.sshServer = sshServer
        self.clientTransport = clientTransport
        self.serverTransport = serverTransport
        return recvlineClient.onConnection
    def _testwrite(self, bytes):
        self.sshClient.write(bytes)
from twisted.conch.test import test_telnet
# Insults protocols mixed with the telnet TestProtocol helper so the
# telnet loopback tests can observe negotiation as well as terminal data.
class TestInsultsClientProtocol(insults.ClientProtocol,
                                test_telnet.TestProtocol):
    pass
class TestInsultsServerProtocol(insults.ServerProtocol,
                                test_telnet.TestProtocol):
    pass
class _TelnetMixin(_BaseMixin):
    """Run the recvline tests over an in-memory telnet connection."""
    def setUp(self):
        recvlineServer = self.serverProtocol()
        insultsServer = TestInsultsServerProtocol(lambda: recvlineServer)
        telnetServer = telnet.TelnetTransport(lambda: insultsServer)
        clientTransport = LoopbackRelay(telnetServer)
        recvlineClient = NotifyingExpectableBuffer()
        insultsClient = TestInsultsClientProtocol(lambda: recvlineClient)
        telnetClient = telnet.TelnetTransport(lambda: insultsClient)
        serverTransport = LoopbackRelay(telnetClient)
        telnetClient.makeConnection(clientTransport)
        telnetServer.makeConnection(serverTransport)
        serverTransport.clearBuffer()
        clientTransport.clearBuffer()
        self.recvlineClient = recvlineClient
        self.telnetClient = telnetClient
        self.clientTransport = clientTransport
        self.serverTransport = serverTransport
        return recvlineClient.onConnection
    def _testwrite(self, bytes):
        self.telnetClient.write(bytes)
try:
from twisted.conch import stdio
except ImportError:
stdio = None
class _StdioMixin(_BaseMixin):
    """Run the recvline tests against a real child process speaking the
    server protocol over stdio with a pty."""
    def setUp(self):
        # A memory-only terminal emulator, into which the server will
        # write things and make other state changes.  What ends up
        # here is basically what a user would have seen on their
        # screen.
        testTerminal = NotifyingExpectableBuffer()
        # An insults client protocol which will translate bytes
        # received from the child process into keystroke commands for
        # an ITerminalProtocol.
        insultsClient = insults.ClientProtocol(lambda: testTerminal)
        # A process protocol which will translate stdout and stderr
        # received from the child process to dataReceived calls and
        # error reporting on an insults client protocol.
        processClient = stdio.TerminalProcessProtocol(insultsClient)
        # Run twisted/conch/stdio.py with the name of a class
        # implementing ITerminalProtocol.  This class will be used to
        # handle bytes we send to the child process.
        exe = sys.executable
        module = stdio.__file__
        if module.endswith('.pyc') or module.endswith('.pyo'):
            module = module[:-1]
        args = [exe, module, reflect.qual(self.serverProtocol)]
        env = os.environ.copy()
        env["PYTHONPATH"] = os.pathsep.join(sys.path)
        from twisted.internet import reactor
        clientTransport = reactor.spawnProcess(processClient, exe, args,
                                               env=env, usePTY=True)
        self.recvlineClient = self.testTerminal = testTerminal
        self.processClient = processClient
        self.clientTransport = clientTransport
        # Wait for the process protocol and test terminal to become
        # connected before proceeding.  The former should always
        # happen first, but it doesn't hurt to be safe.
        return defer.gatherResults(filter(None, [
            processClient.onConnection,
            testTerminal.expect(">>> ")]))
    def tearDown(self):
        # Kill the child process.  We're done with it.
        try:
            self.clientTransport.signalProcess("KILL")
        except (error.ProcessExitedAlready, OSError):
            pass
        def trap(failure):
            # SIGKILL shows up as ProcessTerminated with status 9.
            failure.trap(error.ProcessTerminated)
            self.assertEquals(failure.value.exitCode, None)
            self.assertEquals(failure.value.status, 9)
        return self.testTerminal.onDisconnection.addErrback(trap)
    def _testwrite(self, bytes):
        self.clientTransport.write(bytes)
class RecvlineLoopbackMixin:
    """Line-editing test cases run against EchoServer over whichever
    transport the companion mixin (telnet/ssh/stdio) provides."""
    serverProtocol = EchoServer
    def testSimple(self):
        """A plain line is echoed back under the prompt."""
        return self._trivialTest(
            "first line\ndone",
            [">>> first line",
             "first line",
             ">>> done"])
    def testLeftArrow(self):
        """Typeover after moving left replaces trailing characters."""
        return self._trivialTest(
            insert + 'first line' + left * 4 + "xxxx\ndone",
            [">>> first xxxx",
             "first xxxx",
             ">>> done"])
    def testRightArrow(self):
        """Right arrow moves back toward the end of the line."""
        return self._trivialTest(
            insert + 'right line' + left * 4 + right * 2 + "xx\ndone",
            [">>> right lixx",
             "right lixx",
             ">>> done"])
    def testBackspace(self):
        """Backspace erases characters before new input."""
        return self._trivialTest(
            "second line" + backspace * 4 + "xxxx\ndone",
            [">>> second xxxx",
             "second xxxx",
             ">>> done"])
    def testDelete(self):
        """Delete removes characters under the cursor."""
        return self._trivialTest(
            "delete xxxx" + left * 4 + delete * 4 + "line\ndone",
            [">>> delete line",
             "delete line",
             ">>> done"])
    def testInsert(self):
        """Insert mode pushes the remainder of the line right."""
        return self._trivialTest(
            "third ine" + left * 3 + "l\ndone",
            [">>> third line",
             "third line",
             ">>> done"])
    def testTypeover(self):
        """Typeover mode overwrites the character under the cursor."""
        return self._trivialTest(
            "fourth xine" + left * 4 + insert + "l\ndone",
            [">>> fourth line",
             "fourth line",
             ">>> done"])
    def testHome(self):
        """HOME jumps to the start of the line before typing."""
        return self._trivialTest(
            insert + "blah line" + home + "home\ndone",
            [">>> home line",
             "home line",
             ">>> done"])
    def testEnd(self):
        """END jumps back to the end of the line before typing."""
        return self._trivialTest(
            "end " + left * 4 + end + "line\ndone",
            [">>> end line",
             "end line",
             ">>> done"])
# Concrete test cases: the same editing tests over each transport.
class RecvlineLoopbackTelnet(_TelnetMixin, unittest.TestCase, RecvlineLoopbackMixin):
    pass
class RecvlineLoopbackSSH(_SSHMixin, unittest.TestCase, RecvlineLoopbackMixin):
    pass
class RecvlineLoopbackStdio(_StdioMixin, unittest.TestCase, RecvlineLoopbackMixin):
    if stdio is None:
        skip = "Terminal requirements missing, can't run recvline tests over stdio"
class HistoricRecvlineLoopbackMixin:
    """History-navigation test cases (up/down arrows) against EchoServer."""
    serverProtocol = EchoServer
    def testUpArrow(self):
        """Up arrow recalls the previous line for re-submission."""
        return self._trivialTest(
            "first line\n" + up + "\ndone",
            [">>> first line",
             "first line",
             ">>> first line",
             "first line",
             ">>> done"])
    def testDownArrow(self):
        """Down arrow moves forward again after going back in history."""
        return self._trivialTest(
            "first line\nsecond line\n" + up * 2 + down + "\ndone",
            [">>> first line",
             "first line",
             ">>> second line",
             "second line",
             ">>> second line",
             "second line",
             ">>> done"])
# Concrete test cases: the history tests over each transport.
class HistoricRecvlineLoopbackTelnet(_TelnetMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
    pass
class HistoricRecvlineLoopbackSSH(_SSHMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
    pass
class HistoricRecvlineLoopbackStdio(_StdioMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
    if stdio is None:
        skip = "Terminal requirements missing, can't run historic recvline tests over stdio"
| mzdaniel/oh-mainline | vendor/packages/twisted/twisted/conch/test/test_recvline.py | Python | agpl-3.0 | 21,575 |
import random
import bisect
import numpy as np
from network_models import *
def generalize_three_pass(network_model, assign_nodes, overlay_communities, g_params, c_params):
    """Three-pass community benchmark: build a graph with `network_model`,
    then assign nodes to communities and overlay the community structure."""
    G = network_model(g_params)
    # print_seq_stats( '\t\t network_generated', G.deg)
    return generalize_three_pass_network(G, assign_nodes, overlay_communities, c_params )
def generalize_three_pass_network(G, assign_nodes, overlay_communities, c_params):
    """Second and third pass on an existing graph G: assign nodes to
    communities, log the sizes, then rewire edges to realize them."""
    C = assign_nodes(G, c_params)
    print_seq_stats('\t\t node_assigned', [len(c) for c in C[0]])
    return overlay_communities(G, C, c_params)
def print_seq_stats(msg, S):
    """Print len/min/avg/max/sum summary of sequence S, prefixed by msg
    (Python 2 print statement)."""
    print msg, ':::\t len: ',len(S),' min: ', np.min(S) ,' avg: ',np.mean(S),' max: ', np.max(S),' sum: ', np.sum(S)
def assign_CN(G, c_params):
    """LFR-style assignment where nodes prefer communities that already
    contain their neighbours (common-neighbour weighting)."""
    def cn_prob(G, v, C, ec, Cid):
        # Weight each community by 0.1 plus the number of v's neighbours
        # already assigned to it, then renormalize over the open ones.
        p = [0.1]*(len(C))
        for u in G.neigh[v]:
            if Cid[u]>=0: p[Cid[u]]+=1
        p= [p[i] for i in ec] #remove communities that are full
        p= [i/sum(p) for i in p]#np.divide(p, np.sum(p))
        # print p
        return p
    return assign_LFR(G, c_params, cn_prob)
def random_choice(values, weights=None, size = 1, replace = True):
    """Pick `size` elements from `values`, uniformly or by `weights`.

    Returns a single element when size <= 1; otherwise a nested tuple
    (pick, rest) built recursively, where `rest` omits the picked index
    when replace is False.  Weighted selection uses cumulative weights
    plus bisect, mirroring the classic weighted-choice recipe.

    Fix: the Python-2-only `<>` operator is replaced with `!=` (identical
    semantics, also valid in Python 3).
    """
    if weights is None:
        # Uniform: scale a [0,1) draw up to an index.
        i = int(random.random() * len(values))
    else :
        # Weighted: locate a uniform draw within the cumulative weights.
        total = 0
        cum_weights = []
        for w in weights:
            total += w
            cum_weights.append(total)
        x = random.random() * total
        i = bisect.bisect(cum_weights, x)
    if size <= 1:
        return values[i]
    else:
        # Recurse for the remaining picks; drop index i when sampling
        # without replacement.
        cval = [values[j] for j in range(len(values)) if replace or i != j]
        if weights is None:
            cwei = None
        else:
            cwei = [weights[j] for j in range(len(weights)) if replace or i != j]
        return values[i], random_choice(cval, cwei, size - 1, replace)
def assign_first_pass_original(G,mu, s, c, cid, prob):
    """Original LFR first pass: visit every node once, pick an open
    community (optionally weighted by `prob`), and assign the node only
    if the community capacity can host its (1-mu) internal degree."""
    # assign nodes to communities
    for v in range(G.n):
        # pick a community at random
        ec = [i for i in range( len(c)) if s[i]>len(c[i])]
        # if prob is None:
        #     p = None
        # else:
        #     print prob
        #     print ec
        #     p = prob(G,v,c, cid)
        #     p = [p[i] for i in ec]
        #     p = np.divide(p, np.sum(p))
        i = random_choice(ec, None if prob is None else prob(G,v,c,ec, cid))
        # assign to community if fits
        if s[i] >= (1- mu)*G.deg[v] :
            c[i].append(v)
            cid[v] = i
def assign_first_pass_NE(G,mu, s, c, cid, prob):
    """Neighbourhood-expansion first pass: pick a community for an
    unassigned seed node, then grow it breadth-first through unassigned
    neighbours until the community is full."""
    for v in range(G.n):
        if cid[v]==-1:
            # pick a community at random
            ec = [i for i in range( len(c)) if s[i]>len(c[i])]
            i = random_choice(ec, None if prob is None else prob(G,v,c,ec, cid))
            # i = np.random_choice([i for i in range( len(c)) if s[i]>len(c[i])],
            #                      p = None if prob is None else prob(G,v,c, cid) )
            to_add = [v]
            marked = [0]*G.n
            marked[v] =1
            # BFS expansion from the seed while capacity remains.
            while len(to_add)>0 and len(c[i])< s[i]:
                v = to_add.pop(0)
                # assign to community if fits
                if s[i] >= (1- mu)*G.deg[v] :
                    c[i].append(v)
                    cid[v] = i
                    for u in G.neigh[v]:
                        if marked[u]==0 and cid[u]==-1:
                            to_add.append(u)
                            marked[u]=1
def assign_LFR(G, c_params, prob=None, first_pass= assign_first_pass_original, max_itr = 1000):
    """Assign the nodes of G to power-law-sized communities (LFR scheme).

    Samples community sizes, runs `first_pass` to place most nodes, then
    repeatedly re-homes "homeless" nodes (kicking out random members when a
    community overflows) for up to `max_itr` rounds; any leftovers are
    force-placed.  Returns (communities, community_id_per_node).
    NOTE(review): mutates c_params in place ('s_sum', possibly 's_max').
    """
    c_params['s_sum'] = G.n
    mu = c_params['mu']
    # determine capacity of communities
    d_max = np.max(G.deg)
    # print G.n, c_params['s_max'], d_max, (1-mu) *d_max
    if c_params['s_max']< (1-mu) *d_max : c_params['s_max'] =(1-mu) * d_max
    # print c_params['s_max'], d_max
    s = sample_power_law(**c_params)
    c_max = max(s)
    # print_seq_stats('community_sizes_sampeled',s)
    # initialize the communities and community ids
    c = [[] for i in range(len(s))]
    cid = [-1] * G.n
    first_pass(G,mu, s, c, cid, prob)
    # print_seq_stats('1... ',[len(l) for l in c])
    # initialize the homeless queue
    H = [v for v in range(G.n) if cid[v]==-1]
    itr = 0
    # assign homeless nodes to communities
    while len(H)>0 and max_itr>itr:
        itr+=1
        # print itr
        # pick a community at random
        v = random_choice(H)
        ec = [i for i in range( len(c))]
        i = random_choice(ec, None if prob is None else prob(G, v,c,ec, cid) )
        if s[i] >= min((1- mu)*G.deg[v], c_max) :
            c[i].append(v)
            cid[v]=i
            H.remove(v)
            # itr=0
        # kick out a random node
        if len(c[i])> s[i]:
            u = random_choice(c[i])
            c[i].remove(u)
            cid[u] = -1
            H.append(u)
    if len(H)>0: print "Failed in 2nd run"
    for v in H:
        # pick a community at random
        ec =[i for i in range( len(c))]
        i = random_choice(ec, None if prob is None else prob(G, v,c,ec, cid) )
        c[i].append(v)
        cid[v]=i
    return c, cid
def assign_NE(G, c_params, prob=None):
    """Assign nodes to communities using the neighbourhood-expansion
    first pass (assign_first_pass_NE) instead of the original one.

    Bug fix: forward the caller-supplied `prob` to assign_LFR — it was
    previously hard-coded to None, silently discarding the argument.
    """
    return assign_LFR(G, c_params, prob=prob, first_pass= assign_first_pass_NE)
def overlay_LFR(G, C, c_params):
    """Rewire G so that each node has ~mu of its degree outside its
    community: add/remove within-community edges first, then balance the
    between-community edges.  Returns the rewired (G, C)."""
    mu = c_params['mu']
    n= G.n
    deg = G.deg #degree_seq(n, edge_list)
    C, Cid = C
    # determine degree of each node and its between/outlink degree,
    # i.e. number of edges that go outside its community
    db = [0]* n
    # d = [0] * n
    # neigh = [[] for i in range(0, n)]
    for e in G.edge_list:
        u , v = e
        if Cid[u] != Cid[v]:
            db[u]+=1
            db[v]+=1
    # determine desired between changes
    for v in range(n):
        db[v] = np.floor(mu*G.deg[v] - db[v])
    # within-community deficit is the negation of the between surplus
    dw = np.multiply(db, -1)
    # rewire edges within communities
    for c in C:
        I = [v for v in c if dw[v]>0]
        # add internal edges
        while len(I)>=2:
            u, v = random_choice(I, size =2, replace = False)
            G.add_edge(u,v)
            dw[u]-=1
            dw[v]-=1
            if dw[u] ==0: I.remove(u)
            if dw[v] ==0: I.remove(v)
        # remove excess edges
        for v in c:
            if dw[v]<0:
                I = [u for u in G.neigh[v] if u in c and dw[u]<0]
                while len(I)>=1 and dw[v]<0:
                    u = random_choice(I)
                    G.remove_edge(u,v)
                    dw[u]+=1
                    dw[v]+=1
                    I.remove(u)
    # rewire edges between communities
    for c in C:
        I = [v for v in c if db[v]>0]
        O = [v for v in range(n) if v not in c and db[v]>0]
        # add internal edges
        while len(I)>=1 and len(O)>=1:
            v = random_choice(I)
            u = random_choice(O)
            G.add_edge(u,v)
            db[u]-=1
            db[v]-=1
            if db[v] ==0: I.remove(v)
            if db[u] ==0: O.remove(u)
        # remove excess edges
        for v in c:
            if db[v]<0:
                O = [u for u in G.neigh[v] if u not in c and db[u]<0]
                while len(O)>=1 and db[v]<0:
                    u = random_choice(O)
                    G.remove_edge(u,v)
                    db[u]+=1
                    db[v]+=1
                    O.remove(u)
    return G, C
def configuration_model(params):
    """Sample a power-law degree sequence with `params` and build a Graph
    from it via the configuration model."""
    S = sample_power_law(**params)
    # print_seq_stats('degree_sampled',S)
    return Graph(len(S), configuration_model_from_sequence(S))
def sample_power_law(s_exp=2, n=None, s_avg=None, s_max=None, s_min=1, s_sum=None, discrete=True, **kwargs):
    """Draw samples from a (truncated, optionally rescaled) power law.

    Two modes:
      * ``n`` given   -- draw exactly n samples (s_avg derived from s_sum if needed).
      * ``s_sum`` given -- keep drawing until the samples add up to s_sum.
    Values are Pareto(s_exp - 1) draws, scaled/truncated by scale_truncate,
    and rounded to ints when ``discrete`` is true.
    """
    S = None
    if n is not None:  # number of samples is fixed
        if s_avg is None and s_sum is not None: s_avg = s_sum * 1.0 / n
        # 1.0/np.random.power(exp+1, size=n)
        S = np.array([])
        c = None  # scaling factor, carried across iterations so rescaling is consistent
        while (len(S) < n):
            tmp = np.random.pareto(s_exp - 1, size=n - len(S))
            tmp, c = scale_truncate(tmp, s_max, s_min, s_avg, c)
            S = np.hstack((S, tmp))
        S = np.around(S).astype(int) if discrete else S
    elif s_sum is not None:  # cumulative s_sum of samples is fixed
        S = []
        while (np.sum(S) < s_sum):
            tmp = np.random.pareto(s_exp - 1, size=int(s_sum - np.sum(S)))
            # NOTE(review): here c is reset to 1 on every iteration (unlike the
            # n-branch, which carries it) -- confirm this asymmetry is intended
            tmp, c = scale_truncate(tmp, s_max, s_min, s_avg, c=1)
            tmp = np.around(tmp).astype(int) if discrete else tmp
            for t in tmp:
                if t + np.sum(S) <= s_sum:
                    S.append(t)
        if np.sum(S) < s_sum:
            # distribute the remaining mass: append it if large enough,
            # otherwise spread it over existing samples in `shift`-sized steps
            tmp = s_sum - np.sum(S)
            if tmp >= s_min:
                S.append(tmp)
            else:
                # NOTE(review): np.ceil returns a float, so shifted entries may
                # become floats even when discrete=True -- verify downstream use
                shift = np.ceil(tmp * 1.0 / len(S))
                for i in range(0, len(S)):
                    if tmp > 0:
                        S[i] += shift
                        tmp -= shift
                    else: break
    # print S, np.sum(S), s_sum
    return S
def scale_truncate(S, max=None, min=None, avg=None, c=None):
    """Scale sample array S and truncate it to the open interval (min, max).

    If ``avg`` is given, iteratively rescales S so its mean approaches ``avg``
    (re-truncating after each rescale, up to 100 iterations); otherwise applies
    the carried-over scaling factor ``c`` once and truncates.

    :param S: numpy array of samples.
    :param max: exclusive upper bound, or None for unbounded. (Parameter names
        shadow builtins but are kept for backward compatibility with callers.)
    :param min: exclusive lower bound, or None.
    :param avg: target mean, or None to skip iterative rescaling.
    :param c: accumulated scale factor from a previous call; None means 1.
    :return: (truncated array, accumulated scale factor c).
    """
    # print c
    if c is None:  # idiom fix: identity test instead of `c == None`
        c = 1
    if avg is not None:
        itr = 0
        max_itr = 100
        # Repeatedly rescale toward the target mean; truncation after each
        # rescale changes the mean, which the next iteration compensates for.
        while (itr < max_itr):
            itr += 1
            c_2 = avg / np.mean(S)  # - min if min is not None else 0
            S = np.multiply(S, c_2)
            c *= c_2
            if min is not None: S = S[S > min]
            if max is not None: S = S[S < max]
    else:
        S = np.multiply(S, c)
        if min is not None: S = S[S > min]
        if max is not None: S = S[S < max]
    return S, c
def LFR(n, k_avg, k_max, mu, c_min, c_max, k_exp=2, c_exp=1, assign=assign_LFR, model=configuration_model):
    """Generate an LFR-style benchmark graph with planted communities.

    n        -- number of nodes
    k_avg/k_max/k_exp -- average/maximum degree and degree power-law exponent
    mu       -- mixing parameter (fraction of each node's edges leaving its community)
    c_min/c_max/c_exp -- community size bounds and size power-law exponent
    """
    # A node of degree k_max keeps (1-mu)*k_max internal edges, so its
    # community must be at least that large; widen c_max if necessary.
    if c_max < (1 - mu) * k_max:
        c_max = k_max
        print 'maximum size for communities adjusted to fit the node with largest degree'
    return generalize_three_pass(model, assign, overlay_LFR,
            g_params = {"n":n, "s_avg":k_avg, "s_max":k_max, "s_exp":k_exp},
            c_params= { "mu": mu, "s_min": c_min, "s_max":c_max, "s_exp":c_exp} )
| rabbanyk/FARZ | src/three_pass_benchmarks.py | Python | mit | 10,449 |
# Copyright (c) 2015 Intel Corporation
# Copyright (c) 2015 ISPRAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara import conductor
from sahara import context
from sahara.plugins.cdh import abstractversionhandler as avm
from sahara.plugins.cdh import db_helper
from sahara.plugins.cdh.v5_4_0 import cloudera_utils as cu
from sahara.plugins.cdh.v5_4_0 import config_helper as c_helper
from sahara.plugins.cdh.v5_4_0 import deploy as dp
from sahara.plugins.cdh.v5_4_0 import edp_engine
from sahara.plugins.cdh.v5_4_0 import validation as vl
conductor = conductor.API
CU = cu.ClouderaUtilsV540()
class VersionHandler(avm.AbstractVersionHandler):
    """Version handler for the CDH 5.4.0 Sahara plugin.

    Thin facade that delegates cluster lifecycle operations to the
    v5_4_0 deploy/validation/config helper modules.
    """

    def get_plugin_configs(self):
        # Expose the CDH 5.4.0 configuration definitions to the UI/API.
        return c_helper.get_plugin_configs()

    def get_node_processes(self):
        # Mapping of Sahara service names to the Cloudera process names
        # they may run; empty lists are service groups with no direct process.
        return {
            "CLOUDERA": ['CLOUDERA_MANAGER'],
            "HDFS": [],
            "NAMENODE": ['HDFS_NAMENODE'],
            "DATANODE": ['HDFS_DATANODE'],
            "SECONDARYNAMENODE": ['HDFS_SECONDARYNAMENODE'],
            "YARN": [],
            "RESOURCEMANAGER": ['YARN_RESOURCEMANAGER'],
            "NODEMANAGER": ['YARN_NODEMANAGER'],
            "JOBHISTORY": ['YARN_JOBHISTORY'],
            "OOZIE": ['OOZIE_SERVER'],
            "HIVE": [],
            "HIVESERVER": ['HIVE_SERVER2'],
            "HIVEMETASTORE": ['HIVE_METASTORE'],
            "WEBHCAT": ['HIVE_WEBHCAT'],
            "HUE": ['HUE_SERVER'],
            "SPARK_ON_YARN": ['SPARK_YARN_HISTORY_SERVER'],
            "ZOOKEEPER": ['ZOOKEEPER_SERVER'],
            "HBASE": [],
            "MASTER": ['HBASE_MASTER'],
            "REGIONSERVER": ['HBASE_REGIONSERVER'],
            "FLUME": ['FLUME_AGENT'],
            "IMPALA": [],
            "CATALOGSERVER": ['IMPALA_CATALOGSERVER'],
            "STATESTORE": ['IMPALA_STATESTORE'],
            "IMPALAD": ['IMPALAD'],
            "KS_INDEXER": ['KEY_VALUE_STORE_INDEXER'],
            "SOLR": ['SOLR_SERVER'],
            "SQOOP": ['SQOOP_SERVER'],
            "SENTRY": ['SENTRY_SERVER'],
            "KMS": ['KMS'],
            "YARN_GATEWAY": [],
            "HDFS_GATEWAY": []
        }

    def validate(self, cluster):
        # Raise if the proposed cluster topology is invalid for CDH 5.4.0.
        vl.validate_cluster_creating(cluster)

    def configure_cluster(self, cluster):
        dp.configure_cluster(cluster)

    def start_cluster(self, cluster):
        dp.start_cluster(cluster)
        # Publish management URLs/credentials once services are up.
        self._set_cluster_info(cluster)

    def decommission_nodes(self, cluster, instances):
        dp.decommission_cluster(cluster, instances)

    def validate_scaling(self, cluster, existing, additional):
        # Both shrink (existing) and grow (additional) requests are checked.
        vl.validate_existing_ng_scaling(cluster, existing)
        vl.validate_additional_ng_scaling(cluster, additional)

    def scale_cluster(self, cluster, instances):
        dp.scale_cluster(cluster, instances)

    def _set_cluster_info(self, cluster):
        """Store Cloudera Manager (and optional Hue) endpoints in cluster info."""
        mng = CU.pu.get_manager(cluster)
        info = {
            'Cloudera Manager': {
                'Web UI': 'http://%s:7180' % mng.management_ip,
                'Username': 'admin',
                'Password': db_helper.get_cm_password(cluster)
            }
        }
        hue = CU.pu.get_hue(cluster)
        if hue:
            info['Hue Dashboard'] = {
                'Web UI': 'http://%s:8888' % hue.management_ip
            }
        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})

    def get_edp_engine(self, cluster, job_type):
        # Pick the EDP engine (Oozie vs Spark) that supports this job type.
        if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
            return edp_engine.EdpOozieEngine(cluster)
        if job_type in edp_engine.EdpSparkEngine.get_supported_job_types():
            return edp_engine.EdpSparkEngine(cluster)
        return None

    def get_edp_job_types(self):
        return (edp_engine.EdpOozieEngine.get_supported_job_types() +
                edp_engine.EdpSparkEngine.get_supported_job_types())

    def get_edp_config_hints(self, job_type):
        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)

    def get_open_ports(self, node_group):
        return dp.get_open_ports(node_group)
| ekasitk/sahara | sahara/plugins/cdh/v5_4_0/versionhandler.py | Python | apache-2.0 | 4,577 |
from unittest import TestCase
from cloudshell.cp.vcenter.models.ActionResult import ActionResult
from cloudshell.cp.vcenter.models.ConnectionResult import ConnectionResult
from cloudshell.cp.vcenter.models.DriverResponse import DriverResponse, DriverResponseRoot
from cloudshell.cp.vcenter.common.utilites.command_result import get_result_from_command_output, set_command_result
class TestCommandResult(TestCase):
    """Tests for command_result serialization helpers.

    Bug fix: two method names were defined twice
    (``test_get_result_from_command_output_with_result`` and
    ``test_get_result_from_command_output_with_result_unpickable_false``);
    in Python the later definition silently shadows the earlier one, so the
    first two tests never ran. The duplicates are renamed so all seven tests
    execute. An unused local (``v = dict(result)``) was also removed.
    """

    def test_get_result_from_command_output_with_result(self):
        # A plain JSON object round-trips into a dict.
        result = get_result_from_command_output('{"result":"MY RESULT1"}')
        self.assertEqual(result["result"], 'MY RESULT1')

    def test_command_result_empty(self):
        # Empty output yields None rather than raising.
        result = get_result_from_command_output('')
        self.assertEqual(result, None)

    def test_get_result_from_command_output_with_result_unpickable_true(self):
        # unpicklable=True preserves the object type through the round trip.
        connection_result = ConnectionResult(mac_address='AA', vm_uuid='BB', network_name='CC', network_key='DD',
                                             requested_vnic='EE', vnic_name='FF')
        output_result = set_command_result(result=connection_result, unpicklable=True)
        result = get_result_from_command_output(output_result)
        self.assertEqual(result.mac_address, 'AA')
        self.assertEqual(result.vm_uuid, 'BB')
        self.assertEqual(result.network_name, 'CC')
        self.assertEqual(result.network_key, 'DD')
        self.assertEqual(result.requested_vnic, 'EE')
        self.assertEqual(result.vnic_name, 'FF')

    def test_get_result_from_command_output_with_result_unpickable_false(self):
        # unpicklable=False degrades objects to plain dicts.
        connection_result = ConnectionResult(mac_address='AA', vm_uuid='BB', network_name='CC', network_key='DD',
                                             requested_vnic='EE', vnic_name='FF')
        output_result = set_command_result(result=[connection_result], unpicklable=False)
        results = get_result_from_command_output(output_result)
        self.assertEqual(results[0]['mac_address'], 'AA')
        self.assertEqual(results[0]['vm_uuid'], 'BB')
        self.assertEqual(results[0]['network_name'], 'CC')
        self.assertEqual(results[0]['network_key'], 'DD')
        self.assertEqual(results[0]['requested_vnic'], 'EE')
        self.assertEqual(results[0]['vnic_name'], 'FF')

    def test_get_result_from_command_output_with_pickled_json(self):
        # Renamed from a duplicate of ..._with_result: jsonpickle "py/object"
        # payloads are rehydrated into real ConnectionResult instances.
        output_result = '[{"py/object": "cloudshell.cp.vcenter.models.ConnectionResult.ConnectionResult", "vm_uuid": "422258ab-47e9-d57c-3741-6832a432bc3a", "network_name": "QualiSB/anetwork", "mac_address": "00:50:56:a2:23:76"}]'
        results = get_result_from_command_output(output_result)
        self.assertEqual(results[0].mac_address, '00:50:56:a2:23:76')

    def test_get_result_from_command_output_with_plain_json(self):
        # Renamed from a duplicate of ..._unpickable_false: plain JSON arrays
        # of dicts stay dicts.
        output_result = '[{"vm_uuid": "422258c6-15d0-0646-d5e7-f2cb411eee94", "network_name": "QualiSB/anetwork", "mac_address": "00:50:56:a2:6c:04"}]'
        results = get_result_from_command_output(output_result)
        for result in results:
            self.assertEqual(result['mac_address'], '00:50:56:a2:6c:04')

    def test_set_command_result(self):
        # A nested response object serializes to the exact expected JSON.
        action_result = ActionResult()
        action_result.actionId = 'A'
        action_result.errorMessage = ''
        action_result.infoMessage = ''
        action_result.success = True
        action_result.type = 'setVlan'
        action_result.updatedInterface = 'AA-BB'
        driver_response = DriverResponse()
        driver_response.actionResults = [action_result]
        driver_response_root = DriverResponseRoot()
        driver_response_root.driverResponse = driver_response
        result = set_command_result(driver_response_root)
        self.assertEqual(result, '{"driverResponse": {"actionResults": [{"success": true, "updatedInterface": "AA-BB", "errorMessage": "", "infoMessage": "", "actionId": "A", "type": "setVlan"}]}}')
| QualiSystems/vCenterShell | package/cloudshell/tests/test_common/test_utilities/test_command_result.py | Python | apache-2.0 | 3,910 |
#!/usr/bin/python
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|R|a|s|p|b|e|r|r|y|P|i|-|S|p|y|.|c|o|.|u|k|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# wii_remote_1.py
# Connect a Nintendo Wii Remote via Bluetooth
# and read the button states in Python.
#
# Project URL :
# http://www.raspberrypi-spy.co.uk/?p=1101
#
# Author : Matt Hawkins
# Date   : 30/01/2013
# -----------------------
# Import required Python libraries
# -----------------------
import cwiid
import time

# Seconds to sleep after each detected press (crude debounce)
button_delay = 0.1

print 'Press 1 + 2 on your Wii Remote now ...'
time.sleep(1)

# Connect to the Wii Remote. If it times out
# then quit.
try:
  wii=cwiid.Wiimote()
except RuntimeError:
  print "Error opening wiimote connection"
  quit()

print 'Wii Remote connected...\n'
print 'Press some buttons!\n'
print 'Press PLUS and MINUS together to disconnect and quit.\n'

# Enable button, nunchuk and accelerometer reporting from the remote.
wii.rpt_mode = cwiid.RPT_BTN | cwiid.RPT_NUNCHUK | cwiid.RPT_ACC

while True:

  buttons = wii.state['buttons']

  # If Plus and Minus buttons pressed
  # together then rumble and quit.
  # NOTE(review): this uses arithmetic (buttons - PLUS - MINUS == 0) rather
  # than a bitmask test, so it only matches when EXACTLY those two buttons
  # are held -- confirm that is intended.
  if (buttons - cwiid.BTN_PLUS - cwiid.BTN_MINUS == 0):
    print '\nClosing connection ...'
    wii.rumble = 1
    time.sleep(1)
    wii.rumble = 0
    # NOTE(review): exit() is being passed the wiimote object as the exit
    # status -- presumably exit() alone was meant; verify.
    exit(wii)

  # Check if other buttons are pressed by
  # doing a bitwise AND of the buttons number
  # and the predefined constant for that button.
  if (buttons & cwiid.BTN_LEFT):
    print 'Left pressed'
    time.sleep(button_delay)

  if(buttons & cwiid.BTN_RIGHT):
    print 'Right pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_UP):
    print 'Up pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_DOWN):
    print 'Down pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_1):
    print 'Button 1 pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_2):
    print 'Button 2 pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_A):
    print 'Button A pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_B):
    print 'Button B pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_HOME):
    print 'Home Button pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_MINUS):
    print 'Minus Button pressed'
    time.sleep(button_delay)

  if (buttons & cwiid.BTN_PLUS):
    print 'Plus Button pressed'
    time.sleep(button_delay)
| JohnOmernik/pimeup | wiiremote/wi1.py | Python | apache-2.0 | 2,351 |
"""Auxiliary classes and functions to support the model"""
from re import match
from .odata_object_base import Guid
from . import *
def get_object_class(odata_context, odata_type=None):
    """Returns class corresponding to the odata context and type specified by parameters.

    Resolution order: an explicit odata_type wins outright; otherwise the
    @odata.context URL fragment is parsed (collection, entity type, container,
    derived type, or property of a container/derived type).

    :param odata_context: odata context.
    :param odata_type: odata type. Optional.
    :return: python class name
    :raises ValueError: when neither the type nor the context can be resolved."""
    # If we have odata type we don't need the context
    if odata_type:
        odata_type = odata_type.lstrip('#')
        if odata_type in ODATA_TYPE_TO_PYTHON:
            return ODATA_TYPE_TO_PYTHON[odata_type]
        else:
            raise ValueError("Unknown odata type: " + odata_type)
    try:
        # Remove leading part of URL and trailing /$entity
        t = odata_context.split('#')[1]
        t = t.replace("/$entity", "")
    except IndexError:
        raise ValueError("Unknown odata context: " + odata_context)
    # Find out what the context is
    if match(r"Collection\(", t):
        # It's a collection of object type
        odata_type = match(r"Collection\((?P<type>[0-9a-zA-Z_]+)\)", t).groupdict()['type']
        if odata_type in ODATA_TYPE_TO_PYTHON:
            return ODATA_TYPE_TO_PYTHON[odata_type]
        else:
            raise ValueError("Unknown odata context: " + odata_context)
    try:
        # Split the context into a root segment and an optional branch
        # (anything starting with '/' or '(').
        context_dic = match(r"(?P<root>[0-9a-zA-Z_]+)(?P<branch>[/\(].+)?", t).groupdict()
    except AttributeError:
        raise ValueError("Unknown odata context: " + odata_context)
    root, branch = context_dic['root'], context_dic['branch']
    if root in ODATA_TYPE_TO_PYTHON:
        # It's an object type
        if not branch:
            return ODATA_TYPE_TO_PYTHON[root]
        else:
            raise ValueError("Unknown odata context: " + odata_context)
    if root in ODATA_CONTAINER_TYPE:
        if not branch or match(r"\([^\(\)]+\)$", branch):
            # It's just a container (optionally addressed by key)
            return ODATA_TYPE_TO_PYTHON[ODATA_CONTAINER_TYPE[root]]
        else:
            try:
                branch_dict = match(r"/(?P<derived>[0-9a-zA-Z_]+)(?P<derived_branch>[/\(].+)?", branch).groupdict()
                derived, derived_branch = branch_dict['derived'], branch_dict['derived_branch']
                if derived in ODATA_TYPE_TO_PYTHON:
                    # It's a derived type
                    if not derived_branch or match(r"\([^\(\)]+\)$", derived_branch):
                        # It's just a derived type from a container
                        return ODATA_TYPE_TO_PYTHON[derived]
                    else:
                        prop = match(r"\(\S+\)/(?P<prop>[0-9a-zA-Z_]+)", derived_branch).groupdict()['prop']
                        if len(prop):
                            # It's a property of a derived type
                            return ODATA_TYPE_TO_PYTHON[ODATA_PROPERTY_TYPE[derived][prop]]
                        raise ValueError("Unknown odata context: " + odata_context)
            except AttributeError:
                # It's not a derived type
                pass
            try:
                prop = match(r"\(\S+\)/(?P<prop>[0-9a-zA-Z_]+)", branch).groupdict()['prop']
                if len(prop):
                    # It's a property of a container type
                    return ODATA_TYPE_TO_PYTHON[ODATA_PROPERTY_TYPE[ODATA_CONTAINER_TYPE[root]][prop]]
            except AttributeError:
                pass
    # It must be something we didn't think about
    raise ValueError("Unknown odata context: " + odata_context)
| elexpander/odataPyModel | input/extension.py | Python | mit | 3,538 |
# -*- coding: UTF-8 -*-
from datetime import datetime
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.mail import Mail
from flask.ext.login import LoginManager
from flask.ext.pagedown import PageDown
from config import config
import os
# Flask extensions instantiated unbound; each is attached to the app
# inside create_app() (application-factory pattern).
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
mail = Mail()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
pagedown = PageDown()


def create_app(config_name):
    """Application factory: build a Flask app for the named config profile.

    :param config_name: key into the ``config`` dict (e.g. 'development').
    :return: fully configured Flask application with extensions and
        blueprints (main, /auth, /api/v1.0) registered.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    # Bind each extension to this app instance.
    bootstrap.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    mail.init_app(app)
    login_manager.init_app(app)
    pagedown.init_app(app)

    # Blueprints are imported here to avoid circular imports at module load.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')

    from .api_1_0 import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api/v1.0')

    return app
| taogeT/flask_web_development_python3 | app/__init__.py | Python | gpl-3.0 | 1,190 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
#
#
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.cluster import Cluster
from cassandra.protocol import ConfigurationException
from tests.integration import use_singledc, PROTOCOL_VERSION
from tests.integration.datatype_utils import update_datatypes
def setup_module():
    # Bring up the single-DC test cluster and refresh UDT/datatype
    # metadata once for the whole module.
    use_singledc()
    update_datatypes()
class ControlConnectionTests(unittest.TestCase):
    """Integration tests around the driver's control connection lifecycle."""

    def setUp(self):
        # UDT support requires native protocol v3+.
        if PROTOCOL_VERSION < 3:
            raise unittest.SkipTest(
                "Native protocol 3,0+ is required for UDTs using %r"
                % (PROTOCOL_VERSION,))

        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)

    def tearDown(self):
        try:
            self.session.execute("DROP KEYSPACE keyspacetodrop ")
        except (ConfigurationException):
            # we already removed the keyspace.
            pass
        self.cluster.shutdown()

    def test_drop_keyspace(self):
        """
        Test to validate that dropping a keyspace with user defined types doesn't kill the control connection.


        Creates a keyspace, and populates with a user defined type. It then records the control_connection's id. It
        will then drop the keyspace and get the id of the control_connection again. They should be the same. If they are
        not dropping the keyspace likely caused the control connection to be rebuilt.

        @since 2.7.0
        @jira_ticket PYTHON-358
        @expected_result the control connection is not killed

        @test_category connection
        """
        self.session = self.cluster.connect()
        self.session.execute("""
            CREATE KEYSPACE keyspacetodrop
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
            """)
        self.session.set_keyspace("keyspacetodrop")
        self.session.execute("CREATE TYPE user (age int, name text)")
        self.session.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
        # Compare object identity of the underlying connection before/after
        # the DROP; a new id would mean the control connection was rebuilt.
        cc_id_pre_drop = id(self.cluster.control_connection._connection)
        self.session.execute("DROP KEYSPACE keyspacetodrop")
        cc_id_post_drop = id(self.cluster.control_connection._connection)
        self.assertEqual(cc_id_post_drop, cc_id_pre_drop)

    def test_get_control_connection_host(self):
        """
        Test to validate Cluster.get_control_connection_host() metadata

        @since 3.5.0
        @jira_ticket PYTHON-583
        @expected_result the control connection metadata should accurately reflect cluster state.

        @test_category metadata
        """
        # Before connect() there is no control connection host.
        host = self.cluster.get_control_connection_host()
        self.assertEqual(host, None)

        self.session = self.cluster.connect()
        cc_host = self.cluster.control_connection._connection.host

        host = self.cluster.get_control_connection_host()
        self.assertEqual(host.address, cc_host)
        self.assertEqual(host.is_up, True)

        # reconnect and make sure that the new host is reflected correctly
        self.cluster.control_connection._reconnect()
        new_host = self.cluster.get_control_connection_host()
        self.assertNotEqual(host, new_host)
| thelastpickle/python-driver | tests/integration/standard/test_control_connection.py | Python | apache-2.0 | 3,777 |
from Query.Operator import Operator
class Select(Operator):
    """Relational selection (filter) operator.

    Evaluates ``selectExpr`` (a Python expression string over the subplan's
    schema fields) against every input tuple and emits only the matches.
    Supports both pipelined (page-at-a-time) and batch execution.
    """

    def __init__(self, subPlan, selectExpr, **kwargs):
        super().__init__(**kwargs)
        self.subPlan = subPlan
        # Predicate source text, eval'd per tuple with schema fields in scope.
        self.selectExpr = selectExpr

    # Returns the output schema of this operator
    def schema(self):
        # Selection never changes the schema, only the number of tuples.
        return self.subPlan.schema()

    # Returns any input schemas for the operator if present
    def inputSchemas(self):
        return [self.subPlan.schema()]

    # Returns a string describing the operator type
    def operatorType(self):
        return "Select"

    # Returns child operators if present
    def inputs(self):
        return [self.subPlan]

    # Iterator abstraction for selection operator.
    def __iter__(self):
        self.initializeOutput()
        self.inputIterator = iter(self.subPlan)
        self.inputFinished = False

        # Batch mode materializes all output up front; pipelined mode
        # produces pages lazily in __next__.
        if not self.pipelined:
            self.outputIterator = self.processAllPages()

        return self

    def __next__(self):
        if self.pipelined:
            # Pull input pages until one output page is full (or input ends).
            while not(self.inputFinished or self.isOutputPageReady()):
                try:
                    pageId, page = next(self.inputIterator)
                    self.processInputPage(pageId, page)
                except StopIteration:
                    self.inputFinished = True

            return self.outputPage()

        else:
            return next(self.outputIterator)

    # Page processing and control methods

    # Page-at-a-time operator processing
    def processInputPage(self, pageId, page):
        schema = self.subPlan.schema()

        # Guard: schema field names must not collide with this method's local
        # variables, since fields are injected into the eval() environment.
        if set(locals().keys()).isdisjoint(set(schema.fields)):
            for inputTuple in page:
                # Load tuple fields into the select expression context
                selectExprEnv = self.loadSchema(schema, inputTuple)

                # Execute the predicate.
                if eval(self.selectExpr, globals(), selectExprEnv):
                    self.emitOutputTuple(inputTuple)
        else:
            raise ValueError("Overlapping variables detected with operator schema")

    # Set-at-a-time operator processing
    def processAllPages(self):
        if self.inputIterator is None:
            self.inputIterator = iter(self.subPlan)

        # Process all pages from the child operator.
        try:
            for (pageId, page) in self.inputIterator:
                self.processInputPage(pageId, page)

                # No need to track anything but the last output page when in batch mode.
                if self.outputPages:
                    self.outputPages = [self.outputPages[-1]]

        # To support pipelined operation, processInputPage may raise a
        # StopIteration exception during its work. We catch this and ignore in batch mode.
        except StopIteration:
            pass

        # Return an iterator to the output relation
        return self.storage.pages(self.relationId())

    # Plan and statistics information

    # Returns a single line description of the operator.
    def explain(self):
        return super().explain() + "(predicate='" + str(self.selectExpr) + "')"
| yliu120/dbsystem | HW3/dbsys-hw3/update/dbsys-hw3/Query/Operators/Select.py | Python | apache-2.0 | 2,892 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from pdf_crawler_test.urls import urlpatterns as pdf_crawler_test_urls
# Mount the pdf_crawler_test app's URLconf at the project root.
urlpatterns = [
    url(r'^', include(pdf_crawler_test_urls, namespace='pdf_crawler_test')),
]
| pkeeper/pdf-crawler-test | tests/urls.py | Python | mit | 292 |
from __future__ import absolute_import
from .__main__ import app
__all__ = ['app']
| msabramo/tally | tally/web/__init__.py | Python | mit | 85 |
# # stdlib
# from typing import Any
# from typing import Dict
# from typing import Iterable
# from typing import List
# from typing import Tuple
# # third party
# import pytest
# # syft absolute
# from syft.core.smpc.store import CryptoStore
# from syft.core.smpc.store import register_primitive_store_add
# from syft.core.smpc.store import register_primitive_store_get
# # Rasswanth : Fix tests after solving .get() issues
# @pytest.mark.skip
# @pytest.mark.smpc
# @register_primitive_store_get("test_crypto_store")
# def provider_test_get(
# store: Dict[str, List[Any]], nr_instances: int
# ) -> List[Tuple[int]]:
# return [store["test_key_store"][i] for i in range(nr_instances)]
# @pytest.mark.skip
# @register_primitive_store_add("test_crypto_store")
# def provider_test_add(
# store: Dict[str, List[Any]], primitives: Iterable[Any]
# ) -> List[Tuple[int]]:
# store["test_key_store"] = primitives
# @pytest.mark.skip
# def test_add_store() -> None:
# crypto_store = CryptoStore()
# primitives = list(range(100))
# crypto_store.populate_store("test_crypto_store", primitives)
# crypto_store.store["test_key_store"] == primitives
# @pytest.mark.skip
# @pytest.mark.parametrize("nr_instances", [1, 5, 7, 100])
# def test_get_store(nr_instances: int) -> None:
# crypto_store = CryptoStore()
# primitives = list(range(100))
# crypto_store.store["test_key_store"] = primitives
# primitives_store = crypto_store.get_primitives_from_store(
# "test_crypto_store", nr_instances
# )
# assert primitives[:nr_instances] == primitives_store
| OpenMined/PySyft | tests/integration/smpc/store/crypto_store_test.py | Python | apache-2.0 | 1,613 |
# -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Jinja template handler."""
from __future__ import unicode_literals
import os
import json
from collections import deque
try:
import jinja2
from jinja2 import meta
except ImportError:
jinja2 = None # NOQA
from nikola.plugin_categories import TemplateSystem
from nikola.utils import makedirs, req_missing
class JinjaTemplates(TemplateSystem):
    """Support for Jinja2 templates."""

    name = "jinja"
    lookup = None
    # Shared cache of template-name -> list of dependency file paths.
    dependency_cache = {}

    def __init__(self):
        """Initialize Jinja2 environment with extended set of filters."""
        if jinja2 is None:
            return
        self.lookup = jinja2.Environment()
        self.lookup.trim_blocks = True
        self.lookup.lstrip_blocks = True
        self.lookup.filters['tojson'] = json.dumps
        self.lookup.globals['enumerate'] = enumerate
        self.lookup.globals['isinstance'] = isinstance
        self.lookup.globals['tuple'] = tuple

    def set_directories(self, directories, cache_folder):
        """Create a new template lookup with set directories."""
        if jinja2 is None:
            req_missing(['jinja2'], 'use this theme')
        self.directories = directories
        self.create_lookup()

    def inject_directory(self, directory):
        """Add a directory to the lookup and recreate it if it's not there yet."""
        if directory not in self.directories:
            self.directories.append(directory)
            self.create_lookup()

    def create_lookup(self):
        """Create a template lookup."""
        self.lookup.loader = jinja2.FileSystemLoader(self.directories,
                                                     encoding='utf-8')

    def set_site(self, site):
        """Set the Nikola site."""
        self.site = site
        self.lookup.filters.update(self.site.config['TEMPLATE_FILTERS'])

    def render_template(self, template_name, output_name, context):
        """Render the template into output_name using context.

        Returns the rendered text; when output_name is given, also writes
        it to that path (creating parent directories as needed).

        Bug fix: the rendered string was shadowed by the open file handle
        (``with open(...) as output``), so ``output.write(output.encode(...))``
        raised AttributeError and the method returned the closed file object.
        The rendered string and the file handle now use distinct names.
        """
        if jinja2 is None:
            req_missing(['jinja2'], 'use this theme')
        template = self.lookup.get_template(template_name)
        data = template.render(**context)
        if output_name is not None:
            makedirs(os.path.dirname(output_name))
            with open(output_name, 'w+') as out_file:
                out_file.write(data.encode('utf8'))
        return data

    def render_template_to_string(self, template, context):
        """Render template to a string using context."""
        return self.lookup.from_string(template).render(**context)

    def template_deps(self, template_name):
        """Generate list of dependencies for a template."""
        # Cache the lists of dependencies for each template name.
        if self.dependency_cache.get(template_name) is None:
            # Use a breadth-first search to find all templates this one
            # depends on.
            queue = deque([template_name])
            visited_templates = set([template_name])
            deps = []
            while len(queue) > 0:
                curr = queue.popleft()
                source, filename = self.lookup.loader.get_source(self.lookup,
                                                                 curr)[:2]
                deps.append(filename)
                ast = self.lookup.parse(source)
                dep_names = meta.find_referenced_templates(ast)
                for dep_name in dep_names:
                    if (dep_name not in visited_templates and dep_name is not None):
                        visited_templates.add(dep_name)
                        queue.append(dep_name)
            self.dependency_cache[template_name] = deps
        return self.dependency_cache[template_name]
| masayuko/nikola | nikola/plugins/template/jinja.py | Python | mit | 4,861 |
import hashlib
import sha
from datetime import datetime, timedelta
from random import random

from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.db import models, IntegrityError
from django.template.loader import render_to_string

from emailconfirmation.utils import get_send_mail
send_mail = get_send_mail()
# this code based in-part on django-registration
class EmailAddressManager(models.Manager):
    """Manager helpers for creating and querying EmailAddress rows."""

    def add_email(self, user, email):
        # Create the address and kick off the confirmation e-mail.
        # Returns None when (user, email) already exists (unique_together).
        try:
            email_address = self.create(user=user, email=email)
            EmailConfirmation.objects.send_confirmation(email_address)
            return email_address
        except IntegrityError:
            return None

    def get_primary(self, user):
        # The user's primary address, or None if none is marked primary.
        try:
            return self.get(user=user, primary=True)
        except EmailAddress.DoesNotExist:
            return None

    def get_users_for(self, email):
        """
        returns a list of users with the given email.
        """
        # this is a list rather than a generator because we probably want to do a len() on it right away
        return [address.user for address in EmailAddress.objects.filter(verified=True, email=email)]
class EmailAddress(models.Model):
    """An e-mail address attached to a user; at most one is primary."""

    user = models.ForeignKey(User)
    email = models.EmailField()
    verified = models.BooleanField(default=False)
    primary = models.BooleanField(default=False)

    objects = EmailAddressManager()

    def set_as_primary(self, conditional=False):
        # Promote this address to primary; with conditional=True, only when
        # no primary exists yet. Also syncs user.email to this address.
        old_primary = EmailAddress.objects.get_primary(self.user)
        if old_primary:
            if conditional:
                return False
            old_primary.primary = False
            old_primary.save()
        self.primary = True
        self.save()
        self.user.email = self.email
        self.user.save()
        return True

    def __unicode__(self):
        return u"%s (%s)" % (self.email, self.user)

    class Meta:
        unique_together = (
            ("user", "email"),
        )
class EmailConfirmationManager(models.Manager):
    """Creates, sends, verifies and expires e-mail confirmation keys."""

    def confirm_email(self, confirmation_key):
        # Look up the key; on success mark the address verified and make it
        # primary if the user has no primary yet. Returns the EmailAddress,
        # or None for an unknown key (expired keys fall through to None too).
        try:
            confirmation = self.get(confirmation_key=confirmation_key)
        except self.model.DoesNotExist:
            return None
        if not confirmation.key_expired():
            email_address = confirmation.email_address
            email_address.verified = True
            email_address.set_as_primary(conditional=True)
            email_address.save()
            return email_address

    def send_confirmation(self, email_address):
        # Build a random SHA-1 confirmation key and e-mail the activation link.
        # Modernization: hashlib.sha1 replaces the deprecated `sha` module
        # (removed in Python 3); the digests are identical.
        salt = hashlib.sha1(str(random())).hexdigest()[:5]
        confirmation_key = hashlib.sha1(salt + email_address.email).hexdigest()
        current_site = Site.objects.get_current()
        activate_url = u"http://%s%s" % (
            unicode(current_site.domain),
            reverse("emailconfirmation.views.confirm_email", args=(confirmation_key,))
        )
        context = {
            "user": email_address.user,
            "activate_url": activate_url,
            "current_site": current_site,
            "confirmation_key": confirmation_key,
        }
        subject = render_to_string("emailconfirmation/email_confirmation_subject.txt", context)
        message = render_to_string("emailconfirmation/email_confirmation_message.txt", context)
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [email_address.email], priority="high")
        return self.create(email_address=email_address, sent=datetime.now(), confirmation_key=confirmation_key)

    def delete_expired_confirmations(self):
        # Housekeeping: purge every confirmation whose key has expired.
        for confirmation in self.all():
            if confirmation.key_expired():
                confirmation.delete()
class EmailConfirmation(models.Model):
    """A pending confirmation key sent to an EmailAddress."""

    email_address = models.ForeignKey(EmailAddress)
    sent = models.DateTimeField()
    confirmation_key = models.CharField(max_length=40)

    objects = EmailConfirmationManager()

    def key_expired(self):
        # Keys are valid for settings.EMAIL_CONFIRMATION_DAYS after sending.
        expiration_date = self.sent + timedelta(days=settings.EMAIL_CONFIRMATION_DAYS)
        return expiration_date <= datetime.now()
    key_expired.boolean = True

    def __unicode__(self):
        return u"confirmation for %s" % self.email_address
| davemerwin/blue-channel | external_apps/emailconfirmation/models.py | Python | bsd-3-clause | 4,360 |
import os
import socket
import subprocess
import sys
host = '127.0.0.1'
port = 443
def connect():
# Create socket & connect to server
global s
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error:
print "[-] Cannot create socket."
try:
s.connect((host, port))
except socket.error:
print "[-] Cannot connect to server."
# Interactive Shell (For Hidden, shell=False)
def main():
global srv_cmd
srv_cmd = s.recv(4096)
print "waiting for cmd."
while True:
if srv_cmd == "1":
uname = os.popen("uname -a").read()
print "Sending Results."
try:
s.send(uname)
print uname
except socket.error:
print "Cannot Send ID"
exit()
elif srv_cmd == "2":
print "[+] Target ID: "
s.close()
exit()
s.close()
# Entry point: establish the connection, then serve commands until told to stop.
connect()
main()
| Freshnuts/Multiprocessing-Practice | mp_client.py | Python | gpl-3.0 | 961 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import pytest
from qutebrowser.utils import usertypes
from PyQt5.QtCore import Qt, QObject, pyqtSignal
class FakeKeyparser(QObject):

    """A fake BaseKeyParser which doesn't handle anything."""

    # Signal required by the ModeManager interface; this fake never emits it.
    request_leave = pyqtSignal(usertypes.KeyMode, str)

    def __init__(self):
        super().__init__()
        # presumably controls whether unhandled keys are passed through to
        # the underlying widget -- False here; verify against ModeManager.
        self.passthrough = False

    def handle(self, evt):
        """Pretend to process *evt*; always report it as unhandled."""
        return False
@pytest.fixture
def modeman(mode_manager):
    """mode_manager with a FakeKeyparser registered for normal mode."""
    mode_manager.register(usertypes.KeyMode.normal, FakeKeyparser())
    return mode_manager
@pytest.mark.parametrize('key, modifiers, text, filtered', [
    (Qt.Key_A, Qt.NoModifier, 'a', True),
    (Qt.Key_Up, Qt.NoModifier, '', False),
    # https://github.com/The-Compiler/qutebrowser/issues/1207
    (Qt.Key_A, Qt.ShiftModifier, 'A', True),
    (Qt.Key_A, Qt.ShiftModifier | Qt.ControlModifier, 'x', False),
])
def test_non_alphanumeric(key, modifiers, text, filtered,
                          fake_keyevent_factory, modeman):
    """Make sure non-alphanumeric keys are passed through correctly."""
    evt = fake_keyevent_factory(key=key, modifiers=modifiers, text=text)
    # eventFilter returns True when the event was consumed (filtered out).
    assert modeman.eventFilter(evt) == filtered
| halfwit/qutebrowser | tests/unit/keyinput/test_modeman.py | Python | gpl-3.0 | 1,957 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CollectiveAllReduceStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import cross_tower_utils
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import training_util
class CollectiveAllReduceStrategyTestBase(
    multi_worker_test_base.MultiWorkerTestBase):
  """Shared helpers for CollectiveAllReduceStrategy tests (local and multi-worker)."""

  # Bumped per test (see setUp) so collective-op keys are never reused.
  collective_key_base = 0

  def setUp(self):
    self._run_options = config_pb2.RunOptions()
    self._run_options.experimental.collective_graph_key = 6

    self._sess_config = config_pb2.ConfigProto()

    # We use a different key_base for each test so that collective keys won't be
    # reused.
    # TODO(yuefengz, tucker): enable it to reuse collective keys in different
    # tests.
    CollectiveAllReduceStrategyTestBase.collective_key_base += 100000
    super(CollectiveAllReduceStrategyTestBase, self).setUp()

  def _get_test_object(self, task_type, task_id, num_gpus=0):
    """Return (strategy, session_target) configured for the given task.

    With no task_type/task_id, returns a locally-targeted strategy
    (empty session target).
    """
    distribution = collective_all_reduce_strategy.CollectiveAllReduceStrategy(
        num_gpus_per_worker=num_gpus)
    if task_type and task_id is not None:
      distribution.configure(
          session_config=self._sess_config,
          cluster_spec=self._cluster_spec,
          task_type=task_type,
          task_id=task_id)
    # Key ranges are offset by collective_key_base so different tests never
    # collide on collective instance/group keys.
    collective_keys = cross_tower_utils.CollectiveKeys(
        group_key_start=10 * num_gpus +
        CollectiveAllReduceStrategyTestBase.collective_key_base,
        instance_key_start=num_gpus * 100 +
        CollectiveAllReduceStrategyTestBase.collective_key_base,
        instance_key_with_id_start=num_gpus * 10000 +
        CollectiveAllReduceStrategyTestBase.collective_key_base)
    distribution._collective_keys = collective_keys
    distribution._cross_tower_ops._collective_keys = collective_keys
    if task_type and task_id is not None:
      return distribution, 'grpc://' + self._cluster_spec[task_type][task_id]
    else:
      return distribution, ''

  def _test_minimize_loss_graph(self, task_type, task_id, num_gpus):
    """Train a 1-weight linear model and assert the error decreases."""
    d, master_target = self._get_test_object(task_type, task_id, num_gpus)
    with ops.Graph().as_default(), \
         self.test_session(config=self._sess_config,
                           target=master_target) as sess, \
         d.scope():
      l = core.Dense(1, use_bias=False, name='gpu_%d' % d._num_gpus_per_worker)

      def loss_fn(x):
        y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
        return y * y

      # TODO(yuefengz, apassos): eager.backprop.implicit_grad is not safe for
      # multiple graphs (b/111216820).
      def grad_fn(x):
        loss = loss_fn(x)
        var_list = (
            variables.trainable_variables() + ops.get_collection(
                ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
        grads = gradients.gradients(loss, var_list)
        ret = list(zip(grads, var_list))
        return ret

      def update(v, g):
        return v.assign_sub(0.05 * g, use_locking=True)

      one = d.broadcast(constant_op.constant([[1.]]))

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.call_for_each_tower(grad_fn, one)
        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.read_var(v)
          before_list.append(fetched)
          with ops.control_dependencies([fetched]):
            # TODO(yuefengz): support non-Mirrored variable as destinations.
            g = d.reduce(
                variable_scope.VariableAggregation.SUM, g, destinations=v)
            with ops.control_dependencies(
                d.update(v, update, g, grouped=False)):
              after_list.append(d.read_var(v))
        return before_list, after_list

      before_out, after_out = step()

      # Skip the run when this host has fewer GPUs than the variant requires.
      if context.num_gpus() < d._num_gpus_per_worker:
        return True

      sess.run(
          variables.global_variables_initializer(), options=self._run_options)

      for i in range(10):
        b, a = sess.run((before_out, after_out), options=self._run_options)
        if i == 0:
          before, = b
        after, = a

      error_before = abs(before - 1)
      error_after = abs(after - 1)
      # Error should go down
      self.assertLess(error_after, error_before)
      return error_after < error_before

  def _test_complex_model(self, task_type, task_id, num_gpus):
    """Run one training step of a small conv net on synthetic MNIST-shaped data."""
    d, master_target = self._get_test_object(task_type, task_id, num_gpus)

    def model_fn():
      """Mnist model with synthetic input."""
      data_format = 'channels_last'
      input_shape = [28, 28, 1]
      l = keras.layers
      max_pool = l.MaxPooling2D((2, 2), (2, 2),
                                padding='same',
                                data_format=data_format)
      model = keras.Sequential([
          l.Reshape(target_shape=input_shape, input_shape=(28 * 28,)),
          l.Conv2D(
              32,
              5,
              padding='same',
              data_format=data_format,
              activation=nn.relu), max_pool,
          l.Conv2D(
              64,
              5,
              padding='same',
              data_format=data_format,
              activation=nn.relu), max_pool,
          l.Flatten(),
          l.Dense(1024, activation=nn.relu),
          l.Dropout(0.4),
          l.Dense(10)
      ])
      image = random_ops.random_uniform([2, 28, 28])
      label = random_ops.random_uniform([2, 1], maxval=10, dtype=dtypes.int32)
      logits = model(image, training=True)
      loss = losses.sparse_softmax_cross_entropy(labels=label, logits=logits)
      optimizer = adam.AdamOptimizer(learning_rate=1e-4)
      train_op = optimizer.minimize(loss,
                                    training_util.get_or_create_global_step())
      return train_op

    with ops.Graph().as_default(), \
         self.test_session(config=self._sess_config,
                           target=master_target) as sess:
      with d.scope():
        train_op = d.call_for_each_tower(model_fn)
        train_op = d.group(d.unwrap(train_op))

      sess.run(variables.global_variables_initializer())
      sess.run(train_op)
      return True

  def _test_variable_initialization(self, task_type, task_id, num_gpus):
    """Check a randomly initialized variable agrees with its MEAN-reduced value."""
    distribution, master_target = self._get_test_object(task_type, task_id,
                                                        num_gpus)
    with ops.Graph().as_default(), \
         self.test_session(config=self._sess_config,
                           target=master_target) as sess, \
         distribution.scope():

      def model_fn():
        x = variable_scope.get_variable(
            'x',
            shape=(2, 3),
            initializer=init_ops.random_uniform_initializer(
                1.0, 10.0, dtype=dtypes.float32))
        return array_ops.identity(x)

      x = distribution.call_for_each_tower(model_fn)
      reduced_x = distribution.unwrap(
          distribution.reduce(
              variable_scope.VariableAggregation.MEAN, x,
              destinations='/cpu:0'))[0]
      x = distribution.unwrap(x)[0]

      sess.run(
          variables.global_variables_initializer(), options=self._run_options)

      x_value, reduced_x_value = sess.run(
          [x, reduced_x], options=self._run_options)
      # All replicas should hold the same value, so the mean equals each copy.
      self.assertTrue(
          np.allclose(x_value, reduced_x_value, atol=1e-5),
          msg=('x_value = %r, reduced_x_value = %r' % (x_value,
                                                       reduced_x_value)))
      return np.allclose(x_value, reduced_x_value, atol=1e-5)
class DistributedCollectiveAllReduceStrategyTest(
    CollectiveAllReduceStrategyTestBase, parameterized.TestCase):
  """Multi-worker tests: 3 workers, no chief."""

  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 3 workers."""
    cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
        num_workers=3, num_ps=0)

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testMinimizeLossGraph(self, num_gpus):
    self._run_between_graph_clients(self._test_minimize_loss_graph,
                                    self._cluster_spec, num_gpus)

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testVariableInitialization(self, num_gpus):
    # Skip when the host has fewer GPUs than this parameterization needs.
    if context.num_gpus() < num_gpus:
      return
    self._run_between_graph_clients(
        self._test_variable_initialization,
        self._cluster_spec,
        num_gpus=num_gpus)

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testComplexModel(self, num_gpus):
    if context.num_gpus() < num_gpus:
      return
    self._run_between_graph_clients(
        self._test_complex_model, self._cluster_spec, num_gpus=num_gpus)
class DistributedCollectiveAllReduceStrategyTestWithChief(
    CollectiveAllReduceStrategyTestBase, parameterized.TestCase):
  """Same multi-worker tests, but with a chief task in the cluster."""

  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 3 workers and 1 chief."""
    cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
        num_workers=3, num_ps=0, has_chief=True)

  def setUp(self):
    super(DistributedCollectiveAllReduceStrategyTestWithChief, self).setUp()
    # Distinct graph key from the chief-less suite to keep collectives separate.
    self._run_options.experimental.collective_graph_key = 7

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testMinimizeLossGraph(self, num_gpus):
    self._run_between_graph_clients(self._test_minimize_loss_graph,
                                    self._cluster_spec, num_gpus)

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testVariableInitialization(self, num_gpus):
    if context.num_gpus() < num_gpus:
      return
    self._run_between_graph_clients(
        self._test_variable_initialization,
        self._cluster_spec,
        num_gpus=num_gpus)

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testComplexModel(self, num_gpus):
    if context.num_gpus() < num_gpus:
      return
    self._run_between_graph_clients(
        self._test_complex_model, self._cluster_spec, num_gpus=num_gpus)
class LocalCollectiveAllReduceStrategy(
    CollectiveAllReduceStrategyTestBase, parameterized.TestCase):
  """Single-worker tests (no cluster spec): task_type/task_id are None."""

  def testMinimizeLossGraph(self, num_gpus=2):
    # Collective ops doesn't support strategy with one device.
    if context.num_gpus() < num_gpus:
      return
    self._test_minimize_loss_graph(None, None, num_gpus)

  def testComplexModel(self, num_gpus=2):
    # Collective ops doesn't support strategy with one device.
    if context.num_gpus() < num_gpus:
      return
    self._test_complex_model(None, None, num_gpus)
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  test.main()
| snnn/tensorflow | tensorflow/contrib/distribute/python/collective_all_reduce_strategy_test.py | Python | apache-2.0 | 12,633 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Add the Invitation model and widen PinnedCustomField.field_type choices."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('cbh_core_model', '0023_auto_20151105_1850'),
    ]

    operations = [
        migrations.CreateModel(
            name='Invitation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
                ('email', models.CharField(unique=True, max_length=100)),
                ('first_name', models.TextField(default=b'', null=True, blank=True)),
                ('last_name', models.TextField(default=b'', null=True, blank=True)),
                ('created_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
                ('projects', models.ManyToManyField(to='cbh_core_model.Project', blank=True)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'abstract': False,
                'get_latest_by': 'modified',
            },
            bases=(models.Model,),
        ),
        migrations.AlterField(
            model_name='pinnedcustomfield',
            name='field_type',
            field=models.CharField(default=b'char', max_length=15, choices=[(b'text', b'Short text field'), (b'char', b'Short text field'), (b'textarea', b'Full text'), (b'uiselect', b'Choice field'), (b'integer', b'Integer field'), (b'number', b'Decimal field'), (b'uiselecttag', b'Choice allowing create'), (b'uiselecttags', b'Tags field allowing create'), (b'percentage', b'Percentage field'), (b'date', b'Date Field'), (b'href', b'Link to server or external'), (b'imghref', b'Image link to embed'), (b'decimal', b'Decimal field'), (b'boolean', b'checkbox'), (b'related', b'TEST')]),
            preserve_default=True,
        ),
    ]
| thesgc/chembiohub_ws | cbh_core_model/migrations/0024_auto_20151203_1058.py | Python | gpl-3.0 | 2,359 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.dispatch import Signal
# Emitted whenever a shop product must be (re)indexed into the catalog.
# Receivers get the instance via the `shop_product` keyword argument.
index_catalog_shop_product = Signal(providing_args=["shop_product"])
| shoopio/shoop | shuup/core/catalog/signals.py | Python | agpl-3.0 | 407 |
import cv2
import io
import base64
import numpy as np
import pandas as pd
from subprocess import Popen, PIPE
class VideoAnalysis(object):
    """
    Video-level analysis helpers (scene cuts, shake, blur, ...).

    TODO:
        - Define common interfaces on similar functions
        - Define what format video will come in as
        - Probably want a preprocessed dataframe with image, time, (and maybe features)
    """

    def __init__(self):
        """Default constructor: start with an empty feature table."""
        self.features = pd.DataFrame()

    @staticmethod
    def detect_cut(video_file_path, time_pd_series):
        """
        Detect where the scene boundaries ("cuts") are in the video.
        Uses the FFMPEG/ffprobe utility via a helper bash script.

        Args:
            video_file_path: Path to video file
            time_pd_series: pandas series with timestamps for every frame

        Returns:
            A dataframe labeling each frame (0/1) as to whether it is a
            scene boundary.
        """
        ## Create output array, initialize with all zeros
        time_df = pd.DataFrame(time_pd_series)
        time_df.columns = ["time"]
        out_df = time_df.copy()
        out_df['is_scene_transition'] = 0

        ## Use the bash script to call ffprobe, a utility for detecting scene changes
        p = Popen(["bash", "ffprobe_script.bash", video_file_path],
                  stdout=PIPE, stderr=PIPE)
        output, err = p.communicate()

        # Create a dataframe of scene change times (the first two tokens of
        # the ffprobe output are headers, hence the [2:]).
        scene_trans_df = pd.DataFrame(output.split()[2:])

        ## Check that scene transitions occur
        if not scene_trans_df.empty:
            scene_trans_df.columns = ["time"]
            scene_trans_df.time = scene_trans_df.time.apply(float)
            for scene_time in scene_trans_df.time:
                # Position of the frame whose timestamp is closest to the cut.
                # argsort() yields *positions*, so use .iloc (the original
                # used DataFrame.ix, which was deprecated and then removed
                # from pandas).
                pos = int((time_df.time - scene_time).abs().argsort().iloc[0])
                index = out_df.index[pos]
                # .loc avoids chained assignment, which writes to a temporary
                # copy on modern pandas instead of out_df itself.
                out_df.loc[index, 'is_scene_transition'] = 1
        return out_df

    @staticmethod
    def detect_shake(video):
        '''
        Shake detection (not yet implemented).
        '''
        pass

    @staticmethod
    def detect_blur(video):
        '''
        Detect blur (not yet implemented).
        '''
        pass

    @staticmethod
    def optical_flow(video):
        '''
        Optical flow - useful for preprocessing
        Should this go here? Or should this be in preprocessing???
        '''
        pass

    @staticmethod
    def synchrony(video):
        '''
        Audio/Visual Synchrony (not yet implemented).
        '''
        pass

    @staticmethod
    def find_faces(video):
        '''
        Find faces in the images (not yet implemented).
        '''
        pass
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp import Message
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import MatchXPath
from sleekxmpp.plugins.base import BasePlugin
from sleekxmpp.plugins.xep_0107 import stanza, UserMood
log = logging.getLogger(__name__)
class XEP_0107(BasePlugin):

    """
    XEP-0107: User Mood
    """

    name = 'xep_0107'
    description = 'XEP-0107: User Mood'
    # User Mood rides on PEP (XEP-0163) for publishing/notifications.
    dependencies = set(['xep_0163'])
    stanza = stanza

    def plugin_init(self):
        # Allow UserMood payloads to be read from / attached to Message stanzas.
        register_stanza_plugin(Message, UserMood)

    def plugin_end(self):
        # Withdraw the disco feature and stop receiving PEP notifications.
        self.xmpp['xep_0030'].del_feature(feature=UserMood.namespace)
        self.xmpp['xep_0163'].remove_interest(UserMood.namespace)

    def session_bind(self, jid):
        self.xmpp['xep_0163'].register_pep('user_mood', UserMood)

    def publish_mood(self, value=None, text=None, options=None,
                     ifrom=None, block=True, callback=None, timeout=None):
        """
        Publish the user's current mood.

        Arguments:
            value    -- The name of the mood to publish.
            text     -- Optional natural-language description or reason
                        for the mood.
            options  -- Optional form of publish options.
            ifrom    -- Specify the sender's JID.
            block    -- Specify if the send call will block until a response
                        is received, or a timeout occurs. Defaults to True.
            timeout  -- The length of time (in seconds) to wait for a response
                        before exiting the send call if blocking is used.
                        Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
            callback -- Optional reference to a stream handler function. Will
                        be executed when a reply stanza is received.
        """
        mood = UserMood()
        mood['value'] = value
        mood['text'] = text
        return self.xmpp['xep_0163'].publish(mood,
                                             node=UserMood.namespace,
                                             options=options,
                                             ifrom=ifrom,
                                             block=block,
                                             callback=callback,
                                             timeout=timeout)

    def stop(self, ifrom=None, block=True, callback=None, timeout=None):
        """
        Clear existing user mood information to stop notifications.

        Arguments:
            ifrom    -- Specify the sender's JID.
            block    -- Specify if the send call will block until a response
                        is received, or a timeout occurs. Defaults to True.
            timeout  -- The length of time (in seconds) to wait for a response
                        before exiting the send call if blocking is used.
                        Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
            callback -- Optional reference to a stream handler function. Will
                        be executed when a reply stanza is received.
        """
        # Publishing an empty UserMood payload retracts the current mood.
        mood = UserMood()
        return self.xmpp['xep_0163'].publish(mood,
                                             node=UserMood.namespace,
                                             ifrom=ifrom,
                                             block=block,
                                             callback=callback,
                                             timeout=timeout)
| danielvdao/facebookMacBot | venv/lib/python2.7/site-packages/sleekxmpp/plugins/xep_0107/user_mood.py | Python | mit | 3,431 |
# Public names re-exported by this module; extended further below with the
# contents of umath and numerictypes via extend_all().
__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
           'arange', 'array', 'zeros', 'count_nonzero',
           'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile',
           'frombuffer', 'int_asbuffer', 'where', 'argwhere', 'copyto',
           'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops',
           'can_cast', 'promote_types', 'min_scalar_type', 'result_type',
           'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
           'isfortran', 'empty_like', 'zeros_like', 'ones_like',
           'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot',
           'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
           'array2string', 'get_printoptions', 'set_printoptions',
           'array_repr', 'array_str', 'set_string_function',
           'little_endian', 'require',
           'fromiter', 'array_equal', 'array_equiv',
           'indices', 'fromfunction', 'isclose',
           'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
           'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
           'seterr', 'geterr', 'setbufsize', 'getbufsize',
           'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
           'Inf', 'inf', 'infty', 'Infinity',
           'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
           'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
           'ComplexWarning']
import sys
import warnings
import multiarray
import umath
from umath import *
import numerictypes
from numerictypes import *
from numpy.testing.utils import WarningManager
# Python 2 only: raw buffer helpers that no longer exist on Python 3.
if sys.version_info[0] < 3:
    __all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
    """
    The warning raised when casting a complex dtype to a real dtype.

    As implemented, casting a complex number to a real discards its imaginary
    part, but this behavior may not be what the user actually wants.
    """
    # Marker subclass only; all behavior comes from RuntimeWarning.
    pass
# bitwise_not is an alias for the invert ufunc.
bitwise_not = invert

# Out-of-bounds index modes and C-level limits re-exported from multiarray.
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE

# Core array types and helpers re-exported from multiarray.
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
nditer = multiarray.nditer
nested_iters = multiarray.nested_iters
broadcast = multiarray.broadcast
dtype = multiarray.dtype
copyto = multiarray.copyto
# Any ufunc instance works here; sin is just a convenient one.
ufunc = type(sin)
def zeros_like(a, dtype=None, order='K', subok=True):
    """
    Return an array of zeros with the same shape and type as a given array.

    With default parameters, is equivalent to ``a.copy().fill(0)``.

    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
    order : {'C', 'F', 'A', or 'K'}, optional
        Overrides the memory layout of the result. 'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise. 'K' means match the layout of `a` as closely
        as possible.
    subok : bool, optional
        If True, the returned array uses the subclass type of `a`.

    Returns
    -------
    out : ndarray
        Array of zeros with the same shape and type as `a`.

    See Also
    --------
    ones_like : Return an array of ones with shape and type of input.
    empty_like : Return an empty array with shape and type of input.
    zeros : Return a new array setting values to zero.
    ones : Return a new array setting values to one.
    empty : Return a new uninitialized array.

    Examples
    --------
    >>> x = np.arange(6)
    >>> x = x.reshape((2, 3))
    >>> x
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> np.zeros_like(x)
    array([[0, 0, 0],
           [0, 0, 0]])

    >>> y = np.arange(3, dtype=np.float)
    >>> y
    array([ 0.,  1.,  2.])
    >>> np.zeros_like(y)
    array([ 0.,  0.,  0.])

    """
    res = empty_like(a, dtype=dtype, order=order, subok=subok)
    # 'unsafe' casting is fine: 0 is representable in every dtype.
    multiarray.copyto(res, 0, casting='unsafe')
    return res
def ones(shape, dtype=None, order='C'):
    """
    Return a new array of given shape and type, filled with ones.

    Please refer to the documentation for `zeros` for further details.

    See Also
    --------
    zeros, ones_like

    Examples
    --------
    >>> np.ones(5)
    array([ 1.,  1.,  1.,  1.,  1.])

    >>> np.ones((5,), dtype=np.int)
    array([1, 1, 1, 1, 1])

    >>> np.ones((2, 1))
    array([[ 1.],
           [ 1.]])

    >>> s = (2,2)
    >>> np.ones(s)
    array([[ 1.,  1.],
           [ 1.,  1.]])

    """
    # Allocate uninitialized storage, then fill it; 'unsafe' casting is fine
    # because 1 is representable in every dtype.
    a = empty(shape, dtype, order)
    multiarray.copyto(a, 1, casting='unsafe')
    return a
def ones_like(a, dtype=None, order='K', subok=True):
    """
    Return an array of ones with the same shape and type as a given array.

    With default parameters, is equivalent to ``a.copy().fill(1)``.

    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
    order : {'C', 'F', 'A', or 'K'}, optional
        Overrides the memory layout of the result. 'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise. 'K' means match the layout of `a` as closely
        as possible.
    subok : bool, optional
        If True, the returned array uses the subclass type of `a`.

    Returns
    -------
    out : ndarray
        Array of ones with the same shape and type as `a`.

    See Also
    --------
    zeros_like : Return an array of zeros with shape and type of input.
    empty_like : Return an empty array with shape and type of input.
    zeros : Return a new array setting values to zero.
    ones : Return a new array setting values to one.
    empty : Return a new uninitialized array.

    Examples
    --------
    >>> x = np.arange(6)
    >>> x = x.reshape((2, 3))
    >>> x
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> np.ones_like(x)
    array([[1, 1, 1],
           [1, 1, 1]])

    >>> y = np.arange(3, dtype=np.float)
    >>> y
    array([ 0.,  1.,  2.])
    >>> np.ones_like(y)
    array([ 1.,  1.,  1.])

    """
    res = empty_like(a, dtype=dtype, order=order, subok=subok)
    # 'unsafe' casting is fine: 1 is representable in every dtype.
    multiarray.copyto(res, 1, casting='unsafe')
    return res
def extend_all(module):
    """Append to ``__all__`` every public name exported by *module* that
    ``__all__`` does not already contain.

    Uses ``module.__all__`` when defined, otherwise every attribute whose
    name does not start with an underscore.
    """
    # A set gives O(1) membership tests; the original built a dict with
    # dummy values for the same purpose.
    existing = set(__all__)
    try:
        mall = getattr(module, '__all__')
    except AttributeError:
        mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
    for a in mall:
        if a not in existing:
            __all__.append(a)
# Fold the public names of the C-implemented submodules into __all__.
extend_all(umath)
extend_all(numerictypes)

newaxis = None

# Array construction and manipulation primitives from multiarray.
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
count_nonzero = multiarray.count_nonzero
empty = multiarray.empty
empty_like = multiarray.empty_like
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
if sys.version_info[0] < 3:
    # Python 2 only: raw buffer helpers removed in Python 3.
    newbuffer = multiarray.newbuffer
    getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
promote_types = multiarray.promote_types
min_scalar_type = multiarray.min_scalar_type
result_type = multiarray.result_type
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
einsum = multiarray.einsum
def asarray(a, dtype=None, order=None):
    """
    Convert the input to an array.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.  This
        includes lists, lists of tuples, tuples, tuples of tuples, tuples
        of lists and ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('F' for FORTRAN)
        memory representation.  Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is performed if the input
        is already an ndarray.  If `a` is a subclass of ndarray, a base
        class ndarray is returned.

    See Also
    --------
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
    fromiter : Create an array from an iterator.
    fromfunction : Construct an array by executing a function on grid
                   positions.

    Examples
    --------
    Convert a list into an array:

    >>> a = [1, 2]
    >>> np.asarray(a)
    array([1, 2])

    Existing arrays are not copied:

    >>> a = np.array([1, 2])
    >>> np.asarray(a) is a
    True

    If `dtype` is set, array is copied only if dtype does not match:

    >>> a = np.array([1, 2], dtype=np.float32)
    >>> np.asarray(a, dtype=np.float32) is a
    True
    >>> np.asarray(a, dtype=np.float64) is a
    False

    Contrary to `asanyarray`, ndarray subclasses are not passed through:

    >>> issubclass(np.matrix, np.ndarray)
    True
    >>> a = np.matrix([[1, 2]])
    >>> np.asarray(a) is a
    False
    >>> np.asanyarray(a) is a
    True

    """
    # copy=False makes this a no-op for matching ndarray input.
    return array(a, dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray, but pass ndarray subclasses through.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.  This
        includes scalars, lists, lists of tuples, tuples, tuples of tuples,
        tuples of lists, and ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('F') memory
        representation.  Defaults to 'C'.

    Returns
    -------
    out : ndarray or an ndarray subclass
        Array interpretation of `a`.  If `a` is an ndarray or a subclass
        of ndarray, it is returned as-is and no copy is performed.

    See Also
    --------
    asarray : Similar function which always returns ndarrays.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    asarray_chkfinite : Similar function which checks input for NaNs and
                        Infs.
    fromiter : Create an array from an iterator.
    fromfunction : Construct an array by executing a function on grid
                   positions.

    Examples
    --------
    Convert a list into an array:

    >>> a = [1, 2]
    >>> np.asanyarray(a)
    array([1, 2])

    Instances of `ndarray` subclasses are passed through as-is:

    >>> a = np.matrix([1, 2])
    >>> np.asanyarray(a) is a
    True

    """
    # subok=True is the only difference from asarray: subclasses survive.
    return array(a, dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array in memory (C order).

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of returned array.

    Returns
    -------
    out : ndarray
        Contiguous array of same shape and content as `a`, with type `dtype`
        if specified.

    See Also
    --------
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> np.ascontiguousarray(x, dtype=np.float32)
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.]], dtype=float32)
    >>> x.flags['C_CONTIGUOUS']
    True

    """
    # ndmin=1 guarantees at least a 1-d result even for scalar input.
    return array(a, dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
    """
    Return an array laid out in Fortran order in memory.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : ndarray
        The input `a` in Fortran, or column-major, order.

    See Also
    --------
    ascontiguousarray : Convert input to a contiguous (C order) array.
    asanyarray : Convert input to an ndarray with either row or
        column-major memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> y = np.asfortranarray(x)
    >>> x.flags['F_CONTIGUOUS']
    False
    >>> y.flags['F_CONTIGUOUS']
    True

    """
    # ndmin=1 guarantees at least a 1-d result even for scalar input.
    return array(a, dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    Useful for guaranteeing that an array has the correct flags before
    handing it to compiled code (for example through ctypes).

    Parameters
    ----------
    a : array_like
        The object to be converted to a type-and-requirement-satisfying
        array.
    dtype : data-type
        The required data-type.
    requirements : str or list of str
        Any of the following (short or long form):

        * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
        * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
        * 'ALIGNED' ('A')      - ensure a data-type aligned array
        * 'WRITEABLE' ('W')    - ensure a writable array
        * 'OWNDATA' ('O')      - ensure an array that owns its own data

    See Also
    --------
    asarray, asanyarray, ascontiguousarray, asfortranarray
    ndarray.flags : Information about the memory layout of the array.

    Notes
    -----
    The returned array is guaranteed to have the listed requirements by
    making a copy if needed.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
    >>> y.flags['F_CONTIGUOUS']
    True

    """
    if not requirements:
        return asanyarray(a, dtype=dtype)
    requirements = [req.upper() for req in requirements]

    # 'ENSUREARRAY'/'E' demands a base-class ndarray, not a subclass.
    subok = 'ENSUREARRAY' not in requirements and 'E' not in requirements
    arr = array(a, dtype=dtype, copy=False, subok=subok)

    # Memory-layout character handed to ndarray.copy() should a copy
    # turn out to be necessary below.
    order = 'A'
    if ('FORTRAN' in requirements or 'F_CONTIGUOUS' in requirements
            or 'F' in requirements):
        order = 'F'
    elif ('CONTIGUOUS' in requirements or 'C_CONTIGUOUS' in requirements
            or 'C' in requirements):
        order = 'C'

    for requirement in requirements:
        if not arr.flags[requirement]:
            # One copy with the chosen order satisfies every flag at once.
            return arr.copy(order)
    return arr
def isfortran(a):
    """
    Return True if the array is Fortran-ordered in memory and has
    dimension > 1.

    Parameters
    ----------
    a : ndarray
        Input array.

    Examples
    --------
    >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
    >>> np.isfortran(b)
    True
    >>> np.isfortran(np.array([1, 2], order='FORTRAN'))
    False

    """
    # ``fnc`` stands for "Fortran-contiguous, Not C-contiguous"; 1-D
    # arrays are both C- and Fortran-contiguous, so they report False.
    return a.flags.fnc
def argwhere(a):
    """
    Find the indices of array elements that are non-zero, grouped by
    element.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    index_array : ndarray
        Indices of elements that are non-zero, one row per element.

    See Also
    --------
    where, nonzero

    Notes
    -----
    ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
    The output is not suitable for indexing arrays; use ``where(a)``
    for that purpose.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> np.argwhere(x>1)
    array([[0, 2],
           [1, 0],
           [1, 1],
           [1, 2]])

    """
    # nonzero() gives one index array per dimension; transposing turns
    # that into one row of coordinates per non-zero element.
    nz = asanyarray(a).nonzero()
    return transpose(nz)
def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of `a`.

    Equivalent to ``a.ravel().nonzero()[0]``.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    res : ndarray
        Indices of the non-zero elements of ``a.ravel()``.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input
              array.
    ravel : Return a 1-D array containing the elements of the input
            array.

    Examples
    --------
    >>> x = np.arange(-2, 3)
    >>> np.flatnonzero(x)
    array([0, 1, 3, 4])

    """
    # nonzero() on a 1-D array returns a 1-tuple of index arrays.
    flat = a.ravel()
    return flat.nonzero()[0]
# Map the first letter of a mode name ('valid', 'same', 'full') to the
# integer code expected by the C-level correlate/convolve routines.
_mode_from_name_dict = {'v': 0,
                        's' : 1,
                        'f' : 2}
def _mode_from_name(mode):
    """Translate a mode name ('valid'/'same'/'full', case-insensitive)
    to its integer code; integer codes pass through unchanged."""
    if not isinstance(mode, basestring):
        return mode
    # Only the first letter of the name is significant.
    return _mode_from_name_dict[mode.lower()[0]]
def correlate(a, v, mode='valid', old_behavior=False):
    """
    Cross-correlation of two 1-dimensional sequences.

    Computes the correlation as generally defined in signal processing
    texts::

        z[k] = sum_n a[n] * conj(v[n+k])

    with `a` and `v` zero-padded where necessary and ``conj`` the
    complex conjugate.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `convolve` docstring.  Note that the default is
        'valid', unlike `convolve`, which uses 'full'.
    old_behavior : bool
        If True, uses the old behavior from Numeric
        (correlate(a,v) == correlate(v,a), and the conjugate is not
        taken for complex arrays).  If False, uses the conventional
        signal processing definition.

    See Also
    --------
    convolve : Discrete, linear convolution of two one-dimensional
               sequences.

    Examples
    --------
    >>> np.correlate([1, 2, 3], [0, 1, 0.5])
    array([ 3.5])
    >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
    array([ 0.5,  2. ,  3.5,  3. ,  0. ])

    """
    mode = _mode_from_name(mode)
    if not old_behavior:
        # Conventional definition: inputs are never swapped and the
        # second argument is conjugated for complex arrays.
        return multiarray.correlate2(a, v, mode)
    # Numeric-era semantics, deprecated; discussion:
    # http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630
    warnings.warn("""
The old behavior of correlate was deprecated for 1.4.0, and will be completely removed
for NumPy 2.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
                  DeprecationWarning)
    return multiarray.correlate(a, v, mode)
def convolve(a, v, mode='full'):
    """
    Return the discrete, linear convolution of two one-dimensional
    sequences.

    The convolution operator is often seen in signal processing, where
    it models the effect of a linear time-invariant system on a signal
    [1]_.  In probability theory, the sum of two independent random
    variables is distributed according to the convolution of their
    individual distributions.

    Parameters
    ----------
    a : (N,) array_like
        First one-dimensional input array.
    v : (M,) array_like
        Second one-dimensional input array.
    mode : {'full', 'valid', 'same'}, optional
        'full' (default) returns the convolution at each point of
        overlap, with output shape (N+M-1,); boundary effects may be
        seen at the end points.  'same' returns output of length
        ``max(M, N)``; boundary effects are still visible.  'valid'
        returns output of length ``max(M, N) - min(M, N) + 1``, only
        for points where the signals overlap completely.

    Returns
    -------
    out : ndarray
        Discrete, linear convolution of `a` and `v`.

    See Also
    --------
    scipy.signal.fftconvolve : Convolve two arrays using the Fast
                               Fourier Transform.
    scipy.linalg.toeplitz : Used to construct the convolution operator.

    References
    ----------
    .. [1] Wikipedia, "Convolution",
           http://en.wikipedia.org/wiki/Convolution.

    Examples
    --------
    >>> np.convolve([1, 2, 3], [0, 1, 0.5])
    array([ 0. ,  1. ,  2.5,  4. ,  1.5])
    >>> np.convolve([1,2,3],[0,1,0.5], 'valid')
    array([ 2.5])

    """
    a, v = array(a, ndmin=1), array(v, ndmin=1)
    # Always slide the shorter sequence over the longer one.
    if len(v) > len(a):
        a, v = v, a
    if len(a) == 0:
        raise ValueError('a cannot be empty')
    if len(v) == 0:
        raise ValueError('v cannot be empty')
    # Convolution is correlation with the second operand reversed.
    return multiarray.correlate(a, v[::-1], _mode_from_name(mode))
def outer(a, b):
    """
    Compute the outer product of two vectors.

    Given two vectors, ``a = [a0, a1, ..., aM]`` and
    ``b = [b0, b1, ..., bN]``, the outer product [1]_ is the (M, N)
    matrix whose ``[i, j]`` entry is ``a[i] * b[j]``.

    Parameters
    ----------
    a, b : array_like, shape (M,), (N,)
        First and second input vectors.  Inputs are flattened if they
        are not already 1-dimensional.

    Returns
    -------
    out : ndarray, shape (M, N)
        ``out[i, j] = a[i] * b[j]``

    See also
    --------
    inner, einsum

    References
    ----------
    .. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
             ed., Baltimore, MD, Johns Hopkins University Press, 1996,
             pg. 8.

    Examples
    --------
    >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
    >>> rl
    array([[-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.]])

    """
    # Flatten both inputs, then broadcast a column against a row.
    a = asarray(a).ravel()
    b = asarray(b).ravel()
    return a[:, newaxis] * b[newaxis, :]
# try to import blas optimized dot if available
try:
    # importing this changes the dot function for basic 4 types
    # to blas-optimized versions.
    from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
    # _dotblas is unavailable: fall back on the plain C implementations.
    # docstrings are in add_newdocs.py
    inner = multiarray.inner
    dot = multiarray.dot
    def vdot(a, b):
        """Dot product of the flattened inputs, conjugating the first
        argument so complex vectors get a true inner product."""
        return dot(asarray(a).ravel().conj(), asarray(b).ravel())
    def alterdot():
        """No-op placeholder: without _dotblas there is no BLAS dot to
        switch to."""
        pass
    def restoredot():
        """No-op placeholder matching alterdot when _dotblas is absent."""
        pass
def tensordot(a, b, axes=2):
    """
    Compute tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors `a` and `b`, and an array_like object containing
    two array_like objects, ``(a_axes, b_axes)``, sum the products of
    `a`'s and `b`'s elements (components) over the axes specified by
    ``a_axes`` and ``b_axes``.  The third argument can be a single
    non-negative integer_like scalar, ``N``; if it is such, then the
    last ``N`` dimensions of `a` and the first ``N`` dimensions of `b`
    are summed over.

    Parameters
    ----------
    a, b : array_like, len(shape) >= 1
        Tensors to "dot".
    axes : variable type
        * integer_like scalar: number of axes to sum over (applies to
          both arrays); or
        * array_like, shape = (2,), both elements array_like: axes to
          be summed over, first sequence applying to `a`, second to `b`.

    Returns
    -------
    out : ndarray
        The tensor dot product of the inputs.

    Raises
    ------
    ValueError
        If the summed axes of `a` and `b` differ in number or length.

    See Also
    --------
    dot, einsum

    Notes
    -----
    When there is more than one axis to sum over - and they are not the
    last (first) axes of `a` (`b`) - the argument `axes` should consist
    of two sequences of the same length, with the first axis to sum
    over given first in both sequences, the second axis second, and so
    forth.

    Examples
    --------
    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
    >>> c.shape
    (5, 2)

    """
    try:
        iter(axes)
    except TypeError:
        # Bare integer N: sum over the last N axes of `a` and the
        # first N axes of `b`.  (Only TypeError is expected here; a
        # bare except would also swallow KeyboardInterrupt etc.)
        axes_a = range(-axes, 0)
        axes_b = range(0, axes)
    else:
        axes_a, axes_b = axes
    # Normalize each axis spec to a list, remembering its length.
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1
    a, b = asarray(a), asarray(b)
    as_ = a.shape
    nda = len(a.shape)
    bs = b.shape
    ndb = len(b.shape)
    # The paired axes must match in length; negative axis indices are
    # normalized in the same pass.
    equal = True
    if na != nb:
        equal = False
    else:
        for k in range(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        raise ValueError("shape-mismatch for sum")
    # Move the axes to sum over to the end of "a"
    # and to the front of "b"
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]
    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]
    # Collapse to a 2-D matrix product, then restore the kept axes.
    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = dot(at, bt)
    return res.reshape(olda + oldb)
def roll(a, shift, axis=None):
    """
    Roll array elements along a given axis.

    Elements that roll beyond the last position are re-introduced at
    the first.

    Parameters
    ----------
    a : array_like
        Input array.
    shift : int
        The number of places by which elements are shifted.
    axis : int, optional
        The axis along which elements are shifted.  By default, the
        array is flattened before shifting, after which the original
        shape is restored.

    Returns
    -------
    res : ndarray
        Output array, with the same shape as `a`.

    See Also
    --------
    rollaxis : Roll the specified axis backwards, until it lies in a
               given position.

    Examples
    --------
    >>> x = np.arange(10)
    >>> np.roll(x, 2)
    array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
    >>> x2 = np.reshape(x, (2,5))
    >>> np.roll(x2, 1, axis=1)
    array([[4, 0, 1, 2, 3],
           [9, 5, 6, 7, 8]])

    """
    a = asanyarray(a)
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if n == 0:
        # Nothing to roll; guard against the ZeroDivisionError that
        # ``shift %= 0`` would raise and return the input unchanged.
        return a
    shift %= n
    # Build the permutation of indices that realizes the roll, then
    # gather along the requested axis (or the flattened array).
    indexes = concatenate((arange(n-shift, n), arange(n-shift)))
    res = a.take(indexes, axis)
    if reshape:
        return res.reshape(a.shape)
    else:
        return res
def rollaxis(a, axis, start=0):
    """
    Roll the specified axis backwards, until it lies in a given
    position.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        The axis to roll backwards.  The positions of the other axes do
        not change relative to one another.
    start : int, optional
        The axis is rolled until it lies before this position.  The
        default, 0, results in a "complete" roll.

    Returns
    -------
    res : ndarray
        Output array (a view of `a` with transposed axes).

    Raises
    ------
    ValueError
        If `axis` or `start` is out of range after normalization.

    See Also
    --------
    roll : Roll the elements of an array by a number of positions along
           a given axis.

    Examples
    --------
    >>> a = np.ones((3,4,5,6))
    >>> np.rollaxis(a, 3, 1).shape
    (3, 6, 4, 5)
    >>> np.rollaxis(a, 2).shape
    (5, 3, 4, 6)
    >>> np.rollaxis(a, 1, 4).shape
    (3, 5, 6, 4)

    """
    n = a.ndim
    if axis < 0:
        axis += n
    if start < 0:
        start += n
    msg = 'rollaxis: %s (%d) must be >=0 and < %d'
    if not (0 <= axis < n):
        raise ValueError(msg % ('axis', axis, n))
    if not (0 <= start < n+1):
        raise ValueError(msg % ('start', start, n+1))
    if (axis < start): # it's been removed
        start -= 1
    if axis==start:
        return a
    # Materialize the axis order as a list: relying on range() to
    # return a mutable list is Python-2-only behavior.
    axes = list(range(0, n))
    axes.remove(axis)
    axes.insert(start, axis)
    return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
    # Thin wrapper kept only because older scipy imports it by name.
    return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """
    Return the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector
    perpendicular to both.  The vectors are taken along the last axis
    of each input by default; those axes may have length 2 or 3.  A
    length-2 vector is treated as having a zero third component, and
    when both inputs are 2-vectors only the scalar z-component of the
    product is returned.

    Parameters
    ----------
    a : array_like
        Components of the first vector(s).
    b : array_like
        Components of the second vector(s).
    axisa : int, optional
        Axis of `a` that defines the vector(s).  Defaults to the last
        axis.
    axisb : int, optional
        Axis of `b` that defines the vector(s).  Defaults to the last
        axis.
    axisc : int, optional
        Axis of the result containing the cross product vector(s).
        Defaults to the last axis.
    axis : int, optional
        If given, overrides `axisa`, `axisb` and `axisc` all at once.

    Returns
    -------
    c : ndarray
        Vector cross product(s).

    Raises
    ------
    ValueError
        When the dimension of the vector(s) in `a` and/or `b` does not
        equal 2 or 3.

    See Also
    --------
    inner : Inner product
    outer : Outer product.
    ix_ : Construct index arrays.

    Examples
    --------
    >>> np.cross([1, 2, 3], [4, 5, 6])
    array([-3,  6, -3])
    >>> np.cross([1, 2], [4, 5])
    -3

    """
    if axis is not None:
        axisa = axisb = axisc = axis
    # Bring the vector components to the front so they can be indexed
    # as a[0], a[1], a[2].
    a = asarray(a).swapaxes(axisa, 0)
    b = asarray(b).swapaxes(axisb, 0)
    msg = ("incompatible dimensions for cross product\n"
           "(dimension must be 2 or 3)")
    if a.shape[0] not in (2, 3) or b.shape[0] not in (2, 3):
        raise ValueError(msg)
    if a.shape[0] == 2 and b.shape[0] == 2:
        # Both inputs are 2-vectors: only the z-component is defined.
        z = a[0]*b[1] - a[1]*b[0]
        if z.ndim == 0:
            return z
        return z.swapaxes(0, axisc)
    if a.shape[0] == 2:
        # `a` has an implicit zero z-component.
        x = a[1]*b[2]
        y = -a[0]*b[2]
        z = a[0]*b[1] - a[1]*b[0]
    elif b.shape[0] == 2:
        # `b` has an implicit zero z-component.
        x = -a[2]*b[1]
        y = a[2]*b[0]
        z = a[0]*b[1] - a[1]*b[0]
    else:
        # Full 3-D cross product.
        x = a[1]*b[2] - a[2]*b[1]
        y = a[2]*b[0] - a[0]*b[2]
        z = a[0]*b[1] - a[1]*b[0]
    cp = array([x, y, z])
    if cp.ndim == 1:
        return cp
    return cp.swapaxes(0, axisc)
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions
# Scalar dtypes whose repr needs no explicit "dtype=" annotation because
# they match the default types produced from Python scalars.
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
    # intc has the same size as the native Python int on this platform
    _typelessdata.append(intc)
if issubclass(longlong, int):
    # platforms where the Python int is a C long long
    _typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters split the string appropriately after array elements.
    precision : int, optional
        Floating point precision. Default is the current printing precision
        (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero, default is False. Very small
        is defined by `precision`, if the precision is 8 then
        numbers smaller than 5e-9 are represented as zero.

    Returns
    -------
    string : str
        The string representation of an array.

    See Also
    --------
    array_str, array2string, set_printoptions

    Examples
    --------
    >>> np.array_repr(np.array([1,2]))
    'array([1, 2])'
    >>> np.array_repr(np.ma.array([0.]))
    'MaskedArray([ 0.])'
    >>> np.array_repr(np.array([], np.int32))
    'array([], dtype=int32)'
    >>> x = np.array([1e-6, 4e-7, 2, 3])
    >>> np.array_repr(x, precision=6, suppress_small=True)
    'array([ 0.000001,  0.      ,  2.      ,  3.      ])'
    """
    # Non-empty arrays (and the canonical empty shape (0,)) get their
    # data formatted; other zero-size arrays show their shape instead.
    if arr.size > 0 or arr.shape==(0,):
        lst = array2string(arr, max_line_width, precision, suppress_small,
                           ', ', "array(")
    else: # show zero-length shape unless it is (0,)
        lst = "[], shape=%s" % (repr(arr.shape),)
    # Subclasses are reproduced under their own class name.
    if arr.__class__ is not ndarray:
        cName= arr.__class__.__name__
    else:
        cName = "array"
    # The "dtype=" annotation may be omitted only for the default scalar
    # types, and never for empty arrays (where it is the sole type clue).
    skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
    if skipdtype:
        return "%s(%s)" % (cName, lst)
    else:
        typename = arr.dtype.name
        # Quote typename in the output if it is "complex".
        if typename and not (typename[0].isalpha() and typename.isalnum()):
            typename = "'%s'" % typename
        lf = ''
        if issubclass(arr.dtype.type, flexible):
            # Structured/string dtypes: show the full dtype spec, broken
            # onto its own line aligned under "array(".
            if arr.dtype.names:
                typename = "%s" % str(arr.dtype)
            else:
                typename = "'%s'" % str(arr.dtype)
            lf = '\n'+' '*len("array(")
        return cName + "(%s, %sdtype=%s)" % (lst, lf, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    Unlike `array_repr`, the result contains only the data portion: no
    class name and no dtype annotation.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.  The
        default is, indirectly, 75.
    precision : int, optional
        Floating point precision.  Default is the current printing
        precision (usually 8), which can be altered using
        `set_printoptions`.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is
        False.

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> np.array_str(np.arange(3))
    '[0 1 2]'

    """
    # Space-separated, no prefix, plain str() styling for 0-d arrays.
    return array2string(a, max_line_width, precision, suppress_small,
                        ' ', "", str)
def set_string_function(f, repr=True):
    """
    Set a Python function to be used when pretty printing arrays.

    Parameters
    ----------
    f : function or None
        Function used to pretty print arrays.  It should accept a
        single array argument and return the string representation.
        Pass None to restore the default NumPy formatting function.
    repr : bool, optional
        If True (default), install the ``__repr__`` formatter; if
        False, install the ``__str__`` formatter.

    See Also
    --------
    set_printoptions, get_printoptions

    Examples
    --------
    >>> def pprint(arr):
    ...     return 'HA! - What are you going to do now?'
    ...
    >>> np.set_string_function(pprint)
    >>> np.arange(10)
    HA! - What are you going to do now?
    >>> np.set_string_function(None)
    >>> np.arange(10)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    if f is None:
        # Restore the built-in formatter for the chosen slot.
        if repr:
            return multiarray.set_string_function(array_repr, 1)
        return multiarray.set_string_function(array_str, 0)
    return multiarray.set_string_function(f, repr)
# install the default pretty-printers for str() and repr() of ndarrays
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
# True when the native byte order of this platform is little-endian
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
    """
    Return an array representing the indices of a grid.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : dtype, optional
        Data type of the result.

    Returns
    -------
    grid : ndarray
        Array of shape ``(len(dimensions),) + tuple(dimensions)`` where
        ``grid[k]`` holds the index values along axis ``k``, i.e.
        ``grid[k, i0, i1, ..., iN-1] = ik``.

    See Also
    --------
    mgrid, meshgrid

    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]        # row indices
    array([[0, 0, 0],
           [1, 1, 1]])
    >>> grid[1]        # column indices
    array([[0, 1, 2],
           [0, 1, 2]])

    """
    dimensions = tuple(dimensions)
    N = len(dimensions)
    if N == 0:
        return array([], dtype=dtype)
    res = empty((N,) + dimensions, dtype=dtype)
    for i, dim in enumerate(dimensions):
        # A 1-d ramp along axis i, shaped so it broadcasts across every
        # other axis, is broadcast-assigned into the output slot.
        ramp = arange(dim, dtype=dtype).reshape(
            (1,)*i + (dim,) + (1,)*(N - i - 1))
        res[i] = ramp
    return res
def fromfunction(function, shape, **kwargs):
    """
    Construct an array by executing a function over each coordinate.

    The resulting array therefore has a value ``fn(x, y, z)`` at
    coordinate ``(x, y, z)``.

    Parameters
    ----------
    function : callable
        Called with N parameters, where N is the rank of `shape`.  Each
        parameter is an array holding the coordinates varying along one
        axis.
    shape : (N,) tuple of ints
        Shape of the output array, which also determines the shape of
        the coordinate arrays passed to `function`.
    dtype : data-type, optional
        Data-type of the coordinate arrays passed to `function`.
        By default, `dtype` is float.

    Returns
    -------
    fromfunction : any
        The result of the call to `function`, returned directly; its
        shape is entirely determined by `function`.

    See Also
    --------
    indices, meshgrid

    Notes
    -----
    Keywords other than `dtype` are passed on to `function`.

    Examples
    --------
    >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4]])

    """
    # `dtype` configures the coordinate grid; every remaining keyword
    # is forwarded to `function` untouched.
    dtype = kwargs.pop('dtype', float)
    coords = indices(shape, dtype=dtype)
    return function(*coords, **kwargs)
def isscalar(num):
    """
    Return True if the type of `num` is a scalar type.

    Parameters
    ----------
    num : any
        Input argument, can be of any type and shape.

    Returns
    -------
    val : bool
        True if `num` is a scalar type, False if it is not.

    Examples
    --------
    >>> np.isscalar(3.1)
    True
    >>> np.isscalar([3.1])
    False

    """
    # NumPy scalar types are detected directly; everything else must be
    # exactly one of the registered Python scalar types.
    return isinstance(num, generic) or type(num) in ScalarType
# Hexadecimal-digit -> 4-bit binary string lookup used by binary_repr;
# the 'L' entry swallows the long-integer suffix produced by hex() on
# Python 2.
_lkup = {
    '0':'0000',
    '1':'0001',
    '2':'0010',
    '3':'0011',
    '4':'0100',
    '5':'0101',
    '6':'0110',
    '7':'0111',
    '8':'1000',
    '9':'1001',
    'a':'1010',
    'b':'1011',
    'c':'1100',
    'd':'1101',
    'e':'1110',
    'f':'1111',
    'A':'1010',
    'B':'1011',
    'C':'1100',
    'D':'1101',
    'E':'1110',
    'F':'1111',
    'L':''}
def binary_repr(num, width=None):
    """
    Return the binary representation of the input number as a string.

    For negative numbers, if width is not given, a minus sign is added
    to the front.  If width is given, the two's complement of the
    number is returned, with respect to that width.

    In a two's-complement system negative numbers are represented by
    the two's complement of the absolute value.  A N-bit system can
    represent every integer in the range
    :math:`-2^{N-1}` to :math:`+2^{N-1}-1` [1]_.

    Parameters
    ----------
    num : int
        Only an integer decimal number can be used.
    width : int, optional
        The length of the returned string if `num` is positive, the
        length of the two's complement if `num` is negative.

    Returns
    -------
    bin : str
        Binary representation of `num` or two's complement of `num`.

    See Also
    --------
    base_repr: Return a string representation of a number in the given
               base system.

    Notes
    -----
    `binary_repr` is equivalent to using `base_repr` with base 2, but
    about 25x faster.

    References
    ----------
    .. [1] Wikipedia, "Two's complement",
        http://en.wikipedia.org/wiki/Two's_complement

    Examples
    --------
    >>> np.binary_repr(3)
    '11'
    >>> np.binary_repr(-3)
    '-11'
    >>> np.binary_repr(-3, width=4)
    '1101'

    """
    if num == 0:
        # Special-cased so stripping leading zeros below cannot yield ''.
        return '0'*(width or 1)
    sign = ''
    if num < 0:
        if width is None:
            sign = '-'
            num = -num
        else:
            # replace num with its two's complement w.r.t. the width
            num = 2**width + num
    # Convert via hex() and a nibble lookup table -- far faster than a
    # bit-by-bit loop; _lkup drops Python 2's trailing 'L' suffix.
    bits = ''.join([_lkup[ch] for ch in hex(num)[2:]]).lstrip('0')
    if width is not None:
        bits = bits.zfill(width)
    return sign + bits
def base_repr(number, base=2, padding=0):
    """
    Return a string representation of a number in the given base system.

    Parameters
    ----------
    number : int
        The value to convert. Only positive values are handled.
    base : int, optional
        Convert `number` to the `base` number system. The valid range is 2-36,
        the default value is 2.
    padding : int, optional
        Number of zeros padded on the left. Default is 0 (no padding).

    Returns
    -------
    out : str
        String representation of `number` in `base` system.

    See Also
    --------
    binary_repr : Faster version of `base_repr` for base 2.

    Examples
    --------
    >>> np.base_repr(5)
    '101'
    >>> np.base_repr(7, base=5, padding=3)
    '00012'
    >>> np.base_repr(10, base=16)
    'A'
    """
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if base > len(alphabet):
        raise ValueError("Bases greater than 36 not handled in base_repr.")
    # collect digits least-significant first, then reverse at the end
    remaining = abs(number)
    pieces = []
    while remaining:
        remaining, digit = divmod(remaining, base)
        pieces.append(alphabet[digit])
    if padding:
        pieces.append('0' * padding)
    if number < 0:
        pieces.append('-')
    # `pieces or '0'` makes base_repr(0) return '0' rather than ''
    return ''.join(reversed(pieces or '0'))
# Python 2 pickle support: keep private references to the original cPickle
# loader and the builtin `open` before the module-level `load` name is
# shadowed by the wrapper function defined below.
from cPickle import load, loads
_cload = load
_file = open
def load(file):
    """
    Wrapper around cPickle.load which accepts either a file-like object or
    a filename.

    Note that the NumPy binary format is not based on pickle/cPickle anymore.
    For details on the preferred way of loading and saving files, see `load`
    and `save`.

    See Also
    --------
    load, save
    """
    # a plain string is treated as a path; open it in binary mode
    handle = _file(file, "rb") if isinstance(file, type("")) else file
    return _cload(handle)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def identity(n, dtype=None):
    """
    Return the identity array.

    The identity array is a square array with ones on the main diagonal
    and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows (and columns) in `n` x `n` output.
    dtype : data-type, optional
        Data-type of the output. Defaults to ``float``.

    Returns
    -------
    out : ndarray
        `n` x `n` array with its main diagonal set to one,
        and all other elements 0.

    Examples
    --------
    >>> np.identity(3)
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    """
    # delegate to eye(), which handles the dtype default
    from numpy import eye
    return eye(n, dtype=dtype)
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
    """
    Returns True if two arrays are element-wise equal within a tolerance.

    The tolerance values are positive, typically very small numbers. The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    If either array contains one or more NaNs, False is returned.
    Infs are treated as equal if they are in the same place and of the same
    sign in both arrays.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).

    Returns
    -------
    allclose : bool
        Returns True if the two arrays are equal within the given
        tolerance; False otherwise.

    See Also
    --------
    all, any, alltrue, sometrue

    Notes
    -----
    Checks element-wise that ``absolute(a - b) <= (atol + rtol * absolute(b))``.
    This is not symmetric in `a` and `b`, so ``allclose(a, b)`` might differ
    from ``allclose(b, a)`` in rare cases.

    Examples
    --------
    >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
    False
    >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
    True
    >>> np.allclose([1.0, np.nan], [1.0, np.nan])
    False
    """
    lhs = array(a, copy=False, ndmin=1)
    rhs = array(b, copy=False, ndmin=1)
    lhs_inf = isinf(lhs)
    rhs_inf = isinf(rhs)
    if any(lhs_inf) or any(rhs_inf):
        # infs must occupy the same positions...
        if not all(lhs_inf == rhs_inf):
            return False
        # ...and carry the same sign
        if not all(lhs[lhs_inf] == rhs[lhs_inf]):
            return False
        # drop the infs so the tolerance comparison below stays finite
        lhs = lhs[~lhs_inf]
        rhs = rhs[~lhs_inf]
    # suppress "invalid value" warnings from NaN comparisons; NaN compares
    # unequal, so any NaN makes the final all() False as documented
    warn_ctx = WarningManager()
    warn_ctx.__enter__()
    try:
        warnings.filterwarnings('ignore', "invalid value")
        verdict = all(less_equal(abs(lhs - rhs), atol + rtol * abs(rhs)))
    finally:
        warn_ctx.__exit__()
    return verdict
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Returns a boolean array where two arrays are element-wise equal within a
    tolerance.

    The tolerance values are positive, typically very small numbers. The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).
    equal_nan : bool
        Whether to compare NaN's as equal. If True, NaN's in `a` will be
        considered equal to NaN's in `b` in the output array.

    Returns
    -------
    y : array_like
        Returns a boolean array of where `a` and `b` are equal within the
        given tolerance. If both `a` and `b` are scalars, returns a single
        boolean value.

    See Also
    --------
    allclose

    Notes
    -----
    .. versionadded:: 1.7.0

    For finite values, tests ``absolute(a - b) <= (atol + rtol * absolute(b))``.
    Not symmetric in `a` and `b`.

    Examples
    --------
    >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
    array([True, False])
    >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    array([True, True])
    """
    def _tol_mask(u, v, atol, rtol):
        # silence "invalid value" FP errors from NaN arithmetic
        saved = seterr(invalid='ignore')
        try:
            out = less_equal(abs(u - v), atol + rtol * abs(v))
        finally:
            seterr(**saved)
        if isscalar(a) and isscalar(b):
            # scalar inputs yield a plain bool, not a 0-d array
            out = bool(out)
        return out

    left = array(a, copy=False, subok=True, ndmin=1)
    right = array(b, copy=False, subok=True, ndmin=1)
    left_fin = isfinite(left)
    right_fin = isfinite(right)
    if all(left_fin) and all(right_fin):
        # fast path: everything finite, one vectorized comparison
        return _tol_mask(left, right, atol, rtol)

    both_fin = left_fin & right_fin
    result = zeros_like(both_fin, subok=True)
    # Because we're using boolean indexing, left & right must be the same
    # shape. Ideally we'd use broadcast_arrays, but it lives in
    # lib.stride_tricks and can't be imported here.
    left = left * ones_like(result)
    right = right * ones_like(result)
    # only compare finite entries with the tolerance formula (avoids
    # inf - inf = nan surprises)
    result[both_fin] = _tol_mask(left[both_fin], right[both_fin], atol, rtol)
    # non-finite entries (inf/nan) are close only if exactly equal
    result[~both_fin] = (left[~both_fin] == right[~both_fin])
    if equal_nan:
        # optionally treat NaN == NaN
        result[isnan(left) & isnan(right)] = True
    return result
def array_equal(a1, a2):
    """
    True if two arrays have the same shape and elements, False otherwise.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    b : bool
        Returns True if the arrays are equal.

    See Also
    --------
    allclose: Returns True if two arrays are element-wise equal within a
              tolerance.
    array_equiv: Returns True if input arrays are shape consistent and all
                 elements equal.

    Examples
    --------
    >>> np.array_equal([1, 2], [1, 2])
    True
    >>> np.array_equal([1, 2], [1, 2, 3])
    False
    >>> np.array_equal([1, 2], [1, 4])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; a failed conversion simply means "not equal"
        return False
    if a1.shape != a2.shape:
        return False
    return bool(equal(a1, a2).all())
def array_equiv(a1, a2):
    """
    Returns True if input arrays are shape consistent and all elements equal.

    Shape consistent means they are either the same shape, or one input array
    can be broadcasted to create the same shape as the other one.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    out : bool
        True if equivalent, False otherwise.

    Examples
    --------
    >>> np.array_equiv([1, 2], [1, 2])
    True
    >>> np.array_equiv([1, 2], [1, 3])
    False

    Showing the shape equivalence:

    >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
    True
    >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; a failed conversion means "not equivalent"
        return False
    try:
        return bool(equal(a1, a2).all())
    except ValueError:
        # shapes are not broadcastable
        return False
# Map user-facing error-mode names to the umath bit-field constants used
# in the ufunc error mask (see seterr/geterr below).
_errdict = {"ignore":ERR_IGNORE,
            "warn":ERR_WARN,
            "raise":ERR_RAISE,
            "call":ERR_CALL,
            "print":ERR_PRINT,
            "log":ERR_LOG}
# Inverse mapping: mask constant -> mode name, used by geterr() to decode
# the current mask back into strings.
_errdict_rev = {}
for key in _errdict.keys():
    _errdict_rev[_errdict[key]] = key
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """
    Set how floating-point errors are handled.

    Note that operations on integer scalar types (such as `int16`) are
    handled like floating point, and are affected by these settings.

    Parameters
    ----------
    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Set treatment for all types of floating-point errors at once:

        - ignore: Take no action when the exception occurs.
        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
        - raise: Raise a `FloatingPointError`.
        - call: Call a function specified using the `seterrcall` function.
        - print: Print a warning directly to ``stdout``.
        - log: Record error in a Log object specified by `seterrcall`.

        The default is not to change the current behavior.
    divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for division by zero.
    over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point overflow.
    under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point underflow.
    invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for invalid floating-point operation.

    Returns
    -------
    old_settings : dict
        Dictionary containing the old settings.

    See also
    --------
    seterrcall : Set a callback function for the 'call' mode.
    geterr, geterrcall, errstate

    Examples
    --------
    >>> old_settings = np.seterr(all='ignore')  #seterr to known value
    >>> np.seterr(over='raise')
    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
     'under': 'ignore'}
    """
    pyvals = umath.geterrobj()
    old = geterr()
    # unspecified categories fall back to `all`, then to the current setting
    if divide is None:
        divide = all or old['divide']
    if over is None:
        over = all or old['over']
    if under is None:
        under = all or old['under']
    if invalid is None:
        invalid = all or old['invalid']
    # pack the four mode codes into the single ufunc error mask
    maskvalue = (
        (_errdict[divide] << SHIFT_DIVIDEBYZERO)
        + (_errdict[over] << SHIFT_OVERFLOW)
        + (_errdict[under] << SHIFT_UNDERFLOW)
        + (_errdict[invalid] << SHIFT_INVALID)
    )
    pyvals[1] = maskvalue
    umath.seterrobj(pyvals)
    return old
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        A dictionary with keys "divide", "over", "under", and "invalid",
        whose values are from the strings "ignore", "print", "log", "warn",
        "raise", and "call". The keys represent possible floating-point
        exceptions, and the values define how these exceptions are handled.

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> np.geterr()
    {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
    'under': 'ignore'}
    """
    maskvalue = umath.geterrobj()[1]
    # each category occupies 3 bits in the mask
    mask = 7
    res = {}
    for name, shift in (('divide', SHIFT_DIVIDEBYZERO),
                        ('over', SHIFT_OVERFLOW),
                        ('under', SHIFT_UNDERFLOW),
                        ('invalid', SHIFT_INVALID)):
        res[name] = _errdict_rev[(maskvalue >> shift) & mask]
    return res
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    Parameters
    ----------
    size : int
        Size of buffer; must be a multiple of 16 in the range [16, 10e6].

    Returns
    -------
    old : int
        The previous buffer size.
    """
    # validate before touching the shared error object
    if size > 10e6:
        raise ValueError("Buffer size, %s, is too big." % size)
    if size < 5:
        raise ValueError("Buffer size, %s, is too small." % size)
    if size % 16 != 0:
        raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
    errobj = umath.geterrobj()
    previous = getbufsize()
    errobj[0] = size
    umath.seterrobj(errobj)
    return previous
def getbufsize():
    """
    Return the size of the buffer used in ufuncs.

    Returns
    -------
    getbufsize : int
        Size of ufunc buffer in bytes.
    """
    # the buffer size is the first slot of the shared error object
    errobj = umath.geterrobj()
    return errobj[0]
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages. The first
    is to set the error-handler to 'call', using `seterr`. Then, set
    the function to call using this function.

    The second is to set the error-handler to 'log', using `seterr`.
    Floating-point errors then trigger a call to the 'write' method of
    the provided object.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message ('log'-mode).

        The call function takes two arguments. The first is the
        type of error (one of "divide", "over", "under", or "invalid"),
        and the second is the status flag (a byte whose low bits encode
        ``divide + 2*over + 4*under + 8*invalid``).

        If an object is provided, its write method should take one argument,
        a string.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    See Also
    --------
    seterr, geterr, geterrcall

    Examples
    --------
    >>> def err_handler(type, flag):
    ...     print "Floating point error (%s), with flag %s" % (type, flag)
    ...
    >>> saved_handler = np.seterrcall(err_handler)
    >>> save_err = np.seterr(all='call')
    >>> np.array([1, 2, 3]) / 0.0
    Floating point error (divide by zero), with flag 1
    array([ Inf,  Inf,  Inf])
    """
    # accept callables, None (reset), or log-style objects with a
    # callable .write method; reject everything else
    if func is not None and not callable(func):
        if not hasattr(func, 'write') or not callable(func.write):
            raise ValueError("Only callable can be used as callback")
    errobj = umath.geterrobj()
    previous = geterrcall()
    errobj[2] = func
    umath.seterrobj(errobj)
    return previous
def geterrcall():
    """
    Return the current callback function used on floating-point errors.

    When the error handling for a floating-point error (one of "divide",
    "over", "under", or "invalid") is set to 'call' or 'log', the function
    that is called or the log instance that is written to is returned by
    `geterrcall`. This function or log instance has been set with
    `seterrcall`.

    Returns
    -------
    errobj : callable, log instance or None
        The current error handler. If no handler was set through `seterrcall`,
        ``None`` is returned.

    See Also
    --------
    seterrcall, seterr, geterr

    Examples
    --------
    >>> np.geterrcall()  # we did not yet set a handler, returns None
    >>> oldsettings = np.seterr(all='call')
    >>> oldhandler = np.seterrcall(lambda type, flag: None)
    >>> np.geterrcall() is None
    False
    """
    # the callback lives in the third slot of the shared error object
    errobj = umath.geterrobj()
    return errobj[2]
# Private sentinel used by `errstate` to distinguish "no `call` keyword
# supplied" from an explicit ``call=None`` (None is a valid argument to
# seterrcall, meaning "clear the handler").
class _unspecified(object):
    pass
_Unspecified = _unspecified()
class errstate(object):
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Using an instance of `errstate` as a context manager allows statements in
    that context to execute with a known error handling behavior. Upon entering
    the context the error handling is set with `seterr` and `seterrcall`, and
    upon exiting it is reset to what it was before.

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Keyword arguments. The valid keywords are the possible floating-point
        exceptions. Each keyword should have a string value that defines the
        treatment for the particular error. Possible values are
        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    The ``with`` statement was introduced in Python 2.5, and can only be used
    there by importing it: ``from __future__ import with_statement``.

    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> olderr = np.seterr(all='ignore')  # Set error handling to known state.
    >>> with np.errstate(invalid='raise'):
    ...     np.sqrt(-1)
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
    FloatingPointError: invalid value encountered in sqrt
    """
    # Doctests above are not run (they need `from __future__ import
    # with_statement` on old Pythons).

    def __init__(self, **kwargs):
        # `call` may legitimately be None, hence the sentinel default
        self.call = kwargs.pop('call', _Unspecified)
        self.kwargs = kwargs

    def __enter__(self):
        # save the incoming state so __exit__ can restore it
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            self.oldcall = seterrcall(self.call)

    def __exit__(self, *exc_info):
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
def _setdef():
    # Reset the shared ufunc error object to NumPy's built-in defaults:
    # [buffer size, error mask, callback].
    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
    umath.seterrobj(defval)
# set the default values at import time
_setdef()
# Convenient aliases for the IEEE special values defined above.
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
# Canonical boolean scalar singletons (numpy bool_, not Python bool).
False_ = bool_(False)
True_ = bool_(True)
# Pull in the fromnumeric API and re-export it (Python 2 implicit
# relative import).
import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
| mbalasso/mynumpy | numpy/core/numeric.py | Python | bsd-3-clause | 74,762 |
#https://code.djangoproject.com/wiki/CookBookSplitModelsToFiles
from .account import Account
from .transaction_type import Transaction_Type
from .transaction import Transaction
from .help_request import Help_Request
from .settingsgroups import SettingsUserGroups
| vinicius-alves/InternetBanking | app/models/data_models/__init__.py | Python | gpl-3.0 | 318 |
#!/bin/env python3
# Project Euler #4: find the largest palindrome that is a product of two
# 3-digit numbers.
L_RANGE = 100
R_RANGE = 999

max_num = None
for num in range(L_RANGE, R_RANGE + 1):
    # start num2 at num: products are symmetric, so checking (a, b) with
    # a <= b covers every pair once instead of twice
    for num2 in range(num, R_RANGE + 1):
        product = num * num2
        # skip the (comparatively expensive) palindrome test unless this
        # product would actually improve the current maximum
        if max_num is not None and product <= max_num:
            continue
        if str(product) == str(product)[::-1]:
            print(str(num) + "x" + str(num2))
            max_num = product
print("max: ", max_num)
| uskim/project-euler | 4/p4.py | Python | unlicense | 364 |
"""
send documents representing object data to elasticsearch for supported file extensions.
note: we truncate outbound documents to DOC_SIZE_LIMIT characters
(to bound memory pressure and request size to elastic)
a little knowledge on deletes and delete markers:
if bucket versioning is on:
- `aws s3api delete-object (no --version-id)` or `aws s3 rm`
- push a new delete marker onto the stack with a version-id
- generate ObjectRemoved:DeleteMarkerCreated
if bucket versioning was on and is then turned off:
- `aws s3 rm` or `aws s3api delete-object (no --version-id)`
- replace event at top of stack
- if a versioned delete marker, push a new one on top of it
- if an un-versioned delete marker, replace that marker with new marker
with version "null" (ObjectCreate will similarly replace the same with an object
of version "null")
- if object, destroy object
- generate ObjectRemoved:DeleteMarkerCreated
- problem: no way of knowing if DeleteMarkerCreated destroyed bytes
or just created a DeleteMarker; this is usually given by the return
value of `delete-object` but the S3 event has no knowledge of the same
- `aws s3api delete-object --version-id VERSION`
- destroy corresponding delete marker or object; v may be null in which
case it will destroy the object with version null (occurs when adding
new objects to a bucket that was versioned but is no longer)
- generate ObjectRemoved:Deleted
if bucket version is off and has always been off:
- `aws s3 rm` or `aws s3api delete-object`
- destroy object
- generate a single ObjectRemoved:Deleted
counterintuitive things:
- turning off versioning doesn't mean version stack can't get deeper (by at
least 1) as indicated above in the case where a new marker is pushed onto
the version stack
- both creating a delete marker (soft delete) and hard deleting a delete marker
by providing its version-id will result in an eventType of DeleteObject
and $.detail.responseElements.x-amz-delete-marker = true; it is therefore
not possible to tell the difference between a new delete marker and the deletion
of an existing one
See docs/EventBridge.md for more
"""
import datetime
import json
import os
import pathlib
import re
from os.path import split
from typing import Optional
from urllib.parse import unquote_plus
import boto3
import botocore
import nbformat
from dateutil.tz import tzutc
from document_queue import (
EVENT_PREFIX,
MAX_RETRY,
DocTypes,
DocumentQueue,
get_content_index_bytes,
get_content_index_extensions,
)
from jsonschema import ValidationError, draft7_format_checker, validate
from pdfminer.high_level import extract_text as extract_pdf_text
from tenacity import (
retry,
retry_if_exception,
stop_after_attempt,
wait_exponential,
)
from t4_lambda_shared.preview import (
ELASTIC_LIMIT_LINES,
extract_excel,
extract_fcs,
extract_parquet,
get_bytes,
get_preview_lines,
trim_to_bytes,
)
from t4_lambda_shared.utils import (
MANIFEST_PREFIX_V1,
POINTER_PREFIX_V1,
get_available_memory,
get_quilt_logger,
query_manifest_content,
separated_env_to_iter,
)
# translate EventBridge event names to S3-native event names
EVENTBRIDGE_TO_S3 = {
    "PutObject": EVENT_PREFIX["Created"] + "Put",
    "CopyObject": EVENT_PREFIX["Created"] + "Copy",
    "CompleteMultipartUpload": EVENT_PREFIX["Created"] + "CompleteMultipartUpload",
    # see map_event_name for complete logic
    "DeleteObject": None,
    # "DeleteObjects" is not handled since it does not contain enough information on
    # which objects where deleted
}
# JSON Schema (draft 7) used to validate each incoming event before
# processing; ensures that we process events of known and expected shape
EVENT_SCHEMA = {
    'type': 'object',
    'properties': {
        'awsRegion': {
            'type': 'string'
        },
        'eventName': {
            'type': 'string'
        },
        'eventTime': {
            'type': 'string',
            'format': 'date-time'
        },
        's3': {
            'type': 'object',
            'properties': {
                'bucket': {
                    'type': 'object',
                    'properties': {
                        'name': {
                            'type': 'string'
                        }
                    },
                    'required': ['name'],
                    'additionalProperties': True
                },
                'object': {
                    'type': 'object',
                    'properties': {
                        'eTag': {
                            'type': 'string'
                        },
                        'isDeleteMarker': {
                            'type': 'string'
                        },
                        'key': {
                            'type': 'string'
                        },
                        'versionId': {
                            'type': 'string'
                        }
                    },
                    'required': ['key'],
                    'additionalProperties': True
                },
            },
            'required': ['bucket', 'object'],
            'additionalProperties': True
        },
    },
    'required': ['s3', 'eventName'],
    'additionalProperties': True
}
# Max number of PDF pages to extract because it can be slow
MAX_PDF_PAGES = 100
# 10 MB, see https://amzn.to/2xJpngN
NB_VERSION = 4 # default notebook version for nbformat
# currently only affects .parquet, TODO: extend to other extensions
# fail fast at import time if the required env var is missing
assert 'SKIP_ROWS_EXTS' in os.environ
SKIP_ROWS_EXTS = separated_env_to_iter('SKIP_ROWS_EXTS')
# S3 Select statement that fetches the first (metadata) row of a manifest
SELECT_PACKAGE_META = "SELECT * from S3Object o WHERE o.version IS NOT MISSING LIMIT 1"
# No WHERE clause needed for aggregations since S3 Select skips missing fields for aggs
SELECT_PACKAGE_STATS = (
    "SELECT COALESCE(SUM(obj['size']), 0) as total_bytes,"
    " COUNT(obj['size']) as total_files from S3Object obj"
)
TEST_EVENT = "s3:TestEvent"
# we need to filter out GetObject and HeadObject calls generated by the present
# lambda in order to display accurate analytics in the Quilt catalog
# a custom user agent enables said filtration
USER_AGENT_EXTRA = " quilt3-lambdas-es-indexer"
def now_like_boto3():
    """Return the current time as a timezone-aware UTC datetime.

    Matches boto3's convention, e.g. head_object returns
    'LastModified': datetime.datetime(..., tzinfo=tzutc()).
    """
    utc = tzutc()
    return datetime.datetime.now(tz=utc)
def infer_extensions(key, ext):
    """Guess a better extension than `ext` when possible.

    Hive partition outputs (see
    https://www.qubole.com/blog/direct-writes-to-increase-spark-performance/),
    keys ending in ``_0``, and the ``.pq`` shorthand are treated as parquet.
    """
    looks_like_parquet = (
        re.fullmatch(r".c\d{3,5}", ext) is not None
        or re.fullmatch(r".*-c\d{3,5}$", key) is not None
        or key.endswith("_0")
        or ext == ".pq"
    )
    return ".parquet" if looks_like_parquet else ext
def should_retry_exception(exception):
    """Decide whether a failed S3 call is worth retrying.

    Exceptions without a `response` attribute (non-ClientError) are never
    retried; 402/403/404 are permanent client-side failures.
    """
    if not hasattr(exception, 'response'):
        return False
    # 218 is an arbitrary default that is not in the blocklist => retry
    error_code = exception.response.get('Error', {}).get('Code', 218)
    return error_code not in ["402", "403", "404"]
@retry(
    stop=stop_after_attempt(MAX_RETRY),
    wait=wait_exponential(multiplier=2, min=4, max=10),
    retry=(retry_if_exception(should_retry_exception))
)
def select_manifest_meta(s3_client, bucket: str, key: str):
    """
    Fetch the first (metadata) line of a package manifest via S3 Select.

    Retries with exponential backoff on transient errors (per
    should_retry_exception). Returns the raw contents of the selected row,
    or None if the S3 Select call fails with a ClientError.
    """
    try:
        raw = query_manifest_content(
            s3_client,
            bucket=bucket,
            key=key,
            sql_stmt=SELECT_PACKAGE_META
        )
        return raw.read()
    except botocore.exceptions.ClientError as cle:
        # best-effort: log and return None rather than failing the event
        print(f"Unable to S3 select manifest: {cle}")
        return None
def do_index(
    s3_client,
    doc_queue: DocumentQueue,
    event_type: str,
    *,
    bucket: str,
    etag: str,
    ext: str,
    key: str,
    last_modified: str,
    text: str = '',
    size: int = 0,
    version_id: Optional[str] = None,
):
    """Queue the event as an object document, and additionally as a
    package document when the key is a manifest pointer file."""
    logger_ = get_quilt_logger()
    logger_.debug("%s to indexing queue (%s)", key, event_type)
    # every event is indexed as a plain object document
    doc_queue.append(
        event_type,
        DocTypes.OBJECT,
        bucket=bucket,
        ext=ext,
        etag=etag,
        key=key,
        last_modified=last_modified,
        size=size,
        text=text,
        version_id=version_id
    )
    # manifest pointer files additionally produce a package document
    indexed_as_package = index_if_package(
        s3_client,
        doc_queue,
        event_type,
        bucket=bucket,
        etag=etag,
        ext=ext,
        key=key,
        last_modified=last_modified,
        size=size,
        version_id=version_id,
    )
    if indexed_as_package:
        logger_.debug("%s indexed as package (%s)", key, event_type)
def index_if_package(
    s3_client,
    doc_queue: DocumentQueue,
    event_type: str,
    *,
    bucket: str,
    etag: str,
    ext: str,
    key: str,
    last_modified: str,
    version_id: Optional[str],
    size: int
) -> bool:
    """index manifest pointer files as package documents in ES
    Returns:
        - True if pointer to manifest (and passes to doc_queue for indexing)
        - False if not a manifest (no attempt at indexing)
    """
    logger_ = get_quilt_logger()
    # a pointer key looks like <POINTER_PREFIX_V1><user>/<pkg>/<pointer_file>
    pointer_prefix, pointer_file = split(key)
    handle = pointer_prefix[len(POINTER_PREFIX_V1):]
    # reject keys that are not valid package pointers: must live under the
    # pointer prefix and have a "<user>/<pkg>" handle
    if (
        not pointer_file
        or not pointer_prefix.startswith(POINTER_PREFIX_V1)
        or len(handle) < 3
        or '/' not in handle
    ):
        logger_.debug("Not indexing as manifest file s3://%s/%s", bucket, key)
        return False
    try:
        # numeric pointer file names are creation timestamps; anything else
        # (e.g. "latest") is treated as a tag
        manifest_timestamp = int(pointer_file)
        is_tag = False
        # sanity window in epoch seconds (roughly 2016-01-01 .. 2026-01-01)
        if not 1451631600 <= manifest_timestamp <= 1767250800:
            logger_.warning("Unexpected manifest timestamp s3://%s/%s", bucket, key)
            return False
    except ValueError as err:
        is_tag = True
        logger_.debug("Non-integer manifest pointer: s3://%s/%s, %s", bucket, key, err)
    package_hash = ''
    first_dict = {}
    stats = None
    # we only need to get manifest contents for proper create events (not latest pointers)
    if event_type.startswith(EVENT_PREFIX["Created"]) and not is_tag:
        # the pointer file's body is the hash of the manifest it points to
        package_hash = get_plain_text(
            bucket,
            key,
            size,
            None,
            etag=etag,
            s3_client=s3_client,
            version_id=version_id,
        ).strip()
        manifest_key = f'{MANIFEST_PREFIX_V1}{package_hash}'
        # first row of the manifest carries package metadata (message, user_meta)
        first = select_manifest_meta(s3_client, bucket, manifest_key)
        stats = select_package_stats(s3_client, bucket, manifest_key)
        if not first:
            logger_.error("S3 select failed %s %s", bucket, manifest_key)
            return False
        try:
            first_dict = json.loads(first)
        except (json.JSONDecodeError, botocore.exceptions.ClientError) as exc:
            print(
                f"{exc}\n"
                f"\tFailed to select first line of manifest s3://{bucket}/{key}."
                f"\tGot {first}."
            )
            return False
    doc_queue.append(
        event_type,
        DocTypes.PACKAGE,
        bucket=bucket,
        etag=etag,
        ext=ext,
        handle=handle,
        key=key,
        last_modified=last_modified,
        # if we don't have the hash, we're processing a tag
        package_hash=(package_hash or pointer_file),
        package_stats=stats,
        pointer_file=pointer_file,
        comment=str(first_dict.get("message", "")),
        metadata=json.dumps(first_dict.get("user_meta", {})),
        version_id=version_id,
    )
    return True
def select_package_stats(s3_client, bucket, manifest_key) -> Optional[dict]:
    """Use S3 Select to generate file stats for a package manifest.

    Returns a dict like ``{'total_bytes': int, 'total_files': int}``, or
    None when the query fails or returns malformed data.

    Note: the original annotation said ``-> str``, which was wrong — the
    function has always returned a dict or None.
    """
    logger_ = get_quilt_logger()
    try:
        raw_stats = query_manifest_content(
            s3_client,
            bucket=bucket,
            key=manifest_key,
            sql_stmt=SELECT_PACKAGE_STATS
        ).read()
        if raw_stats:
            stats = json.loads(raw_stats)
            # deliberately caught below as AssertionError; validates the
            # shape S3 Select is expected to return
            assert isinstance(stats['total_bytes'], int)
            assert isinstance(stats['total_files'], int)
            return stats
    except (
        AssertionError,
        botocore.exceptions.ClientError,
        json.JSONDecodeError,
        KeyError,
    ) as err:
        logger_.error("Unable to compute package stats via S3 select: %s", err)
    return None
def maybe_get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):
    """Return the (truncated) text contents of an S3 object when its extension
    is configured for deep indexing; otherwise return "".

    Args:
        bucket, key: object location
        ext: two-level, lowercased extension (e.g. ".csv.gz")
        etag, version_id: pin the exact object revision to fetch
        s3_client: boto3 S3 client
        size: object size in bytes (from head_object)
    Returns:
        str -- extracted text, trimmed to the per-bucket index byte budget
    """
    logger_ = get_quilt_logger()
    # Peel off a trailing .gz so the inner extension drives format detection.
    if ext.endswith('.gz'):
        compression = 'gz'
        ext = ext[:-len('.gz')]
    else:
        compression = None
    logger_.debug(
        "Entering maybe_get_contents (could run out of mem.) %s %s %s", bucket, key, version_id
    )

    def _fetch_body():
        # Single definition of the GET that every binary-format branch below
        # previously duplicated verbatim.
        obj = retry_s3(
            "get",
            bucket,
            key,
            size,
            etag=etag,
            s3_client=s3_client,
            version_id=version_id
        )
        return get_bytes(obj["Body"], compression)

    content = ""
    inferred_ext = infer_extensions(key, ext)
    if inferred_ext in get_content_index_extensions(bucket_name=bucket):
        # hoisted: assumes get_content_index_bytes is a pure per-bucket config
        # lookup (it was called repeatedly below) — TODO confirm
        byte_budget = get_content_index_bytes(bucket_name=bucket)
        if inferred_ext == ".fcs":
            body, info = extract_fcs(_fetch_body(), as_html=False)
            # be smart and just send column names to ES (instead of bloated full schema)
            # if this is not an HTML/catalog preview
            content = trim_to_bytes(f"{body}\n{info}", byte_budget)
        elif inferred_ext == ".ipynb":
            content = trim_to_bytes(
                # we have no choice but to fetch the entire notebook, because we
                # are going to parse it
                # warning: huge notebooks could spike memory here
                get_notebook_cells(
                    bucket,
                    key,
                    size,
                    compression,
                    etag=etag,
                    s3_client=s3_client,
                    version_id=version_id
                ),
                byte_budget,
            )
        elif inferred_ext == ".parquet":
            # Memory check happens *before* the GET so we never fetch
            # something we cannot deserialize.
            if size >= get_available_memory():
                print(f"{bucket}/{key} too large to deserialize; skipping contents")
                # at least index the key and other stats, but don't overrun memory
                # and fail indexing altogether
                return ""
            body, info = extract_parquet(
                _fetch_body(),
                as_html=False,
                skip_rows=(inferred_ext in SKIP_ROWS_EXTS),
                max_bytes=byte_budget,
            )
            # be smart and just send column names to ES (instead of bloated full schema)
            # if this is not an HTML/catalog preview
            columns = ','.join(list(info['schema']['names']))
            content = trim_to_bytes(f"{columns}\n{body}", byte_budget)
        elif inferred_ext == ".pdf":
            content = trim_to_bytes(
                extract_pdf(_fetch_body()),
                byte_budget,
            )
        elif inferred_ext in (".xls", ".xlsx"):
            body, _ = extract_excel(_fetch_body(), as_html=False)
            content = trim_to_bytes(
                body,
                byte_budget,
            )
        else:
            # plain text: get_plain_text does its own ranged GET with a limit
            content = get_plain_text(
                bucket,
                key,
                size,
                compression,
                etag=etag,
                s3_client=s3_client,
                version_id=version_id
            )
    return content
def extract_pdf(file_):
    """Get plain text from a PDF for searchability.
    Args:
        file_ - file-like object opened in binary mode, pointing to a PDF
    Returns:
        pdf text as a string, with runs of whitespace collapsed to single spaces
    Warning:
        This function can be slow. The 8-page test PDF takes ~10 sec to turn into a string.
    """
    txt = extract_pdf_text(file_, maxpages=MAX_PDF_PAGES)
    # crunch down space; extract_text inserts multiple spaces
    # between words, literal newlines, etc.
    return re.sub(r"\s+", " ", txt)
def extract_text(notebook_str):
    """ Extract code and markdown
    Args:
        * nb - notebook as a string
    Returns:
        * str - select code and markdown source (and outputs)
    Pre:
        * notebook is well-formed per notebook version 4
        * "cell_type" is defined for all cells
        * "source" defined for all "code" and "markdown" cells
    Throws:
        * Anything nbformat.reads() can throw :( which is diverse and poorly
        documented, hence the `except Exception` in handler()
    Notes:
        * Deliberately decided not to index output streams and display strings
        because they were noisy and low value
        * Tested this code against ~6400 Jupyter notebooks in
        s3://alpha-quilt-storage/tree/notebook-search/
        * Might be useful to index "cell_type" : "raw" in the future
    See also:
        * Format reference https://nbformat.readthedocs.io/en/latest/format_description.html
    """
    parsed = nbformat.reads(notebook_str, as_version=NB_VERSION)
    indexable = ("code", "markdown")
    # join the source of every code/markdown cell that actually has one
    return "\n".join(
        cell["source"]
        for cell in parsed.get("cells", [])
        if "source" in cell and cell.get("cell_type") in indexable
    )
def get_notebook_cells(bucket, key, size, compression, *, etag, s3_client, version_id):
    """Fetch an .ipynb object from S3 and return its code/markdown cell text.

    Returns "" (never raises) when the fetch, decode, or parse fails; errors
    are printed so the indexer can still record the object's metadata.
    """
    text = ""
    try:
        obj = retry_s3(
            "get",
            bucket,
            key,
            size,
            etag=etag,
            s3_client=s3_client,
            version_id=version_id
        )
        data = get_bytes(obj["Body"], compression)
        # notebooks are JSON, which must be valid UTF-8
        notebook = data.getvalue().decode("utf-8")
        try:
            text = extract_text(notebook)
        except (json.JSONDecodeError, nbformat.reader.NotJSONError):
            print(f"Invalid JSON in {key}.")
        except (KeyError, AttributeError) as err:
            print(f"Missing key in {key}: {err}")
        # there might be more errors than covered by test_read_notebook
        # better not to fail altogether
        except Exception as exc: # pylint: disable=broad-except
            print(f"Exception in file {key}: {exc}")
    except UnicodeDecodeError as uni:
        print(f"Unicode decode error in {key}: {uni}")
    return text
def get_plain_text(
        bucket,
        key,
        size,
        compression,
        *,
        etag,
        s3_client,
        version_id
) -> str:
    """Fetch an object and return its first lines as plain text ("" on decode failure)."""
    text = ""
    # assumes get_content_index_bytes is a pure per-bucket config lookup — TODO confirm
    byte_limit = get_content_index_bytes(bucket_name=bucket)
    try:
        obj = retry_s3(
            "get",
            bucket,
            key,
            size,
            etag=etag,
            s3_client=s3_client,
            limit=byte_limit,
            version_id=version_id
        )
        preview = get_preview_lines(
            obj["Body"],
            compression,
            ELASTIC_LIMIT_LINES,
            byte_limit,
        )
        text = '\n'.join(preview)
    except UnicodeDecodeError as ex:
        print(f"Unicode decode error in {key}", ex)
    return text
def make_s3_client():
    """Return an S3 client whose user agent is tagged so this lambda's
    requests can be filtered out of object analytics."""
    return boto3.client(
        "s3",
        config=botocore.config.Config(user_agent_extra=USER_AGENT_EXTRA),
    )
def map_event_name(event: dict):
    """Translate EventBridge event names into their S3-notification form."""
    name = event["eventName"]
    if name not in EVENTBRIDGE_TO_S3:
        # leave event type unchanged if we don't recognize it
        return name
    if name == "DeleteObject":
        # versioned buckets produce a delete marker instead of a hard delete
        if event["s3"]["object"].get("isDeleteMarker"):
            return EVENT_PREFIX["Removed"] + "DeleteMarkerCreated"
        return EVENT_PREFIX["Removed"] + "Delete"
    # all non-delete events just use the map
    return EVENTBRIDGE_TO_S3[name]
def shape_event(event: dict):
    """Validate *event* against EVENT_SCHEMA; return a normalized copy, or
    None when the schema check fails."""
    logger_ = get_quilt_logger()
    try:
        validate(
            instance=event,
            schema=EVENT_SCHEMA,
            # format_checker= required for for format:date-time validation
            # (we also need strict-rfc3339 in requirements.txt)
            format_checker=draft7_format_checker,
        )
    except ValidationError as error:
        logger_.error("Invalid event format: %s\n%s", error, event)
        return None
    # be a good citizen and don't modify params
    shaped = dict(event)
    shaped['eventName'] = map_event_name(event)
    return shaped
def handler(event, context):
    """enumerate S3 keys in event, extract relevant data, queue events, send to
    elastic via bulk() API

    Never raises for per-object failures (those are logged and skipped);
    re-raises retryable botocore errors so the whole SQS batch is retried.
    """
    logger_ = get_quilt_logger()
    # message is a proper SQS message, which either contains a single event
    # (from the bucket notification system) or batch-many events as determined
    # by enterprise/**/bulk_loader.py
    # An exception that we'll want to re-raise after the batch sends
    # NOTE(review): content_exception is never assigned or re-raised below —
    # confirm whether the deferred-raise logic was removed intentionally.
    content_exception = None
    batch_processor = DocumentQueue(context)
    s3_client = make_s3_client()
    for message in event["Records"]:
        body = json.loads(message["body"])
        body_message = json.loads(body["Message"])
        if "Records" not in body_message:
            # could be TEST_EVENT, or another unexpected event; skip it
            logger_.error("No 'Records' key in message['body']: %s", message)
            continue
        events = body_message["Records"]
        # event is a single S3 event
        for event_ in events:
            validated = shape_event(event_)
            if not validated:
                logger_.debug("Skipping invalid event %s", event_)
                continue
            event_ = validated
            logger_.debug("Processing %s", event_)
            try:
                event_name = event_["eventName"]
                # Process all Create:* and Remove:* events
                if not any(event_name.startswith(n) for n in EVENT_PREFIX.values()):
                    logger_.warning("Skipping unknown event type: %s", event_name)
                    continue
                bucket = event_["s3"]["bucket"]["name"]
                # In the grand tradition of IE6, S3 events turn spaces into '+'
                # TODO: check if eventbridge events do the same thing with +
                key = unquote_plus(event_["s3"]["object"]["key"])
                version_id = event_["s3"]["object"].get("versionId", None)
                # ObjectRemoved:Delete does not include "eTag"
                etag = event_["s3"]["object"].get("eTag", "")
                # synthetic events from bulk scanner might define lastModified
                last_modified = (
                    event_["s3"]["object"].get("lastModified") or event_["eventTime"]
                )
                # Get two levels of extensions to handle files like .csv.gz
                path = pathlib.PurePosixPath(key)
                ext1 = path.suffix
                ext2 = path.with_suffix('').suffix
                ext = (ext2 + ext1).lower()
                # Handle delete and deletemarker first and then continue so that
                # head_object and get_object (below) don't fail
                if event_name.startswith(EVENT_PREFIX["Removed"]):
                    do_index(
                        s3_client,
                        batch_processor,
                        event_name,
                        bucket=bucket,
                        etag=etag,
                        ext=ext,
                        key=key,
                        last_modified=last_modified,
                        version_id=version_id
                    )
                    continue
                try:
                    head = retry_s3(
                        "head",
                        bucket,
                        key,
                        s3_client=s3_client,
                        version_id=version_id,
                        etag=etag
                    )
                except botocore.exceptions.ClientError as first:
                    logger_.warning("head_object error: %s", first)
                    # "null" version sometimes results in 403s for buckets
                    # that have changed versioning, retry without it
                    if (first.response.get('Error', {}).get('Code') == "403"
                            and version_id == "null"):
                        try:
                            head = retry_s3(
                                "head",
                                bucket,
                                key,
                                s3_client=s3_client,
                                version_id=None,
                                etag=etag
                            )
                        except botocore.exceptions.ClientError as second:
                            # this will bypass the DLQ but that's the right thing to do
                            # as some listed objects may NEVER succeed head requests
                            # (e.g. foreign owner) and there's no reason to torpedo
                            # the whole batch (which might include good files)
                            logger_.warning("Retried head_object error: %s", second)
                            logger_.error("Fatal head_object, skipping event: %s", event_)
                            continue
                    # NOTE(review): when the error is not the 403/"null" case,
                    # control falls through with `head` unbound, which would
                    # raise NameError below — confirm intended.
                # backfill fields based on the head_object
                size = head["ContentLength"]
                last_modified = last_modified or head["LastModified"].isoformat()
                etag = head.get("etag") or etag
                version_id = head.get("VersionId") or version_id
                try:
                    text = maybe_get_contents(
                        bucket,
                        key,
                        ext,
                        etag=etag,
                        version_id=version_id,
                        s3_client=s3_client,
                        size=size
                    )
                # we still want an entry for this document in elastic so that, e.g.,
                # the file counts from elastic are correct
                # these exceptions can happen for a variety of reasons (e.g. glacier
                # storage class, index event arrives after delete has occurred, etc.)
                # given how common they are, we shouldn't fail the batch for this
                except Exception as exc: # pylint: disable=broad-except
                    text = ""
                    logger_.warning("Content extraction failed %s %s %s", bucket, key, exc)
                do_index(
                    s3_client,
                    batch_processor,
                    event_name,
                    bucket=bucket,
                    etag=etag,
                    ext=ext,
                    key=key,
                    last_modified=last_modified,
                    size=size,
                    text=text,
                    version_id=version_id
                )
            except botocore.exceptions.ClientError as boto_exc:
                if not should_retry_exception(boto_exc):
                    logger_.warning("Skipping non-fatal exception: %s", boto_exc)
                    continue
                logger_.critical("Failed record: %s, %s", event, boto_exc)
                raise boto_exc
    # flush the queue
    batch_processor.send_all()
def retry_s3(
        operation,
        bucket,
        key,
        size=None,
        limit=None,
        *,
        etag,
        version_id,
        s3_client
):
    """retry head or get operation to S3 with; stop before we run out of time.
    retry is necessary since, due to eventual consistency, we may not
    always get the required version of the object.
    """
    logger_ = get_quilt_logger()
    dispatch = {
        "head": s3_client.head_object,
        "get": s3_client.get_object,
    }
    if operation not in dispatch:
        raise ValueError(f"unexpected operation: {operation}")
    function_ = dispatch[operation]
    # Keyword arguments to function_
    arguments = {
        "Bucket": bucket,
        "Key": key
    }
    if operation == 'get' and size and limit:
        # can only request range if file is not empty
        arguments['Range'] = f"bytes=0-{min(size, limit) - 1}"
    if version_id:
        arguments['VersionId'] = version_id
    elif etag:
        arguments['IfMatch'] = etag
    logger_.debug("Entering @retry: %s, %s", operation, arguments)

    @retry(
        reraise=True,
        stop=stop_after_attempt(MAX_RETRY),
        wait=wait_exponential(multiplier=2, min=4, max=10),
        retry=(retry_if_exception(should_retry_exception))
    )
    def call():
        """local function so we can set stop_after_delay dynamically"""
        # TODO: remove all this, stop_after_delay is not dynamically loaded anymore
        return function_(**arguments)

    return call()
| quiltdata/quilt-compiler | lambdas/es/indexer/index.py | Python | apache-2.0 | 30,457 |
#!/usr/bin/env python
"""Wordnik.com's Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates."""
import sys
import os
import re
import urllib.request, urllib.parse, urllib.error
import http.client
import json
import datetime
import mimetypes
import base64
from .models import *
from groupdocs.FileStream import FileStream
from groupdocs import version
class RequestSigner(object):
    """Abstract interface for signing outgoing API requests."""

    def __init__(self):
        # Guard: only concrete subclasses may be instantiated.
        if type(self) is RequestSigner:
            raise Exception("RequestSigner is an abstract class and cannot be instantiated.")

    def signUrl(self, url):
        """Return *url* with any required signature applied."""
        raise NotImplementedError

    def signContent(self, requestBody, headers):
        """Return *requestBody* signed/transformed as required."""
        raise NotImplementedError
class DefaultRequestSigner(RequestSigner):
    """No-op signer used when the caller supplies no custom signing."""

    def signUrl(self, url):
        # identity: the URL is already complete
        return url

    def signContent(self, requestBody, headers):
        # identity: the body is sent unmodified
        return requestBody
class ApiClient(object):
    """Generic API client for Swagger client library builds.

    Handles URL building, request signing, (de)serialization of models,
    cookie propagation, and optional debug logging to a file.
    """
    def __init__(self, requestSigner=None):
        # fall back to the pass-through signer when none is supplied
        self.signer = requestSigner if requestSigner != None else DefaultRequestSigner()
        self.cookie = None
        self.headers = {'Groupdocs-Referer': '/'.join((version.__pkgname__, version.__version__))}
        self.__debug = False
    def setDebug(self, flag, logFilepath=None):
        """Enable/disable debug output; optionally redirect it to logFilepath."""
        self.__debug = flag
        self.__logFilepath = logFilepath
    def addHeaders(self, **headers):
        # NOTE(review): despite the name, this REPLACES self.headers (dropping
        # the default Groupdocs-Referer set in __init__) — confirm intended.
        self.headers = headers
    def callAPI(self, apiServer, resourcePath, method, queryParams, postData,
                headerParams=None, returnType=str):
        """Perform one HTTP call and deserialize the response.

        Returns None on 404 or empty body; a FileStream when returnType is
        FileStream; otherwise the JSON-decoded body. Raises ApiException on
        HTTP errors and non-2xx responses.
        """
        # optionally redirect stdout to the debug log for this call
        if self.__debug and self.__logFilepath:
            stdOut = sys.stdout
            logFile = open(self.__logFilepath, 'a')
            sys.stdout = logFile
        url = apiServer + resourcePath
        headers = {}
        # instance-level defaults first, then per-call headers override
        if self.headers:
            for param, value in self.headers.items():
                headers[param] = value
        if headerParams:
            for param, value in headerParams.items():
                headers[param] = value
        isFileUpload = False
        if not postData:
            headers['Content-type'] = 'text/html'
        elif isinstance(postData, FileStream):
            isFileUpload = True
            if postData.contentType:
                headers['Content-type'] = 'application/octet-stream'
            if postData.size:
                headers['Content-Length'] = str(postData.size)
        else:
            headers['Content-type'] = 'application/json'
        # replay the session cookie captured from a previous response
        if self.cookie:
            headers['Cookie'] = self.cookie
        data = None
        if queryParams:
            # Need to remove None values, these should not be sent
            sentQueryParams = {}
            for param, value in queryParams.items():
                if value != None:
                    sentQueryParams[param] = value
            if sentQueryParams:
                url = url + '?' + urllib.parse.urlencode(sentQueryParams)
        if method in ['POST', 'PUT', 'DELETE']:
            # choose the body representation: raw stream, empty, JSON, or scalar
            if isFileUpload:
                data = postData.inputStream
            elif not postData:
                data = ""
            elif type(postData) not in [str, int, float, bool]:
                data = self.signer.signContent(json.dumps(self.sanitizeForSerialization(postData)), headers)
            else:
                data = self.signer.signContent(postData, headers)
        if self.__debug:
            http.client.HTTPConnection.debuglevel = 1
        # urllib requires bytes for the request body (file streams excepted)
        if data and not isFileUpload:
            data = data.encode('utf-8')
        request = MethodRequest(method=method, url=self.encodeURI(self.signer.signUrl(url)), headers=headers,
                                data=data)
        try:
            # Make the request
            response = urllib.request.urlopen(request)
            if 'Set-Cookie' in response.headers:
                self.cookie = response.headers['Set-Cookie']
            if response.code == 200 or response.code == 201 or response.code == 202:
                if returnType == FileStream:
                    fs = FileStream.fromHttp(response)
                    if self.__debug: print("\n", "< Response Body:\n", ">>>stream info: fileName=%s contentType=%s size=%s" % (fs.fileName, fs.contentType, fs.size), "\n", sep="")
                    return fs if 'Transfer-Encoding' in response.headers or (fs.size != None and int(fs.size) > 0) else None
                else:
                    encoding = response.headers.get_content_charset()
                    if not encoding: encoding = 'iso-8859-1'
                    string = response.read().decode(encoding)
                    if self.__debug: print("\n", "< Response Body:\n", string, "\n", sep="")
                    try:
                        data = json.loads(string)
                    except ValueError: # PUT requests don't return anything
                        data = None
                    return data
            elif response.code == 404:
                return None
            else:
                # non-2xx/404: surface the server's error message if JSON
                encoding = response.headers.get_content_charset()
                if not encoding: encoding = 'iso-8859-1'
                string = response.read().decode(encoding)
                try:
                    msg = json.loads(string)['error_message']
                except ValueError:
                    msg = string
                raise ApiException(response.code, msg)
        except urllib.error.HTTPError as e:
            raise ApiException(e.code, e.msg)
        finally:
            # always release the upload stream and restore debug state
            if isFileUpload:
                try:
                    postData.inputStream.close()
                except Exception as e:
                    pass
            if self.__debug:
                http.client.HTTPConnection.debuglevel = 0
                if self.__logFilepath:
                    sys.stdout = stdOut
                    logFile.close()
    def toPathValue(self, obj):
        """Serialize a list to a CSV string, if necessary.
        Args:
            obj -- data object to be serialized
        Returns:
            string -- json serialization of object
        """
        if type(obj) == list:
            return ','.join(obj)
        else:
            return obj
    def sanitizeForSerialization(self, obj):
        """Dump an object into JSON for POSTing."""
        if not obj:
            return None
        elif type(obj) in [str, int, float, bool]:
            return obj
        elif type(obj) == list:
            return [self.sanitizeForSerialization(subObj) for subObj in obj]
        elif type(obj) == datetime.datetime:
            return obj.isoformat()
        else:
            # model instance (or plain dict): serialize its attributes,
            # dropping the swaggerTypes metadata and None values
            if type(obj) == dict:
                objDict = obj
            else:
                objDict = obj.__dict__
            return {key: self.sanitizeForSerialization(val)
                    for (key, val) in objDict.items()
                    if key != 'swaggerTypes' and val != None}
    def deserialize(self, obj, objClass):
        """Derialize a JSON string into an object.
        Args:
            obj -- string or object to be deserialized
            objClass -- class literal for deserialzied object, or string
                of class name
        Returns:
            object -- deserialized object"""
        if not obj:
            return None
        # Have to accept objClass as string or actual type. Type could be a
        # native Python type, or one of the model classes.
        if type(objClass) == str:
            if 'list[' in objClass:
                match = re.match('list\[(.*)\]', objClass)
                subClass = match.group(1)
                return [self.deserialize(subObj, subClass) for subObj in obj]
            if (objClass in ['int', 'float', 'dict', 'list', 'str']):
                objClass = eval(objClass)
            else: # not a native type, must be model class
                # NOTE(review): eval() on a type name from swaggerTypes —
                # safe only because the names come from generated code, not
                # user input.
                objClass = eval(objClass + '.' + objClass)
        if objClass in [str, int, float, bool]:
            return objClass(obj)
        elif objClass == datetime:
            # NOTE(review): `datetime` here is the *module* (see the file's
            # `import datetime`), while the eval above yields the
            # datetime.datetime *class*, so this branch looks unreachable for
            # string-typed specs — confirm how callers pass objClass.
            # Server will always return a time stamp in UTC, but with
            # trailing +0000 indicating no offset from UTC. So don't process
            # last 5 characters.
            return datetime.datetime.strptime(obj[:-5],
                                              "%Y-%m-%dT%H:%M:%S.%f")
        instance = objClass()
        for attr, attrType in instance.swaggerTypes.items():
            # tolerate lowerCamel / UpperCamel key variants in the payload
            lc_attr = attr[0].lower() + attr[1:]
            uc_attr = attr[0].upper() + attr[1:]
            real_attr = None
            if attr in obj:
                real_attr = attr
            elif lc_attr in obj:
                real_attr = lc_attr
            elif uc_attr in obj:
                real_attr = uc_attr
            if real_attr != None:
                value = obj[real_attr]
                if not value:
                    setattr(instance, real_attr, None)
                elif attrType in ['str', 'int', 'long', 'float', 'bool']:
                    attrType = eval(attrType)
                    try:
                        value = attrType(value)
                    except UnicodeEncodeError:
                        value = str(value)
                    setattr(instance, real_attr, value)
                elif 'list[' in attrType:
                    match = re.match('list\[(.*)\]', attrType)
                    subClass = match.group(1)
                    subValues = []
                    for subValue in value:
                        subValues.append(self.deserialize(subValue,
                                                          subClass))
                    setattr(instance, real_attr, subValues)
                else:
                    setattr(instance, real_attr, self.deserialize(value,
                                                                  attrType))
        return instance
    @staticmethod
    def encodeURI(url):
        # quote the URL but keep the listed punctuation, and restore any
        # percent signs that were double-escaped
        encoded = urllib.parse.quote(url, safe='~@#$&()*!=:;,.?/\'').replace("%25", "%")
        return encoded
    @staticmethod
    def encodeURIComponent(url):
        # stricter quoting for individual path/query components
        return urllib.parse.quote(url, safe='~()*!.\'')
    @staticmethod
    def readAsDataURL(filePath):
        """Return the file at *filePath* as a data: URL (base64-encoded)."""
        mimetype = mimetypes.guess_type(filePath, False)[0] or "application/octet-stream"
        filecontents = open(filePath, 'rb').read()
        return 'data:' + mimetype + ';base64,' + base64.b64encode(filecontents).decode()
class MethodRequest(urllib.request.Request):
    """A `urllib.request.Request` that honors an explicit HTTP method.

    Usage is the same as for `urllib.request.Request` except it also takes
    an optional `method` keyword argument. If supplied, `method` will be
    used instead of the default GET/POST inference.
    """
    def __init__(self, *args, **kwargs):
        # Pop our keyword before delegating so the base class never sees it.
        explicit_method = kwargs.pop('method', None)
        urllib.request.Request.__init__(self, *args, **kwargs)
        # Assign AFTER the base __init__: on Python 3, Request.__init__
        # unconditionally sets self.method (to None here), so assigning
        # first — as this class originally did — was silently overwritten
        # and get_method() returned None.
        if explicit_method is not None:
            self.method = explicit_method
    def get_method(self):
        """Return the explicit method if one was given, else the default."""
        return getattr(self, 'method', None) or urllib.request.Request.get_method(self)
class ApiException(Exception):
    """Raised for HTTP errors and non-success API responses.

    The status code is exposed as `code`; for backward compatibility all
    constructor arguments are packed into a single tuple in `args`.
    """
    def __init__(self, code, *args):
        # preserve the historical args layout: args == ((code, *details),)
        super(Exception, self).__init__((code,) + args)
        self.code = code
| liosha2007/temporary-groupdocs-python3-sdk | groupdocs/ApiClient.py | Python | apache-2.0 | 11,500 |
# -*- coding:utf-8 -*-
#!/usr/bin/env python
#
# Author: promisejohn
# Email: promise.john@gmail.com
#
from flask import Flask, jsonify, abort, make_response, request, url_for
from flask.ext.httpauth import HTTPBasicAuth
app = Flask(__name__)
auth = HTTPBasicAuth()
# In-memory "database": a module-level list of task dicts.
# NOTE(review): state is per-process and lost on restart; fine for a demo,
# not for production.
tasks = [
    {
        'id': 1,
        'title': u'学习 python',
        'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
        'done': False
    },
    {
        'id': 2,
        'title': u'Learn Python',
        'description': u'Need to find a good Python tutorial on the web',
        'done': False
    }
]
@auth.get_password
def getPassword(username):
    """Return the password for *username*, or None when the user is unknown."""
    return 'python' if username == 'hello' else None
@auth.error_handler
def unauthorized():
    """Return 401 with a JSON body instead of the default HTML page."""
    payload = jsonify({'error': 'Unauthorized access'})
    return make_response(payload, 401)
@app.route("/",methods=["GET"])
def index():
return "Hello World!"
@app.errorhandler(404)
def not_found(error):
    """Render 404s as JSON rather than Flask's default HTML error page."""
    payload = jsonify({"error": "Not Found."})
    return make_response(payload, 404)
def makePublicTask(task):
    """Copy *task*, replacing the internal 'id' with a public 'uri' field."""
    public = {}
    for key in task:
        if key == 'id':
            # _external=True makes url_for emit an absolute URL (scheme + host)
            public['uri'] = url_for('getTask', task_id=task['id'], _external=True)
        else:
            public[key] = task[key]
    return public
@app.route("/todo/api/v1.0/tasks",methods=["GET"])
@auth.login_required
def getTasks():
return jsonify({'tasks':map(makePublicTask,tasks)})
@app.route("/todo/api/v1.0/tasks/<int:task_id>",methods=["GET"])
def getTask(task_id):
task_list = filter(lambda t: t['id'] == task_id, tasks)
if len(task_list) == 0:
abort(404)
return jsonify({'task':task_list[0]})
@app.route("/todo/api/v1.0/tasks",methods=["POST"])
def createTask():
if not request.json or not 'title' in request.json:
abort(400)
task = {
'id': tasks[-1]['id'] + 1,
'title': request.json['title'],
'description': request.json.get('description', ""),
'done': False
}
tasks.append(task)
return jsonify({"task":task}),201
@app.route("/todo/api/v1.0/tasks/<int:task_id>",methods=['PUT'])
def updateTask(task_id):
task_list = filter(lambda t: t['id'] == task_id, tasks)
if len(task_list) == 0:
abort(404)
if not request.json:
abort(400)
if 'title' in request.json and type(request.json['title']) != unicode:
abort(400)
if 'description' in request.json and type(request.json['description']) is not unicode:
abort(400)
if 'done' in request.json and type(request.json['done']) is not bool:
abort(400)
task = task_list[0]
task['title'] = request.json.get('title',task['title'])
task['description'] = request.json.get('description',task['description'])
task['done'] = request.json.get('done',task['done'])
return jsonify({'task':task})
@app.route("/todo/api/v1.0/tasks/<int:task_id>",methods=["DELETE"])
def deleteTask(task_id):
task_list = filter(lambda t: t['id'] == task_id, tasks)
if len(task_list) == 0:
abort(404)
tasks.remove(task_list[0])
return jsonify({'result':True})
if __name__ == '__main__':
    app.run(debug=True)
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If some modules are not found, we use others, so no need to warn:
# pylint: disable=import-error
# Prefer setuptools; fall back to distutils (which lacks a `test` command,
# so a minimal stand-in is defined below).
try:
    from setuptools import setup
    from setuptools.command.build_py import build_py
    from setuptools.command.sdist import sdist
    from setuptools.command.test import test
except ImportError:
    from distutils.core import setup
    from distutils.cmd import Command
    from distutils.command.build_py import build_py
    from distutils.command.sdist import sdist
    class test(Command):
        # Minimal shim mirroring setuptools' test command interface.
        def __init__(self, *args, **kwargs):
            Command.__init__(self, *args, **kwargs)
        def initialize_options(self): pass
        def finalize_options(self): pass
        def run(self): self.run_tests()
        # NOTE(review): distutils' Command has no run_tests; this delegation
        # only works because local_test overrides run_tests — confirm.
        def run_tests(self): Command.run_tests(self)
        def set_undefined_options(self, opt, val):
            Command.set_undefined_options(self, opt, val)
def get_version():
    """Derive (pkg_version, full_version) from `git describe`."""
    import re
    import subprocess
    # git describe a commit using the most recent tag reachable from it.
    # Release tags start with v* (XXX what about other tags starting with v?)
    # and are of the form `v1.1.2`.
    #
    # The output `desc` will be of the form v1.1.2-2-gb92bef6[-dirty]:
    # - verpart v1.1.2
    # - revpart 2
    # - localpart gb92bef6[-dirty]
    desc = subprocess.check_output([
        'git', 'describe', '--dirty', '--long', '--match', 'v*',
    ])
    match = re.match(r'^v([^-]*)-([0-9]+)-(.*)$', desc)
    assert match is not None
    verpart, revpart, localpart = match.groups()
    if revpart > '0' or 'dirty' in localpart:
        # Development snapshot: build a post-release version.
        # Local part may be g0123abcd or g0123abcd-dirty.
        # Hyphens not kosher here, so replace by dots.
        full_version = '%s.post%s+%s' % (
            verpart, revpart, localpart.replace('-', '.'))
    else:
        # Exactly on a release tag.
        full_version = verpart
    # Strip the local part if there is one, to appease pkg_resources,
    # which handles only PEP 386, not PEP 440.
    pkg_version = (
        full_version[:full_version.find('+')]
        if '+' in full_version else full_version)
    # Sanity-check the result. XXX Consider checking the full PEP 386
    # and PEP 440 regular expressions here?
    assert '-' not in full_version, '%r' % (full_version,)
    assert '-' not in pkg_version, '%r' % (pkg_version,)
    assert '+' not in pkg_version, '%r' % (pkg_version,)
    return pkg_version, full_version
# Computed once at import time; consumed by the build/sdist hooks below.
pkg_version, full_version = get_version()
def write_version_py(path):
    """Write `__version__ = <full_version>` to *path*, only when it changed.

    Skipping the write when the content is identical avoids dirtying mtimes
    and triggering needless rebuilds.
    """
    try:
        with open(path, 'rb') as f:
            version_old = f.read()
    except IOError:
        # missing file: treat as changed so it gets written fresh
        version_old = None
    version_new = '__version__ = %r\n' % (full_version,)
    if version_old != version_new:
        # parenthesized print is valid under both Python 2 and Python 3
        # (the original bare `print` statement was Python-2-only syntax)
        print('writing %s' % (path,))
        with open(path, 'wb') as f:
            f.write(version_new)
def sha256_file(pathname):
    """Return a hashlib sha256 object fed with the contents of *pathname*.

    Reads in 64 KiB chunks so arbitrarily large files hash in constant memory.
    """
    import hashlib
    sha256 = hashlib.sha256()
    with open(pathname, 'rb') as source_file:
        # Sentinel must be b'': binary reads return bytes on Python 3, so the
        # original str sentinel '' never matched and the loop never ended
        # there. On Python 2, b'' == '' and behavior is unchanged.
        for block in iter(lambda: source_file.read(65536), b''):
            sha256.update(block)
    return sha256
def uptodate(path_in, path_out, path_sha256):
    """Check whether *path_out* was generated from the current *path_in*.

    *path_sha256* records two hex digests (input, then output), one per
    line. Returns True only when both digests match and the checksum file
    has no trailing data; a missing file means "not up to date".
    """
    import errno
    try:
        sha256_in = sha256_file(path_in).hexdigest()
        sha256_out = sha256_file(path_out).hexdigest()
        # ascii-encode so the comparison against the binary-mode read works
        # on Python 3 as well (bytes(str) without an encoding is a TypeError
        # there); hex digests are always ASCII, so Python 2 is unaffected.
        expected = ('%s\n%s\n' % (sha256_in, sha256_out)).encode('ascii')
        with open(path_sha256, 'rb') as file_sha256:
            actual = file_sha256.read(len(expected))
            # b'' (not '') is what a binary read returns at EOF on Python 3
            if actual != expected or file_sha256.read(1) != b'':
                return False
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:
            raise
        return False
    return True
def commit(path_in, path_out, path_sha256):
    """Atomically record the digests of *path_in*/*path_out* in *path_sha256*.

    Writes to a temp file, then os.rename()s over the target, so readers
    never observe a partially-written checksum file.
    """
    import os
    with open(path_sha256 + '.tmp', 'wb') as file_sha256:
        # ascii-encode because the file is binary-mode; hex digests are
        # always ASCII, so Python 2 behavior is unchanged and Python 3 works.
        file_sha256.write(('%s\n' % (sha256_file(path_in).hexdigest(),)).encode('ascii'))
        file_sha256.write(('%s\n' % (sha256_file(path_out).hexdigest(),)).encode('ascii'))
    os.rename(path_sha256 + '.tmp', path_sha256)
def generate_parser(lemonade, path_y):
    """Regenerate the parser for *path_y* (.y -> .py) via lemonade, if stale.

    Uses the .sha256 sidecar written by commit() to skip regeneration when
    neither the grammar nor the generated parser has changed.
    """
    import distutils.spawn
    import os.path
    import sys
    root = os.path.dirname(os.path.abspath(__file__))
    lemonade = os.path.join(root, *lemonade.split('/'))
    base, ext = os.path.splitext(path_y)
    assert ext == '.y'
    path_py = base + '.py'
    path_sha256 = base + '.sha256'
    if uptodate(path_y, path_py, path_sha256):
        return
    # parenthesized print is valid under both Python 2 and Python 3
    # (the original bare `print` statement was Python-2-only syntax)
    print('generating %s -> %s' % (path_y, path_py))
    distutils.spawn.spawn([
        '/usr/bin/env', 'PYTHONPATH=' + lemonade,
        sys.executable,
        lemonade + '/bin/lemonade',
        '-s',  # Write statistics to stdout.
        path_y,
    ])
    commit(path_y, path_py, path_sha256)
class local_build_py(build_py):
    # Hook version-file generation and grammar/parser generation into the
    # standard build_py step, so builds are self-contained.
    def run(self):
        write_version_py(version_py)
        for grammar in grammars:
            generate_parser(lemonade, grammar)
        build_py.run(self)
# Make sure the VERSION file in the sdist is exactly specified, even
# if it is a development version, so that we do not need to run git to
# discover it -- which won't work because there's no .git directory in
# the sdist.
class local_sdist(sdist):
    def make_release_tree(self, base_dir, files):
        """Build the sdist tree, then pin VERSION to the computed pkg_version."""
        import os
        sdist.make_release_tree(self, base_dir, files)
        version_file = os.path.join(base_dir, 'VERSION')
        print('updating %s' % (version_file,))
        # Write to temporary file first and rename over permanent not
        # just to avoid atomicity issues (not likely an issue since if
        # interrupted the whole sdist directory is only partially
        # written) but because the upstream sdist may have made a hard
        # link, so overwriting in place will edit the source tree.
        with open(version_file + '.tmp', 'wb') as f:
            f.write('%s\n' % (pkg_version,))
        os.rename(version_file + '.tmp', version_file)
class local_test(test):
    """`setup.py test` delegates to ./check.sh (which wraps the real runner)."""
    description = "Run check.sh"
    user_options = [('fail=', None, 'Use check.sh.')]  # for distutils
    def __init__(self, *args, **kwargs):
        test.__init__(self, *args, **kwargs)
        # distutils only invokes run_tests when test_suite is truthy
        self.test_suite = "not None"
    def run_tests(self):
        import subprocess
        subprocess.check_call(["./check.sh"])
        # parenthesized print is valid under both Python 2 and Python 3
        # (the original bare `print` statement was Python-2-only syntax)
        print("Using ./check.sh directly gives you more options for testing.")
# XXX These should be attributes of `setup', but helpful distutils
# doesn't pass them through when it doesn't know about them a priori.
# version_py: generated module holding __version__;
# lemonade: vendored parser generator; grammars: .y files to regenerate.
version_py = 'src/version.py'
lemonade = 'external/lemonade/dist'
grammars = [
    'src/grammar.y',
    'src/backends/cgpm_alter/grammar.y',
    'src/backends/cgpm_analyze/grammar.y',
    'src/backends/cgpm_schema/grammar.y',
]
# Package metadata and build wiring; the cmdclass entries install the
# customized build/sdist/test commands defined above.
setup(
    name='bayeslite',
    version=pkg_version,
    description='BQL database built on SQLite3',
    url='http://probcomp.csail.mit.edu/bayesdb',
    author='MIT Probabilistic Computing Project',
    author_email='bayesdb@mit.edu',
    license='Apache License, Version 2.0',
    tests_require=[
        'pandas',
        'pexpect',
        'pytest',
        'scipy',
    ],
    packages=[
        'bayeslite',
        'bayeslite.backends',
        'bayeslite.backends.cgpm_alter',
        'bayeslite.backends.cgpm_analyze',
        'bayeslite.backends.cgpm_schema',
        'bayeslite.plex',
        'bayeslite.shell',
        'bayeslite.tests',
        'bayeslite.weakprng',
    ],
    # map package names onto their (partly vendored) source directories
    package_dir={
        'bayeslite': 'src',
        'bayeslite.plex': 'external/plex/dist/Plex',
        'bayeslite.shell': 'shell/src',
        'bayeslite.tests': 'tests',
        'bayeslite.weakprng': 'external/weakprng/dist',
    },
    # Not in this release, perhaps later.
    #scripts=['shell/scripts/bayeslite'],
    test_suite = "not None", # Without it, run_tests is not called.
    cmdclass={
        'build_py': local_build_py,
        'sdist': local_sdist,
        'test': local_test,
    },
    package_data={
        'bayeslite.backends': ['*.schema.json'],
        'bayeslite.tests': [
            'dha.csv',
            'satellites.csv',
        ],
    },
)
| probcomp/bayeslite | setup.py | Python | apache-2.0 | 8,707 |
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
# NOTE(review): `%matplotlib inline` is IPython/Jupyter magic — this file is
# a notebook export and will not run as a plain Python script (SyntaxError
# on the next line) unless the magic is removed or guarded.
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
    """Train a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros", "random" or "he")

    Returns:
    parameters -- parameters learnt by the model

    Raises:
    ValueError -- if `initialization` is not one of the supported flags
                  (previously this fell through and caused a NameError).
    """
    grads = {}
    costs = []  # the cost is recorded once every 1000 iterations
    layers_dims = [X.shape[0], 10, 5, 1]
    # Initialize parameters dictionary; fail fast on an unknown flag.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    else:
        raise ValueError("unknown initialization: %r" % (initialization,))
    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)
        # Loss
        cost = compute_loss(a3, Y)
        # Backward propagation.
        grads = backward_propagation(X, Y, cache)
        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)
        # Print and record the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)
    # Plot the loss. Costs are sampled every 1000 iterations, so the x axis
    # is in units of thousands (the original label said "hundreds").
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    return parameters
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
    """Initialize every weight matrix and bias vector to zeros.

    Arguments:
    layers_dims -- python list containing the size of each layer.

    Returns:
    parameters -- dict mapping "W1", "b1", ..., "WL", "bL" to arrays where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1).
    """
    params = {}
    num_layers = len(layers_dims)
    for layer in range(1, num_layers):
        rows, cols = layers_dims[layer], layers_dims[layer - 1]
        params['W%d' % layer] = np.zeros((rows, cols))
        params['b%d' % layer] = np.zeros((rows, 1))
    return params
# parameters = model(train_X, train_Y, initialization = "zeros")
# print ("On the train set:")
# predictions_train = predict(train_X, train_Y, parameters)
# print ("On the test set:")
# predictions_test = predict(test_X, test_Y, parameters)
# plt.title("Model with Zeros initialization")
# axes = plt.gca()
# axes.set_xlim([-1.5,1.5])
# axes.set_ylim([-1.5,1.5])
# plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
    """Initialize weights from N(0, 1) scaled by 10 and biases to zeros.

    Arguments:
    layers_dims -- python list containing the size of each layer.

    Returns:
    parameters -- dict mapping "W1", "b1", ..., "WL", "bL" to arrays where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1).
    """
    np.random.seed(3)  # fixed seed so the "random" numbers are reproducible
    params = {}
    for layer in range(1, len(layers_dims)):
        fan_out, fan_in = layers_dims[layer], layers_dims[layer - 1]
        params['W' + str(layer)] = 10 * np.random.randn(fan_out, fan_in)
        params['b' + str(layer)] = np.zeros((fan_out, 1))
    return params
# parameters = model(train_X, train_Y, initialization = "random")
# print ("On the train set:")
# predictions_train = predict(train_X, train_Y, parameters)
# print ("On the test set:")
# predictions_test = predict(test_X, test_Y, parameters)
# plt.title("Model with large random initialization")
# axes = plt.gca()
# axes.set_xlim([-1.5,1.5])
# axes.set_ylim([-1.5,1.5])
# plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
    """He initialization: W ~ N(0, 1) * sqrt(2 / fan_in), b = 0.

    Arguments:
    layers_dims -- python list containing the size of each layer.

    Returns:
    parameters -- dict mapping "W1", "b1", ..., "WL", "bL" to arrays where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1).
    """
    np.random.seed(3)  # fixed seed for reproducibility
    params = {}
    num_layers = len(layers_dims) - 1
    for layer in range(1, num_layers + 1):
        fan_in = layers_dims[layer - 1]
        scale = np.sqrt(2. / fan_in)
        params['W' + str(layer)] = np.random.randn(layers_dims[layer], fan_in) * scale
        params['b' + str(layer)] = np.zeros((layers_dims[layer], 1))
    return params
# parameters = model(train_X, train_Y, initialization = "he")
# print ("On the train set:")
# predictions_train = predict(train_X, train_Y, parameters)
# print ("On the test set:")
# predictions_test = predict(test_X, test_Y, parameters) | jw2100/beginning.github.io | DeepLearning/wuenda/02_ImprovingDeepNeuralNetworksHyperparametertuningRegularization/week1-01-Initialization.py | Python | gpl-3.0 | 7,259 |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robot.model import SuiteVisitor
from robot.utils import plural_or_not, secs_to_timestr
from .highlighting import HighlightingStream
class DottedOutput(object):
    """Console listener that prints one marker per finished test.

    Markers: '.' pass, 'x' exit-on-failure tag, 'f' non-critical failure,
    highlighted 'F' critical failure.
    """

    def __init__(self, width=78, colors='AUTO', stdout=None, stderr=None):
        self._width = width
        self._stdout = HighlightingStream(stdout or sys.__stdout__, colors)
        self._stderr = HighlightingStream(stderr or sys.__stderr__, colors)
        self._markers_on_row = 0

    def start_suite(self, suite):
        # Only announce the top-level suite.
        if suite.parent:
            return
        self._stdout.write("Running suite '%s' with %d tests.\n"
                           % (suite.name, suite.test_count))
        self._stdout.write('=' * self._width + '\n')

    def end_test(self, test):
        self._wrap_row_if_full()
        self._markers_on_row += 1
        if test.passed:
            self._stdout.write('.')
        elif 'robot-exit' in test.tags:
            self._stdout.write('x')
        elif not test.critical:
            self._stdout.write('f')
        else:
            self._stdout.highlight('F', 'FAIL')

    def _wrap_row_if_full(self):
        # Start a fresh marker row once the current one hits the width limit.
        if self._markers_on_row == self._width:
            self._stdout.write('\n')
            self._markers_on_row = 0

    def end_suite(self, suite):
        # Print the summary once the top-level suite has finished.
        if suite.parent:
            return
        self._stdout.write('\n')
        StatusReporter(self._stdout, self._width).report(suite)
        self._stdout.write('\n')

    def message(self, msg):
        # Warnings and errors go to stderr immediately.
        if msg.level in ('WARN', 'ERROR'):
            self._stderr.error(msg.message, msg.level)

    def output_file(self, name, path):
        self._stdout.write('%-8s %s\n' % (name+':', path))
class StatusReporter(SuiteVisitor):
    """Visitor that prints failed-test details and the end-of-run summary."""

    def __init__(self, stream, width):
        self._stream = stream
        self._width = width

    def report(self, suite):
        # Visiting prints details of critical failures first (visit_test),
        # then we append the overall statistics banner.
        suite.visit(self)
        stats = suite.statistics
        total = stats.all.total
        elapsed = secs_to_timestr(suite.elapsedtime/1000.0)
        self._stream.write("%s\nRun suite '%s' with %d test%s in %s.\n\n"
                           % ('=' * self._width, suite.name, total,
                              plural_or_not(total), elapsed))
        self._stream.highlight(suite.status + 'ED', suite.status)
        self._stream.write('\n%s\n' % stats.message)

    def visit_test(self, test):
        # Only critical failures that were not exit-on-failure are detailed.
        needs_detail = (not test.passed and test.critical
                        and 'robot-exit' not in test.tags)
        if not needs_detail:
            return
        self._stream.write('-' * self._width + '\n')
        self._stream.highlight('FAIL')
        self._stream.write(': %s\n%s\n' % (test.longname,
                                           test.message.strip()))
| alexandrul-ci/robotframework | src/robot/output/console/dotted.py | Python | apache-2.0 | 3,228 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from core.brain.delete.url.reaction import Reaction
class ReactionCopy(Reaction):
    # Thin subclass of Reaction; currently adds no behaviour of its own.
    def __init__(self, *args, **kwargs):
        """Initialise the reaction.

        NOTE(review): *args/**kwargs are accepted but not forwarded to the
        Reaction base class -- confirm this is intentional.
        """
        super(ReactionCopy, self).__init__()
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
from oscar.core.loading import get_class
Node = get_class('dashboard.nav', 'Node')
def get_nodes(user):
    """
    Return the visible navigation nodes for the passed user
    """
    menu = create_menu(settings.OSCAR_DASHBOARD_NAVIGATION)
    visible = []
    for node in menu:
        filtered = node.filter(user)
        if not filtered:
            continue
        # don't append headings without children
        if filtered.has_children() or not filtered.is_heading:
            visible.append(filtered)
    return visible
def create_menu(menu_items, parent=None):
    """
    Create the navigation nodes based on a passed list of dicts
    """
    nodes = []
    default_fn = import_string(
        settings.OSCAR_DASHBOARD_DEFAULT_ACCESS_FUNCTION)
    for menu_dict in menu_items:
        if 'label' not in menu_dict:
            raise ImproperlyConfigured(
                "No label specified for menu item in dashboard")
        label = menu_dict['label']
        icon = menu_dict.get('icon', None)
        access_fn = menu_dict.get('access_fn', default_fn)
        children = menu_dict.get('children', [])
        if children:
            # Heading node: its children are built recursively beneath it.
            node = Node(label=label, icon=icon, access_fn=access_fn)
            create_menu(children, parent=node)
        else:
            # Leaf node: carries its own URL configuration.
            node = Node(label=label, icon=icon,
                        url_name=menu_dict.get('url_name', None),
                        url_kwargs=menu_dict.get('url_kwargs', None),
                        url_args=menu_dict.get('url_args', None),
                        access_fn=access_fn)
        if parent is None:
            nodes.append(node)
        else:
            parent.add_child(node)
    return nodes
| itbabu/django-oscar | src/oscar/apps/dashboard/menu.py | Python | bsd-3-clause | 1,921 |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
from pants.util.contextutil import temporary_dir
class BootstrapJvmToolsIntegrationTest(PantsRunIntegrationTest):
    """Checks that bootstrapped JVM tool artifacts are shared via the artifact cache."""

    def test_zinc_tool_reuse_between_scala_and_java(self):
        with temporary_dir() as artifact_cache:
            # Point cache reads and writes at a throwaway directory.
            bootstrap_args = [
                "bootstrap.bootstrap-jvm-tools",
                f"--cache-write-to=['{artifact_cache}']",
                f"--cache-read-from=['{artifact_cache}']",
            ]
            # Scala compilation should bootstrap and shade zinc.
            args = bootstrap_args + ["compile", "examples/src/scala/org/pantsbuild/example/hello"]
            pants_run = self.run_pants(args)
            self.assert_success(pants_run)
            self.assertTrue("[shade-compiler-interface]" in pants_run.stdout_data)
            # The shaded zinc artifact should be cached, so zinc-based Java compilation
            # should reuse it instead of bootstrapping and shading again, even after clean-all.
            pants_run = self.run_pants(
                bootstrap_args
                + ["clean-all", "compile", "examples/src/java/org/pantsbuild/example/hello/simple"]
            )
            self.assert_success(pants_run)
            self.assertFalse("[shade-compiler-interface]" in pants_run.stdout_data)
| tdyas/pants | tests/python/pants_test/tasks/test_bootstrap_jvm_tools_integration.py | Python | apache-2.0 | 1,477 |
#
# Copyright (C) 2009 Chris Newton <redshodan@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# Author: Chris Newton <redshodan@gmail.com>
# $Revision$
#
import os, sys, unittest2, subprocess, signal, termios
import log
# The LsshTestCase instance currently executing (assigned in setUp()).
THE_TEST = None
# Name and password of the dedicated test account; filled in by init().
TESTUSER = None
TESTPASS = None
TESTPORT = "2227"
# `who` selectors for LsshTestCase.runCmd().
ROOT = 1
TEST = 2
USER = 3
# Message screen(1) prints when the test user has no sessions.
SCREEN_NONE = "No Sockets found in /var/run/screen/S-test."
##
## unittest behavior adjustment
##
class LsshTestCase(unittest2.TestCase):
    """Base test case adding a per-test SIGALRM timeout and helpers for
    running shell commands as root, the test user or the invoking user."""

    def __init__(self, name, timeout=10):
        unittest2.TestCase.__init__(self, name)
        self.test_name = str(self)
        # Wrap the real test method so _run() can add logging and tty cleanup.
        self.orgtest = getattr(self, name)
        setattr(self, name, self._run)
        # A test method may carry its own `timeout` attribute.
        if hasattr(self.orgtest, "timeout"):
            self.timeout = self.orgtest.timeout
        else:
            self.timeout = timeout
        self.error = ""

    def _sigalarm(self, sig, frame):
        # SIGALRM handler: the test overran its timeout; kill the subprocess.
        log.info("_sigalarm: test failed to complete in time")
        self.error = "Test failed to complete in time: "
        self.cmd_timeout = True
        self.cmd.kill()

    def setUp(self):
        global THE_TEST
        unittest2.TestCase.setUp(self)
        log.info("---------Starting test: %s(timeout=%d)---------",
                 self.test_name, self.timeout)
        THE_TEST = self
        self.cmd_timeout = False
        self.error = ""
        # Arm the per-test timeout.
        signal.signal(signal.SIGALRM, self._sigalarm)
        signal.alarm(self.timeout)

    def tearDown(self):
        signal.alarm(0)  # cancel any pending timeout
        unittest2.TestCase.tearDown(self)
        log.info("---------Ending test: %s---------", self.test_name)

    def runCmd(self, who, cmd, **kwargs):
        """Run `cmd` in a shell as ROOT, TEST (the test user) or USER.

        Pass fail=False to skip the success assertion. Returns
        [returncode, stdout, stderr].
        """
        if who == ROOT:
            args = ["sudo"]
        elif who == TEST:
            args = ["sudo", "-u", TESTUSER, "-i"]
        elif who == USER:
            # NOTE(review): this seeds args with ["bash", "-c", cmd] and then
            # appends "bash -c cmd" again below, so the command words become
            # positional parameters -- confirm this duplication is intended.
            args = ["bash", "-c", cmd]
        else:
            raise Exception("Invalid 'who' for runCmd")
        args.extend(["bash", "-c"])
        args.append(cmd)
        expect_fail = True
        if "fail" in kwargs:
            expect_fail = kwargs["fail"]
            del kwargs["fail"]
        (ret, _) = LsshTestCase._runCmd(args, kwargs, self)
        if expect_fail:
            if expect_fail is True:
                expect_fail = " ".join(args)
            self.assertIs(
                self.cmd.returncode, 0,
                expect_fail + (" : %sret=%d: %s" % (self.error,
                                                    self.cmd.returncode,
                                                    ret[1])))
        return [self.cmd.returncode] + list(ret)

    @staticmethod
    def _runCmd(args, kwargs=None, obj=None):
        """Spawn `args` via subprocess and wait for it to finish.

        Returns (communicate_output, Popen). When `obj` is given, the Popen
        is also stored on obj.cmd so the SIGALRM handler can kill it.
        """
        # BUG FIX: `kwargs={}` was a shared mutable default that this body
        # mutates; use None as the sentinel instead.
        if kwargs is None:
            kwargs = {}
        if "stdout" not in kwargs:
            kwargs["stdout"] = subprocess.PIPE
        if "stderr" not in kwargs:
            kwargs["stderr"] = subprocess.STDOUT
        log.info("Starting cmd: %s, %s" % (str(args), str(kwargs)))
        cmd = subprocess.Popen(args, **kwargs)
        if obj:
            obj.cmd = cmd
        try:
            ret = cmd.communicate()
        except IOError as e:
            if e.errno == 4:  # EINTR -- typically our SIGALRM timeout fired
                ret = ["", ""]
                # BUG FIX: was `self.cmd.returncode = -127`, but a staticmethod
                # has no `self` (NameError); mark the failure on `cmd` directly.
                cmd.returncode = -127
            else:
                # BUG FIX: other IOErrors previously fell through with `ret`
                # unbound; propagate them instead.
                raise
        log.info("Finised cmd: ret=%d: %s", cmd.returncode, ret[0])
        return (ret, cmd)

    def enableSshKey(self):
        # Restore the test user's authorized_keys from the saved copy.
        self.runCmd(TEST, "cp -f ~/.ssh/authorized_keys_ ~/.ssh/authorized_keys")

    def disableSshKey(self):
        # Remove the test user's authorized_keys so key auth fails.
        self.runCmd(TEST, "rm -f ~/.ssh/authorized_keys")

    def _run(self):
        """Replacement test entry point: run the original test, log any
        exception, and always restore the tty afterwards."""
        try:
            self.orgtest()
        except KeyboardInterrupt:
            log.exception("Keyboard Interrupt")
            raise
        except:
            log.exception("Exception during test")
            raise
        finally:
            try:
                # Restore the tty in case ssh trashed it.
                termios.tcsetattr(sys.stdin, termios.TCSANOW, ORIG_TTY)
            except:
                pass
def CriticalTest(func):
    """Decorator: if the wrapped test raises, stop the whole test run."""
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except:
            # Halt the suite -- a critical test failed.
            THE_TEST._resultForDoCleanups.stop()
            raise
    return wrapper
def init():
    """One-time test-harness setup: logging, environment checks, tty capture
    and a random password for the TESTUSER account."""
    global TESTUSER, TESTPASS, ORIG_TTY
    log.init("test")
    log.logger.setLevel(log.DEBUG)
    # The ssh tests must not inherit a forwarded X display.
    if "DISPLAY" in os.environ:
        del os.environ["DISPLAY"]
    if "TESTUSER" in os.environ:
        TESTUSER = os.environ["TESTUSER"]
    else:
        print ("The environment variable TESTUSER is not set. It must be set " +
               "to a test user")
        sys.exit(-1)
    # Remember the tty state so LsshTestCase._run() can restore it after ssh.
    ORIG_TTY = termios.tcgetattr(sys.stdin)
    # Make a test user password
    r = os.urandom(12)
    p = []
    for c in r:
        c = ord(c)
        # Map each random byte to an ASCII letter: bit 3 picks the case,
        # the byte modulo 26 picks the letter.
        if c & 0x8:
            p.append(chr(0x41 + c % 26))
        else:
            p.append(chr(0x61 + c % 26))
    TESTPASS = "".join(p)
    print "Will set the '%s' user's password to: %s" % (TESTUSER, TESTPASS)
| redshodan/lazarus-ssh | tests/utils.py | Python | lgpl-2.1 | 5,573 |
# API for the TI eQEP hardware driver I wrote
# We need OS operations for this
import os
import select
class eQEP(object):
    """Wrapper around the TI eQEP (quadrature encoder) sysfs driver."""

    # Counting modes
    MODE_ABSOLUTE = 0
    MODE_RELATIVE = 1

    # eQEP Controller Locations (sysfs base paths on the BeagleBone)
    eQEP0 = "/sys/devices/ocp.2/48300000.epwmss/48300180.eqep"
    eQEP1 = "/sys/devices/ocp.2/48302000.epwmss/48302180.eqep"
    eQEP2 = "/sys/devices/ocp.2/48304000.epwmss/48304180.eqep"

    def _write_attr(self, name, value):
        # Write `value` to the sysfs attribute file `name`; the file is
        # closed even if the write raises.
        attribute = open(self.path + "/" + name, "w")
        try:
            attribute.write(str(value))
        finally:
            attribute.close()

    def _read_attr(self, name):
        # Read a single integer from the sysfs attribute file `name`.
        attribute = open(self.path + "/" + name, "r")
        try:
            return int(attribute.readline())
        finally:
            attribute.close()

    def set_mode(self, mode):
        """Set the counting mode (MODE_ABSOLUTE or MODE_RELATIVE)."""
        self._write_attr("mode", mode)

    def get_mode(self):
        """Return the current counting mode."""
        return self._read_attr("mode")

    def set_period(self, period):
        """Set the unit timer period of the eQEP hardware."""
        self._write_attr("period", period)

    def get_period(self):
        """Return the unit timer period of the eQEP hardware."""
        return self._read_attr("period")

    def set_position(self, position):
        """Set the current position of the encoder hardware."""
        self._write_attr("position", position)

    def get_position(self):
        """Return the immediate position of the encoder hardware."""
        return self._read_attr("position")

    def poll_position(self):
        """Block until the driver signals new data, then return the position."""
        self.poller.poll(-1)
        # Seek back to the start so the attribute's current value is re-read.
        os.lseek(self.fd, 0, 0)
        return int(os.read(self.fd, 16))

    def __init__(self, path, mode):
        """path -- sysfs base path of the controller (e.g. eQEP.eQEP1)
        mode -- initial counting mode (MODE_ABSOLUTE or MODE_RELATIVE)"""
        self.path = path
        # Set the mode and reset the position.
        self.set_mode(mode)
        self.set_position(0)
        # Open the position attribute for polling.
        # BUG FIX: O_NONBLOCK was previously passed as os.open's *permission
        # mode* argument (third parameter) instead of being OR-ed into flags.
        self.fd = os.open(self.path + "/position", os.O_RDONLY | os.O_NONBLOCK)
        # POLLPRI: sysfs attributes signal new data as priority events.
        self.poller = select.poll()
        self.poller.register(self.fd, select.POLLPRI)

    def __del__(self):
        # Release the poll registration and the file descriptor.
        self.poller.unregister(self.fd)
        os.close(self.fd)
| ValRose/Rose_Bone | PythonLibraries/eqep.py | Python | mit | 3,612 |
import json
import subprocess
import sys
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
from cricket.events import EventSource
from cricket.model import TestMethod
from cricket.pipes import PipedTestResult, PipedTestRunner
def enqueue_output(out, queue):
    """A utility method for consuming piped output from a subprocess.

    Reads `out` one line at a time and pushes each stripped, UTF-8-decoded
    line onto `queue` for consumption in a separate thread; closes `out`
    at end of stream.
    """
    line = out.readline()
    while line:
        queue.put(line.strip().decode('utf-8'))
        line = out.readline()
    out.close()
class Executor(EventSource):
    "A wrapper around the subprocess that executes tests."

    def __init__(self, project, count, labels):
        """project -- provides execute_commandline() and confirm_exists()
        count -- total number of tests expected to run
        labels -- test labels passed through to the command line"""
        self.project = project
        self.proc = subprocess.Popen(
            self.project.execute_commandline(labels),
            stdin=None,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            bufsize=1,
            close_fds='posix' in sys.builtin_module_names
        )
        # Piped stdout/stderr reads are blocking; therefore, we need to
        # do all our readline calls in a background thread, and use a
        # queue object to store lines that have been read.
        self.stdout = Queue()
        t = Thread(target=enqueue_output, args=(self.proc.stdout, self.stdout))
        t.daemon = True
        t.start()
        self.stderr = Queue()
        t = Thread(target=enqueue_output, args=(self.proc.stderr, self.stderr))
        t.daemon = True
        t.start()
        # The TestMethod object currently under execution.
        self.current_test = None
        # An accumulator of output from the tests. If buffer is None,
        # then the test suite isn't currently running - it's in suite
        # setup/teardown.
        self.buffer = None
        self.output = ''
        # An accumulator for error output from the tests.
        self.error_buffer = []
        # The timestamp when current_test started
        self.start_time = None
        # The total count of tests under execution
        self.total_count = count
        # The count of tests that have been executed.
        self.completed_count = 0
        # The count of specific test results.
        self.result_count = {}

    @property
    def is_running(self):
        "Return True if this runner currently running."
        return self.proc.poll() is None

    @property
    def any_failed(self):
        # Total number of completed tests that ended in a failing state.
        return sum(self.result_count.get(state, 0) for state in TestMethod.FAILING_STATES)

    def terminate(self):
        "Stop the executor."
        self.proc.terminate()

    def poll(self):
        """Poll the runner looking for new test output.

        Returns True when the caller should requeue this poll, False when
        the suite has finished or the subprocess stopped unexpectedly.
        """
        stopped = False
        finished = False
        # Check to see if the subprocess is still running.
        # If it isn't, raise an error.
        if self.proc is None:
            stopped = True
        elif self.proc.poll() is not None:
            stopped = True
        try:
            # Drain every complete line currently available on stdout.
            while True:
                line = self.stdout.get(block=False)
                if not line:
                    continue
                # Process all the full lines that are available
                # Look for a separator.
                if line in (PipedTestResult.RESULT_SEPARATOR, PipedTestRunner.START_TEST_RESULTS, PipedTestRunner.END_TEST_RESULTS):
                    if self.buffer is None:
                        # Preamble is finished. Set up the line buffer.
                        self.buffer = []
                    else:
                        # Start of new test result; record the last result
                        # Then, work out what content goes where.
                        # buffer[0] is the pre-test JSON, buffer[-1] the
                        # post-test JSON, everything between is raw output.
                        output = ''
                        for buff in self.buffer[1:-2]:
                            output += buff + '\n'
                        pre, post = [json.loads(self.buffer[0]), json.loads(self.buffer[-1])]
                        # Map the single-character wire status onto TestMethod states.
                        # NOTE(review): an unrecognised status code would leave
                        # `status`/`error` unbound and raise NameError below.
                        if post['status'] == 'OK':
                            status = TestMethod.STATUS_PASS
                            error = None
                        elif post['status'] == 's':
                            status = TestMethod.STATUS_SKIP
                            error = 'Skipped: ' + post.get('error')
                        elif post['status'] == 'F':
                            status = TestMethod.STATUS_FAIL
                            error = post.get('error')
                        elif post['status'] == 'x':
                            status = TestMethod.STATUS_EXPECTED_FAIL
                            error = post.get('error')
                        elif post['status'] == 'u':
                            status = TestMethod.STATUS_UNEXPECTED_SUCCESS
                            error = None
                        elif post['status'] == 'E':
                            status = TestMethod.STATUS_ERROR
                            error = post.get('error')
                        # Increase the count of executed tests
                        self.completed_count = self.completed_count + 1
                        # Get the start and end times for the test
                        start_time = float(pre['start_time'])
                        end_time = float(post['end_time'])
                        self.current_test.description = post['description']
                        self.current_test.set_result(
                            status=status,
                            output=output + post.get('output'),
                            error=error,
                            duration=end_time - start_time,
                        )
                        # Work out how long the suite has left to run (approximately)
                        if self.start_time is None:
                            self.start_time = start_time
                        total_duration = end_time - self.start_time
                        time_per_test = total_duration / self.completed_count
                        remaining_time = (self.total_count - self.completed_count) * time_per_test
                        # Humanise the remaining time for display.
                        if remaining_time > 4800:
                            remaining = '%s hours' % int(remaining_time / 2400)
                        elif remaining_time > 2400:
                            remaining = '%s hour' % int(remaining_time / 2400)
                        elif remaining_time > 120:
                            remaining = '%s mins' % int(remaining_time / 60)
                        elif remaining_time > 60:
                            remaining = '%s min' % int(remaining_time / 60)
                        else:
                            remaining = '%ss' % int(remaining_time)
                        # Update test result counts
                        self.result_count.setdefault(status, 0)
                        self.result_count[status] = self.result_count[status] + 1
                        # Notify the display to update.
                        self.emit('test_end', test_path=self.current_test.path, result=status, remaining_time=remaining)
                        # Clear the decks for the next test.
                        self.current_test = None
                        self.buffer = []
                    if line == PipedTestRunner.END_TEST_RESULTS:
                        # End of test execution.
                        # Mark the runner as finished, and move back
                        # to a pre-test state in the results.
                        finished = True
                        self.buffer = None
                else:
                    # Not a separator line, so it's actual content.
                    if self.buffer is None:
                        # Suite isn't running yet - just display the output
                        # as a status update line.
                        self.emit('test_status_update', update=line)
                    else:
                        # Suite is running - have we got an active test?
                        # Doctest (and some other tools) output invisible escape sequences.
                        # Strip these if they exist.
                        if line.startswith('\x1b'):
                            line = line[line.find('{'):]
                        # Non-JSON lines are plain test output; accumulate and
                        # forward them to the display.
                        try:
                            json.loads(line)
                        except ValueError:
                            self.output += line + '\n'
                            self.emit('test_output_update', output=self.output)
                        # Store the cleaned buffer
                        self.buffer.append(line)
                        # If we don't have an currently active test, this line will
                        # contain the path for the test.
                        if self.current_test is None:
                            try:
                                # No active test; first line tells us which test is running.
                                pre = json.loads(line)
                            except ValueError:
                                # NOTE(review): 'suit_end' looks like a typo for
                                # 'suite_end' -- no other emitter uses this event
                                # name; confirm which name listeners expect.
                                self.emit('suit_end')
                                return True
                            self.current_test = self.project.confirm_exists(pre['path'])
                            self.emit('test_start', test_path=pre['path'])
        except Empty:
            # queue.get() raises an exception when the queue is empty.
            # This means there is no more output to consume at this time.
            pass
        # If we're not finished, requeue the event.
        if finished:
            if self.error_buffer:
                self.emit('suite_end', error='\n'.join(self.error_buffer))
            else:
                self.emit('suite_end')
            return False
        elif stopped:
            # Subprocess died mid-test: record the active test as failed.
            if self.current_test:
                self.current_test.set_result(
                    status = TestMethod.STATUS_FAIL,
                    output=self.output,
                    error='Test output ended unexpectedly',
                    duration=0,
                )
                # Notify the display to update.
                self.emit('test_end', test_path=self.current_test.path, result=TestMethod.STATUS_FAIL, remaining_time=0)
            # Suite has stopped producing output.
            #if self.error_buffer:
            #    self.emit('suite_error', error=b'\n'.join(self.error_buffer))
            #else:
            #    self.emit('suite_error', error='Test output ended unexpectedly')
            # Suite has finished; don't requeue
            return False
        else:
            # Still running - requeue event.
            return True
| hashkat/hashkat | reproducers/cricket/executor.py | Python | gpl-3.0 | 10,708 |
import pkg_resources
# Resolve the installed package version; fall back to an empty string when
# running from a source checkout without an installed distribution.
try:
    __version__ = pkg_resources.get_distribution("clarity_scripts").version
except pkg_resources.DistributionNotFound:
    __version__ = ""
| EdinburghGenomics/clarity_scripts | EPPs/__init__.py | Python | mit | 167 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/chernomirdinmacuvele/Documents/workspace/PescArt2.0/UserInt/ui_Ficha_Recolha_Tab_Amostras.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_frmFichaRecolhaAmostras(object):
    """pyuic5-generated UI class for the sample-collection ("Ficha de
    Recolha") form: a two-tab widget plus back/exit buttons.

    NOTE: this file is auto-generated; regenerating from the .ui source
    will overwrite any manual edits.
    """

    def setupUi(self, frmFichaRecolhaAmostras):
        # Build and lay out all widgets on the passed top-level form.
        frmFichaRecolhaAmostras.setObjectName("frmFichaRecolhaAmostras")
        frmFichaRecolhaAmostras.resize(1060, 701)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Ignored)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(frmFichaRecolhaAmostras.sizePolicy().hasHeightForWidth())
        frmFichaRecolhaAmostras.setSizePolicy(sizePolicy)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/newPrefix/clipboard.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        frmFichaRecolhaAmostras.setWindowIcon(icon)
        # Stylesheet: background images for the form and the tab widget.
        frmFichaRecolhaAmostras.setStyleSheet("#tabAmostra {\n"
"background-image:url(:/newPrefix/Background/Almora.jpg);\n"
"}\n"
"QTabBar::tab {color: Black;}\n"
"\n"
"#frmFichaRecolhaAmostras {\n"
"Background-color: #7FFFD4;\n"
"border-image:url(:/newPrefix/Background/Almora.jpg)\n"
"}")
        self.gridLayout_6 = QtWidgets.QGridLayout(frmFichaRecolhaAmostras)
        self.gridLayout_6.setObjectName("gridLayout_6")
        # Tab widget with the "Viagem" and "Amos. Especie" pages.
        self.tabAmostra = QtWidgets.QTabWidget(frmFichaRecolhaAmostras)
        self.tabAmostra.setTabShape(QtWidgets.QTabWidget.Rounded)
        self.tabAmostra.setObjectName("tabAmostra")
        self.tabViagem = QtWidgets.QWidget()
        self.tabViagem.setObjectName("tabViagem")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.tabViagem)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.GLtabViagem = QtWidgets.QGridLayout()
        self.GLtabViagem.setObjectName("GLtabViagem")
        self.gridLayout_2.addLayout(self.GLtabViagem, 0, 0, 1, 1)
        self.tabAmostra.addTab(self.tabViagem, "")
        self.tbAmostras = QtWidgets.QWidget()
        self.tbAmostras.setObjectName("tbAmostras")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.tbAmostras)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.GLtabAmostEspecies = QtWidgets.QGridLayout()
        self.GLtabAmostEspecies.setObjectName("GLtabAmostEspecies")
        self.gridLayout_4.addLayout(self.GLtabAmostEspecies, 0, 0, 1, 1)
        self.tabAmostra.addTab(self.tbAmostras, "")
        self.gridLayout_6.addWidget(self.tabAmostra, 0, 0, 1, 1)
        # Bottom row: spacer pushes the Voltar/Sair buttons to the right.
        self.gridLayout_5 = QtWidgets.QGridLayout()
        self.gridLayout_5.setObjectName("gridLayout_5")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_5.addItem(spacerItem, 0, 0, 2, 1)
        self.PBVoltar = QtWidgets.QPushButton(frmFichaRecolhaAmostras)
        self.PBVoltar.setObjectName("PBVoltar")
        self.gridLayout_5.addWidget(self.PBVoltar, 0, 1, 2, 1)
        self.PBSair = QtWidgets.QPushButton(frmFichaRecolhaAmostras)
        self.PBSair.setObjectName("PBSair")
        self.gridLayout_5.addWidget(self.PBSair, 0, 2, 2, 1)
        self.gridLayout_6.addLayout(self.gridLayout_5, 1, 0, 1, 1)
        self.retranslateUi(frmFichaRecolhaAmostras)
        self.tabAmostra.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(frmFichaRecolhaAmostras)

    def retranslateUi(self, frmFichaRecolhaAmostras):
        # Apply the translated (Portuguese) UI strings.
        _translate = QtCore.QCoreApplication.translate
        frmFichaRecolhaAmostras.setWindowTitle(_translate("frmFichaRecolhaAmostras", "Ficha de Recolha"))
        self.tabAmostra.setTabText(self.tabAmostra.indexOf(self.tabViagem), _translate("frmFichaRecolhaAmostras", "Viagem"))
        self.tabAmostra.setTabText(self.tabAmostra.indexOf(self.tbAmostras), _translate("frmFichaRecolhaAmostras", "Amos. Especie"))
        self.PBVoltar.setText(_translate("frmFichaRecolhaAmostras", "Voltar"))
        self.PBSair.setText(_translate("frmFichaRecolhaAmostras", "Sair"))
import images_rc
| InUrSys/PescArt2.0 | GeneratedFiles/ui_Ficha_Recolha_Tab_Amostras.py | Python | gpl-3.0 | 4,188 |
# -*- coding: utf-8 -*-
from mock import patch, MagicMock, Mock
from django.utils import six
from django.test import RequestFactory
import pytest
import nav.web.ldapauth
from nav.web import auth
# Canned (unsaved) accounts reused by the test classes below.
LDAP_ACCOUNT = auth.Account(login='knight', ext_sync='ldap', password='shrubbery')
PLAIN_ACCOUNT = auth.Account(login='knight', password='shrubbery')
REMOTE_USER_ACCOUNT = auth.Account(
    login='knight', ext_sync='REMOTE_USER', password='shrubbery'
)
class FakeSession(dict):
    """Dict-backed stand-in for a session object: stores items like a dict
    but turns the persistence hooks into no-ops."""

    def set_expiry(self, *_):
        """No-op; nothing to expire in tests."""

    def save(self, *_):
        """No-op; nothing to persist in tests."""
@patch("nav.web.auth.Account.save", new=MagicMock(return_value=True))
@patch("nav.web.auth.Account.objects.get", new=MagicMock(return_value=LDAP_ACCOUNT))
class TestLdapAuthenticate(object):
def test_authenticate_should_return_account_when_ldap_says_yes(self):
ldap_user = Mock()
ldap_user.is_admin.return_value = None # mock to avoid database access
with patch("nav.web.ldapauth.available", new=True):
with patch("nav.web.ldapauth.authenticate", return_value=ldap_user):
assert auth.authenticate('knight', 'shrubbery') == LDAP_ACCOUNT
def test_authenticate_should_return_false_when_ldap_says_no(self):
with patch("nav.web.ldapauth.available", new=True):
with patch("nav.web.ldapauth.authenticate", return_value=False):
assert not auth.authenticate('knight', 'shrubbery')
def test_authenticate_should_fallback_when_ldap_is_disabled(self):
with patch("nav.web.ldapauth.available", new=False):
assert auth.authenticate('knight', 'shrubbery') == LDAP_ACCOUNT
@patch("nav.web.auth.Account.save", new=MagicMock(return_value=True))
@patch("nav.web.auth.Account.objects.get", new=MagicMock(return_value=PLAIN_ACCOUNT))
@patch("nav.web.ldapauth.available", new=False)
class TestNormalAuthenticate(object):
def test_authenticate_should_return_account_when_password_is_ok(self):
with patch("nav.web.auth.Account.check_password", return_value=True):
assert auth.authenticate('knight', 'shrubbery') == PLAIN_ACCOUNT
def test_authenticate_should_return_false_when_ldap_says_no(self):
with patch("nav.web.auth.Account.check_password", return_value=False):
assert not auth.authenticate('knight', 'rabbit')
class TestRemoteUserAuthenticate(object):
    """Tests for auth.authenticate_remote_user().

    A fake request is built with/without the REMOTE_USER header, and the
    auth config is patched so that remote-user authentication is enabled.
    """

    def test_authenticate_remote_user_should_return_account_if_header_set(self):
        r = RequestFactory()
        request = r.get('/')
        request.META['REMOTE_USER'] = 'knight'
        with patch("nav.web.auth._config.getboolean", return_value=True):
            with patch(
                "nav.web.auth.Account.objects.get",
                new=MagicMock(return_value=REMOTE_USER_ACCOUNT),
            ):
                assert auth.authenticate_remote_user(request) == REMOTE_USER_ACCOUNT

    def test_authenticate_remote_user_should_return_none_if_header_not_set(self):
        r = RequestFactory()
        request = r.get('/')
        with patch("nav.web.auth._config.getboolean", return_value=True):
            # 'is None' rather than '== None' (PEP 8 / flake8 E711)
            assert auth.authenticate_remote_user(request) is None

    def test_authenticate_remote_user_should_return_false_if_account_locked(self):
        r = RequestFactory()
        request = r.get('/')
        request.META['REMOTE_USER'] = 'knight'
        with patch("nav.web.auth._config.getboolean", return_value=True):
            with patch(
                "nav.web.auth.Account.objects.get", return_value=REMOTE_USER_ACCOUNT
            ):
                with patch("nav.web.auth.LogEntry.add_log_entry"):
                    with patch("nav.web.auth.Account.locked", return_value=True):
                        # 'is False' rather than '== False' (PEP 8 / flake8 E712)
                        assert auth.authenticate_remote_user(request) is False
class TestGetStandardUrls(object):
    """Tests for auth.get_login_url()/get_logout_url().

    When remote-user mode is enabled (patched getboolean) and a custom URL
    is configured (patched get), the configured URL must win over the
    default LOGIN_URL/LOGOUT_URL.
    """

    def test_get_login_url_default(self):
        r = RequestFactory()
        request = r.get('/')
        raw_login_url = auth.LOGIN_URL
        result = auth.get_login_url(request)
        # startswith: the login URL may carry a redirect query parameter.
        assert result.startswith(raw_login_url)
    def test_get_login_url_remote_login_url(self):
        r = RequestFactory()
        request = r.get('/')
        request.META['REMOTE_USER'] = 'knight'
        with patch("nav.web.auth._config.getboolean", return_value=True):
            with patch("nav.web.auth._config.get", return_value='foo'):
                result = auth.get_login_url(request)
                assert result == 'foo'
    def test_get_logout_url_default(self):
        r = RequestFactory()
        request = r.get('/')
        result = auth.get_logout_url(request)
        assert result == auth.LOGOUT_URL
    def test_get_logout_url_remote_logout_url(self):
        r = RequestFactory()
        request = r.get('/')
        request.META['REMOTE_USER'] = 'knight'
        with patch("nav.web.auth._config.getboolean", return_value=True):
            with patch("nav.web.auth._config.get", return_value='foo'):
                result = auth.get_logout_url(request)
                assert result == 'foo'
class TestGetRemoteUsername(object):
    """Tests for auth.get_remote_username(): it should only return the
    REMOTE_USER header value when remote-user mode is enabled AND the
    header is present."""

    def test_no_request(self):
        with patch("nav.web.auth._config.getboolean", return_value=False):
            result = auth.get_remote_username(None)
            assert result is None
    def test_not_enabled(self):
        r = RequestFactory()
        request = r.get('/')
        with patch("nav.web.auth._config.getboolean", return_value=False):
            result = auth.get_remote_username(request)
            assert result is None
    def test_enabled_but_remote_user_unset(self):
        r = RequestFactory()
        request = r.get('/')
        with patch("nav.web.auth._config.getboolean", return_value=True):
            result = auth.get_remote_username(request)
            assert result is None
    def test_enabled_and_remote_user_set(self):
        r = RequestFactory()
        request = r.get('/')
        request.META['REMOTE_USER'] = 'knight'
        with patch("nav.web.auth._config.getboolean", return_value=True):
            result = auth.get_remote_username(request)
            assert result == 'knight'
class TestLoginRemoteUser(object):
    """Tests for auth.login_remote_user(): on success it must attach the
    account to the request and record the account id in the session."""

    def test_remote_user_unset(self):
        r = RequestFactory()
        request = r.get('/')
        request.session = FakeSession()
        with patch("nav.web.auth.get_remote_username", return_value=False):
            auth.login_remote_user(request)
            # No username -> neither request.account nor session entry is set.
            assert not getattr(request, 'account', False)
            assert auth.ACCOUNT_ID_VAR not in request.session
    def test_remote_user_set(self):
        r = RequestFactory()
        request = r.get('/')
        request.session = FakeSession()
        with patch("nav.web.auth.get_remote_username", return_value=True):
            with patch(
                "nav.web.auth.authenticate_remote_user",
                return_value=REMOTE_USER_ACCOUNT,
            ):
                auth.login_remote_user(request)
                assert hasattr(request, 'account')
                assert request.account == REMOTE_USER_ACCOUNT
                assert auth.ACCOUNT_ID_VAR in request.session
                assert (
                    request.session.get(auth.ACCOUNT_ID_VAR, None)
                    == REMOTE_USER_ACCOUNT.id
                )
class TestLdapUser(object):
    """Regression tests for nav.web.ldapauth.LDAPUser.

    Each test patches the parsed ldap config section via patch.dict and
    supplies a Mock LDAP connection, so no LDAP server is needed.
    """

    @patch.dict(
        "nav.web.ldapauth._config._sections",
        {
            'ldap': {
                '__name__': 'ldap',
                'basedn': 'empty',
                'manager': 'empty',
                'manager_password': 'empty',
                'uid_attr': 'sAMAccountName',
                'encoding': 'utf-8',
            },
        },
    )
    def test_search_result_with_referrals_should_be_considered_empty(self):
        """LP#1207737: search results consisting only of referrals
        (entries with a None DN) must be treated as an empty result."""
        conn = Mock(
            **{
                'search_s.return_value': [
                    (None, "restaurant"),
                    (None, "at the end of the universe"),
                ]
            }
        )
        u = nav.web.ldapauth.LDAPUser("zaphod", conn)
        with pytest.raises(nav.web.ldapauth.UserNotFound):
            u.search_dn()
    @patch.dict(
        "nav.web.ldapauth._config._sections",
        {
            'ldap': {
                '__name__': 'ldap',
                'basedn': 'empty',
                'lookupmethod': 'direct',
                'uid_attr': 'uid',
                'encoding': 'utf-8',
                'suffix': '',
            }
        },
    )
    def test_non_ascii_password_should_work(self):
        """LP#1213818: bind() must not crash on non-ASCII passwords."""
        conn = Mock(
            **{
                'simple_bind_s.side_effect': lambda x, y: (
                    six.text_type(x),
                    six.text_type(y),
                ),
            }
        )
        u = nav.web.ldapauth.LDAPUser(u"zaphod", conn)
        u.bind(u"æøå")
    @patch.dict(
        "nav.web.ldapauth._config._sections",
        {
            'ldap': {
                '__name__': 'ldap',
                'basedn': 'cn=users,dc=example,dc=org',
                'lookupmethod': 'direct',
                'uid_attr': 'uid',
                'encoding': 'utf-8',
                'group_search': '(member=%%s)',
            },
        },
    )
    def test_is_group_member_for_non_ascii_user_should_not_raise(self):
        """LP#1301794: group search must not crash on non-ASCII usernames."""
        def fake_search(base, scope, filtr):
            # Force text conversion the way the real search would; raises
            # if the values are improperly encoded.
            six.text_type(base)
            six.text_type(filtr)
            return []
        conn = Mock(
            **{
                'search_s.side_effect': fake_search,
            }
        )
        u = nav.web.ldapauth.LDAPUser(u"Ægir", conn)
        u.is_group_member('cn=noc-operators,cn=groups,dc=example,dc=com')
@patch.dict(
    "nav.web.ldapauth._config._sections",
    {
        'ldap': {
            '__name__': 'ldap',
            'basedn': 'cn=users,dc=example,dc=org',
            'lookupmethod': 'direct',
            'uid_attr': 'uid',
            'encoding': 'utf-8',
            'require_entitlement': 'president',
            'admin_entitlement': 'boss',
            'entitlement_attribute': 'eduPersonEntitlement',
        },
    },
)
class TestLdapEntitlements(object):
    """Entitlement checks against the user_zaphod/user_marvin fixtures:
    zaphod carries both 'president' and 'boss', marvin carries neither."""

    def test_required_entitlement_should_be_verified(self, user_zaphod):
        u = nav.web.ldapauth.LDAPUser("zaphod", user_zaphod)
        assert u.has_entitlement('president')
    def test_missing_entitlement_should_not_be_verified(self, user_marvin):
        u = nav.web.ldapauth.LDAPUser("marvin", user_marvin)
        assert not u.has_entitlement('president')
    def test_admin_entitlement_should_be_verified(self, user_zaphod):
        u = nav.web.ldapauth.LDAPUser("zaphod", user_zaphod)
        assert u.is_admin()
    def test_missing_admin_entitlement_should_be_verified(self, user_marvin):
        u = nav.web.ldapauth.LDAPUser("marvin", user_marvin)
        assert not u.is_admin()
@patch.dict(
    "nav.web.ldapauth._config._sections",
    {
        'ldap': {
            '__name__': 'ldap',
            'basedn': 'cn=users,dc=example,dc=org',
            'lookupmethod': 'direct',
            'uid_attr': 'uid',
            'encoding': 'utf-8',
            'require_entitlement': 'president',
            'admin_entitlement': '',
            'entitlement_attribute': 'eduPersonEntitlement',
        },
    },
)
def test_no_admin_entitlement_option_should_make_no_admin_decision(user_zaphod):
    """With admin_entitlement unconfigured, is_admin() must stay undecided
    (None) instead of answering True/False."""
    u = nav.web.ldapauth.LDAPUser("zaphod", user_zaphod)
    assert u.is_admin() is None
#
# Pytest fixtures
#
@pytest.fixture
def user_zaphod():
    """Mock LDAP connection whose search returns a user holding both the
    'president' and 'boss' entitlements."""
    return Mock(
        **{
            'search_s.return_value': [
                (
                    u'uid=zaphod,cn=users,dc=example,dc=org',
                    {u'eduPersonEntitlement': [b'president', b'boss']},
                )
            ]
        }
    )
@pytest.fixture
def user_marvin():
    """Mock LDAP connection whose search returns a user with only the
    'paranoid' entitlement (neither 'president' nor 'boss')."""
    return Mock(
        **{
            'search_s.return_value': [
                (
                    u'uid=marvin,cn=users,dc=example,dc=org',
                    {u'eduPersonEntitlement': [b'paranoid']},
                )
            ]
        }
    )
| hmpf/nav | tests/unittests/general/webfront_test.py | Python | gpl-3.0 | 12,069 |
# Prefer setuptools; fall back to the legacy distutils on old environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Single source of truth for the release version (also used in download_url).
VERSION = "0.1.9"

# Packaging metadata, handed to setup() below.
config = {
    'name': 'rosetta_sip_factory',
    'version': VERSION,
    'author': 'Sean Mosely',
    'author_email': 'sean.mosely@gmail.com',
    'packages': ['rosetta_sip_factory'],
    'description': ('Python library for building Submission Information '
                    'Packages for the Ex Libris Rosetta digital '
                    'preservation application'),
    'install_requires': ['lxml>=3.6.4', 'mets_dnx'],
    'download_url': ('https://github.com/NLNZDigitalPreservation/'
                     'rosetta_sip_factory/archive/v' + VERSION + '.tar.gz'),
    'license': 'MIT',
}

setup(**config)
#!/usr/bin/env python
#
# (C) 2005 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
"""
A commandline tool that creates users and creates user configuration
files. This configuration file is used by KPIClient
"""
from Kamaelia.Community.AM.Kamaelia.KPIFramework.KPI.DB import LKH
import sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print "usage:", sys.argv[0], "dbfile usercfg1 [usercfg2].. "
sys.exit(1)
dbfile = sys.argv[1]
for index in range(2, len(sys.argv)):
LKH.createUser(dbfile, sys.argv[index])
| sparkslabs/kamaelia_ | Sketches/AM/KPIPackage/Kamaelia/Community/AM/Kamaelia/KPIFramework/Tools/createuser.py | Python | apache-2.0 | 1,450 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017 by Delphix. All rights reserved.
#
# Program Name : auth.py
# Description : Delphix API Example for Python
# Author : Unknown
# Created : 2017-08-09
# Version : v1.0.0
#
# Requirements :
# 1.) Change values below as required
#
# Usage: python auth.py
#
#########################################################
# DELPHIX CORP #
# Please make changes to the parameters below as req'd! #
#########################################################
# Connection settings for the example.
# NOTE(review): credentials and appliance IP are hard-coded for demo purposes;
# never do this outside of example code.
DMUSER='admin'
DMPASS='Admin-12'
DELAYTIMESEC=10
# DELAYTIMESEC is not referenced below -- presumably kept for parity with the
# other examples in this directory; TODO confirm.
BASEURL='http://172.16.129.132/resources/json/delphix'
#########################################################
#         NO CHANGES REQUIRED BELOW THIS POINT          #
#########################################################
import requests
import json
#
# Request Headers ...
#
req_headers = {
    'Content-Type': 'application/json'
}
#
# Python session, also handles the cookies ...
#
session = requests.session()
#
# Authenticate: create an API session pinned to API version 1.7.0 ...
#
print ("Authenticating URL " + BASEURL + " ... ")
formdata = '{ "type": "APISession", "version": { "type": "APIVersion", "major": 1, "minor": 7, "micro": 0 } }'
r = session.post(BASEURL+'/session', data=formdata, headers=req_headers, allow_redirects=False)
print (r.text)
#
# Login with the credentials above (session cookie carries the auth) ...
#
print ("Login ... ")
formdata = '{ "type": "LoginRequest", "username": "' + DMUSER + '", "password": "' + DMPASS + '" }'
r = session.post(BASEURL+'/login', data=formdata, headers=req_headers, allow_redirects=False)
print (r.text)
#
# About: simple authenticated GET to verify the session works ...
#
print ("About ... ")
r = session.get(BASEURL+'/about')
print (r.text)
#
# JSON Parsing: pull individual fields out of the /about response ...
#
print ("JSON Parsing Examples ...")
j = json.loads(r.text)
print (j['status'])
print (j['result']['buildTitle'])
print (j['result']['apiVersion']['major'])
| duckback00/dxapikit | language_examples/auth.py | Python | apache-2.0 | 2,363 |
#
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from couchbase.exceptions import (NotFoundError, DeltaBadvalError)
from couchbase.tests.base import ConnectionTestCase
class ArithmeticTest(ConnectionTestCase):
    """Tests for the counter (incr/decr) operations of the Couchbase client."""

    def test_trivial_incrdecr(self):
        # Counter on a missing key with initial=1: the initial value is
        # stored as-is (delta is not applied on creation).
        key = self.gen_key("trivial_incrdecr")
        self.cb.remove(key, quiet=True)
        rv_arith = self.cb.counter(key, initial=1, delta=1)
        rv_get = self.cb.get(key)
        self.assertEqual(rv_arith.value, 1)
        self.assertEqual(int(rv_get.value), 1)
        rv = self.cb.counter(key)
        self.assertEqual(rv.value, 2)
        rv = self.cb.counter(key, delta=-1)
        self.assertEqual(rv.value, 1)
        self.assertEqual(int(self.cb.get(key).value), 1)
        rv = self.cb.counter(key, delta=-1)
        self.assertEqual(rv.value, 0)
        self.assertEqual(int(self.cb.get(key).value), 0)
    def test_incr_notfound(self):
        # Without 'initial', counter() on a missing key must raise.
        key = self.gen_key("incr_notfound")
        self.cb.remove(key, quiet=True)
        self.assertRaises(NotFoundError, self.cb.counter, key)
    def test_incr_badval(self):
        # counter() on a non-numeric value must raise DeltaBadvalError.
        key = self.gen_key("incr_badval")
        self.cb.upsert(key, "THIS IS SPARTA")
        self.assertRaises(DeltaBadvalError, self.cb.counter, key)
    def test_incr_multi(self):
        keys = self.gen_key_list(amount=5, prefix="incr_multi")
        def _multi_lim_assert(expected):
            # Assert every key currently holds the expected counter value.
            for k, v in self.cb.get_multi(keys).items():
                self.assertTrue(k in keys)
                self.assertEqual(v.value, expected)
        self.cb.remove_multi(keys, quiet=True)
        self.cb.counter_multi(keys, initial=5)
        _multi_lim_assert(5)
        self.cb.counter_multi(keys)
        _multi_lim_assert(6)
        self.cb.counter_multi(keys, delta=-1)
        _multi_lim_assert(5)
        self.cb.counter_multi(keys, delta=10)
        _multi_lim_assert(15)
        self.cb.counter_multi(keys, delta=-6)
        _multi_lim_assert(9)
        # One missing key makes the whole multi operation fail.
        self.cb.remove(keys[0])
        self.assertRaises(NotFoundError, self.cb.counter_multi, keys)
    def test_incr_extended(self):
        key = self.gen_key("incr_extended")
        self.cb.remove(key, quiet=True)
        rv = self.cb.counter(key, initial=10)
        self.assertEqual(rv.value, 10)
        # The CAS from a counter result must be usable for an upsert.
        srv = self.cb.upsert(key, "42", cas=rv.cas)
        self.assertTrue(srv.success)
        # test with multiple values?
        klist = self.gen_key_list(amount=5, prefix="incr_extended_list")
        self.cb.remove_multi(klist, quiet=True)
        rvs = self.cb.counter_multi(klist, initial=40)
        [ self.assertEqual(x.value, 40) for x in rvs.values() ]
        self.assertEqual(sorted(list(rvs.keys())), sorted(klist))
if __name__ == '__main__':
    # Fix: 'unittest' was referenced here without ever being imported,
    # so running this file directly raised NameError. Import locally to
    # keep the module's import-time dependencies unchanged.
    import unittest
    unittest.main()
| mnunberg/couchbase-python-client | couchbase/tests/cases/arithmetic_t.py | Python | apache-2.0 | 3,301 |
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Yield related codes.
The normal "yield", and the Python 3.3 or higher "yield from" variant.
"""
from .ErrorCodes import getErrorExitCode, getReleaseCode
from .Helpers import generateChildExpressionsCode
def generateYieldCode(to_name, expression, emit, context):
    """Generate C code for a plain "yield" expression.

    The value of the child expression is handed to the YIELD helper; the
    result assigned to "to_name" is only a borrowed reference, so no
    cleanup is registered for it.
    """
    value_name, = generateChildExpressionsCode(
        expression = expression,
        emit       = emit,
        context    = context
    )

    # In handlers, the active exception must be preserved across the yield.
    if expression.isExceptionPreserving():
        yield_helper = "YIELD_IN_HANDLER"
    else:
        yield_helper = "YIELD"

    # Ownership of the yielded value transfers to the helper: hand over our
    # cleanup reference if we hold one, otherwise take a new reference.
    if context.needsCleanup(value_name):
        yield_arg = value_name
    else:
        yield_arg = "INCREASE_REFCOUNT( %s )" % value_name

    emit(
        "%s = %s( generator, %s );" % (to_name, yield_helper, yield_arg)
    )

    if context.needsCleanup(value_name):
        context.removeCleanupTempName(value_name)

    getErrorExitCode(
        check_name = to_name,
        emit       = emit,
        context    = context
    )

    # Result comes as only borrowed, so intentionally no
    # context.addCleanupTempName(to_name) here.
def generateYieldFromCode(to_name, expression, emit, context):
    """Generate C code for a Python 3.3+ "yield from" expression.

    Unlike plain yield, the result of YIELD_FROM is an owned reference, so
    it is registered for cleanup at the end.
    """
    value_name, = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    # In handlers, we must preserve/restore the exception.
    preserve_exception = expression.isExceptionPreserving()
    emit(
        "%s = %s( generator, %s );" % (
            to_name,
            "YIELD_FROM" if not preserve_exception else "YIELD_FROM_IN_HANDLER",
            # The helper consumes a reference: pass ours if we own one,
            # otherwise take a fresh reference.
            value_name
              if context.needsCleanup(value_name) else
            "INCREASE_REFCOUNT( %s )" % value_name
        )
    )
    # Ensure the iterated value is tracked for release below, regardless of
    # whether we owned it before the emit.
    if not context.needsCleanup(value_name):
        context.addCleanupTempName(value_name)
    getReleaseCode(
        release_name = value_name,
        emit = emit,
        context = context
    )
    getErrorExitCode(
        check_name = to_name,
        emit = emit,
        context = context
    )
    # The yield-from result is owned by us and must be cleaned up later.
    context.addCleanupTempName(to_name)
| wfxiang08/Nuitka | nuitka/codegen/YieldCodes.py | Python | apache-2.0 | 2,878 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Module for the downloading, checking, and unpacking of necessary files into the source tree.
"""
import argparse
import configparser
import enum
import hashlib
import shutil
import subprocess
import sys
import urllib.request
from pathlib import Path
from _common import ENCODING, USE_REGISTRY, ExtractorEnum, get_logger, \
get_chromium_version, add_common_params
from _extraction import extract_tar_file, extract_with_7z, extract_with_winrar
# Import the vendored 'schema' library from third_party/ without leaving the
# directory permanently on sys.path.
sys.path.insert(0, str(Path(__file__).parent / 'third_party'))
import schema #pylint: disable=wrong-import-position
sys.path.pop(0)
# Constants
class HashesURLEnum(str, enum.Enum):
    """Supported schemes for externally-hosted hash files.

    Members are str subclasses so they compare equal to their plain-string
    spelling in INI values.
    """

    chromium = 'chromium'
class HashMismatchError(BaseException):
    """Raised when a download's computed hash differs from its expected value."""
class DownloadInfo: #pylint: disable=too-few-public-methods
    """Representation of an downloads.ini file for downloading files"""
    # Hash algorithm keys recognized inside a section.
    _hashes = ('md5', 'sha1', 'sha256', 'sha512')
    # Separator for 'hash_url' values: processor|filename|url
    hash_url_delimiter = '|'
    # Keys that must be present and non-empty in every section.
    _nonempty_keys = ('url', 'download_filename')
    # Keys that may be omitted from a section.
    _optional_keys = (
        'version',
        'strip_leading_dirs',
    )
    # Keys exposed verbatim as attributes by _DownloadsProperties.__getattr__.
    _passthrough_properties = (*_nonempty_keys, *_optional_keys, 'extractor', 'output_path')
    # Interpolation variables made available to every INI section.
    _ini_vars = {
        '_chromium_version': get_chromium_version(),
    }
    @staticmethod
    def _is_hash_url(value):
        """Return True if value looks like 'processor|filename|url' with a
        known processor scheme."""
        return value.count(DownloadInfo.hash_url_delimiter) == 2 and value.split(
            DownloadInfo.hash_url_delimiter)[0] in iter(HashesURLEnum)
    # Validation schema applied to each parsed INI file.
    _schema = schema.Schema({
        schema.Optional(schema.And(str, len)): {
            **{x: schema.And(str, len)
               for x in _nonempty_keys},
            'output_path': (lambda x: str(Path(x).relative_to(''))),
            **{schema.Optional(x): schema.And(str, len)
               for x in _optional_keys},
            schema.Optional('extractor'): schema.Or(ExtractorEnum.TAR, ExtractorEnum.SEVENZIP,
                                                    ExtractorEnum.WINRAR),
            schema.Optional(schema.Or(*_hashes)): schema.And(str, len),
            schema.Optional('hash_url'): lambda x: DownloadInfo._is_hash_url(x), #pylint: disable=unnecessary-lambda
        }
    })
    class _DownloadsProperties: #pylint: disable=too-few-public-methods
        """Attribute-style view over a single downloads.ini section."""
        def __init__(self, section_dict, passthrough_properties, hashes):
            self._section_dict = section_dict
            self._passthrough_properties = passthrough_properties
            self._hashes = hashes
        def has_hash_url(self):
            """
            Returns a boolean indicating whether the current
            download has a hash URL"""
            return 'hash_url' in self._section_dict
        def __getattr__(self, name):
            """Resolve passthrough keys from the INI section; 'hashes'
            returns a dict of all configured hash entries."""
            if name in self._passthrough_properties:
                return self._section_dict.get(name, fallback=None)
            if name == 'hashes':
                hashes_dict = {}
                for hash_name in (*self._hashes, 'hash_url'):
                    value = self._section_dict.get(hash_name, fallback=None)
                    if value:
                        if hash_name == 'hash_url':
                            # Split into (processor, filename, url) parts.
                            value = value.split(DownloadInfo.hash_url_delimiter)
                        hashes_dict[hash_name] = value
                return hashes_dict
            raise AttributeError('"{}" has no attribute "{}"'.format(type(self).__name__, name))
    def _parse_data(self, path):
        """
        Parses an INI file located at path
        Raises schema.SchemaError if validation fails
        """
        def _section_generator(data):
            # Yield (section_name, dict) pairs, skipping the DEFAULT section
            # and the interpolation-only _ini_vars entries.
            for section in data:
                if section == configparser.DEFAULTSECT:
                    continue
                yield section, dict(
                    filter(lambda x: x[0] not in self._ini_vars, data.items(section)))
        new_data = configparser.ConfigParser(defaults=self._ini_vars)
        with path.open(encoding=ENCODING) as ini_file:
            new_data.read_file(ini_file, source=str(path))
        try:
            self._schema.validate(dict(_section_generator(new_data)))
        except schema.SchemaError as exc:
            get_logger().error('downloads.ini failed schema validation (located in %s)', path)
            raise exc
        return new_data
    def __init__(self, ini_paths):
        """Reads an iterable of pathlib.Path to download.ini files"""
        self._data = configparser.ConfigParser()
        for path in ini_paths:
            self._data.read_dict(self._parse_data(path))
    def __getitem__(self, section):
        """
        Returns an object with keys as attributes and
        values already pre-processed strings
        """
        return self._DownloadsProperties(self._data[section], self._passthrough_properties,
                                         self._hashes)
    def __contains__(self, item):
        """
        Returns True if item is a name of a section; False otherwise.
        """
        return self._data.has_section(item)
    def __iter__(self):
        """Returns an iterator over the section names"""
        return iter(self._data.sections())
    def properties_iter(self):
        """Iterator for the download properties sorted by output path"""
        return sorted(
            map(lambda x: (x, self[x]), self), key=(lambda x: str(Path(x[1].output_path))))
class _UrlRetrieveReportHook: #pylint: disable=too-few-public-methods
    """Progress reporter for urllib.request.urlretrieve, writing a single
    continuously-rewritten status line to stdout."""

    def __init__(self):
        # Length of the last status line printed; used to blank it out.
        self._max_len_printed = 0
        # Last percentage displayed, to skip redundant console refreshes.
        self._last_percentage = None

    def __call__(self, block_count, block_size, total_size):
        # Ceiling of total_size / block_size also handles the case where
        # total_size < block_size.
        block_total = -(-total_size // block_size)
        if block_total > 0:
            fraction_done = round(block_count / block_total, ndigits=3)
            # Console writes are synchronous; only refresh when the shown
            # value would actually change, so output never bottlenecks the
            # download itself.
            if fraction_done == self._last_percentage:
                return
            self._last_percentage = fraction_done
            # Blank the previous status line before writing the new one.
            print('\r' + ' ' * self._max_len_printed, end='')
            status_line = 'Progress: {:.1%} of {:,d} B'.format(fraction_done, total_size)
        else:
            status_line = 'Progress: {:,d} B of unknown size'.format(block_count * block_size)
        self._max_len_printed = len(status_line)
        print('\r' + status_line, end='')
def _download_via_urllib(url, file_path, show_progress, disable_ssl_verification):
    """Download url to file_path using urllib.

    show_progress enables the console progress hook; disable_ssl_verification
    temporarily disables HTTPS certificate checks (global, process-wide hack).
    """
    reporthook = None
    if show_progress:
        reporthook = _UrlRetrieveReportHook()
    if disable_ssl_verification:
        import ssl
        # TODO: Remove this or properly implement disabling SSL certificate verification
        orig_https_context = ssl._create_default_https_context #pylint: disable=protected-access
        ssl._create_default_https_context = ssl._create_unverified_context #pylint: disable=protected-access
    try:
        urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook)
    finally:
        # Try to reduce damage of hack by reverting original HTTPS context ASAP
        if disable_ssl_verification:
            ssl._create_default_https_context = orig_https_context #pylint: disable=protected-access
    if show_progress:
        # Terminate the carriage-return status line with a newline.
        print()
def _download_if_needed(file_path, url, show_progress, disable_ssl_verification):
    """
    Downloads a file from url to the specified path file_path if necessary.

    If show_progress is True, download progress is printed to the console.
    """
    if file_path.exists():
        get_logger().info('%s already exists. Skipping download.', file_path)
        return
    # File name for partially download file
    tmp_file_path = file_path.with_name(file_path.name + '.partial')
    if tmp_file_path.exists():
        get_logger().debug('Resuming downloading URL %s ...', url)
    else:
        get_logger().debug('Downloading URL %s ...', url)
    # Perform download
    if shutil.which('curl'):
        # curl is preferred because -C - allows resuming a partial download.
        get_logger().debug('Using curl')
        try:
            subprocess.run(['curl', '-L', '-o', str(tmp_file_path), '-C', '-', url], check=True)
        except subprocess.CalledProcessError as exc:
            get_logger().error('curl failed. Re-run the download command to resume downloading.')
            raise exc
    else:
        get_logger().debug('Using urllib')
        _download_via_urllib(url, tmp_file_path, show_progress, disable_ssl_verification)
    # Download complete; rename file
    # The final name only appears once the download finished, so an aborted
    # run leaves only the .partial file behind.
    tmp_file_path.rename(file_path)
def _chromium_hashes_generator(hashes_path):
    """Yield (hash_name, hash_hex) pairs parsed from a Chromium-style hash
    file; lines naming algorithms unknown to hashlib are skipped with a
    warning."""
    with hashes_path.open(encoding=ENCODING) as hashes_file:
        for line in hashes_file.read().splitlines():
            algorithm, hex_digest, _ = line.lower().split(' ')
            if algorithm in hashlib.algorithms_available:
                yield algorithm, hex_digest
            else:
                get_logger().warning('Skipping unknown hash algorithm: %s', algorithm)
def _get_hash_pairs(download_properties, cache_dir):
    """Yield (hash_name, hash_hex) pairs for one download's configured hashes,
    expanding any 'hash_url' entry by parsing its downloaded hash file."""
    for entry_kind, entry_value in download_properties.hashes.items():
        if entry_kind != 'hash_url':
            # Inline entry: the INI key is the algorithm, the value the digest.
            yield entry_kind, entry_value
            continue
        processor, hash_filename, _ = entry_value
        if processor != 'chromium':
            raise ValueError('Unknown hash_url processor: %s' % processor)
        yield from _chromium_hashes_generator(cache_dir / hash_filename)
def retrieve_downloads(download_info, cache_dir, show_progress, disable_ssl_verification=False):
    """
    Retrieve downloads into the downloads cache.

    download_info is the DowloadInfo of downloads to retrieve.
    cache_dir is the pathlib.Path to the downloads cache.
    show_progress is a boolean indicating if download progress is printed to the console.
    disable_ssl_verification is a boolean indicating if certificate verification
    should be disabled for downloads using HTTPS.

    Raises FileNotFoundError if the downloads path does not exist.
    Raises NotADirectoryError if the downloads path is not a directory.
    """
    if not cache_dir.exists():
        raise FileNotFoundError(cache_dir)
    if not cache_dir.is_dir():
        raise NotADirectoryError(cache_dir)
    for download_name, download_properties in download_info.properties_iter():
        get_logger().info('Downloading "%s" to "%s" ...', download_name,
                          download_properties.download_filename)
        download_path = cache_dir / download_properties.download_filename
        _download_if_needed(download_path, download_properties.url, show_progress,
                            disable_ssl_verification)
        # If hashes live in a separate file, fetch that file too so
        # check_downloads() can verify against it later.
        if download_properties.has_hash_url():
            get_logger().info('Downloading hashes for "%s"', download_name)
            _, hash_filename, hash_url = download_properties.hashes['hash_url']
            _download_if_needed(cache_dir / hash_filename, hash_url, show_progress,
                                disable_ssl_verification)
def check_downloads(download_info, cache_dir):
    """
    Check integrity of the downloads cache.

    download_info is the DownloadInfo of downloads to check.
    cache_dir is the pathlib.Path to the downloads cache.

    Raises HashMismatchError when a computed hash does not match the
    expected hash for a download.
    """
    for download_name, download_properties in download_info.properties_iter():
        get_logger().info('Verifying hashes for "%s" ...', download_name)
        download_path = cache_dir / download_properties.download_filename
        with download_path.open('rb') as file_obj:
            archive_data = file_obj.read()
        for hash_name, hash_hex in _get_hash_pairs(download_properties, cache_dir):
            get_logger().debug('Verifying %s hash...', hash_name)
            hasher = hashlib.new(hash_name, data=archive_data)
            # Compare case-insensitively; hash files in the wild mix cases.
            # (Idiom fix: use '!=' instead of 'not ... ==', PEP 8.)
            if hasher.hexdigest().lower() != hash_hex.lower():
                raise HashMismatchError(download_path)
def unpack_downloads(download_info, cache_dir, output_dir, extractors=None):
    """
    Unpack downloads in the downloads cache to output_dir. Assumes all downloads are retrieved.

    download_info is the DownloadInfo of downloads to unpack.
    cache_dir is the pathlib.Path directory containing the download cache
    output_dir is the pathlib.Path directory to unpack the downloads to.
    extractors is a dictionary of PlatformEnum to a command or path to the
    extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip and WinRAR.

    May raise undetermined exceptions during archive unpacking.
    """
    for download_name, download_properties in download_info.properties_iter():
        download_path = cache_dir / download_properties.download_filename
        get_logger().info('Unpacking "%s" to %s ...', download_name,
                          download_properties.output_path)
        # Pick the extraction function from the INI's 'extractor' key,
        # defaulting to tar.
        extractor_name = download_properties.extractor or ExtractorEnum.TAR
        if extractor_name == ExtractorEnum.SEVENZIP:
            extractor_func = extract_with_7z
        elif extractor_name == ExtractorEnum.WINRAR:
            extractor_func = extract_with_winrar
        elif extractor_name == ExtractorEnum.TAR:
            extractor_func = extract_tar_file
        else:
            raise NotImplementedError(extractor_name)
        if download_properties.strip_leading_dirs is None:
            strip_leading_dirs_path = None
        else:
            strip_leading_dirs_path = Path(download_properties.strip_leading_dirs)
        extractor_func(
            archive_path=download_path,
            output_dir=output_dir / Path(download_properties.output_path),
            relative_to=strip_leading_dirs_path,
            extractors=extractors)
def _add_common_args(parser):
    """Attach the -i/--ini and -c/--cache arguments shared by the
    'retrieve' and 'unpack' subcommands."""
    parser.add_argument(
        '-i', '--ini', type=Path, nargs='+',
        help='The downloads INI to parse for downloads. Can be specified multiple times.')
    parser.add_argument(
        '-c', '--cache', type=Path, required=True,
        help='Path to the directory to cache downloads.')
def _retrieve_callback(args):
    """Callback for the 'retrieve' subcommand: download everything, then
    verify hashes, exiting with status 1 on any mismatch."""
    retrieve_downloads(
        DownloadInfo(args.ini), args.cache, args.show_progress, args.disable_ssl_verification)
    try:
        check_downloads(DownloadInfo(args.ini), args.cache)
    except HashMismatchError as exc:
        get_logger().error('File checksum does not match: %s', exc)
        sys.exit(1)
def _unpack_callback(args):
    """Callback for the 'unpack' subcommand: unpack all downloads using the
    extractor binaries given on the command line."""
    extractors = {
        ExtractorEnum.SEVENZIP: args.sevenz_path,
        ExtractorEnum.WINRAR: args.winrar_path,
        ExtractorEnum.TAR: args.tar_path,
    }
    unpack_downloads(DownloadInfo(args.ini), args.cache, args.output, extractors)
def main():
    """CLI Entrypoint

    Builds the argument parser with two subcommands ('retrieve' and 'unpack'),
    parses the command line, and dispatches to the callback registered by the
    chosen subcommand.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    add_common_params(parser)
    subparsers = parser.add_subparsers(title='Download actions', dest='action')
    # retrieve
    retrieve_parser = subparsers.add_parser(
        'retrieve',
        help='Retrieve and check download files',
        description=('Retrieves and checks downloads without unpacking. '
                     'The downloader will attempt to use CLI command "curl". '
                     'If it is not present, Python\'s urllib will be used. However, only '
                     'the CLI-based downloaders can be resumed if the download is aborted.'))
    _add_common_args(retrieve_parser)
    # store_false means show_progress defaults to True; the flag hides the bar.
    retrieve_parser.add_argument(
        '--hide-progress-bar',
        action='store_false',
        dest='show_progress',
        help='Hide the download progress.')
    retrieve_parser.add_argument(
        '--disable-ssl-verification',
        action='store_true',
        help='Disables certification verification for downloads using HTTPS.')
    retrieve_parser.set_defaults(callback=_retrieve_callback)
    # unpack
    unpack_parser = subparsers.add_parser(
        'unpack',
        help='Unpack download files',
        description='Verifies hashes of and unpacks download files into the specified directory.')
    _add_common_args(unpack_parser)
    unpack_parser.add_argument(
        '--tar-path',
        default='tar',
        help=('(Linux and macOS only) Command or path to the BSD or GNU tar '
              'binary for extraction. Default: %(default)s'))
    # '--7z-path' cannot be an identifier, hence the explicit dest.
    unpack_parser.add_argument(
        '--7z-path',
        dest='sevenz_path',
        default=USE_REGISTRY,
        help=('Command or path to 7-Zip\'s "7z" binary. If "_use_registry" is '
              'specified, determine the path from the registry. Default: %(default)s'))
    unpack_parser.add_argument(
        '--winrar-path',
        dest='winrar_path',
        default=USE_REGISTRY,
        help=('Command or path to WinRAR\'s "winrar" binary. If "_use_registry" is '
              'specified, determine the path from the registry. Default: %(default)s'))
    unpack_parser.add_argument('output', type=Path, help='The directory to unpack to.')
    unpack_parser.set_defaults(callback=_unpack_callback)
    args = parser.parse_args()
    args.callback(args)
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| Eloston/ungoogled-chromium | utils/downloads.py | Python | bsd-3-clause | 17,917 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
VetEpiGIS-Group
A QGIS plugin
Spatial functions for vet epidemiology
-------------------
begin : 2016-05-06
git sha : $Format:%H$
copyright : (C) 2016 by Norbert Solymosi
email : solymosi.norbert@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
| IZSVenezie/VetEpiGIS-Group | plugin/__init__.py | Python | gpl-3.0 | 1,137 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
Internal package.
Package containing modules that are used internally by Numenta Python
tools and plugins to extend standard library functionality.
These modules should NOT be used by client applications.
The following modules are included:
nupic.support.paths
Module containing filesystem path manipulation utilities.
nupic.support.serialization
Module containing Python object serialization (pickling and unpickling) and
versioning utilities.
nupic.support.compress
Module containing Python object encoding and compression utilities.
nupic.support.processes
Module containing operating system process management utilities and wrappers.
nupic.support.output
Module containing operating system interprocess communication utilities and
wrappers.
nupic.support.diff
Module containing file difference calculation wrappers.
nupic.support.vision
Temporary location for vision framework before the move to nupic.vision.
nupic.support.deprecate
Contains the deprecate decorator used for automatic handling of deprecated
methods.
nupic.support.memchecker
Contains the MemChecker class, for checking physical memory and monitoring
memory usage.
nupic.support.imagesearch
Contains functions for searching for images on the web and downloading them.
"""
from __future__ import with_statement
# Standard imports
import os
import sys
import inspect
import logging
import logging.config
import logging.handlers
from platform import python_version
import struct
from StringIO import StringIO
import time
import traceback
from configuration import Configuration
from nupic.support.fshelpers import makeDirectoryFromAbsolutePath
# Local imports
#############################################################################
def getCallerInfo(depth=2):
  """Return information about a function caller as a tuple.

  The tuple is (function/method name, filename, class); the class entry is
  None when the caller is a plain function rather than an object method.

  depth: how far back in the callstack to go to extract the caller info
  """
  frame = sys._getframe(depth)
  callableName = frame.f_code.co_name
  sourceFile = frame.f_code.co_filename
  callerClass = None
  argValues = inspect.getargvalues(frame)
  argNames = argValues[0]
  if argNames:
    # The first argument is potentially 'self' when the caller is a method;
    # its class name identifies the owning object.
    firstArgName = argNames[0]
    callerClass = argValues[3][firstArgName].__class__.__name__
  return (callableName, sourceFile, callerClass)
#############################################################################
def title(s=None, additional='', stream=sys.stdout, frame='-'):
  """Utility function to display nice titles

  It automatically extracts the name of the function/method it is called from
  and you can add additional text. title() will then print the name
  of the function/method and the additional text surrounded by two lines
  of dashes. If you don't want the name of the function, you can provide
  alternative text (regardless of the additional text)

  @param s - text to display, uses the function name and arguments by default
  @param additional - extra text to display (not needed if s is not None)
  @param stream - the stream to print to. By default goes to standard output
  @param frame - the character used for the over and under line. Default is '-'
      NOTE(review): 'frame' is never referenced in the body; the separator is
      always built from '-' literals. Presumably an unfinished feature.

  Examples:

  def foo():
    title()

  will display:

  ---
  foo
  ---

  def foo():
    title(additional='(), this is cool!!!')

  will display:

  ----------------------
  foo(), this is cool!!!
  ----------------------

  def foo():
    title('No function name here!')

  will display:

  ----------------------
  No function name here!
  ----------------------
  """
  if s is None:
    # No explicit title: derive one from the calling function or method.
    callable_name, file_name, class_name = getCallerInfo(2)
    s = callable_name
    if class_name is not None:
      # NOTE: method_name is assigned but never used afterwards.
      method_name = s
      s = class_name + '.' + callable_name
  lines = (s + additional).split('\n')
  # The frame lines must be as wide as the longest line of the title text.
  length = max(len(line) for line in lines)
  print >> stream, '-' * length
  print >> stream, s + additional
  print >> stream, '-' * length
#############################################################################
def bringToFront(title):
  """Bring the top-level window whose title is *title* to the front.

  Windows-only; on every other platform this is a no-op. Raises Exception
  when no window with the given title exists.
  """
  if sys.platform != 'win32':
    return
  import ctypes
  user32 = ctypes.windll.user32
  hwnd = user32.FindWindowA(None, title)
  if not hwnd:
    raise Exception('There is no window titled: "%s"' % title)
  user32.SetForegroundWindow(hwnd)
#############################################################################
def getUserDocumentsPath():
  """
  Find the user's "Documents" directory (OS X), "My Documents" directory
  (Windows), or home directory (Unix).

  Returns the directory path as a string.
  """
  # OS X and Windows code from:
  # http://www.blueskyonmars.com/2005/08/05
  # /finding-a-users-my-documents-folder-on-windows/
  # Alternate Windows code from:
  # http://bugs.python.org/issue1763
  if sys.platform.startswith('win'):
    if sys.platform.startswith('win32'):
      # Try the primary method on 32-bit windows
      try:
        from win32com.shell import shell
        alt = False
      except ImportError:
        try:
          import ctypes
          dll = ctypes.windll.shell32
          alt = True
        except Exception:
          # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
          # are not swallowed while probing for ctypes support.
          raise Exception("Could not find 'My Documents'")
    else:
      # Use the alternate method on 64-bit Windows
      alt = True
    if not alt:
      # Primary method using win32com
      df = shell.SHGetDesktopFolder()
      pidl = df.ParseDisplayName(0, None,
                 "::{450d8fba-ad25-11d0-98a8-0800361b1103}")[1]
      path = shell.SHGetPathFromIDList(pidl)
    else:
      # Alternate method using ctypes rather than win32com
      buf = ctypes.create_string_buffer(300)
      dll.SHGetSpecialFolderPathA(None, buf, 0x0005, False)
      path = buf.value
  elif sys.platform.startswith('darwin'):
    from Carbon import Folder, Folders
    folderref = Folder.FSFindFolder(Folders.kUserDomain,
                                    Folders.kDocumentsFolderType,
                                    False)
    path = folderref.as_pathname()
  else:
    # Unix-like: the user's home directory.
    path = os.getenv('HOME')
  return path
#############################################################################
def getArgumentDescriptions(f):
  """
  Get the arguments, default values, and argument descriptions for a function.

  Returns a list of tuples: (argName, argDescription, defaultValue). If an
  argument has no default value, the tuple is only two elements long (as None
  cannot be used, since it could be a default value itself).

  Parses the argument descriptions out of the function docstring, using a
  format something like this:

    [junk]
    argument_name:     description...
      description...
      description...
    [junk]
    [more arguments]

  It will find an argument as long as the exact argument name starts the line.
  It will then strip a trailing colon, if present, then strip the rest of the
  line and use it to start the description. It will then strip and append any
  subsequent lines with a greater indent level than the original argument name.
  """
  # Get the argument names and default values
  # NOTE(review): inspect.getargspec is a Python 2 API (removed in 3.11);
  # this module predates Python 3.
  argspec = inspect.getargspec(f)
  # Scan through the docstring to extract documentation for each argument as
  # follows:
  #   Check the first word of the line, stripping a colon if one is present.
  #     If it matches an argument name:
  #      Take the rest of the line, stripping leading whitespace
  #      Take each subsequent line if its indentation level is greater than the
  #        initial indentation level
  #      Once the indentation level is back to the original level, look for
  #        another argument
  docstring = f.__doc__
  descriptions = {}
  if docstring:
    lines = docstring.split('\n')
    i = 0
    while i < len(lines):
      stripped = lines[i].lstrip()
      if not stripped:
        i += 1
        continue
      # Indentation level is index of the first character
      indentLevel = lines[i].index(stripped[0])
      # Get the first word and remove the colon, if present
      firstWord = stripped.split()[0]
      if firstWord.endswith(':'):
        firstWord = firstWord[:-1]
      if firstWord in argspec.args:
        # Found an argument
        argName = firstWord
        restOfLine = stripped[len(firstWord)+1:].strip()
        argLines = [restOfLine]
        # Take the next lines as long as they are indented more
        i += 1
        while i < len(lines):
          stripped = lines[i].lstrip()
          if not stripped:
            # Empty line - stop
            break
          if lines[i].index(stripped[0]) <= indentLevel:
            # No longer indented far enough - stop
            break
          # This line counts too
          argLines.append(lines[i].strip())
          i += 1
        # Store this description
        descriptions[argName] = ' '.join(argLines)
      else:
        # Not an argument
        i += 1
  # Build the list of (argName, description, defaultValue)
  # Defaults align with the TAIL of argspec.args, hence the offset arithmetic.
  args = []
  if argspec.defaults:
    defaultCount = len(argspec.defaults)
  else:
    defaultCount = 0
  nonDefaultArgCount = len(argspec.args) - defaultCount
  for i, argName in enumerate(argspec.args):
    if i >= nonDefaultArgCount:
      defaultValue = argspec.defaults[i - nonDefaultArgCount]
      args.append((argName, descriptions.get(argName, ""), defaultValue))
    else:
      args.append((argName, descriptions.get(argName, "")))
  return args
#############################################################################
# TODO queryNumInwardIters appears to be unused and should probably be deleted
# from here altogether; it's likely an artifact of the legacy vision support.
#def queryNumInwardIters(configPath, radialLength, numRepetitions=1):
# """
# Public utility API that accepts a config path and
# radial length, and determines the proper number of
# training iterations with which to invoke net.run()
# when running a PictureSensor in 'inward' mode.
# """
# numCats = queryNumCategories(configPath)
# sequenceLen = radialLength + 1
# numItersPerCat = (8 * radialLength) * sequenceLen
# numTrainingItersTP = numItersPerCat * numCats
# return numTrainingItersTP * numRepetitions
#############################################################################
# Process-wide guard: initLogging() is effectively once-per-process.
gLoggingInitialized = False

def initLogging(verbose=False, console='stdout', consoleLevel='DEBUG'):
  """
  Initialize NuPic logging by reading in from the logging configuration file. The
  logging configuration file is named 'nupic-logging.conf' and is expected to be
  in the format defined by the python logging module.

  If the environment variable 'NTA_CONF_PATH' is defined, then the logging
  configuration file is expected to be in the NTA_CONF_PATH directory. If
  NTA_CONF_PATH is not defined, then it is found in the 'conf/default'
  subdirectory of the NuPic installation directory (typically
  ~/nta/current/conf/default)

  The logging configuration file can use the environment variable 'NTA_LOG_DIR'
  to set the locations of log files. If this variable is not defined already in
  the environment, this method will set it to the 'logs' subdirectory of the
  NuPic install directory (typically ~/nta/eng/logs) before loading in the
  configuration file.

  console:      Defines console output for the default "root" logging
                configuration; this may be one of 'stdout', 'stderr', or None;
                Use None to suppress console logging output
  consoleLevel: Logging-level filter string for console output corresponding to
                logging levels in the logging module; may be one of:
                'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'.
                E.g., a value of 'WARNING' suppresses DEBUG and INFO level output
                to console, but allows WARNING, ERROR, and CRITICAL
  """
  # NOTE: If you call this twice from the same process there seems to be a
  # bug - logged messages don't show up for loggers that you do another
  # logging.getLogger() on.
  global gLoggingInitialized
  if gLoggingInitialized:
    if verbose:
      print >> sys.stderr, "Logging already initialized, doing nothing."
    return
  consoleStreamMappings = {
    'stdout'  : 'stdoutConsoleHandler',
    'stderr'  : 'stderrConsoleHandler',
  }
  consoleLogLevels = ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'CRITICAL',
                      'FATAL']
  assert console is None or console in consoleStreamMappings.keys(), (
    'Unexpected console arg value: %r') % (console,)
  assert consoleLevel in consoleLogLevels, (
    'Unexpected consoleLevel arg value: %r') % (consoleLevel)
  # -----------------------------------------------------------------------
  # Setup logging. Look for the nupic-logging.conf file, first in the
  # NTA_CONFIG_DIR path (if defined), then in a subdirectory of the nupic
  # module
  # TODO: move into nupic.support
  configFilename = 'nupic-logging.conf'
  try:
    configFilePath = Configuration.findConfigFile(configFilename)
  except:
    # NOTE(review): bare except deliberately tolerates any failure to locate
    # the config file; a missing config is reported below instead of raising.
    configFilePath = None
  # If NTA_LOG_DIR is not defined, set it now. This is used by the logging
  # config file to set the path for the log files
  if 'NTA_LOG_DIR' not in os.environ:
    os.environ['NTA_LOG_DIR'] = os.path.join(os.environ['NUPIC'], 'logs')
  if not os.path.exists(os.environ['NTA_LOG_DIR']):
    makeDirectoryFromAbsolutePath(os.path.abspath(os.environ['NTA_LOG_DIR']))
  # Load in the logging configuration file
  if configFilePath is None:
    print >> sys.stderr, (
      "WARNING: Could not find the logging configuration file " \
      "(filename: '%s', expected to be in search path: %s). Logging is " \
      " disabled.") % (configFilename, Configuration.getConfigPaths())
  else:
    if verbose:
      print >> sys.stderr, (
        "Using logging configuration file: %s") % (configFilePath)
    # This dict will hold our replacement strings for logging configuration
    replacements = dict()
    def makeKey(name):
      """ Makes replacement key of the form $$NAME$$ """
      return "$$%s$$" % (name)
    platform = sys.platform.lower()
    if platform.startswith('java'):
      # Jython reports 'java...' as the platform; ask the JVM for the real OS
      import java.lang
      platform = java.lang.System.getProperty("os.name").lower()
      if platform.startswith('mac os x'):
        platform = 'darwin'
    if platform.startswith('darwin'):
      replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/var/run/syslog"'
    elif platform.startswith('linux'):
      replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/dev/log"'
    else:
      raise RuntimeError("This platform is neither darwin nor linux: %s" % (
        sys.platform,))
    if False: #os.path.isdir('/var/log/numenta/nupic'):
      # NOTE: Not using syslogHandler for now because it either truncates or
      # drops messages over ~1,400 bytes (depending on platform)
      # Nupic logs go to syslog. Also, SysLogHandler raises an exception
      # on jython (at least on 2.5.2): "AttributeError: 'module' object has no
      # attribute 'AF_UNIX'" (jython is used by a sub-moduleof
      # ClientJobManager)
      replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'syslogHandler'
    else:
      # Nupic logs go to file
      replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'fileHandler'
      # Set up log file path for the default file handler
      logFilePath = _genLoggingFilePath()
      makeDirectoryFromAbsolutePath(os.path.dirname(logFilePath))
      replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = repr(logFilePath)
    # Set up root logger
    replacements[makeKey('ROOT_LOGGER_HANDLERS')] = (
      replacements[makeKey('PERSISTENT_LOG_HANDLER')])
    if console is not None:
      replacements[makeKey('ROOT_LOGGER_HANDLERS')] += (
        ',' + consoleStreamMappings[console])
    # Set up log level for console handlers
    replacements[makeKey('CONSOLE_LOG_LEVEL')] = consoleLevel
    # Substitute the $$...$$ placeholders into an in-memory copy of the
    # configuration, then feed that to logging.config.fileConfig.
    customConfig = StringIO()
    with open(configFilePath) as src:
      for lineNum, line in enumerate(src):
        if "$$" in line:
          for (key, value) in replacements.items():
            line = line.replace(key, value)
        # If there is still a replacement string in the line, we're missing it
        # from our replacements dict
        if "$$" in line and "$$<key>$$" not in line:
          raise RuntimeError(("The text %r, found at line #%d of file %r, "
                              "contains a string not found in our replacement "
                              "dict.") % (line, lineNum, configFilePath))
        customConfig.write(line)
    customConfig.seek(0)
    if python_version()[:3] >= '2.6':
      # NOTE: the disable_existing_loggers arg is new as of Python 2.6, so it's
      # not supported on our jython interpreter, which was v2.5.x as of this
      # writing
      logging.config.fileConfig(customConfig, disable_existing_loggers=False)
    else:
      logging.config.fileConfig(customConfig)
  gLoggingInitialized = True
#############################################################################
def reinitLoggingDir():
  """ (Re-)Initialize the logging directory for the calling application that
  uses initLogging() for logging configuration

  NOTE: It's typically unnecessary to call this function directly since
  initLogging takes care of it for you. This function is exposed primarily for
  the benefit of nupic-services.py to allow it to restore its logging directory
  after the hard-reset operation.
  """
  # Only meaningful after initLogging() has run; otherwise there is no
  # configured log file whose directory could be recreated.
  if gLoggingInitialized:
    makeDirectoryFromAbsolutePath(os.path.dirname(_genLoggingFilePath()))
#############################################################################
def _genLoggingFilePath():
  """ Generate a log filepath for the calling app.

  Returns an absolute path of the form
  $NTA_LOG_DIR/numenta-logs-<user>/<app>/<app>-<gmt-epoch>-<pid>.log
  Requires the NTA_LOG_DIR and USER environment variables to be set.
  """
  # Base the file name on the running script's name; fall back to a
  # placeholder when sys.argv[0] is empty (e.g. interactive session).
  appName = os.path.splitext(os.path.basename(sys.argv[0]))[0] or 'UnknownApp'
  appLogDir = os.path.abspath(os.path.join(
    os.environ['NTA_LOG_DIR'],
    'numenta-logs-%s' % (os.environ['USER'],),
    appName))
  # GMT epoch seconds + pid make the name unique per process launch.
  # NOTE: 'long' is a Python 2 builtin; this module predates Python 3.
  appLogFileName = '%s-%s-%s.log' % (
    appName, long(time.mktime(time.gmtime())), os.getpid())
  return os.path.join(appLogDir, appLogFileName)
#############################################################################
def enableLoggingErrorDebugging():
  """ Overrides the python logging facility's Handler.handleError function to
  raise an exception instead of print and suppressing it.  This allows a deeper
  stacktrace to be emitted that is very helpful for quickly finding the
  file/line that initiated the invalidly-formatted logging operation.

  NOTE: This is for debugging only - be sure to remove the call to this function
   *before* checking in your changes to the source code repository, as it will
   cause the application to fail if some invalidly-formatted logging statement
   still exists in your code.

  Example usage: enableLoggingErrorDebugging must be called *after*
   initLogging()

    import nupic.support
    nupic.support.initLogging()
    nupic.support.enableLoggingErrorDebugging()

  "TypeError: not all arguments converted during string formatting" is an
  example exception that might be output by the built-in handlers with the
  following very shallow traceback that doesn't go deep enough to show the
  source of the problem:

  File ".../python2.6/logging/__init__.py", line 776, in emit
    msg = self.format(record)
  File ".../python2.6/logging/__init__.py", line 654, in format
    return fmt.format(record)
  File ".../python2.6/logging/__init__.py", line 436, in format
    record.message = record.getMessage()
  File ".../python2.6/logging/__init__.py", line 306, in getMessage
    msg = msg % self.args
  TypeError: not all arguments converted during string formatting
  """
  print >> sys.stderr, ("WARNING")
  print >> sys.stderr, ("WARNING: "
    "nupic.support.enableLoggingErrorDebugging() was "
    "called to install a debugging patch into all logging handlers that "
    "will cause the program to fail if a logging exception occurrs; this "
    "call is for debugging only and MUST be removed before checking in code "
    "into production system. Caller: %s") % (
    traceback.format_stack(),)
  print >> sys.stderr, ("WARNING")
  def handleErrorPatch(*args, **kwargs):
    # Re-raise the exception currently being handled instead of swallowing
    # it, honoring the stdlib's logging.raiseExceptions gate.
    if logging.raiseExceptions:
      raise
  # Patch every handler created so far. NOTE(review): logging._handlerList is
  # a private stdlib structure, and handlers created after this call are not
  # patched.
  for handler in logging._handlerList:
    handler.handleError = handleErrorPatch
  return
#############################################################################
def clippedObj(obj, maxElementSize=64):
  """
  Return a clipped version of obj suitable for printing. This
  is useful when generating log messages by printing data structures, but
  don't want the message to be too long.

  If passed in a dict, list, or namedtuple, each element of the structure's
  string representation will be limited to 'maxElementSize' characters. This
  will return a new object where the string representation of each element
  has been truncated to fit within maxElementSize.
  """
  # Is it a named tuple?
  if hasattr(obj, '_asdict'):
    obj = obj._asdict()
  # Printing a dict?
  if isinstance(obj, dict):
    objOut = dict()
    for key, val in obj.items():
      # BUG FIX: the recursion previously dropped maxElementSize, so nested
      # elements were always clipped at the default 64.
      objOut[key] = clippedObj(val, maxElementSize)
  # Printing a list? (Strings are excluded so they are clipped as leaf values;
  # on Python 3 str has __iter__ and would otherwise explode into characters.)
  elif hasattr(obj, '__iter__') and not isinstance(obj, str):
    objOut = []
    for val in obj:
      objOut.append(clippedObj(val, maxElementSize))
  # Some other object
  else:
    objOut = str(obj)
    if len(objOut) > maxElementSize:
      objOut = objOut[0:maxElementSize] + '...'
  return objOut
###############################################################################
def intTo8ByteArray(inValue):
  """
  Converts an int to a packed 8-byte array, most significant byte first.
  """
  # Extract the eight bytes from most to least significant.
  byteValues = [(inValue >> shift) & 0xff
                for shift in (56, 48, 40, 32, 24, 16, 8, 0)]
  packer = struct.Struct('B B B B B B B B')
  return packer.pack(*byteValues)
###############################################################################
def byteArrayToInt(packed_data):
  """
  Converts an 8-byte array (most significant byte first) into an integer.
  """
  result = 0
  # Fold the bytes in from most to least significant.
  for byteValue in struct.unpack('B B B B B B B B', packed_data):
    result = (result << 8) | byteValue
  return result
###############################################################################
def getSpecialRowID():
  """
  Special row id is 0xFF FFFF FFFF FFFF FFFF (9 bytes of 0xFF)
  """
  return struct.pack('9B', *((0xFF,) * 9))
################################################################################
_FLOAT_SECONDS_IN_A_DAY = 24.0 * 60.0 * 60.0

def floatSecondsFromTimedelta(td):
  """ Convert a datetime.timedelta to seconds in floating point """
  # timedelta normalizes to (days, seconds, microseconds); combine them.
  daysPart = td.days * _FLOAT_SECONDS_IN_A_DAY
  microsPart = td.microseconds / 1E6
  return daysPart + td.seconds * 1.0 + microsPart
#############################################################################
def aggregationToMonthsSeconds(interval):
  """
  Return the number of months and seconds from an aggregation dict that
  represents a date and time.

  Interval is a dict that may contain one or more of the following keys:
  'years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds',
  'milliseconds', 'microseconds'.

  Parameters:
  ---------------------------------------------------------------------
  interval:  The aggregation interval, as a dict representing a date and time
  retval:    number of months and seconds in the interval, as a dict:
             {'months': XX, 'seconds': XX}. The seconds is a floating point
             value that can represent resolutions down to a microsecond.

  For example:
    aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds':42}) ==
        {'months':12, 'seconds':14400.000042}
  """
  # Conversion factor from each sub-month unit to seconds, accumulated in
  # ascending order of magnitude.
  secondsPerUnit = (
    ('microseconds', 0.000001),
    ('milliseconds', 0.001),
    ('seconds', 1),
    ('minutes', 60),
    ('hours', 60 * 60),
    ('days', 24 * 60 * 60),
    ('weeks', 7 * 24 * 60 * 60),
  )
  seconds = 0.0
  for unit, factor in secondsPerUnit:
    seconds += interval.get(unit, 0) * factor
  months = interval.get('months', 0) + 12 * interval.get('years', 0)
  return {'months': months, 'seconds': seconds}
#############################################################################
def aggregationDivide(dividend, divisor):
  """
  Return the result from dividing two dicts that represent date and time.

  Both dividend and divisor are dicts that contain one or more of the following
  keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds',
  'milliseconds', 'microseconds'.

  Parameters:
  ---------------------------------------------------------------------
  dividend:  The numerator, as a dict representing a date and time
  divisor:   the denominator, as a dict representing a date and time
  retval:    number of times divisor goes into dividend, as a floating point
             number.

  Raises RuntimeError when one operand is expressed in months/years and the
  other in sub-month units, since those cannot be inter-converted.

  For example:
    aggregationDivide({'hours': 4}, {'minutes': 15}) == 16
  """
  # Normalize each operand into (months, seconds)
  dividendMonthSec = aggregationToMonthsSeconds(dividend)
  divisorMonthSec = aggregationToMonthsSeconds(divisor)
  # It is a usage error to mix both months and seconds in the same operation
  if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
    or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
    raise RuntimeError("Aggregation dicts with months/years can only be "
        "inter-operated with other aggregation dicts that contain "
        "months/years")
  if dividendMonthSec['months'] > 0:
    # BUG FIX: previously divided by divisor['months'], which ignored any
    # 'years' component of the divisor and raised KeyError when the divisor
    # was given only in 'years'. Use the normalized month count instead.
    return float(dividendMonthSec['months']) / divisorMonthSec['months']
  else:
    return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
| spbguru/repo1 | nupic/support/__init__.py | Python | gpl-3.0 | 27,136 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from odoo.tests.common import TransactionCase
from odoo.exceptions import AccessError
class TestEquipmentMulticompany(TransactionCase):
    """Access-rights checks for maintenance records in a multi-company setup."""

    def test_00_equipment_multicompany_user(self):
        """Test Check maintenance with equipment manager and user in multi company environment"""
        # Useful models
        Equipment = self.env['maintenance.equipment']
        MaintenanceRequest = self.env['maintenance.request']
        Category = self.env['maintenance.equipment.category']
        ResUsers = self.env['res.users']
        ResCompany = self.env['res.company']
        MaintenanceTeam = self.env['maintenance.team']
        # Useful references.
        group_user = self.env.ref('base.group_user')
        group_manager = self.env.ref('maintenance.group_equipment_manager')
        # Company A
        company_a = ResCompany.create({
            'name': 'Company A',
            'currency_id': self.env.ref('base.USD').id,
        })
        # Company B: a second, independent company (sibling of Company A)
        company_b = ResCompany.create({
            'name': 'Company B',
            'currency_id': self.env.ref('base.USD').id,
        })
        # Create equipment manager with access to both companies.
        cids = [company_a.id, company_b.id]
        equipment_manager = ResUsers.create({
            'name': 'Equipment Manager',
            'company_id': company_a.id,
            'login': 'e_equipment_manager',
            'email': 'eqmanager@yourcompany.example.com',
            'groups_id': [(6, 0, [group_manager.id])],
            'company_ids': [(6, 0, [company_a.id, company_b.id])]
        })
        # Create equipment user belonging only to company B.
        user = ResUsers.create({
            'name': 'Normal User/Employee',
            'company_id': company_b.id,
            'login': 'emp',
            'email': 'empuser@yourcompany.example.com',
            'groups_id': [(6, 0, [group_user.id])],
            'company_ids': [(6, 0, [company_b.id])]
        })
        # create a maintenance team for company A user
        team = MaintenanceTeam.with_user(equipment_manager).create({
            'name': 'Metrology',
            'company_id': company_a.id,
        })
        # create a maintenance team for company B user
        teamb = MaintenanceTeam.with_user(equipment_manager).with_context(allowed_company_ids=cids).create({
            'name': 'Subcontractor',
            'company_id': company_b.id,
        })
        # User should not be able to create an equipment category.
        with self.assertRaises(AccessError):
            Category.with_user(user).create({
                'name': 'Software',
                'company_id': company_b.id,
                'technician_user_id': user.id,
            })
        # create equipment category for equipment manager
        category_1 = Category.with_user(equipment_manager).with_context(allowed_company_ids=cids).create({
            'name': 'Monitors - Test',
            'company_id': company_b.id,
            'technician_user_id': equipment_manager.id,
        })
        # create equipment category for equipment manager
        Category.with_user(equipment_manager).with_context(allowed_company_ids=cids).create({
            'name': 'Computers - Test',
            'company_id': company_b.id,
            'technician_user_id': equipment_manager.id,
        })
        # create equipment category in company A (not visible to the user)
        Category.with_user(equipment_manager).create({
            'name': 'Phones - Test',
            'company_id': company_a.id,
            'technician_user_id': equipment_manager.id,
        })
        # Check category visibility for equipment_manager and user
        self.assertEqual(Category.with_user(equipment_manager).with_context(allowed_company_ids=cids).search_count([]), 3)
        self.assertEqual(Category.with_user(user).search_count([]), 2)
        # User should not be able to create equipment.
        with self.assertRaises(AccessError):
            Equipment.with_user(user).create({
                'name': 'Samsung Monitor 15',
                'category_id': category_1.id,
                'assign_date': time.strftime('%Y-%m-%d'),
                'company_id': company_b.id,
                'owner_user_id': user.id,
            })
        # equipment owned by the normal user (makes them a follower)
        Equipment.with_user(equipment_manager).with_context(allowed_company_ids=cids).create({
            'name': 'Acer Laptop',
            'category_id': category_1.id,
            'assign_date': time.strftime('%Y-%m-%d'),
            'company_id': company_b.id,
            'owner_user_id': user.id,
        })
        # create an equipment owned by the manager
        Equipment.with_user(equipment_manager).with_context(allowed_company_ids=cids).create({
            'name': 'HP Laptop',
            'category_id': category_1.id,
            'assign_date': time.strftime('%Y-%m-%d'),
            'company_id': company_b.id,
            'owner_user_id': equipment_manager.id,
        })
        # Now there are total 2 equipments created and can view by equipment_manager user
        self.assertEqual(Equipment.with_user(equipment_manager).with_context(allowed_company_ids=cids).search_count([]), 2)
        # And there is total 1 equipment can be view by Normal User ( Which user is followers)
        self.assertEqual(Equipment.with_user(user).search_count([]), 1)
        # creating an equipment team as the normal user must be denied
        with self.assertRaises(AccessError):
            MaintenanceTeam.with_user(user).create({
                'name': 'Subcontractor',
                'company_id': company_b.id,
            })
        # creating an equipment category as the normal user must be denied
        with self.assertRaises(AccessError):
            Category.with_user(user).create({
                'name': 'Computers',
                'company_id': company_b.id,
                'technician_user_id': user.id,
            })
        # creating a maintenance stage as the normal user must be denied
        with self.assertRaises(AccessError):
            self.env['maintenance.stage'].with_user(user).create({
                'name': 'identify corrective maintenance requirements',
            })
        # Create a maintenance request as the normal user ( User Follower ).
        MaintenanceRequest.with_user(user).create({
            'name': 'Some keys are not working',
            'company_id': company_b.id,
            'user_id': user.id,
            'owner_user_id': user.id,
        })
        # Create a maintenance request as equipment_manager (Admin Follower)
        MaintenanceRequest.with_user(equipment_manager).create({
            'name': 'Battery drains fast',
            'company_id': company_a.id,
            'user_id': equipment_manager.id,
            'owner_user_id': equipment_manager.id,
        })
        # The manager sees both requests; the normal user sees only their own
        self.assertEqual(MaintenanceRequest.with_user(equipment_manager).with_context(allowed_company_ids=cids).search_count([]), 2)
        self.assertEqual(MaintenanceRequest.with_user(user).search_count([]), 1)
| ddico/odoo | addons/maintenance/tests/test_maintenance_multicompany.py | Python | agpl-3.0 | 7,097 |
#!/usr/bin/python3
import os
import subprocess
import gettext
import pwd
from setproctitle import setproctitle
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("XApp", "1.0")
from gi.repository import Gtk, XApp
# i18n
gettext.install("cinnamon", "/usr/share/locale")
class MainWindow:
    """Away-message dialog shown before the screen is locked.

    Loads the dialog from its Glade UI description, shows the user's
    avatar (~/.face) when present, and invokes
    cinnamon-screensaver-command to perform the actual lock.
    """

    def __init__(self):
        user_id = os.getuid()
        home_dir = pwd.getpwuid(user_id).pw_dir

        self.builder = Gtk.Builder()
        self.builder.set_translation_domain('cinnamon') # let it translate!
        self.builder.add_from_file("/usr/share/cinnamon/cinnamon-screensaver-lock-dialog/cinnamon-screensaver-lock-dialog.ui")

        self.window = self.builder.get_object("main_dialog")
        self.button_cancel = self.builder.get_object("button_cancel")
        self.button_ok = self.builder.get_object("button_ok")
        self.entry = self.builder.get_object("entry_away_message")
        self.image = self.builder.get_object("image_face")

        self.window.set_title(_("Screen Locker"))
        XApp.set_window_icon_name(self.window, "cs-screensaver")
        self.builder.get_object("label_description").set_markup("<i>%s</i>" % _("Please type an away message for the lock screen"))

        # Prefer the user's avatar; fall back to the generic icon.
        if os.path.exists("%s/.face" % home_dir):
            self.image.set_from_file("%s/.face" % home_dir)
        else:
            self.image.set_from_icon_name("cs-screensaver", Gtk.IconSize.DIALOG)

        self.window.connect("destroy", Gtk.main_quit)
        self.button_cancel.connect("clicked", Gtk.main_quit)
        self.button_ok.connect('clicked', self.lock_screen)
        self.entry.connect('activate', self.lock_screen)
        self.builder.get_object("dialog-action_area1").set_focus_chain((self.button_ok, self.button_cancel))
        self.window.show()

    def lock_screen(self, data):
        """Lock the screen, passing along the away message if one was typed."""
        message = self.entry.get_text()
        if message != "":
            # Reuse the value read above instead of querying the widget twice.
            subprocess.call(["cinnamon-screensaver-command", "--lock", "--away-message", message])
        else:
            subprocess.call(["cinnamon-screensaver-command", "--lock"])
        Gtk.main_quit()
if __name__ == "__main__":
    # Give the process a recognizable name for ps/top and session tools.
    setproctitle("cinnamon-screensaver-lock-dialog")
    # Build the dialog, then hand control to the GTK main loop until the
    # user confirms (locks) or cancels (quits).
    MainWindow()
    Gtk.main()
| glls/Cinnamon | files/usr/share/cinnamon/cinnamon-screensaver-lock-dialog/cinnamon-screensaver-lock-dialog.py | Python | gpl-2.0 | 2,308 |
"""
Spherical Harmonic Coefficients classes
SHCoeffs : SHRealCoeffs, SHComplexCoeffs
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import numpy as _np
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import copy as _copy
import warnings as _warnings
from scipy.special import factorial as _factorial
from .. import shtools as _shtools
from ..spectralanalysis import spectrum as _spectrum
from ..shio import convert as _convert
from ..shio import shread as _shread
# =============================================================================
# ========= COEFFICIENT CLASSES =========================================
# =============================================================================
class SHCoeffs(object):
"""
Spherical Harmonics Coefficient class.
The coefficients of this class can be initialized using one of the
four constructor methods:
x = SHCoeffs.from_array(numpy.zeros((2, lmax+1, lmax+1)))
x = SHCoeffs.from_random(powerspectrum[0:lmax+1])
x = SHCoeffs.from_zeros(lmax)
x = SHCoeffs.from_file('fname.dat')
The normalization convention of the input coefficents is specified
by the normalization and csphase parameters, which take the following
values:
normalization : '4pi' (default), geodesy 4-pi normalized.
: 'ortho', orthonormalized.
: 'schmidt', Schmidt semi-normalized.
: 'unnorm', unnormalized.
    csphase       : 1 (default), exclude the Condon-Shortley phase factor.
: -1, include the Condon-Shortley phase factor.
See the documentation for each constructor method for further options.
Once initialized, each class instance defines the following class
attributes:
lmax : The maximum spherical harmonic degree of the coefficients.
coeffs : The raw coefficients with the specified normalization and
csphase conventions.
normalization : The normalization of the coefficients: '4pi', 'ortho',
'schmidt', or 'unnorm'.
csphase : Defines whether the Condon-Shortley phase is used (1)
or not (-1).
mask : A boolean mask that is True for the permissible values of
degree l and order m.
kind : The coefficient data type: either 'complex' or 'real'.
Each class instance provides the following methods:
to_array() : Return an array of spherical harmonic coefficients
with a different normalization convention.
to_file() : Save raw spherical harmonic coefficients as a file.
degrees() : Return an array listing the spherical harmonic
degrees from 0 to lmax.
spectrum() : Return the spectrum of the function as a function
of spherical harmonic degree.
set_coeffs() : Set coefficients in-place to specified values.
rotate() : Rotate the coordinate system used to express the
spherical harmonic coefficients and return a new
class instance.
convert() : Return a new class instance using a different
normalization convention.
pad() : Return a new class instance that is zero padded or
truncated to a different lmax.
expand() : Evaluate the coefficients either on a spherical
grid and return an SHGrid class instance, or for
a list of latitude and longitude coordinates.
copy() : Return a copy of the class instance.
plot_spectrum() : Plot the spectrum as a function of spherical
harmonic degree.
plot_spectrum2d() : Plot the 2D spectrum of all spherical harmonic
coefficients.
info() : Print a summary of the data stored in the SHCoeffs
instance.
"""
def __init__(self):
"""Unused constructor of the super class."""
print('Initialize the class using one of the class methods:\n'
'>>> SHCoeffs.from_array?\n'
'>>> SHCoeffs.from_random?\n'
'>>> SHCoeffs.from_zeros?\n'
'>>> SHCoeffs.from_file?\n')
# ---- Factory methods ----
@classmethod
def from_zeros(self, lmax, kind='real', normalization='4pi', csphase=1):
"""
Initialize class with spherical harmonic coefficients set to zero from
degree 0 to lmax.
Usage
-----
x = SHCoeffs.from_zeros(lmax, [normalization, csphase])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
lmax : int
The highest spherical harmonic degree l of the coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
kind : str, optional, default = 'real'
'real' or 'complex' spherical harmonic coefficients.
"""
if kind.lower() not in ('real', 'complex'):
raise ValueError(
"Kind must be 'real' or 'complex'. " +
"Input value was {:s}."
.format(repr(kind))
)
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
nl = lmax + 1
if kind.lower() == 'real':
coeffs = _np.zeros((2, nl, nl))
else:
coeffs = _np.zeros((2, nl, nl), dtype=complex)
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs, normalization=normalization.lower(),
csphase=csphase)
@classmethod
def from_array(self, coeffs, normalization='4pi', csphase=1, lmax=None,
copy=True):
"""
Initialize the class with spherical harmonic coefficients from an input
array.
Usage
-----
x = SHCoeffs.from_array(array, [normalization, csphase, lmax, copy])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
array : ndarray, shape (2, lmaxin+1, lmaxin+1).
The input spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = None
The maximum spherical harmonic degree to include in the returned
class instance. This must be less than or equal to lmaxin.
copy : bool, optional, default = True
If True, make a copy of array when initializing the class instance.
If False, initialize the class instance with a reference to array.
"""
if _np.iscomplexobj(coeffs):
kind = 'complex'
else:
kind = 'real'
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
lmaxin = coeffs.shape[1] - 1
if lmax is None:
lmax = lmaxin
else:
if lmax > lmaxin:
lmax = lmaxin
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs[:, 0:lmax+1, 0:lmax+1],
normalization=normalization.lower(),
csphase=csphase, copy=copy)
@classmethod
def from_random(self, power, lmax=None, kind='real', normalization='4pi',
csphase=1, exact_power=False):
"""
Initialize the class with spherical harmonic coefficients as random
variables.
Usage
-----
x = SHCoeffs.from_random(power, [lmax, kind, normalization, csphase,
exact_power])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
power : ndarray, shape (L+1)
numpy array of shape (L+1) that specifies the expected power per
degree l of the random coefficients, where L is the maximum
spherical harmonic bandwidth.
lmax : int, optional, default = len(power) - 1
The highest spherical harmonic degree l of the output coefficients.
The coefficients will be set to zero for degrees greater than L.
kind : str, optional, default = 'real'
'real' or 'complex' spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
exact_power : bool, optional, default = False
The total variance of the coefficients is set exactly to the input
power. This means that the distribution of power at degree l
amongst the angular orders is random, but the total power is fixed.
Description
-----------
This routine returns a random realization of spherical harmonic
coefficients obtained from a normal distribution. The variance of
each coefficient at degree l is equal to the total power at degree
l divided by the number of coefficients at that degree. The power
spectrum of the random realization can be fixed exactly to the input
spectrum using the keyword exact_power.
"""
# check if all arguments are correct
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The input normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Provided value was {:s}"
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value was {:s}"
.format(repr(csphase))
)
if kind.lower() not in ('real', 'complex'):
raise ValueError(
"kind must be 'real' or 'complex'. " +
"Input value was {:s}.".format(repr(kind)))
if lmax is None:
nl = len(power)
lmax = nl - 1
else:
if lmax <= len(power) - 1:
nl = lmax + 1
else:
nl = len(power)
degrees = _np.arange(nl)
if normalization.lower() == 'unnorm' and nl - 1 > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(nl-1),
category=RuntimeWarning)
nl = 85 + 1
lmax = 85
# Create coefficients with unit variance, which returns an expected
# total power per degree of (2l+1).
if kind.lower() == 'real':
coeffs = _np.empty((2, nl, nl))
for l in degrees:
coeffs[:2, l, :l+1] = _np.random.normal(size=(2, l+1))
elif kind.lower() == 'complex':
# - need to divide by sqrt 2 as there are two terms for each coeff.
coeffs = _np.empty((2, nl, nl), dtype=complex)
for l in degrees:
coeffs[:2, l, :l+1] = (_np.random.normal(size=(2, l+1)) +
1j * _np.random.normal(size=(2, l+1))
) / _np.sqrt(2.)
if exact_power:
power_per_l = _spectrum(coeffs, normalization=normalization,
unit='per_l')
coeffs *= _np.sqrt(
power[0:nl] / power_per_l)[_np.newaxis, :, _np.newaxis]
else:
if normalization.lower() == '4pi':
coeffs *= _np.sqrt(
power[0:nl] / (2. * degrees + 1.))[_np.newaxis, :,
_np.newaxis]
elif normalization.lower() == 'ortho':
coeffs *= _np.sqrt(
4. * _np.pi * power[0:nl] / (2. * degrees + 1.)
)[_np.newaxis, :, _np.newaxis]
elif normalization.lower() == 'schmidt':
coeffs *= _np.sqrt(power[0:nl])[_np.newaxis, :, _np.newaxis]
elif normalization.lower() == 'unnorm':
coeffs *= _np.sqrt(power[0:nl])[_np.newaxis, :, _np.newaxis]
for l in degrees:
ms = _np.arange(l+1)
coeffs[:, l, :l+1] *= _np.sqrt(_factorial(l-ms) /
_factorial(l+ms))
if kind.lower() == 'real':
coeffs[:, :, 1:] *= _np.sqrt(2.)
if lmax > nl - 1:
coeffs = _np.pad(coeffs, ((0, 0), (0, lmax - nl + 1),
(0, lmax - nl + 1)), 'constant')
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs, normalization=normalization.lower(),
csphase=csphase)
@classmethod
def from_file(self, fname, lmax=None, format='shtools', kind='real',
normalization='4pi', skip=0, csphase=1, **kwargs):
"""
Initialize the class with spherical harmonic coefficients from a file.
Usage
-----
x = SHCoeffs.from_file(filename, format='shtools', [lmax,
normalization,
csphase, skip])
x = SHCoeffs.from_file(filename, format='npy', [normalization,
csphase, **kwargs])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
filename : str
Name of the file, including path.
format : str, optional, default = 'shtools'
'shtools' format or binary numpy 'npy' format.
lmax : int, optional, default = None
The maximum spherical harmonic degree to read from 'shtools'
formatted files.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
skip : int, optional, default = 0
Number of lines to skip at the beginning of the file when format is
'shtools'.
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.load() when format is 'npy'.
Description
-----------
If format='shtools', spherical harmonic coefficients will be read from
a text file. The optional parameter `skip` specifies how many lines
should be skipped before attempting to parse the file, and the optional
parameter `lmax` specifies the maximum degree to read from the file.
All lines that do not start with 2 integers and that are less than 3
words long will be treated as comments and ignored. For this format,
each line of the file must contain
l, m, coeffs[0, l, m], coeffs[1, l, m]
where l and m are the spherical harmonic degree and order,
respectively. The terms coeffs[1, l, 0] can be neglected as they are
zero. For more information, see `shio.shread()`.
If format='npy', a binary numpy 'npy' file will be read using
numpy.load().
"""
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The input normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Provided value was {:s}"
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value was {:s}"
.format(repr(csphase))
)
if format.lower() == 'shtools':
coeffs, lmaxout = _shread(fname, lmax=lmax, skip=skip)
elif format.lower() == 'npy':
coeffs = _np.load(fname, **kwargs)
else:
raise NotImplementedError(
'format={:s} not yet implemented'.format(repr(format)))
if normalization.lower() == 'unnorm' and lmaxout > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(lmaxout),
category=RuntimeWarning)
lmaxout = 85
if _np.iscomplexobj(coeffs):
kind = 'complex'
else:
kind = 'real'
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs, normalization=normalization.lower(),
csphase=csphase)
def copy(self):
"""Return a deep copy of the class instance."""
return _copy.deepcopy(self)
def to_file(self, filename, format='shtools', **kwargs):
"""
Save raw spherical harmonic coefficients to a file.
Usage
-----
x.to_file(filename, [format, **kwargs])
Parameters
----------
filename : str
Name of the output file.
format : str, optional, default = 'shtools'
'shtools' or 'npy'. See method from_file() for more information.
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.save().
"""
if format is 'shtools':
with open(filename, mode='w') as file:
for l in range(self.lmax+1):
for m in range(l+1):
file.write('{:d}, {:d}, {:e}, {:e}\n'
.format(l, m, self.coeffs[0, l, m],
self.coeffs[1, l, m]))
elif format is 'npy':
_np.save(filename, self.coeffs, **kwargs)
else:
raise NotImplementedError(
'format={:s} not yet implemented'.format(repr(format)))
# ---- Mathematical operators ----
def __add__(self, other):
"""
Add two similar sets of coefficients or coefficients and a scalar:
self + other.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] +
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must be of ' +
'the same kind and have the same ' +
'normalization and csphase.')
elif _np.isscalar(other) is True:
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not add a complex constant to real ' +
'coefficients.')
coeffs[self.mask] = self.coeffs[self.mask] + other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __radd__(self, other):
"""
Add two similar sets of coefficients or coefficients and a scalar:
other + self.
"""
return self.__add__(other)
def __sub__(self, other):
"""
Subtract two similar sets of coefficients or coefficients and a scalar:
self - other.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] -
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must be of ' +
'the same kind and have the same ' +
'normalization and csphase.')
elif _np.isscalar(other) is True:
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not subtract a complex constant from ' +
'real coefficients.')
coeffs[self.mask] = self.coeffs[self.mask] - other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __rsub__(self, other):
"""
Subtract two similar sets of coefficients or coefficients and a scalar:
other - self.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (other.coeffs[self.mask] -
self.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must be of ' +
'the same kind and have the same ' +
'normalization and csphase.')
elif _np.isscalar(other) is True:
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not subtract a complex constant from ' +
'real coefficients.')
coeffs[self.mask] = other - self.coeffs[self.mask]
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __mul__(self, other):
"""
Multiply two similar sets of coefficients or coefficients and a scalar:
self * other.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] *
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must be of ' +
'the same kind and have the same ' +
'normalization and csphase.')
elif _np.isscalar(other) is True:
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not multiply real coefficients by ' +
'a complex constant.')
coeffs[self.mask] = self.coeffs[self.mask] * other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __rmul__(self, other):
"""
Multiply two similar sets of coefficients or coefficients and a scalar:
other * self.
"""
return self.__mul__(other)
def __div__(self, other):
"""
Divide two similar sets of coefficients or coefficients and a scalar
when __future__.division is not in effect: self / other.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] /
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must be of ' +
'the same kind and have the same ' +
'normalization and csphase.')
elif _np.isscalar(other) is True:
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not divide real coefficients by ' +
'a complex constant.')
coeffs[self.mask] = self.coeffs[self.mask] / other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __truediv__(self, other):
"""
Divide two similar sets of coefficients or coefficients and a scalar
when __future__.division is in effect: self / other.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] /
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must be of ' +
'the same kind and have the same ' +
'normalization and csphase.')
elif _np.isscalar(other) is True:
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not multiply real coefficients by ' +
'a complex constant.')
coeffs[self.mask] = self.coeffs[self.mask] / other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __pow__(self, other):
"""
Raise the spherical harmonic coefficients to a scalar power:
pow(self, other).
"""
if _np.isscalar(other) is True:
return SHCoeffs.from_array(pow(self.coeffs, other),
csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
# ---- Extract data ----
def degrees(self):
"""
Return a numpy array with the spherical harmonic degrees from 0 to
lmax.
Usage
-----
degrees = x.degrees()
Returns
-------
degrees : ndarray, shape (lmax+1)
1-D numpy ndarray listing the spherical harmonic degrees, where
lmax is the maximum spherical harmonic degree.
"""
return _np.arange(self.lmax + 1)
def spectrum(self, lmax=None, convention='power', unit='per_l', base=10.):
"""
Return the spectrum as a function of spherical harmonic degree.
Usage
-----
power = x.spectrum([lmax, convention, unit, base])
Returns
-------
power : ndarray, shape (lmax+1)
1-D numpy ndarray of the spectrum, where lmax is the maximum
spherical harmonic degree.
Parameters
----------
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree of the spectrum to output.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum,
'energy' for energy spectrum, and 'l2norm' for the l2 norm
spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This function returns either the power spectrum, energy spectrum, or
l2-norm spectrum. Total power is defined as the integral of the
function squared over all space, divided by the area the function
spans. If the mean of the function is zero, this is equivalent to the
variance of the function. The total energy is the integral of the
function squared over all space and is 4pi times the total power. For
normalized coefficients ('4pi', 'ortho', or 'schmidt'), the l2-norm is
the sum of the magnitude of the coefficients squared.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, which is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitessimal logarithmic degree band. The contrubution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl) is equal to
spectrum(l, 'per_l')*l*log(a).
"""
return _spectrum(self.coeffs, normalization=self.normalization,
convention=convention, unit=unit, base=base,
lmax=lmax)
# ---- Set individual coefficients ----
def set_coeffs(self, values, ls, ms):
"""
Set spherical harmonic coefficients in-place to specified values.
Usage
-----
x.set_coeffs(values, ls, ms)
Parameters
----------
values : float or complex (list)
The value(s) of the spherical harmonic coefficient(s).
ls : int (list)
The degree(s) of the coefficient(s) that should be set.
ms : int (list)
The order(s) of the coefficient(s) that should be set. Positive
and negative values correspond to the cosine and sine
components, respectively.
Examples
--------
x.set_coeffs(10.,1,1) # x.coeffs[0,1,1] = 10.
x.set_coeffs([1.,2], [1,2], [0,-2]) # x.coeffs[0,1,0] = 1.
# x.coeffs[1,2,2] = 2.
"""
# Ensure that the type is correct
values = _np.array(values)
ls = _np.array(ls)
ms = _np.array(ms)
mneg_mask = (ms < 0).astype(_np.int)
self.coeffs[mneg_mask, ls, _np.abs(ms)] = values
# ---- Return coefficients with a different normalization convention ----
def to_array(self, normalization=None, csphase=None, lmax=None):
"""
Return spherical harmonic coefficients as a numpy array.
Usage
-----
coeffs = x.to_array([normalization, csphase, lmax])
Returns
-------
coeffs : ndarry, shape (2, lmax+1, lmax+1)
numpy ndarray of the spherical harmonic coefficients.
Parameters
----------
normalization : str, optional, default = x.normalization
Normalization of the output coefficients: '4pi', 'ortho',
'schmidt', or 'unnorm' for geodesy 4pi normalized, orthonormalized,
Schmidt semi-normalized, or unnormalized coefficients,
respectively.
csphase : int, optional, default = x.csphase
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree to output. If lmax is greater
than x.lmax, the array will be zero padded.
Description
-----------
This method will return an array of the spherical harmonic coefficients
using a different normalization and Condon-Shortley phase convention,
and a different maximum spherical harmonic degree. If the maximum
degree is smaller than the maximum degree of the class instance, the
coefficients will be truncated. Conversely, if this degree is larger
than the maximum degree of the class instance, the output array will be
zero padded.
"""
if normalization is None:
normalization = self.normalization
if csphase is None:
csphase = self.csphase
if lmax is None:
lmax = self.lmax
coeffs = _convert(self.coeffs, normalization_in=self.normalization,
normalization_out=normalization,
csphase_in=self.csphase, csphase_out=csphase,
lmax=lmax)
return coeffs
# ---- Rotate the coordinate system ----
def rotate(self, alpha, beta, gamma, degrees=True, convention='y',
           body=False, dj_matrix=None):
    """
    Rotate either the coordinate system used to express the spherical
    harmonic coefficients or the physical body, and return a new class
    instance.

    Usage
    -----
    x_rotated = x.rotate(alpha, beta, gamma, [degrees, convention,
                                              body, dj_matrix])

    Returns
    -------
    x_rotated : SHCoeffs class instance

    Parameters
    ----------
    alpha, beta, gamma : float
        The three Euler rotation angles.
    degrees : bool, optional, default = True
        True if the Euler angles are in degrees, False if they are in
        radians.
    convention : str, optional, default = 'y'
        The convention used for the rotation of the second angle, which
        can be either 'x' or 'y' for a rotation about the x or y axes,
        respectively.
    body : bool, optional, default = False
        If True, rotate the physical body and not the coordinate system.
    dj_matrix : ndarray, optional, default = None
        The djpi2 rotation matrix computed by a call to djpi2.

    Raises
    ------
    ValueError
        If convention is not a string, or is neither 'x' nor 'y'
        (case-insensitive).

    Description
    -----------
    The rotation can be viewed in two complementary ways. Scheme A:
    rotate about z by alpha, about the new y by beta, about the new z by
    gamma. Scheme B: rotate about z by gamma, about the initial y by
    beta, about the initial z by alpha. The relation between the x and y
    conventions is alpha_y = alpha_x - pi/2, beta_y = beta_x,
    gamma_y = gamma_x + pi/2. Rotating the physical body (body=True) is
    accomplished by the inverse rotation (-gamma, -beta, -alpha).
    """
    if type(convention) != str:
        raise ValueError('convention must be a string. ' +
                         'Input type was {:s}'
                         .format(str(type(convention))))
    if convention.lower() not in ('x', 'y'):
        raise ValueError(
            "convention must be either 'x' or 'y'. " +
            "Provided value was {:s}".format(repr(convention))
        )
    # Convert to radians BEFORE applying the x-convention pi/2 shifts;
    # the original applied the radian-valued shifts to degree-valued
    # angles and only then called _np.radians, mixing units.
    if degrees:
        alpha, beta, gamma = _np.radians([alpha, beta, gamma])
    # Compare with == on the lowercased value. The original used
    # "convention is 'y'", an identity test against a string literal
    # that is not guaranteed to hold and that left 'angles' unbound for
    # case variants such as 'Y' (which pass the validation above).
    if convention.lower() == 'y':
        if body:
            # Inverse rotation rotates the body instead of the frame.
            angles = _np.array([-gamma, -beta, -alpha])
        else:
            angles = _np.array([alpha, beta, gamma])
    else:  # convention.lower() == 'x'
        # _np.pi, not np.pi: numpy is imported under the _np alias.
        if body:
            angles = _np.array([-gamma - _np.pi/2, -beta,
                                -alpha + _np.pi/2])
        else:
            angles = _np.array([alpha - _np.pi/2, beta,
                                gamma + _np.pi/2])
    if self.lmax > 1200:
        # The underlying rotation routine loses accuracy at high degree.
        _warnings.warn("The rotate() method is accurate only to about" +
                       " spherical harmonic degree 1200. " +
                       "lmax = {:d}".format(self.lmax),
                       category=RuntimeWarning)
    return self._rotate(angles, dj_matrix)
# ---- Convert spherical harmonic coefficients to a different normalization
def convert(self, normalization=None, csphase=None, lmax=None, kind=None,
            check=True):
    """
    Return a SHCoeffs class instance with a different normalization
    convention.

    Usage
    -----
    clm = x.convert([normalization, csphase, lmax, kind, check])

    Returns
    -------
    clm : SHCoeffs class instance

    Parameters
    ----------
    normalization : str, optional, default = x.normalization
        Normalization of the output class: '4pi', 'ortho', 'schmidt', or
        'unnorm'.
    csphase : int, optional, default = x.csphase
        Condon-Shortley phase convention for the output class: 1 to
        exclude the phase factor, or -1 to include it.
    lmax : int, optional, default = x.lmax
        Maximum spherical harmonic degree to output.
    kind : str, optional, default = x.kind
        'real' or 'complex' spherical harmonic coefficients for the
        output class.
    check : bool, optional, default = True
        When converting complex coefficients to real coefficients, if
        True, check that the function is entirely real.

    Description
    -----------
    Returns a new class instance using a different normalization,
    Condon-Shortley phase convention, real/complex kind, and/or maximum
    degree. The coefficients are truncated or zero padded as needed.
    """
    # Substitute the instance's own conventions for unspecified args.
    normalization = (self.normalization if normalization is None
                     else normalization)
    csphase = self.csphase if csphase is None else csphase
    lmax = self.lmax if lmax is None else lmax
    kind = self.kind if kind is None else kind
    # ---- argument validation (guard clauses) ----
    if type(normalization) != str:
        raise ValueError('normalization must be a string. ' +
                         'Input type was {:s}'
                         .format(str(type(normalization))))
    if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
        raise ValueError(
            "normalization must be '4pi', 'ortho', 'schmidt', or " +
            "'unnorm'. Provided value was {:s}"
            .format(repr(normalization)))
    if csphase != 1 and csphase != -1:
        raise ValueError(
            "csphase must be 1 or -1. Input value was {:s}"
            .format(repr(csphase)))
    # Switch between real and complex representations first, if needed,
    # then extract the coefficients with the requested conventions.
    if kind == self.kind:
        source = self
    elif kind == 'complex':
        source = self._make_complex()
    else:
        source = self._make_real(check=check)
    coeffs = source.to_array(normalization=normalization.lower(),
                             csphase=csphase, lmax=lmax)
    return SHCoeffs.from_array(coeffs,
                               normalization=normalization.lower(),
                               csphase=csphase, copy=False)
# ---- Return a SHCoeffs class instance zero padded up to lmax
def pad(self, lmax):
    """
    Return a SHCoeffs class where the coefficients are zero padded or
    truncated to a different lmax.

    Usage
    -----
    clm = x.pad(lmax)

    Returns
    -------
    clm : SHCoeffs class instance

    Parameters
    ----------
    lmax : int
        Maximum spherical harmonic degree to output.
    """
    clm = self.copy()
    current = self.lmax
    if lmax <= current:
        # Truncate: keep only degrees and orders up to lmax.
        clm.coeffs = clm.coeffs[:, :lmax + 1, :lmax + 1]
    else:
        # Zero pad the degree and order axes out to the new lmax.
        growth = lmax - current
        clm.coeffs = _np.pad(clm.coeffs,
                             ((0, 0), (0, growth), (0, growth)),
                             'constant')
    clm.lmax = lmax
    return clm
# ---- Expand the coefficients onto a grid ----
def expand(self, grid='DH', lat=None, lon=None, degrees=True, zeros=None,
           lmax=None, lmax_calc=None):
    """
    Evaluate the spherical harmonic coefficients either on a grid or for
    a list of coordinates.

    Usage
    -----
    f = x.expand(lat, lon, [lmax_calc, degrees])
    g = x.expand([grid, lmax, lmax_calc, zeros])

    Returns
    -------
    f : float, ndarray, or list
    g : SHGrid class instance

    Parameters
    ----------
    lat, lon : int, float, ndarray, or list, optional, default = None
        Latitude and longitude coordinates where the function is to be
        evaluated.
    degrees : bool, optional, default = True
        True if lat and lon are in degrees, False if in radians.
    grid : str, optional, default = 'DH'
        'DH' or 'DH1' for an equisampled lat/lon grid with nlat=nlon,
        'DH2' for an equidistant lat/lon grid with nlon=2*nlat, or 'GLQ'
        for a Gauss-Legendre quadrature grid.
    lmax : int, optional, default = x.lmax
        The maximum spherical harmonic degree, which determines the grid
        spacing of the output grid.
    lmax_calc : int, optional, default = x.lmax
        The maximum spherical harmonic degree to use when evaluating the
        function.
    zeros : ndarray, optional, default = None
        The cos(colatitude) nodes used in the Gauss-Legendre Quadrature
        grids.

    Description
    -----------
    For more information concerning the spherical harmonic expansions
    and the properties of the output grids, see the documentation for
    SHExpandDH, SHExpandDHC, SHExpandGLQ and SHExpandGLQC.
    """
    # Point evaluation takes precedence when both coordinates are given.
    if lat is not None and lon is not None:
        return self._expand_coord(
            lat=lat, lon=lon, degrees=degrees,
            lmax_calc=self.lmax if lmax_calc is None else lmax_calc)
    # Grid evaluation: resolve the degree defaults, then dispatch on the
    # (case-insensitive) grid name.
    lmax = self.lmax if lmax is None else lmax
    lmax_calc = lmax if lmax_calc is None else lmax_calc
    if type(grid) != str:
        raise ValueError('grid must be a string. ' +
                         'Input type was {:s}'
                         .format(str(type(grid))))
    key = grid.upper()
    if key in ('DH', 'DH1'):
        return self._expandDH(sampling=1, lmax=lmax, lmax_calc=lmax_calc)
    if key == 'DH2':
        return self._expandDH(sampling=2, lmax=lmax, lmax_calc=lmax_calc)
    if key == 'GLQ':
        return self._expandGLQ(zeros=zeros, lmax=lmax, lmax_calc=lmax_calc)
    raise ValueError(
        "grid must be 'DH', 'DH1', 'DH2', or 'GLQ'. " +
        "Input value was {:s}".format(repr(grid)))
# ---- Plotting routines ----
def plot_spectrum(self, convention='power', unit='per_l', base=10.,
                  xscale='lin', yscale='log', show=True, ax=None,
                  fname=None):
    """
    Plot the spectrum as a function of spherical harmonic degree.

    Usage
    -----
    x.plot_spectrum([convention, unit, base, xscale, yscale, show, ax,
                     fname])

    Parameters
    ----------
    convention : str, optional, default = 'power'
        The type of spectrum to plot: 'power' for power spectrum,
        'energy' for energy spectrum, and 'l2norm' for the l2 norm
        spectrum.
    unit : str, optional, default = 'per_l'
        If 'per_l', plot the total contribution to the spectrum for each
        spherical harmonic degree l. If 'per_lm', plot the average
        contribution to the spectrum for each coefficient at spherical
        harmonic degree l. If 'per_dlogl', plot the spectrum per log
        interval dlog_a(l).
    base : float, optional, default = 10.
        The logarithm base when calculating the 'per_dlogl' spectrum, and
        the base to use for logarithmic axes.
    xscale : str, optional, default = 'lin'
        Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
    yscale : str, optional, default = 'log'
        Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
    show : bool, optional, default = True
        If True, plot to the screen.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    """
    # Delegate the numerical work to self.spectrum(); this method only
    # handles the plotting.
    spectrum = self.spectrum(convention=convention, unit=unit, base=base)
    ls = self.degrees()
    # Reuse the caller's axes when one is supplied; otherwise create a
    # new figure (and only then can the figure be saved/returned below).
    if ax is None:
        fig, axes = _plt.subplots(1, 1)
    else:
        axes = ax
    axes.set_xlabel('degree l')
    # Pick the y label and legend text from the convention/unit pair.
    # NOTE(review): if convention or unit is not one of the recognized
    # values, 'legend' is never assigned and axes.plot() below raises
    # NameError — confirm whether validation is expected to have
    # happened inside self.spectrum().
    if convention == 'energy':
        axes.set_ylabel('energy')
        if (unit == 'per_l'):
            legend = 'energy per degree'
        elif (unit == 'per_lm'):
            legend = 'energy per coefficient'
        elif (unit == 'per_dlogl'):
            legend = 'energy per log bandwidth'
    elif convention == 'l2norm':
        axes.set_ylabel('l2 norm')
        if (unit == 'per_l'):
            legend = 'l2 norm per degree'
        elif (unit == 'per_lm'):
            legend = 'l2 norm per coefficient'
        elif (unit == 'per_dlogl'):
            legend = 'l2 norm per log bandwidth'
    else:
        # Any other convention string falls through to 'power' labels.
        axes.set_ylabel('power')
        if (unit == 'per_l'):
            legend = 'power per degree'
        elif (unit == 'per_lm'):
            legend = 'power per coefficient'
        elif (unit == 'per_dlogl'):
            legend = 'power per log bandwidth'
    axes.grid(True, which='both')
    # NOTE(review): 'basex'/'basey' are the pre-matplotlib-3.3 keyword
    # names (renamed to 'base' and removed in 3.5) — confirm the pinned
    # matplotlib version.
    if xscale == 'log':
        axes.set_xscale('log', basex=base)
    if yscale == 'log':
        axes.set_yscale('log', basey=base)
    # Degree 0 is dropped on a log x axis since log(0) is undefined.
    if xscale == 'log':
        axes.plot(ls[1:], spectrum[1:], label=legend)
    else:
        axes.plot(ls, spectrum, label=legend)
    axes.legend()
    if show:
        _plt.show()
    # The figure handle only exists when this method created it; when
    # the caller passed ax, saving/returning is the caller's job and
    # this method implicitly returns None.
    if ax is None:
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
def plot_spectrum2d(self, convention='power', xscale='lin', yscale='lin',
                    vscale='log', vrange=(1.e-5, 1.0), show=True,
                    ax=None, fname=None):
    """
    Plot the spectrum of all spherical harmonic coefficients.

    Usage
    -----
    x.plot_spectrum2d([convention, xscale, yscale, vscale, vrange, show,
                       ax, fname])

    Parameters
    ----------
    convention : str, optional, default = 'power'
        The type of spectrum to plot: 'power' for power spectrum,
        'energy' for energy spectrum, and 'l2norm' for the l2 norm
        spectrum.
    xscale : str, optional, default = 'lin'
        Scale of the l axis: 'lin' for linear or 'log' for logarithmic.
    yscale : str, optional, default = 'lin'
        Scale of the m axis: 'lin' for linear or 'log' for logarithmic.
    vscale : str, optional, default = 'log'
        Scale of the color axis: 'lin' for linear or 'log' for
        logarithmic.
    vrange : (float, float), optional, default = (1.e-5, 1.)
        Colormap range relative to the maximum value.
    show : bool, optional, default = True
        If True, plot to the screen.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    """
    # Build the (l, m) matrix of magnitude-squared coefficients:
    # negative orders are mirrored to the left of the m = 0 column.
    spectrum = _np.empty((self.lmax + 1, 2 * self.lmax + 1))
    mpositive = _np.abs(self.coeffs[0])**2
    mpositive[~self.mask[0]] = _np.nan
    mnegative = _np.abs(self.coeffs[1])**2
    mnegative[~self.mask[1]] = _np.nan
    spectrum[:, :self.lmax] = _np.fliplr(mnegative)[:, :self.lmax]
    spectrum[:, self.lmax:] = mpositive
    # Rescale to the requested spectrum convention for this instance's
    # normalization.
    if convention.lower() == 'l2norm':
        if self.normalization == 'unnorm':
            raise ValueError("convention can not be set to 'l2norm' " +
                             "when using unnormalized harmonics.")
        # l2norm is the raw magnitude-squared matrix; nothing to do.
    elif convention.lower() in ('power', 'energy'):
        if self.normalization == '4pi':
            pass
        elif self.normalization == 'schmidt':
            for l in self.degrees():
                spectrum[l, :] /= (2. * l + 1.)
        elif self.normalization == 'ortho':
            for l in self.degrees():
                spectrum[l, :] /= (4. * _np.pi)
        elif self.normalization == 'unnorm':
            for l in self.degrees():
                ms = _np.arange(l+1)
                conv = _factorial(l+ms) / (2. * l + 1.) / _factorial(l-ms)
                if self.kind == 'real':
                    conv[1:l + 1] = conv[1:l + 1] / 2.
                # Apply the per-order factors to the mirrored negative
                # orders and to the positive orders separately.
                spectrum[l, self.lmax-l:self.lmax] *= conv[::-1][0:l]
                spectrum[l, self.lmax:self.lmax+l+1] *= conv[0:l+1]
        else:
            raise ValueError(
                "normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Input value was {:s}"
                .format(repr(self.normalization)))
    else:
        raise ValueError(
            "convention must be 'power', 'energy', or 'l2norm'. " +
            "Input value was {:s}".format(repr(convention)))
    # Use .lower() here for consistency with the validation above; the
    # original compared case-sensitively, so e.g. 'Energy' passed
    # validation but silently skipped this factor.
    if convention.lower() == 'energy':
        spectrum *= 4.0 * _np.pi
    spectrum_masked = _np.ma.masked_invalid(spectrum)
    # One extra node is needed in each direction for pcolormesh to draw
    # the last row and column. float (builtin) replaces the _np.float
    # alias, which was deprecated in NumPy 1.20 and removed in 1.24.
    ls = _np.arange(self.lmax+2).astype(float)
    ms = _np.arange(-self.lmax, self.lmax + 2, dtype=float)
    lgrid, mgrid = _np.meshgrid(ls, ms, indexing='ij')
    # Shift by half a cell so each coefficient is centered in its cell.
    lgrid -= 0.5
    mgrid -= 0.5
    if ax is None:
        fig, axes = _plt.subplots()
    else:
        axes = ax
    vmin = _np.nanmax(spectrum) * vrange[0]
    vmax = _np.nanmax(spectrum) * vrange[1]
    if vscale.lower() == 'log':
        # Clipping is required to avoid an invalid value error.
        norm = _mpl.colors.LogNorm(vmin, vmax, clip=True)
    elif vscale.lower() == 'lin':
        norm = _plt.Normalize(vmin, vmax)
    else:
        raise ValueError(
            "vscale must be 'lin' or 'log'. " +
            "Input value was {:s}".format(repr(vscale)))
    if (xscale == 'lin'):
        cmesh = axes.pcolormesh(lgrid, mgrid, spectrum_masked,
                                norm=norm, cmap='viridis')
        axes.set(xlim=(-0.5, self.lmax + 0.5))
    elif (xscale == 'log'):
        # Degree 0 cannot be shown on a log axis; drop the first row.
        cmesh = axes.pcolormesh(lgrid[1:], mgrid[1:], spectrum_masked[1:],
                                norm=norm, cmap='viridis')
        axes.set(xscale='log', xlim=(1., self.lmax + 0.5))
    else:
        raise ValueError(
            "xscale must be 'lin' or 'log'. " +
            "Input value was {:s}".format(repr(xscale)))
    if (yscale == 'lin'):
        axes.set(ylim=(-self.lmax - 0.5, self.lmax + 0.5))
    elif (yscale == 'log'):
        # symlog handles the negative orders on the m axis.
        axes.set(yscale='symlog', ylim=(-self.lmax - 0.5, self.lmax + 0.5))
    else:
        raise ValueError(
            "yscale must be 'lin' or 'log'. " +
            "Input value was {:s}".format(repr(yscale)))
    # The two original colorbar branches differed only in the ax
    # argument; the label selection is shared.
    if ax is None:
        cb = _plt.colorbar(cmesh)
    else:
        cb = _plt.colorbar(cmesh, ax=ax)
    if convention.lower() == 'energy':
        cb.set_label('energy per coefficient')
    elif convention.lower() == 'power':
        cb.set_label('power per coefficient')
    else:
        cb.set_label('magnitude-squared coefficient')
    cb.ax.tick_params(width=0.2)
    axes.set(xlabel='degree l', ylabel='order m')
    axes.grid(True, which='both')
    # Only when this method created the figure can it show/save/return it.
    if ax is None:
        if show:
            _plt.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
def info(self):
    """
    Print a summary of the data stored in the SHCoeffs instance.

    Usage
    -----
    x.info()
    """
    summary = ('kind = {:s}\nnormalization = {:s}\n'
               'csphase = {:d}\nlmax = {:d}').format(
        repr(self.kind), repr(self.normalization), self.csphase,
        self.lmax)
    print(summary)
# ================== REAL SPHERICAL HARMONICS ================
class SHRealCoeffs(SHCoeffs):
    """Real Spherical Harmonics Coefficient class."""

    # Mapping from the normalization name to the integer flag expected
    # by the _shtools wrappers (shared by the expansion methods below).
    _NORM_FLAGS = {'4pi': 1, 'schmidt': 2, 'unnorm': 3, 'ortho': 4}

    @staticmethod
    def istype(kind):
        """Return True if this class handles the given kind ('real')."""
        return kind == 'real'

    def __init__(self, coeffs, normalization='4pi', csphase=1, copy=True):
        """Initialize Real SH Coefficients from coeffs of shape
        (2, lmax+1, lmax+1)."""
        lmax = coeffs.shape[1] - 1
        # ---- create mask to filter out m<=l ----
        # The m == 0 entries of the second (sine) set are identically
        # zero for real harmonics and are masked out as well.
        # bool (builtin) replaces the _np.bool alias, which was
        # deprecated in NumPy 1.20 and removed in 1.24.
        mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=bool)
        mask[0, 0, 0] = True
        for l in _np.arange(lmax + 1):
            mask[:, l, :l + 1] = True
        mask[1, :, 0] = False
        self.mask = mask
        self.lmax = lmax
        self.kind = 'real'
        self.normalization = normalization
        self.csphase = csphase
        if copy:
            self.coeffs = _np.copy(coeffs)
            self.coeffs[~mask] = 0.
        else:
            self.coeffs = coeffs

    def _norm_flag(self):
        """Return the _shtools integer flag for self.normalization, or
        raise ValueError for an unrecognized normalization."""
        if self.normalization not in self._NORM_FLAGS:
            raise ValueError(
                "Normalization must be '4pi', 'ortho', 'schmidt', or " +
                "'unnorm'. Input value was {:s}"
                .format(repr(self.normalization)))
        return self._NORM_FLAGS[self.normalization]

    def _make_complex(self):
        """Convert the real SHCoeffs class to the complex class."""
        rcomplex_coeffs = _shtools.SHrtoc(self.coeffs,
                                          convention=1, switchcs=0)
        # SHrtoc returns real/imaginary parts as two real arrays; they
        # need to be combined into a single complex array.
        complex_coeffs = _np.zeros((2, self.lmax+1, self.lmax+1),
                                   dtype='complex')
        complex_coeffs[0, :, :] = (rcomplex_coeffs[0, :, :] + 1j *
                                   rcomplex_coeffs[1, :, :])
        complex_coeffs[1, :, :] = complex_coeffs[0, :, :].conjugate()
        # Negative-order coefficients pick up a sign for odd m.
        for m in self.degrees():
            if m % 2 == 1:
                complex_coeffs[1, :, m] = - complex_coeffs[1, :, m]
        # complex_coeffs is local to this function, so it is safe to
        # pass by reference (copy=False).
        return SHCoeffs.from_array(complex_coeffs,
                                   normalization=self.normalization,
                                   csphase=self.csphase, copy=False)

    def _rotate(self, angles, dj_matrix):
        """Rotate the coefficients by the Euler angles alpha, beta, gamma."""
        if dj_matrix is None:
            dj_matrix = _shtools.djpi2(self.lmax + 1)
        # SHRotateRealCoef requires 4pi-normalized coefficients with
        # csphase = 1, so convert before rotating.
        coeffs = _shtools.SHRotateRealCoef(
            self.to_array(normalization='4pi', csphase=1), angles,
            dj_matrix)
        # Convert the rotated 4pi coefficients back to this instance's
        # conventions if they differ.
        if self.normalization != '4pi' or self.csphase != 1:
            temp = SHCoeffs.from_array(coeffs, normalization='4pi',
                                       csphase=1)
            tempcoeffs = temp.to_array(
                normalization=self.normalization, csphase=self.csphase)
            return SHCoeffs.from_array(
                tempcoeffs, normalization=self.normalization,
                csphase=self.csphase, copy=False)
        else:
            return SHCoeffs.from_array(coeffs, copy=False)

    def _expandDH(self, sampling, lmax, lmax_calc):
        """Evaluate the coefficients on a Driscoll and Healy (1994) grid."""
        data = _shtools.MakeGridDH(self.coeffs, sampling=sampling,
                                   norm=self._norm_flag(),
                                   csphase=self.csphase, lmax=lmax,
                                   lmax_calc=lmax_calc)
        return SHGrid.from_array(data, grid='DH', copy=False)

    def _expandGLQ(self, zeros, lmax, lmax_calc):
        """Evaluate the coefficients on a Gauss-Legendre quadrature grid."""
        norm = self._norm_flag()
        if zeros is None:
            zeros, weights = _shtools.SHGLQ(self.lmax)
        data = _shtools.MakeGridGLQ(self.coeffs, zeros, norm=norm,
                                    csphase=self.csphase, lmax=lmax,
                                    lmax_calc=lmax_calc)
        return SHGrid.from_array(data, grid='GLQ', copy=False)

    def _expand_coord(self, lat, lon, lmax_calc, degrees):
        """Evaluate the function at the coordinates lat and lon, which
        may be scalars, ndarrays, or lists."""
        norm = self._norm_flag()
        # MakeGridPoint expects degrees; convert radian input once.
        if degrees is True:
            latin = lat
            lonin = lon
        else:
            latin = _np.rad2deg(lat)
            lonin = _np.rad2deg(lon)
        if type(lat) is not type(lon):
            raise ValueError('lat and lon must be of the same type. ' +
                             'Input types are {:s} and {:s}'
                             .format(repr(type(lat)), repr(type(lon))))
        if type(lat) is int or type(lat) is float:
            return _shtools.MakeGridPoint(self.coeffs, lat=latin,
                                          lon=lonin,
                                          lmax=lmax_calc, norm=norm,
                                          csphase=self.csphase)
        elif type(lat) is _np.ndarray:
            values = _np.empty_like(lat, dtype=float)
            for v, latitude, longitude in _np.nditer([values, latin,
                                                      lonin],
                                                     op_flags=['readwrite']):
                v[...] = _shtools.MakeGridPoint(self.coeffs, lat=latitude,
                                                lon=longitude,
                                                lmax=lmax_calc, norm=norm,
                                                csphase=self.csphase)
            return values
        elif type(lat) is list:
            values = []
            for latitude, longitude in zip(latin, lonin):
                values.append(
                    _shtools.MakeGridPoint(self.coeffs, lat=latitude,
                                           lon=longitude,
                                           lmax=lmax_calc, norm=norm,
                                           csphase=self.csphase))
            return values
        else:
            raise ValueError('lat and lon must be either an int, float, ' +
                             'ndarray, or list. ' +
                             'Input types are {:s} and {:s}'
                             .format(repr(type(lat)), repr(type(lon))))
# =============== COMPLEX SPHERICAL HARMONICS ================
class SHComplexCoeffs(SHCoeffs):
    """Complex Spherical Harmonics Coefficients class."""

    # Mapping from the normalization name to the integer flag expected
    # by the _shtools wrappers (shared by the expansion methods below).
    _NORM_FLAGS = {'4pi': 1, 'schmidt': 2, 'unnorm': 3, 'ortho': 4}

    @staticmethod
    def istype(kind):
        """Return True if this class handles the given kind ('complex')."""
        return kind == 'complex'

    def __init__(self, coeffs, normalization='4pi', csphase=1, copy=True):
        """Initialize Complex coefficients from coeffs of shape
        (2, lmax+1, lmax+1)."""
        lmax = coeffs.shape[1] - 1
        # ---- create mask to filter out m<=l ----
        # bool (builtin) replaces the _np.bool alias, which was
        # deprecated in NumPy 1.20 and removed in 1.24.
        mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=bool)
        mask[0, 0, 0] = True
        for l in _np.arange(lmax + 1):
            mask[:, l, :l + 1] = True
        mask[1, :, 0] = False
        self.mask = mask
        self.lmax = lmax
        self.kind = 'complex'
        self.normalization = normalization
        self.csphase = csphase
        if copy:
            self.coeffs = _np.copy(coeffs)
            self.coeffs[~mask] = 0.
        else:
            self.coeffs = coeffs

    def _norm_flag(self):
        """Return the _shtools integer flag for self.normalization, or
        raise ValueError for an unrecognized normalization."""
        if self.normalization not in self._NORM_FLAGS:
            raise ValueError(
                "Normalization must be '4pi', 'ortho', 'schmidt', or " +
                "'unnorm'. Input value was {:s}"
                .format(repr(self.normalization)))
        return self._NORM_FLAGS[self.normalization]

    def _make_real(self, check=True):
        """Convert the complex SHCoeffs class to the real class."""
        # Test if the coefficients correspond to a real grid. The
        # equality condition is exact and therefore not robust to
        # round off errors.
        if check:
            for l in self.degrees():
                if self.coeffs[0, l, 0] != self.coeffs[0, l, 0].conjugate():
                    # '{}' instead of '{:e}': the coefficients are
                    # complex, and '{:e}' raises TypeError for complex
                    # values, masking the real diagnostic.
                    raise RuntimeError('Complex coefficients do not ' +
                                       'correspond to a real field. ' +
                                       'l = {:d}, m = 0: {}'
                                       .format(l, self.coeffs[0, l, 0]))
                for m in _np.arange(1, l + 1):
                    if m % 2 == 1:
                        if (self.coeffs[0, l, m] != -
                                self.coeffs[1, l, m].conjugate()):
                            # Report the offending (l, m) entries; the
                            # original indexed [l, 0] here by mistake.
                            raise RuntimeError(
                                'Complex coefficients do not ' +
                                'correspond to a real field. ' +
                                'l = {:d}, m = {:d}: {}, {}'
                                .format(l, m, self.coeffs[0, l, m],
                                        self.coeffs[1, l, m]))
                    else:
                        if (self.coeffs[0, l, m] !=
                                self.coeffs[1, l, m].conjugate()):
                            raise RuntimeError(
                                'Complex coefficients do not ' +
                                'correspond to a real field. ' +
                                'l = {:d}, m = {:d}: {}, {}'
                                .format(l, m, self.coeffs[0, l, m],
                                        self.coeffs[1, l, m]))
        # Repack the positive-order coefficients as the real/imaginary
        # pair layout that SHctor expects.
        coeffs_rc = _np.zeros((2, self.lmax + 1, self.lmax + 1))
        coeffs_rc[0, :, :] = self.coeffs[0, :, :].real
        coeffs_rc[1, :, :] = self.coeffs[0, :, :].imag
        real_coeffs = _shtools.SHctor(coeffs_rc, convention=1,
                                      switchcs=0)
        return SHCoeffs.from_array(real_coeffs,
                                   normalization=self.normalization,
                                   csphase=self.csphase)

    def _rotate(self, angles, dj_matrix):
        """Rotate the coefficients by the Euler angles alpha, beta, gamma."""
        # Note that the current method is EXTREMELY inefficient. The
        # complex coefficients are expanded onto real and imaginary
        # grids, each of the two components is rotated separately as
        # real data, the rotated real data are re-expanded on new grids,
        # combined to make a complex grid, and the result is expanded
        # in complex spherical harmonics.
        if dj_matrix is None:
            dj_matrix = _shtools.djpi2(self.lmax + 1)
        cgrid = self.expand(grid='DH')
        rgrid, igrid = cgrid.data.real, cgrid.data.imag
        rgridcoeffs = _shtools.SHExpandDH(rgrid, norm=1, sampling=1,
                                          csphase=1)
        igridcoeffs = _shtools.SHExpandDH(igrid, norm=1, sampling=1,
                                          csphase=1)
        rgridcoeffs_rot = _shtools.SHRotateRealCoef(
            rgridcoeffs, angles, dj_matrix)
        igridcoeffs_rot = _shtools.SHRotateRealCoef(
            igridcoeffs, angles, dj_matrix)
        rgrid_rot = _shtools.MakeGridDH(rgridcoeffs_rot, norm=1,
                                        sampling=1, csphase=1)
        igrid_rot = _shtools.MakeGridDH(igridcoeffs_rot, norm=1,
                                        sampling=1, csphase=1)
        grid_rot = rgrid_rot + 1j * igrid_rot
        coeffs_rot = _shtools.SHExpandDHC(grid_rot,
                                          norm=self._norm_flag(),
                                          csphase=self.csphase)
        return SHCoeffs.from_array(coeffs_rot,
                                   normalization=self.normalization,
                                   csphase=self.csphase, copy=False)

    def _expandDH(self, sampling, lmax, lmax_calc):
        """Evaluate the coefficients on a Driscoll and Healy (1994) grid."""
        data = _shtools.MakeGridDHC(self.coeffs, sampling=sampling,
                                    norm=self._norm_flag(),
                                    csphase=self.csphase, lmax=lmax,
                                    lmax_calc=lmax_calc)
        return SHGrid.from_array(data, grid='DH', copy=False)

    def _expandGLQ(self, zeros, lmax, lmax_calc):
        """Evaluate the coefficients on a Gauss-Legendre quadrature grid."""
        norm = self._norm_flag()
        if zeros is None:
            zeros, weights = _shtools.SHGLQ(self.lmax)
        data = _shtools.MakeGridGLQC(self.coeffs, zeros, norm=norm,
                                     csphase=self.csphase, lmax=lmax,
                                     lmax_calc=lmax_calc)
        return SHGrid.from_array(data, grid='GLQ', copy=False)

    def _expand_coord(self, lat, lon, lmax_calc, degrees):
        """Evaluate the function at the coordinates lat and lon, which
        may be scalars, ndarrays, or lists."""
        norm = self._norm_flag()
        # MakeGridPointC expects degrees; convert radian input once.
        if degrees is True:
            latin = lat
            lonin = lon
        else:
            latin = _np.rad2deg(lat)
            lonin = _np.rad2deg(lon)
        if type(lat) is not type(lon):
            raise ValueError('lat and lon must be of the same type. ' +
                             'Input types are {:s} and {:s}'
                             .format(repr(type(lat)), repr(type(lon))))
        if type(lat) is int or type(lat) is float:
            return _shtools.MakeGridPointC(self.coeffs, lat=latin,
                                           lon=lonin,
                                           lmax=lmax_calc, norm=norm,
                                           csphase=self.csphase)
        elif type(lat) is _np.ndarray:
            # dtype=complex, not float: MakeGridPointC returns complex
            # values, and the original float buffer could not receive
            # them.
            values = _np.empty_like(lat, dtype=complex)
            for v, latitude, longitude in _np.nditer([values, latin,
                                                      lonin],
                                                     op_flags=['readwrite']):
                v[...] = _shtools.MakeGridPointC(self.coeffs,
                                                 lat=latitude,
                                                 lon=longitude,
                                                 lmax=lmax_calc,
                                                 norm=norm,
                                                 csphase=self.csphase)
            return values
        elif type(lat) is list:
            values = []
            for latitude, longitude in zip(latin, lonin):
                values.append(
                    _shtools.MakeGridPointC(self.coeffs, lat=latitude,
                                            lon=longitude,
                                            lmax=lmax_calc, norm=norm,
                                            csphase=self.csphase))
            return values
        else:
            raise ValueError('lat and lon must be either an int, float, ' +
                             'ndarray, or list. ' +
                             'Input types are {:s} and {:s}'
                             .format(repr(type(lat)), repr(type(lon))))
# =============================================================================
# ========= GRID CLASSES ================================================
# =============================================================================
class SHGrid(object):
"""
Class for spatial gridded data on the sphere.
Grids can be initialized from:
x = SHGrid.from_array(array)
x = SHGrid.from_file('fname.dat')
The class instance defines the following class attributes:
data : Gridded array of the data.
nlat, nlon : The number of latitude and longitude bands in the grid.
lmax : The maximum spherical harmonic degree that can be resolved
by the grid sampling.
sampling : For Driscoll and Healy grids, the longitudinal sampling
of the grid. Either 1 for nlong=nlat or 2 for nlong=2*nlat.
kind : Either 'complex' or 'real' for the data type.
grid : Either 'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-
Legendre Quadrature grids.
zeros : The cos(colatitude) nodes used with Gauss-Legendre
Quadrature grids. Default is None.
weights : The latitudinal weights used with Gauss-Legendre
Quadrature grids. Default is None.
Each class instance provides the following methods:
to_array() : Return the raw gridded data as a numpy array.
to_file() : Save gridded data to a text or binary file.
lats() : Return a vector containing the latitudes of each row
of the gridded data.
lons() : Return a vector containing the longitudes of each column
of the gridded data.
expand() : Expand the grid into spherical harmonics.
copy() : Return a copy of the class instance.
plot() : Plot the raw data using a simple cylindrical projection.
plot3d() : Plot the raw data on a 3d sphere.
info() : Print a summary of the data stored in the SHGrid instance.
"""
def __init__(self):
    """Unused constructor of the super class.

    Direct instantiation is unsupported; this only prints guidance to
    use a factory method. The original definition omitted ``self``, so
    calling ``SHGrid()`` raised TypeError instead of printing this
    message.
    """
    print('Initialize the class using one of the class methods:\n'
          '>>> SHGrid.from_array?\n'
          '>>> SHGrid.from_file?\n')
# ---- Factory methods ----
@classmethod
def from_array(cls, array, grid='DH', copy=True):
    """
    Initialize the class instance from an input array.

    Usage
    -----
    x = SHGrid.from_array(array, [grid, copy])

    Returns
    -------
    x : SHGrid class instance

    Parameters
    ----------
    array : ndarray, shape (nlat, nlon)
        2-D numpy array of the gridded data, where nlat and nlon are the
        number of latitudinal and longitudinal bands, respectively.
    grid : str, optional, default = 'DH'
        'DH' or 'GLQ' for Driscoll and Healy grids or Gauss Legendre
        Quadrature grids, respectively.
    copy : bool, optional, default = True
        If True (default), make a copy of array when initializing the
        class instance. If False, initialize the class instance with a
        reference to array.
    """
    # The dtype of the array decides between the real and complex
    # subclasses.
    kind = 'complex' if _np.iscomplexobj(array) else 'real'
    if type(grid) != str:
        raise ValueError('grid must be a string. ' +
                         'Input type was {:s}'
                         .format(str(type(grid))))
    if grid.upper() not in set(['DH', 'GLQ']):
        raise ValueError(
            "grid must be 'DH' or 'GLQ'. Input value was {:s}."
            .format(repr(grid))
        )
    # Dispatch to the first registered subclass matching both the data
    # kind and the grid type.
    for subclass in cls.__subclasses__():
        if subclass.istype(kind) and subclass.isgrid(grid):
            return subclass(array, copy=copy)
@classmethod
def from_file(cls, fname, binary=False, **kwargs):
    """
    Initialize the class instance from gridded data in a file.

    Usage
    -----
    x = SHGrid.from_file(fname, [binary, **kwargs])

    Returns
    -------
    x : SHGrid class instance

    Parameters
    ----------
    fname : str
        The filename containing the gridded data. For text files
        (default) the file is read using the numpy routine loadtxt(),
        whereas for binary files, the file is read using numpy.load().
        The dimensions of the array must be nlon=nlat or nlon=2*nlat for
        Driscoll and Healy grids, or nlon=2*nlat-1 for Gauss-Legendre
        Quadrature grids.
    binary : bool, optional, default = False
        If False, read a text file. If True, read a binary 'npy' file.
    **kwargs : keyword arguments, optional
        Keyword arguments of numpy.loadtxt() or numpy.load().

    Raises
    ------
    ValueError
        If binary is neither True nor False, or if the array dimensions
        match no supported grid type.
    """
    if binary is False:
        data = _np.loadtxt(fname, **kwargs)
    elif binary is True:
        data = _np.load(fname, **kwargs)
    else:
        # '{!r}' instead of '{:s}': the latter raised TypeError for any
        # non-str value of binary, masking this ValueError.
        raise ValueError('binary must be True or False. '
                         'Input value is {!r}'.format(binary))
    # The dtype decides between the real and complex subclasses.
    if _np.iscomplexobj(data):
        kind = 'complex'
    else:
        kind = 'real'
    # Infer the grid type from the array dimensions.
    if (data.shape[1] == data.shape[0]) or (data.shape[1] ==
                                            2 * data.shape[0]):
        grid = 'DH'
    elif data.shape[1] == 2 * data.shape[0] - 1:
        grid = 'GLQ'
    else:
        raise ValueError('Input grid must be dimensioned as ' +
                         '(nlat, nlon). For DH grids, nlon = nlat or ' +
                         'nlon = 2 * nlat. For GLQ grids, nlon = ' +
                         '2 * nlat - 1. Input dimensions are nlat = ' +
                         '{:d}, nlon = {:d}'.format(data.shape[0],
                                                    data.shape[1]))
    # Dispatch to the first registered subclass matching both the data
    # kind and the grid type.
    for subclass in cls.__subclasses__():
        if subclass.istype(kind) and subclass.isgrid(grid):
            return subclass(data)
def copy(self):
"""Return a deep copy of the class instance."""
return _copy.deepcopy(self)
def to_file(self, filename, binary=False, **kwargs):
"""
Save gridded data to a file.
Usage
-----
x.to_file(filename, [binary, **kwargs])
Parameters
----------
filename : str
Name of output file. For text files (default), the file will be
saved automatically in gzip compressed format if the filename ends
in .gz.
binary : bool, optional, default = False
If False, save as text using numpy.savetxt(). If True, save as a
'npy' binary file using numpy.save().
**kwargs : keyword arguments, optional
Keyword arguments of numpy.savetxt() and numpy.save().
"""
if binary is False:
_np.savetxt(filename, self.data, **kwargs)
elif binary is True:
_np.save(filename, self.data, **kwargs)
else:
raise ValueError('binary must be True or False. '
'Input value is {:s}'.format(binary))
# ---- Mathematical operators ----
def __add__(self, other):
"""Add two similar grids or a grid and a scaler: self + other."""
if isinstance(other, SHGrid):
if (self.grid == other.grid and self.data.shape ==
other.data.shape and self.kind == other.kind):
data = self.data + other.data
return SHGrid.from_array(data, grid=self.grid)
else:
raise ValueError('The two grids must be of the ' +
'same kind and have the same shape.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not add a complex constant to a ' +
'real grid.')
data = self.data + other
return SHGrid.from_array(data, grid=self.grid)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __radd__(self, other):
"""Add two similar grids or a grid and a scaler: self + other."""
return self.__add__(other)
def __sub__(self, other):
"""Subtract two similar grids or a grid and a scaler: self - other."""
if isinstance(other, SHGrid):
if (self.grid == other.grid and self.data.shape ==
other.data.shape and self.kind == other.kind):
data = self.data - other.data
return SHGrid.from_array(data, grid=self.grid)
else:
raise ValueError('The two grids must be of the ' +
'same kind and have the same shape.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not subtract a complex constant from ' +
'a real grid.')
data = self.data - other
return SHGrid.from_array(data, grid=self.grid)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __rsub__(self, other):
"""Subtract two similar grids or a grid and a scaler: other - self."""
if isinstance(other, SHGrid):
if (self.grid == other.grid and self.data.shape ==
other.data.shape and self.kind == other.kind):
data = other.data - self.data
return SHGrid.from_array(data, grid=self.grid)
else:
raise ValueError('The two grids must be of the ' +
'same kind and have the same shape.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not subtract a complex constant from ' +
'a real grid.')
data = other - self.data
return SHGrid.from_array(data, grid=self.grid)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __mul__(self, other):
"""Multiply two similar grids or a grid and a scaler: self * other."""
if isinstance(other, SHGrid):
if (self.grid == other.grid and self.data.shape ==
other.data.shape and self.kind == other.kind):
data = self.data * other.data
return SHGrid.from_array(data, grid=self.grid)
else:
raise ValueError('The two grids must be of the ' +
'same kind and have the same shape.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not multiply a real grid by a complex ' +
'constant.')
data = self.data * other
return SHGrid.from_array(data, grid=self.grid)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __rmul__(self, other):
"""Multiply two similar grids or a grid and a scaler: other * self."""
return self.__mul__(other)
def __div__(self, other):
"""
Divide two similar grids or a grid and a scalar, when
__future__.division is not in effect.
"""
if isinstance(other, SHGrid):
if (self.grid == other.grid and self.data.shape ==
other.data.shape and self.kind == other.kind):
data = self.data / other.data
return SHGrid.from_array(data, grid=self.grid)
else:
raise ValueError('The two grids must be of the ' +
'same kind and have the same shape.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not divide a real grid by a complex ' +
'constant.')
data = self.data / other
return SHGrid.from_array(data, grid=self.grid)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __truediv__(self, other):
"""
Divide two similar grids or a grid and a scalar, when
__future__.division is in effect.
"""
if isinstance(other, SHGrid):
if (self.grid == other.grid and self.data.shape ==
other.data.shape and self.kind == other.kind):
data = self.data / other.data
return SHGrid.from_array(data, grid=self.grid)
else:
raise ValueError('The two grids must be of the ' +
'same kind and have the same shape.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not divide a real grid by a complex ' +
'constant.')
data = self.data / other
return SHGrid.from_array(data, grid=self.grid)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __pow__(self, other):
"""Raise a grid to a scalar power: pow(self, other)."""
if _np.isscalar(other) is True:
return SHGrid.from_array(pow(self.data, other), grid=self.grid)
else:
raise NotImplementedError('Mathematical operator not implemented' +
'for these operands.')
def __abs__(self):
"""Return the absolute value of the gridded data."""
return SHGrid.from_array(abs(self.data), grid=self.grid)
# ---- Extract grid properties ----
def lats(self, degrees=True):
"""
Return the latitudes of each row of the gridded data.
Usage
-----
lats = x.lats([degrees])
Returns
-------
lats : ndarray, shape (nlat)
1-D numpy array of size nlat containing the latitude of each row
of the gridded data.
Parameters
-------
degrees : bool, optional, default = True
If True, the output will be in degrees. If False, the output will
be in radians.
"""
if degrees is False:
return _np.radians(self._lats())
else:
return self._lats()
    def lons(self, degrees=True):
        """
        Return the longitudes of each column of the gridded data.

        Usage
        -----
        lons = x.lons([degrees])

        Returns
        -------
        lons : ndarray, shape (nlon)
            1-D numpy array of size nlon containing the longitude of each
            column of the gridded data.

        Parameters
        ----------
        degrees : bool, optional, default = True
            If True, the output will be in degrees. If False, the output will
            be in radians.
        """
        # Delegates to the subclass-specific _lons(), which returns degrees.
        if degrees is False:
            return _np.radians(self._lons())
        else:
            return self._lons()
def to_array(self):
"""
Return the raw gridded data as a numpy array.
Usage
-----
grid = x.to_array()
Returns
-------
grid : ndarray, shape (nlat, nlon)
2-D numpy array of the gridded data.
"""
return _np.copy(self.data)
    def plot3d(self, elevation=0, azimuth=0, show=True, fname=None):
        """
        Plot the raw data on a 3d sphere.

        This routines becomes slow for large grids because it is based on
        matplotlib3d.

        Usage
        -----
        x.plot3d([elevation, azimuth, show, fname])

        Parameters
        ----------
        elevation : float, optional, default = 0
            elev parameter for the 3d projection.
        azimuth : float, optional, default = 0
            azim parameter for the 3d projection.
        show : bool, optional, default = True
            If True, plot the image to the screen.
        fname : str, optional, default = None
            If present, save the image to the specified file.
        """
        from mpl_toolkits.mplot3d import Axes3D  # NOQA

        nlat, nlon = self.nlat, self.nlon
        cmap = _plt.get_cmap('RdBu_r')

        # Complex grids are plotted by magnitude.
        if self.kind == 'real':
            data = self.data
        elif self.kind == 'complex':
            data = _np.abs(self.data)
        else:
            raise ValueError('Grid has to be either real or complex, not {}'
                             .format(self.kind))

        lats = self.lats()
        lons = self.lons()

        # Close the mesh at the poles and at 0/360 degrees longitude.
        # NOTE(review): lats_circular is only assigned for 'DH' and 'GLQ'
        # grids; any other self.grid value would raise NameError below.
        if self.grid == 'DH':
            # add south pole
            lats_circular = _np.append(lats, [-90.])
        elif self.grid == 'GLQ':
            # add north and south pole
            lats_circular = _np.hstack(([90.], lats, [-90.]))
        lons_circular = _np.append(lons, [lons[0]])

        nlats_circular = len(lats_circular)
        nlons_circular = len(lons_circular)

        sshape = nlats_circular, nlons_circular

        # make uv sphere and store all points
        u = _np.radians(lons_circular)
        v = _np.radians(90. - lats_circular)

        x = _np.sin(v)[:, None] * _np.cos(u)[None, :]
        y = _np.sin(v)[:, None] * _np.sin(u)[None, :]
        z = _np.cos(v)[:, None] * _np.ones_like(lons_circular)[None, :]

        points = _np.vstack((x.flatten(), y.flatten(), z.flatten()))

        # fill data for all points. 0 lon has to be repeated (circular mesh)
        # and the south pole has to be added in the DH grid
        if self.grid == 'DH':
            magn_point = _np.zeros((nlat + 1, nlon + 1))
            magn_point[:-1, :-1] = data
            magn_point[-1, :] = _np.mean(data[-1])  # not exact !
            magn_point[:-1, -1] = data[:, 0]
        if self.grid == 'GLQ':
            magn_point = _np.zeros((nlat + 2, nlon + 1))
            magn_point[1:-1, :-1] = data
            magn_point[0, :] = _np.mean(data[0])  # not exact !
            magn_point[-1, :] = _np.mean(data[-1])  # not exact !
            magn_point[1:-1, -1] = data[:, 0]

        # compute face color, which is the average of all neighbour points
        magn_face = 1./4. * (magn_point[1:, 1:] + magn_point[:-1, 1:] +
                             magn_point[1:, :-1] + magn_point[:-1, :-1])

        magnmax_face = _np.max(_np.abs(magn_face))
        magnmax_point = _np.max(_np.abs(magn_point))

        # compute colours and displace the points radially by the data value
        norm = _plt.Normalize(-magnmax_face / 2., magnmax_face / 2., clip=True)
        colors = cmap(norm(magn_face.flatten()))
        colors = colors.reshape(nlats_circular - 1, nlons_circular - 1, 4)
        points *= (1. + magn_point.flatten() / magnmax_point / 2.)

        x = points[0].reshape(sshape)
        y = points[1].reshape(sshape)
        z = points[2].reshape(sshape)

        # plot 3d radiation pattern
        fig = _plt.figure(figsize=(10, 10))
        ax3d = fig.add_subplot(1, 1, 1, projection='3d')
        ax3d.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=colors)
        ax3d.set(xlim=(-1.5, 1.5), ylim=(-1.5, 1.5), zlim=(-1.5, 1.5),
                 xticks=[-1, 1], yticks=[-1, 1], zticks=[-1, 1])
        ax3d.set_axis_off()
        ax3d.view_init(elev=elevation, azim=azimuth)

        # show or save output
        if show:
            _plt.show()
        if fname is not None:
            fig.savefig(fname)

        return fig, ax3d
# ---- Plotting routines ----
    def plot(self, tick_interval=[30, 30], ax=None, ax2=None, show=True,
             fname=None, **kwargs):
        """
        Plot the raw data using a simple cylindrical projection.

        Usage
        -----
        x.plot([tick_interval, ax, ax2, show, fname])

        Parameters
        ----------
        tick_interval : list or tuple, optional, default = [30, 30]
            Intervals to use when plotting the x and y ticks. If set to None,
            ticks will not be plotted.
        xlabel : str, optional, default = 'longitude' or 'GLQ longitude index'
            Label for the longitude axis.
        ylabel : str, optional, default = 'latitude' or 'GLQ latitude index'
            Label for the latitude axis.
        ax : matplotlib axes object, optional, default = None
            A single matplotlib axes object where the plot will appear. If the
            grid is complex, the real component of the grid will be plotted
            on this axes.
        ax2 : matplotlib axes object, optional, default = None
            A single matplotlib axes object where the plot will appear. If the
            grid is complex, the complex component of the grid will be plotted
            on this axes.
        show : bool, optional, default = True
            If True, plot the image to the screen.
        fname : str, optional, default = None
            If present, and if axes is not specified, save the image to the
            specified file.
        """
        # Tick positions are array indices for GLQ grids, degrees for DH.
        if tick_interval is None:
            xticks = []
            yticks = []
        elif self.grid == 'GLQ':
            xticks = _np.linspace(0, self.nlon-1,
                                  num=self.nlon//tick_interval[0]+1,
                                  endpoint=True, dtype=int)
            yticks = _np.linspace(0, self.nlat-1,
                                  num=self.nlat//tick_interval[1]+1,
                                  endpoint=True, dtype=int)
        else:
            xticks = _np.linspace(0, 360, num=360//tick_interval[0]+1,
                                  endpoint=True)
            yticks = _np.linspace(-90, 90, num=180//tick_interval[1]+1,
                                  endpoint=True)

        if ax is None and ax2 is None:
            fig, axes = self._plot(xticks=xticks, yticks=yticks, **kwargs)
        else:
            if self.kind == 'complex':
                if (ax is None and ax2 is not None) or (ax2 is None and
                                                        ax is not None):
                    raise ValueError('For complex grids, one must specify ' +
                                     'both optional arguments axes and axes2.')
            self._plot(xticks=xticks, yticks=yticks, ax=ax, ax2=ax2, **kwargs)

        # NOTE(review): if ax is None while ax2 is not None (real grids), the
        # else-branch above ran and 'fig'/'axes' are unbound here, raising
        # UnboundLocalError -- confirm callers pass both or neither. When ax
        # is supplied, the method implicitly returns None.
        if ax is None:
            if show:
                _plt.show()
            if fname is not None:
                fig.savefig(fname)
            return fig, axes
def expand(self, normalization='4pi', csphase=1, **kwargs):
"""
Expand the grid into spherical harmonics.
Usage
-----
clm = x.expand([normalization, csphase, lmax_calc])
Returns
-------
clm : SHCoeffs class instance
Parameters
----------
normalization : str, optional, default = '4pi'
Normalization of the output class: '4pi', 'ortho', 'schmidt', or
'unnorm', for geodesy 4pi normalized, orthonormalized, Schmidt
semi-normalized, or unnormalized coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax_calc : int, optional, default = x.lmax
Maximum spherical harmonic degree to return.
"""
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
return self._expand(normalization=normalization, csphase=csphase,
**kwargs)
def info(self):
"""
Print a summary of the data stored in the SHGrid instance.
Usage
-----
x.info()
"""
print('kind = {:s}\ngrid = {:s}\n'.format(repr(self.kind),
repr(self.grid)), end='')
if self.grid == 'DH':
print('sampling = {:d}\n'.format(self.sampling), end='')
print('nlat = {:d}\nnlon = {:d}\nlmax = {:d}'.format(self.nlat,
self.nlon,
self.lmax))
# ---- Real Driscoll and Healy grid class ----
class DHRealGrid(SHGrid):
    """Class for real Driscoll and Healy (1994) grids."""

    @staticmethod
    def istype(kind):
        # Dispatch predicate used by SHGrid.from_array()/from_file().
        return kind == 'real'

    @staticmethod
    def isgrid(grid):
        # Dispatch predicate used by SHGrid.from_array()/from_file().
        return grid == 'DH'

    def __init__(self, array, copy=True):
        """Store a real (nlat, nlon) DH grid; nlat must be even and nlon
        either nlat (sampling=1) or 2*nlat (sampling=2)."""
        self.nlat, self.nlon = array.shape

        if self.nlat % 2 != 0:
            raise ValueError('Input arrays for DH grids must have an even ' +
                             'number of latitudes: nlat = {:d}'
                             .format(self.nlat)
                             )
        if self.nlon == 2 * self.nlat:
            self.sampling = 2
        elif self.nlat == self.nlon:
            self.sampling = 1
        else:
            raise ValueError('Input array has shape (nlat={:d},nlon={:d})\n'
                             .format(self.nlat, self.nlon) +
                             'but needs nlat=nlon or nlat=2*nlon'
                             )
        # Maximum degree implied by the DH sampling theorem.
        self.lmax = int(self.nlat / 2 - 1)
        self.grid = 'DH'
        self.kind = 'real'

        if copy:
            self.data = _np.copy(array)
        else:
            self.data = array

    def _lats(self):
        """Return the latitudes (in degrees) of the gridded data."""
        # Rows run from the north pole southward; the south pole is excluded.
        lats = _np.linspace(90.0, -90.0 + 180.0 / self.nlat, num=self.nlat)
        return lats

    def _lons(self):
        """Return the longitudes (in degrees) of the gridded data."""
        # Columns run eastward from 0; 360 is excluded (periodic).
        lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
        return lons

    def _expand(self, normalization, csphase, **kwargs):
        """Expand the grid into real spherical harmonics."""
        # Map the normalization name to the integer code used by SHTOOLS.
        if normalization.lower() == '4pi':
            norm = 1
        elif normalization.lower() == 'schmidt':
            norm = 2
        elif normalization.lower() == 'unnorm':
            norm = 3
        elif normalization.lower() == 'ortho':
            norm = 4
        else:
            raise ValueError(
                "The normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Input value was {:s}."
                .format(repr(normalization))
                )

        cilm = _shtools.SHExpandDH(self.data, norm=norm, csphase=csphase,
                                   sampling=self.sampling,
                                   **kwargs)
        coeffs = SHCoeffs.from_array(cilm,
                                     normalization=normalization.lower(),
                                     csphase=csphase, copy=False)
        return coeffs

    def _plot(self, xticks=[], yticks=[], xlabel='longitude',
              ylabel='latitude', ax=None, ax2=None):
        """Plot the raw data using a simple cylindrical projection."""
        if ax is None:
            fig, axes = _plt.subplots(1, 1)
        else:
            axes = ax

        axes.imshow(self.data, origin='upper', extent=(0., 360., -90., 90.))
        axes.set(xlabel=xlabel, ylabel=ylabel, xticks=xticks, yticks=yticks)

        # Only return the figure when this method created it.
        if ax is None:
            fig.tight_layout(pad=0.5)
            return fig, axes
# ---- Complex Driscoll and Healy grid class ----
class DHComplexGrid(SHGrid):
    """
    Class for complex Driscoll and Healy (1994) grids.
    """

    @staticmethod
    def istype(kind):
        # Dispatch predicate used by SHGrid.from_array()/from_file().
        return kind == 'complex'

    @staticmethod
    def isgrid(grid):
        # Dispatch predicate used by SHGrid.from_array()/from_file().
        return grid == 'DH'

    def __init__(self, array, copy=True):
        """Store a complex (nlat, nlon) DH grid; nlat must be even and nlon
        either nlat (sampling=1) or 2*nlat (sampling=2)."""
        self.nlat, self.nlon = array.shape

        if self.nlat % 2 != 0:
            raise ValueError('Input arrays for DH grids must have an even ' +
                             'number of latitudes: nlat = {:d}'
                             .format(self.nlat)
                             )
        if self.nlon == 2 * self.nlat:
            self.sampling = 2
        elif self.nlat == self.nlon:
            self.sampling = 1
        else:
            raise ValueError('Input array has shape (nlat={:d},nlon={:d})\n'
                             .format(self.nlat, self.nlon) +
                             'but needs nlat=nlon or nlat=2*nlon'
                             )
        # Maximum degree implied by the DH sampling theorem.
        self.lmax = int(self.nlat / 2 - 1)
        self.grid = 'DH'
        self.kind = 'complex'

        if copy:
            self.data = _np.copy(array)
        else:
            self.data = array

    def _lats(self):
        """
        Return a vector containing the latitudes (in degrees) of each row
        of the gridded data.
        """
        lats = _np.linspace(90.0, -90.0 + 180.0 / self.nlat, num=self.nlat)
        return lats

    def _lons(self):
        """
        Return a vector containing the longitudes (in degrees) of each column
        of the gridded data.
        """
        lons = _np.linspace(0., 360.0 - 360.0 / self.nlon, num=self.nlon)
        return lons

    def _expand(self, normalization, csphase, **kwargs):
        """Expand the grid into complex spherical harmonics."""
        if normalization.lower() == '4pi':
            norm = 1
        elif normalization.lower() == 'schmidt':
            norm = 2
        elif normalization.lower() == 'unnorm':
            # Bug fix: this branch previously re-tested 'schmidt', making
            # norm = 3 unreachable and causing 'unnorm' to fall through to
            # the ValueError below.
            norm = 3
        elif normalization.lower() == 'ortho':
            norm = 4
        else:
            raise ValueError(
                "The normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Input value was {:s}."
                .format(repr(normalization))
                )

        cilm = _shtools.SHExpandDHC(self.data, norm=norm, csphase=csphase,
                                    **kwargs)
        coeffs = SHCoeffs.from_array(cilm, normalization=normalization.lower(),
                                     csphase=csphase, copy=False)
        return coeffs

    def _plot(self, xticks=[], yticks=[], xlabel='longitude',
              ylabel='latitude', ax=None, ax2=None):
        """Plot the real and imaginary components using a simple cylindrical
        projection."""
        if ax is None:
            fig, axes = _plt.subplots(2, 1)
            axreal = axes.flat[0]
            axcomplex = axes.flat[1]
        else:
            axreal = ax
            axcomplex = ax2

        axreal.imshow(self.data.real, origin='upper',
                      extent=(0., 360., -90., 90.))
        axreal.set(title='Real component', xlabel=xlabel, ylabel=ylabel,
                   xticks=xticks, yticks=yticks)
        axcomplex.imshow(self.data.imag, origin='upper',
                         extent=(0., 360., -90., 90.))
        axcomplex.set(title='Imaginary component', xlabel=xlabel,
                      ylabel=ylabel, xticks=xticks, yticks=yticks)

        # Only return the figure when this method created it.
        if ax is None:
            fig.tight_layout(pad=0.5)
            return fig, axes
# ---- Real Gaus Legendre Quadrature grid class ----
class GLQRealGrid(SHGrid):
    """
    Class for real Gauss-Legendre Quadrature grids.
    """

    @staticmethod
    def istype(kind):
        # Dispatch predicate used by SHGrid.from_array()/from_file().
        return kind == 'real'

    @staticmethod
    def isgrid(grid):
        # Dispatch predicate used by SHGrid.from_array()/from_file().
        return grid == 'GLQ'

    def __init__(self, array, zeros=None, weights=None, copy=True):
        """Store a real (lmax+1, 2*lmax+1) GLQ grid; quadrature zeros and
        weights are computed when not supplied."""
        self.nlat, self.nlon = array.shape
        self.lmax = self.nlat - 1

        # NOTE(review): since lmax is defined as nlat - 1 just above, the
        # first condition can never be True; only the nlon check is
        # effective here.
        if self.nlat != self.lmax + 1 or self.nlon != 2 * self.lmax + 1:
            raise ValueError('Input array has shape (nlat={:d}, nlon={:d})\n'
                             .format(self.nlat, self.nlon) +
                             'but needs (nlat={:d}, {:d})'
                             .format(self.lmax+1, 2*self.lmax+1)
                             )
        if zeros is None or weights is None:
            self.zeros, self.weights = _shtools.SHGLQ(self.lmax)
        else:
            self.zeros = zeros
            self.weights = weights

        self.grid = 'GLQ'
        self.kind = 'real'
        if copy:
            self.data = _np.copy(array)
        else:
            self.data = array

    def _lats(self):
        """
        Return a vector containing the latitudes (in degrees) of each row
        of the gridded data.
        """
        # Latitudes follow the Gauss-Legendre quadrature nodes (zeros).
        lats = 90. - _np.arccos(self.zeros) * 180. / _np.pi
        return lats

    def _lons(self):
        """
        Return a vector containing the longitudes (in degrees) of each column
        of the gridded data.
        """
        lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
        return lons

    def _expand(self, normalization, csphase, **kwargs):
        """Expand the grid into real spherical harmonics."""
        # Map the normalization name to the integer code used by SHTOOLS.
        if normalization.lower() == '4pi':
            norm = 1
        elif normalization.lower() == 'schmidt':
            norm = 2
        elif normalization.lower() == 'unnorm':
            norm = 3
        elif normalization.lower() == 'ortho':
            norm = 4
        else:
            raise ValueError(
                "The normalization must be '4pi', 'ortho', 'schmidt' " +
                "or 'unnorm'. Input value was {:s}."
                .format(repr(normalization))
                )

        cilm = _shtools.SHExpandGLQ(self.data, self.weights, self.zeros,
                                    norm=norm, csphase=csphase, **kwargs)
        coeffs = SHCoeffs.from_array(cilm, normalization=normalization.lower(),
                                     csphase=csphase, copy=False)
        return coeffs

    def _plot(self, xticks=[], yticks=[], xlabel='GLQ longitude index',
              ylabel='GLQ latitude index', ax=None, ax2=None):
        """Plot the raw data using a simple cylindrical projection."""
        if ax is None:
            fig, axes = _plt.subplots(1, 1)
        else:
            axes = ax

        axes.imshow(self.data, origin='upper')
        axes.set(xlabel=xlabel, ylabel=ylabel, xticks=xticks, yticks=yticks)

        # Only return the figure when this method created it.
        if ax is None:
            fig.tight_layout(pad=0.5)
            return fig, axes
# ---- Complex Gaus Legendre Quadrature grid class ----
class GLQComplexGrid(SHGrid):
    """
    Class for complex Gauss Legendre Quadrature grids.
    """

    @staticmethod
    def istype(kind):
        # Dispatch predicate used by SHGrid.from_array()/from_file().
        return kind == 'complex'

    @staticmethod
    def isgrid(grid):
        # Dispatch predicate used by SHGrid.from_array()/from_file().
        return grid == 'GLQ'

    def __init__(self, array, zeros=None, weights=None, copy=True):
        """Store a complex (lmax+1, 2*lmax+1) GLQ grid; quadrature zeros and
        weights are computed when not supplied."""
        self.nlat, self.nlon = array.shape
        self.lmax = self.nlat - 1

        # NOTE(review): since lmax is defined as nlat - 1 just above, the
        # first condition can never be True; only the nlon check is
        # effective here.
        if self.nlat != self.lmax + 1 or self.nlon != 2 * self.lmax + 1:
            raise ValueError('Input array has shape (nlat={:d}, nlon={:d})\n'
                             .format(self.nlat, self.nlon) +
                             'but needs (nlat={:d}, {:d})'
                             .format(self.lmax+1, 2*self.lmax+1)
                             )
        if zeros is None or weights is None:
            self.zeros, self.weights = _shtools.SHGLQ(self.lmax)
        else:
            self.zeros = zeros
            self.weights = weights

        self.grid = 'GLQ'
        self.kind = 'complex'
        if copy:
            self.data = _np.copy(array)
        else:
            self.data = array

    def _lats(self):
        """Return the latitudes (in degrees) of the gridded data rows."""
        # Latitudes follow the Gauss-Legendre quadrature nodes (zeros).
        lats = 90. - _np.arccos(self.zeros) * 180. / _np.pi
        return lats

    def _lons(self):
        """Return the longitudes (in degrees) of the gridded data columns."""
        lons = _np.linspace(0., 360. - 360. / self.nlon, num=self.nlon)
        return lons

    def _expand(self, normalization, csphase, **kwargs):
        """Expand the grid into complex spherical harmonics."""
        # Map the normalization name to the integer code used by SHTOOLS.
        if normalization.lower() == '4pi':
            norm = 1
        elif normalization.lower() == 'schmidt':
            norm = 2
        elif normalization.lower() == 'unnorm':
            norm = 3
        elif normalization.lower() == 'ortho':
            norm = 4
        else:
            raise ValueError(
                "The normalization must be '4pi', 'ortho', 'schmidt' " +
                "or 'unnorm'. Input value was {:s}."
                .format(repr(normalization))
                )

        cilm = _shtools.SHExpandGLQC(self.data, self.weights, self.zeros,
                                     norm=norm, csphase=csphase, **kwargs)
        coeffs = SHCoeffs.from_array(cilm, normalization=normalization.lower(),
                                     csphase=csphase, copy=False)
        return coeffs

    def _plot(self, xticks=[], yticks=[], xlabel='GLQ longitude index',
              ylabel='GLQ latitude index', ax=None, ax2=None):
        """Plot the real and imaginary components using a simple cylindrical
        projection."""
        if ax is None:
            fig, axes = _plt.subplots(2, 1)
            axreal = axes.flat[0]
            axcomplex = axes.flat[1]
        else:
            axreal = ax
            axcomplex = ax2

        axreal.imshow(self.data.real, origin='upper')
        axreal.set(title='Real component', xlabel=xlabel, ylabel=ylabel,
                   xticks=xticks, yticks=yticks)
        axcomplex.imshow(self.data.imag, origin='upper')
        axcomplex.set(title='Imaginary component', xlabel=xlabel,
                      ylabel=ylabel, xticks=xticks, yticks=yticks)

        # Only return the figure when this method created it.
        if ax is None:
            fig.tight_layout(pad=0.5)
            return fig, axes
| ioshchepkov/SHTOOLS | pyshtools/shclasses/shcoeffsgrid.py | Python | bsd-3-clause | 118,730 |
#service.configuration
import yaml
class Configuration(object):
    """Service configuration loaded once from a YAML file."""

    def __init__(self, file):
        """Parse the YAML file at path *file* and keep the result in memory.

        Parameters
        ----------
        file : str
            Path to the YAML configuration file.
        """
        # Bug fixes: close the file handle (the original leaked it), and use
        # safe_load() -- yaml.load() without an explicit Loader is rejected
        # by PyYAML >= 6 and can construct arbitrary Python objects from
        # untrusted input.
        with open(file) as handle:
            self.configfile = yaml.safe_load(handle)

    def get_logging(self):
        """Return the 'Logging' section of the configuration."""
        return self.configfile['Logging']

    def set_database_connection(self, connection):
        """Configure *connection* with the stored 'ConnectionString' value.

        Returns the same connection object so calls can be chained.
        """
        connection.set_connection_data(self.configfile['ConnectionString'])
        return connection
import sys
import shutil
import os
import stat
import re
import posixpath
import pkg_resources
import zipfile
import tarfile
import subprocess
import textwrap
from pip.exceptions import InstallationError, BadCommand, PipError
from pip.backwardcompat import(WindowsError, string_types, raw_input,
console_to_str, user_site, PermissionError)
from pip.locations import site_packages, running_under_virtualenv, virtualenv_no_global
from pip.log import logger
from pip.vendor.distlib import version
# Public API of this utility module; ``from pip.util import *`` exports
# exactly these names. Keep in sync with the definitions below.
__all__ = ['rmtree', 'display_path', 'backup_dir',
           'find_command', 'ask', 'Inf',
           'normalize_name', 'splitext',
           'format_size', 'is_installable_dir',
           'is_svn_page', 'file_contents',
           'split_leading_dir', 'has_leading_dir',
           'make_path_relative', 'normalize_path',
           'renames', 'get_terminal_size', 'get_prog',
           'unzip_file', 'untar_file', 'create_download_cache_folder',
           'cache_download', 'unpack_file', 'call_subprocess']
def get_prog():
    """Return the program name to display in usage/error messages.

    When pip is run as ``python -m pip`` (argv[0] is '__main__.py' or
    '-c'), report that invocation; otherwise simply 'pip'.
    """
    try:
        basename = os.path.basename(sys.argv[0])
    except (AttributeError, TypeError, IndexError):
        return 'pip'
    if basename in ('__main__.py', '-c'):
        return "%s -m pip" % sys.executable
    return 'pip'
def rmtree(dir, ignore_errors=False):
    """Recursively delete *dir*, routing failures through
    rmtree_errorhandler (which clears the Windows read-only bit and
    retries)."""
    handler = rmtree_errorhandler
    shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=handler)
def rmtree_errorhandler(func, path, exc_info):
    """On Windows, the files in .svn are read-only, so when rmtree() tries to
    remove them, an exception is thrown. We catch that here, remove the
    read-only attribute, and hopefully continue without problems."""
    exctype, value = exc_info[:2]
    # Only handle "access denied" style failures; re-raise anything else.
    # NOTE(review): the PermissionError branch inspects value.args[3],
    # which assumes a 4-element args tuple -- confirm on Python 3.3+.
    if not ((exctype is WindowsError and value.args[0] == 5) or #others
            (exctype is OSError and value.args[0] == 13) or #python2.4
            (exctype is PermissionError and value.args[3] == 5) #python3.3
            ):
        raise
    # If the file is not actually read-only, clearing the read-only bit
    # will not help, so give up and re-raise.
    if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
        raise
    # convert to read/write
    os.chmod(path, stat.S_IWRITE)
    # use the original function to repeat the operation
    func(path)
def display_path(path):
    """Gives the display value for a given path, making it relative to cwd
    if possible."""
    absolute = os.path.normcase(os.path.abspath(path))
    prefix = os.getcwd() + os.path.sep
    if absolute.startswith(prefix):
        return '.' + absolute[len(os.getcwd()):]
    return absolute
def backup_dir(dir, ext='.bak'):
    """Figure out the name of a directory to back up the given dir to
    (adding .bak, .bak2, etc)"""
    counter = 1
    suffix = ext
    # Keep appending an increasing counter until the name is free.
    while os.path.exists(dir + suffix):
        counter += 1
        suffix = ext + str(counter)
    return dir + suffix
def find_command(cmd, paths=None, pathext=None):
    """Search the given paths (default: $PATH) for *cmd* and return its
    full path.

    In every directory, each extension from *pathext* is tried before the
    bare command name (Windows executables carry extensions such as .exe).
    Raises BadCommand when the command cannot be located.
    """
    if paths is None:
        paths = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(paths, string_types):
        paths = [paths]
    # Determine the candidate executable extensions.
    if pathext is None:
        pathext = get_pathext()
    extensions = [ext for ext in pathext.lower().split(os.pathsep) if ext]
    # If cmd already ends with a known extension, search for it verbatim.
    if os.path.splitext(cmd)[1].lower() in extensions:
        extensions = ['']
    for directory in paths:
        candidate = os.path.join(directory, cmd)
        for ext in extensions:
            if os.path.isfile(candidate + ext):
                return candidate + ext
        if os.path.isfile(candidate):
            return candidate
    raise BadCommand('Cannot find command %r' % cmd)
def get_pathext(default_pathext=None):
    """Return $PATHEXT from the environment, falling back to the standard
    Windows executable extensions."""
    if default_pathext is None:
        default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
    return os.environ.get('PATHEXT', default_pathext)
def ask_path_exists(message, options):
    """Like ask(), but honour $PIP_EXISTS_ACTION so automation can
    pre-answer "path already exists" questions without a prompt."""
    preset = os.environ.get('PIP_EXISTS_ACTION', '')
    for action in preset:
        if action in options:
            return action
    return ask(message, options)
def ask(message, options):
    """Prompt the user with *message* until one of *options* is entered.

    Raises an Exception when $PIP_NO_INPUT is set, because prompting
    would hang a non-interactive run.
    """
    while True:
        if os.environ.get('PIP_NO_INPUT'):
            raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message)
        response = raw_input(message).strip().lower()
        if response in options:
            return response
        print('Your response (%r) was not one of the expected responses: %s' % (
            response, ', '.join(options)))
class _Inf(object):
    """A sentinel value that compares greater than everything else."""

    def __eq__(self, other):
        # Only equal to itself; there is a single shared instance below.
        return self is other

    def __ne__(self, other):
        return self is not other

    def __lt__(self, other):
        return False

    def __le__(self, other):
        return False

    def __gt__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __repr__(self):
        return 'Inf'

# Single shared sentinel; the class itself is removed from the namespace.
Inf = _Inf() #this object is not currently used as a sortable in our code
del _Inf
# Anything that is not an ASCII letter collapses to '-'. The IGNORECASE
# flag is redundant after lower(), but kept from the original.
_normalize_re = re.compile(r'[^a-z]', re.I)


def normalize_name(name):
    """Return *name* lowercased with every non-letter replaced by '-'."""
    lowered = name.lower()
    return _normalize_re.sub('-', lowered)
def format_size(bytes):
    """Render a byte count as a short human-readable decimal size."""
    if bytes > 1000 * 1000:
        return '%.1fMB' % (bytes / 1000.0 / 1000)
    if bytes > 10 * 1000:
        return '%ikB' % (bytes / 1000)
    if bytes > 1000:
        return '%.1fkB' % (bytes / 1000.0)
    return '%ibytes' % bytes
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    return (os.path.isdir(path) and
            os.path.isfile(os.path.join(path, 'setup.py')))
def is_svn_page(html):
    """Returns true if the page appears to be the index page of an svn
    repository"""
    # Both markers must be present; preserve the original truthy-value
    # semantics of `a and b` (a match object or None, not a bool).
    title = re.search(r'<title>[^<]*Revision \d+:', html)
    if not title:
        return title
    return re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
def file_contents(filename):
    """Read *filename* and return its contents decoded as UTF-8."""
    # Idiom fix: a context manager guarantees the handle is closed,
    # replacing the manual try/finally.
    with open(filename, 'rb') as fp:
        return fp.read().decode('utf-8')
def split_leading_dir(path):
    """Split off the first path component, returning (head, rest).

    Note: a 2-item list is returned when a separator is found (str.split),
    and a tuple (path, '') when there is none, matching the original.
    """
    path = str(path).lstrip('/').lstrip('\\')
    slash = path.find('/')
    backslash = path.find('\\')
    # Split on whichever separator appears first.
    if slash != -1 and (backslash == -1 or slash < backslash):
        return path.split('/', 1)
    if backslash != -1:
        return path.split('\\', 1)
    return path, ''
def has_leading_dir(paths):
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    common = None
    for path in paths:
        prefix, _rest = split_leading_dir(path)
        if not prefix:
            return False
        if common is None:
            common = prefix
        elif prefix != common:
            return False
    return True
def make_path_relative(path, rel_to):
    """
    Make a filename (*path*) relative to the directory *rel_to*.

    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../../../something/a-file.pth'
    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../usr/share/something/a-file.pth'
    >>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
    'a-file.pth'
    """
    path_filename = os.path.basename(path)
    path = os.path.dirname(path)
    path = os.path.normpath(os.path.abspath(path))
    rel_to = os.path.normpath(os.path.abspath(rel_to))
    # Compare directory components with leading/trailing separators stripped.
    path_parts = path.strip(os.path.sep).split(os.path.sep)
    rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
    # Drop the common ancestor components shared by both paths.
    while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
        path_parts.pop(0)
        rel_to_parts.pop(0)
    # Climb out of what remains of rel_to, then descend into path.
    full_parts = ['..']*len(rel_to_parts) + path_parts + [path_filename]
    if full_parts == ['']:
        return '.' + os.path.sep
    return os.path.sep.join(full_parts)
def normalize_path(path):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    resolved = os.path.realpath(path)
    return os.path.normcase(resolved)
def splitext(path):
    """Like os.path.splitext, but take off .tar too"""
    base, ext = posixpath.splitext(path)
    # Treat '.tar.<compression>' as one compound extension.
    if base.lower().endswith('.tar'):
        return base[:-4], base[-4:] + ext
    return base, ext
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Implementation borrowed from os.renames(); shutil.move copes with
    # cross-device moves where os.rename would fail.
    new_dir, new_name = os.path.split(new)
    if new_dir and new_name and not os.path.exists(new_dir):
        os.makedirs(new_dir)
    shutil.move(old, new)
    old_dir, old_name = os.path.split(old)
    if old_dir and old_name:
        try:
            # Prune now-empty parents of the old location; stops at the
            # first non-empty directory.
            os.removedirs(old_dir)
        except OSError:
            pass
def is_local(path):
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.

    If we're not in a virtualenv, all paths are considered "local."
    """
    if running_under_virtualenv():
        return normalize_path(path).startswith(normalize_path(sys.prefix))
    return True
def dist_is_local(dist):
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).

    Always True if we're not in a virtualenv.
    """
    location = dist_location(dist)
    return is_local(location)
def dist_in_usersite(dist):
    """
    Return True if given Distribution is installed in user site.
    """
    if not user_site:
        return False
    norm_location = normalize_path(dist_location(dist))
    return norm_location.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
    """
    Return True if given Distribution is installed in distutils.sysconfig.get_python_lib().
    """
    norm_location = normalize_path(dist_location(dist))
    return norm_location.startswith(normalize_path(site_packages))
def dist_is_editable(dist):
    """Is distribution an editable install?"""
    #TODO: factor out determining editableness out of FrozenRequirement
    from pip import FrozenRequirement
    frozen = FrozenRequirement.from_dist(dist, [])
    return frozen.editable
def get_installed_distributions(local_only=True,
                                skip=('setuptools', 'pip', 'python'),
                                include_editables=True,
                                editables_only=False):
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.

    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to ('setuptools', 'pip', 'python'). [FIXME also
    skip virtualenv?]

    If ``include_editables`` is False, don't report editables.

    If ``editables_only`` is True, only report editables.
    """
    def _wanted(dist):
        # Apply the filters in order; each one can veto the distribution.
        if local_only and not dist_is_local(dist):
            return False
        if dist.key in skip:
            return False
        if not include_editables and dist_is_editable(dist):
            return False
        if editables_only and not dist_is_editable(dist):
            return False
        return True
    return [d for d in pkg_resources.working_set if _wanted(d)]
def egg_link_path(dist):
    """
    Return the path for the .egg-link file if it exists, otherwise, None.

    There's 3 scenarios:
    1) not in a virtualenv
       try to find in site.USER_SITE, then site_packages
    2) in a no-global virtualenv
       try to find in site_packages
    3) in a yes-global virtualenv
       try to find in site_packages, then site.USER_SITE (don't look in global location)

    For #1 and #3, there could be odd cases, where there's an egg-link in 2 locations.
    This method will just return the first one found.
    """
    sites = []
    if running_under_virtualenv():
        sites.append(site_packages)
        # Only a global-enabled virtualenv may also consult the user site.
        if not virtualenv_no_global() and user_site:
            sites.append(user_site)
    else:
        if user_site:
            sites.append(user_site)
        sites.append(site_packages)
    for site_dir in sites:
        egglink = os.path.join(site_dir, dist.project_name) + '.egg-link'
        if os.path.isfile(egglink):
            return egglink
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    # Prefer the egg-link location for develop installs.
    return egg_link_path(dist) or dist.location
def get_terminal_size():
    """Return a tuple (x, y) with the width (x) and height (y) of the
    terminal window, in characters.

    Fixes over the original: the duplicated ``cr == (0, 0)`` dead check is
    removed, the docstring no longer labels the height as "(x)", and the
    bare ``except:`` clauses no longer swallow KeyboardInterrupt/SystemExit.
    """
    def ioctl_GWINSZ(fd):
        # Ask the tty attached to fd for its (rows, cols) via TIOCGWINSZ.
        try:
            import fcntl
            import termios
            import struct
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                                 '1234'))
        except Exception:
            # Not a tty, not on POSIX, or the ioctl failed.
            return None
        if cr == (0, 0):
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # stdin/stdout/stderr were not ttys; try the controlling terminal.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            pass
    if not cr:
        # Fall back to the environment, then to a conservative default.
        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    # cr is (rows, cols); callers expect (width, height).
    return int(cr[1]), int(cr[0])
def unzip_file(filename, location, flatten=True):
    """Unzip the file (zip file located at filename) to the destination
    location.

    When ``flatten`` is true and all archive members share one top-level
    directory, that directory is stripped from the extracted paths.
    Unix permission bits stored in each entry are restored after writing.
    """
    if not os.path.exists(location):
        os.makedirs(location)
    zipfp = open(filename, 'rb')
    try:
        zip = zipfile.ZipFile(zipfp)
        # Only strip the leading directory when every member lives under it.
        leading = has_leading_dir(zip.namelist()) and flatten
        for info in zip.infolist():
            name = info.filename
            data = zip.read(name)
            fn = name
            if leading:
                fn = split_leading_dir(name)[1]
            fn = os.path.join(location, fn)
            dir = os.path.dirname(fn)
            if not os.path.exists(dir):
                os.makedirs(dir)
            if fn.endswith('/') or fn.endswith('\\'):
                # A directory
                if not os.path.exists(fn):
                    os.makedirs(fn)
            else:
                fp = open(fn, 'wb')
                try:
                    fp.write(data)
                finally:
                    fp.close()
                # The Unix mode bits live in the upper 16 bits of
                # external_attr; restoring them keeps executables executable.
                unix_attributes = info.external_attr >> 16
                if unix_attributes:
                    os.chmod(fn, unix_attributes)
    finally:
        zipfp.close()
def untar_file(filename, location):
    """Untar the file (tar file located at filename) to the destination location.

    The compression mode is chosen from the file extension; unknown
    extensions fall back to tarfile's transparent 'r:*' detection.
    """
    if not os.path.exists(location):
        os.makedirs(location)
    # Pick the tarfile read mode from the (lower-cased) extension.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
        mode = 'r:bz2'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warn('Cannot determine compression type for file %s' % filename)
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesnt seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            if leading:
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                if not os.path.exists(path):
                    os.makedirs(path)
            elif member.issym():
                # NOTE(review): _extract_member is a private tarfile API;
                # used here because it handles symlink creation directly.
                try:
                    tar._extract_member(member, path)
                except:
                    e = sys.exc_info()[1]
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warn(
                        'In the tar file %s the member %s is invalid: %s'
                        % (filename, member.name, e))
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError):
                    e = sys.exc_info()[1]
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warn(
                        'In the tar file %s the member %s is invalid: %s'
                        % (filename, member.name, e))
                    continue
                if not os.path.exists(os.path.dirname(path)):
                    os.makedirs(os.path.dirname(path))
                destfp = open(path, 'wb')
                try:
                    shutil.copyfileobj(fp, destfp)
                finally:
                    destfp.close()
                fp.close()
    finally:
        tar.close()
def create_download_cache_folder(folder):
    """Create the download-cache directory, logging the action."""
    # Temporarily dedent so the notice lines up with top-level log output.
    logger.indent -= 2
    logger.notify('Creating supposed download cache at %s' % folder)
    logger.indent += 2
    os.makedirs(folder)
def cache_download(target_file, temp_location, content_type):
    """Copy a downloaded file into the cache and record its content type."""
    logger.notify('Storing download in cache at %s' % display_path(target_file))
    shutil.copyfile(temp_location, target_file)
    # The content type is stored alongside the payload for later replay.
    with open(target_file + '.content-type', 'w') as ct_file:
        ct_file.write(content_type)
def unpack_file(filename, location, content_type, link):
    """Unpack the archive at *filename* into the directory *location*.

    The format is guessed from *content_type*, the file extension, and
    finally the file contents.  ``link`` is only consulted to rebuild an
    svn URL when the download turned out to be an svn index page.

    Raises InstallationError when no archive format can be determined.
    """
    filename = os.path.realpath(filename)
    if (content_type == 'application/zip'
        or filename.endswith('.zip')
        or filename.endswith('.pybundle')
        or filename.endswith('.whl')
        or zipfile.is_zipfile(filename)):
        # Bundles and wheels must keep their internal directory layout.
        unzip_file(filename, location, flatten=not filename.endswith(('.pybundle', '.whl')))
    elif (content_type == 'application/x-gzip'
          or tarfile.is_tarfile(filename)
          or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html')
          and is_svn_page(file_contents(filename))):
        # We don't really care about this
        from pip.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        ## FIXME: handle?
        ## FIXME: magic signatures?
        logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format'
                     % (filename, location, content_type))
        # Bug fix: report the archive file itself, not the destination
        # directory (the original interpolated `location` here).
        raise InstallationError('Cannot determine archive format of %s' % filename)
def call_subprocess(cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True,
                    command_level=logger.DEBUG, command_desc=None,
                    extra_environ=None):
    """
    Run ``cmd`` (a list of arguments) with stderr merged into stdout.

    When ``show_stdout`` is true the child inherits our stdout and nothing
    is captured; otherwise output is read line by line, optionally
    re-levelled via ``filter_stdout``, and the joined output is returned.
    A non-zero exit raises InstallationError when ``raise_on_returncode``
    is true, otherwise only a warning is logged.
    """
    if command_desc is None:
        # Build a shell-like, quoted description of the command for logs.
        cmd_parts = []
        for part in cmd:
            if ' ' in part or '\n' in part or '"' in part or "'" in part:
                part = '"%s"' % part.replace('"', '\\"')
            cmd_parts.append(part)
        command_desc = ' '.join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.log(command_level, "Running command %s" % command_desc)
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception:
        e = sys.exc_info()[1]
        logger.fatal(
            "Error %s while executing command %s" % (e, command_desc))
        raise
    all_output = []
    if stdout is not None:
        # Stream the child's output line by line so progress is visible.
        stdout = proc.stdout
        while 1:
            line = console_to_str(stdout.readline())
            if not line:
                break
            line = line.rstrip()
            all_output.append(line + '\n')
            if filter_stdout:
                level = filter_stdout(line)
                if isinstance(level, tuple):
                    # The filter may return (level, replacement_line).
                    level, line = level
                logger.log(level, line)
                if not logger.stdout_level_matches(level):
                    logger.show_progress()
            else:
                logger.info(line)
    else:
        returned_stdout, returned_stderr = proc.communicate()
        all_output = [returned_stdout or '']
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                logger.notify('Complete output from command %s:' % command_desc)
                logger.notify('\n'.join(all_output) + '\n----------------------------------------')
            raise InstallationError(
                "Command %s failed with error code %s in %s"
                % (command_desc, proc.returncode, cwd))
        else:
            logger.warn(
                "Command %s had error code %s in %s"
                % (command_desc, proc.returncode, cwd))
    if stdout is not None:
        return ''.join(all_output)
def is_prerelease(vers):
    """
    Attempt to determine if this is a pre-release using PEP386/PEP426 rules.

    Will return True if it is a pre-release and False if not. Versions are
    assumed to be a pre-release if they cannot be parsed.
    """
    normalized = version.suggest_normalized_version(vers)
    if normalized is None:
        # Cannot normalize, assume it is a pre-release
        return True
    parsed = version.normalized_key(normalized)
    pre_markers = set(["a", "b", "c", "rc", "dev"])
    # Any pre-release marker anywhere in the parsed key makes it a pre-release.
    return any(part in pre_markers for component in parsed for part in component)
| piyush82/icclab-rcb-web | virtualenv/lib/python2.7/site-packages/pip/util.py | Python | apache-2.0 | 22,686 |
"""
Add and create new modes for running courses on this particular LMS
"""
from django.db import models
from collections import namedtuple
from django.utils.translation import ugettext as _
Mode = namedtuple('Mode', ['slug', 'name', 'min_price', 'suggested_prices', 'currency'])
class CourseMode(models.Model):
    """
    We would like to offer a course in a variety of modes.

    Each row pairs a course with one purchasable/selectable mode; the
    (course_id, mode_slug, currency) triple is unique.
    """
    # the course that this mode is attached to
    course_id = models.CharField(max_length=255, db_index=True)
    # the reference to this mode that can be used by Enrollments to generate
    # similar behavior for the same slug across courses
    mode_slug = models.CharField(max_length=100)
    # The 'pretty' name that can be translated and displayed
    mode_display_name = models.CharField(max_length=255)
    # minimum price in USD that we would like to charge for this mode of the course
    min_price = models.IntegerField(default=0)
    # the suggested prices for this mode
    suggested_prices = models.CommaSeparatedIntegerField(max_length=255, blank=True, default='')
    # the currency these prices are in, using lower case ISO currency codes
    currency = models.CharField(default="usd", max_length=8)
    # Fallback mode used when a course has no modes configured in the table.
    DEFAULT_MODE = Mode('honor', _('Honor Code Certificate'), 0, '', 'usd')
    DEFAULT_MODE_SLUG = 'honor'
    class Meta:
        """ meta attributes of this model """
        unique_together = ('course_id', 'mode_slug', 'currency')
    @classmethod
    def modes_for_course(cls, course_id):
        """
        Returns a list of the modes for a given course id

        If no modes have been set in the table, returns the default mode
        """
        found_course_modes = cls.objects.filter(course_id=course_id)
        modes = ([Mode(mode.mode_slug, mode.mode_display_name, mode.min_price, mode.suggested_prices, mode.currency)
                 for mode in found_course_modes])
        if not modes:
            modes = [cls.DEFAULT_MODE]
        return modes
    @classmethod
    def modes_for_course_dict(cls, course_id):
        """Return the course's modes as a dict keyed by mode slug."""
        return { mode.slug : mode for mode in cls.modes_for_course(course_id) }
    @classmethod
    def mode_for_course(cls, course_id, mode_slug):
        """
        Returns the mode for the course corresponding to mode_slug.

        If this particular mode is not set for the course, returns None
        """
        modes = cls.modes_for_course(course_id)
        matched = [m for m in modes if m.slug == mode_slug]
        if matched:
            return matched[0]
        else:
            return None
    def __unicode__(self):
        # Python 2 string representation (Django 1.x convention).
        return u"{} : {}, min={}, prices={}".format(
            self.course_id, self.mode_slug, self.min_price, self.suggested_prices
        )
| praveen-pal/edx-platform | common/djangoapps/course_modes/models.py | Python | agpl-3.0 | 2,735 |
# A Test Program for pipeTestService.py
#
# Install and start the Pipe Test service, then run this test
# either from the same machine, or from another using the "-s" param.
#
# Eg: pipeTestServiceClient.py -s server_name Hi There
# Should work.
from win32pipe import *
from win32file import *
from win32event import *
import pywintypes
import win32api
import winerror
import sys, os, traceback
verbose = 0
#def ReadFromPipe(pipeName):
# Could (Should?) use CallNamedPipe, but this technique allows variable size
# messages (whereas you must supply a buffer size for CallNamedPipe!
# hPipe = CreateFile(pipeName, GENERIC_WRITE, 0, None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0)
# more = 1
# while more:
# hr = ReadFile(hPipe, 256)
# if hr==0:
# more = 0
# except win32api.error (hr, fn, desc):
# if hr==winerror.ERROR_MORE_DATA:
# data = dat
#
def CallPipe(fn, args):
    """Invoke fn(*args), retrying up to 8 times while the pipe reports busy."""
    for _attempt in range(8):
        try:
            return fn(*args)
        except win32api.error as exc:
            if exc.winerror == winerror.ERROR_PIPE_BUSY:
                # Give the server a chance to free up a pipe instance.
                win32api.Sleep(5000)
                continue
            raise
    raise RuntimeError("Could not make a connection to the server")
def testClient(server,msg):
    """Send one message to the service's named pipe and print the reply."""
    if verbose:
        print("Sending", msg)
    # CallNamedPipe does connect + transact + disconnect in one call.
    data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 256, NMPWAIT_WAIT_FOREVER))
    if verbose:
        print("Server sent back '%s'" % data)
    print("Sent and received a message!")
def testLargeMessage(server, size = 4096):
    """Round-trip a `size`-byte message and report a size mismatch."""
    if verbose:
        print("Sending message of size %d" % (size))
    msg = "*" * size
    data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 512, NMPWAIT_WAIT_FOREVER))
    # A non-zero difference means the echo was truncated or padded.
    if len(data)-size:
        print("Sizes are all wrong - send %d, got back %d" % (size, len(data)))
def stressThread(server, numMessages, wait):
    """Worker: send numMessages 512-byte messages, then signal `wait`."""
    try:
        try:
            for i in range(numMessages):
                r = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, "#" * 512, 1024, NMPWAIT_WAIT_FOREVER))
        except:
            traceback.print_exc()
            print("Failed after %d messages" % i)
    finally:
        # Always signal completion so the spawner does not wait forever.
        SetEvent(wait)
def stressTestClient(server, numThreads, numMessages):
    """Spawn numThreads workers, each sending numMessages, and wait for all."""
    import _thread
    thread_waits = []
    for t_num in range(numThreads):
        # Note I could just wait on thread handles (after calling DuplicateHandle)
        # See the service itself for an example of waiting for the clients...
        wait = CreateEvent(None, 0, 0, None)
        thread_waits.append(wait)
        _thread.start_new_thread(stressThread, (server,numMessages, wait))
    # Wait for all threads to finish.
    WaitForMultipleObjects(thread_waits, 1, INFINITE)
def main():
    """Parse command-line options and run the selected pipe tests."""
    import sys, getopt
    server = "."
    thread_count = 0
    msg_count = 500
    try:
        opts, args = getopt.getopt(sys.argv[1:], 's:t:m:vl')
        for o,a in opts:
            if o=='-s':
                server = a
            if o=='-m':
                msg_count = int(a)
            if o=='-t':
                thread_count = int(a)
            if o=='-v':
                global verbose
                verbose = 1
            if o=='-l':
                testLargeMessage(server)
        # Remaining args form the message; "mbcs" is the Windows ANSI
        # code page the pipe service expects.
        msg = " ".join(args).encode("mbcs")
    except getopt.error as msg:
        print(msg)
        my_name = os.path.split(sys.argv[0])[1]
        print("Usage: %s [-v] [-s server] [-t thread_count=0] [-m msg_count=500] msg ..." % my_name)
        print(" -v = verbose")
        print("  Specifying a value for -t will stress test using that many threads.")
        return
    testClient(server, msg)
    if thread_count > 0:
        print("Spawning %d threads each sending %d messages..." % (thread_count, msg_count))
        stressTestClient(server, thread_count, msg_count)
if __name__=='__main__':
    main()
| sserrot/champion_relationships | venv/Lib/site-packages/win32/Demos/service/pipeTestServiceClient.py | Python | mit | 4,134 |
import pymzn
import asyncio
from pymzn.aio import minizinc
async def main():
    # Start the solve without caching solutions; the call returns a handle
    # whose status we can poll while the solver keeps running.
    solns = await minizinc('async.mzn', all_solutions=True, keep_solutions=False)
    # Poll once per second until the solver reports completion.
    while solns.status is not pymzn.Status.COMPLETE:
        await asyncio.sleep(1)
    # Print only the first solution from the (non-cached) solution stream.
    for i, soln in enumerate(solns):
        if i == 0:
            print(soln)
asyncio.run(main())
| paolodragone/PyMzn | examples/asyncronous/async_test.py | Python | mit | 357 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SermonItem(scrapy.Item):
    """Container for one scraped sermon's metadata fields."""
    book = scrapy.Field()
    title = scrapy.Field()
    date_preached = scrapy.Field()
    scripture = scrapy.Field()
    ref = scrapy.Field()
    link = scrapy.Field()
| psyonara/scrapgty | gty/gty/items.py | Python | mit | 369 |
# Generated by Django 1.11.18 on 2019-01-29 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering VirtualMachine.user_data
    (TextField, blank allowed, max_length 87380)."""
    dependencies = [
        ('waldur_azure', '0005_ordering'),
    ]
    operations = [
        migrations.AlterField(
            model_name='virtualmachine',
            name='user_data',
            field=models.TextField(
                blank=True,
                help_text='Additional data that will be added to instance on provisioning',
                max_length=87380,
            ),
        ),
    ]
| opennode/nodeconductor-assembly-waldur | src/waldur_azure/migrations/0006_user_data.py | Python | mit | 547 |
# encoding: utf-8
# FastCGI-to-WSGI bridge for files/pipes transport (not socket)
#
# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <allan@saddi.com>
# Copyright (c) 2011 Ruslan Keba <ruslan@helicontech.com>
# Copyright (c) 2012 Antoine Martin <antoine@openance.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from future.utils import raise_with_traceback
__author__ = 'Allan Saddi <allan@saddi.com>, Ruslan Keba <ruslan@helicontech.com>, Antoine Martin <antoine@openance.com>'
import msvcrt
import struct
import os
import os.path
import logging
import sys
import traceback
import datetime
from optparse import OptionParser
# Byte/text compatibility shims: single-byte and encoding helpers that
# behave the same on Python 2 (str == bytes) and Python 3 (distinct types).
if sys.version_info >= (3,):
    long_int = int
    bytes_type = bytes
    import urllib.parse as url_parse
    def char_to_int(value):
        # value is an int when indexing bytes on Python 3.
        return int(value)
    def int_to_char(value):
        # Produce a single-byte bytes object from an ordinal.
        return bytes([value])
    def make_bytes(content):
        return bytes(content, FCGI_CONTENT_ENCODING) if type(content) is str else content
else:
    long_int = long
    bytes_type = str
    import urllib as url_parse
    def char_to_int(value):
        # Indexing a str on Python 2 yields a 1-char string.
        return ord(value)
    def int_to_char(value):
        return chr(value)
    def make_bytes(content):
        # Python 2 str is already a byte string.
        return content
from django.core.management.base import BaseCommand
from django.conf import settings
# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
FCGI_HEADER_NAMES = (
'ERROR TYPE: 0',
'BEGIN_REQUEST',
'ABORT_REQUEST',
'END_REQUEST',
'PARAMS',
'STDIN',
'STDOUT',
'STDERR',
'DATA',
'GET_VALUES',
'GET_VALUES_RESULT',
'UNKNOWN_TYPE',
)
# configuration not from the spec
FCGI_PARAMS_ENCODING = "utf-8"
FCGI_CONTENT_ENCODING = FCGI_PARAMS_ENCODING
FCGI_DEBUG = getattr(settings, 'FCGI_DEBUG', settings.DEBUG)
FCGI_LOG = getattr(settings, 'FCGI_LOG', FCGI_DEBUG)
FCGI_LOG_PATH = getattr(settings, 'FCGI_LOG_PATH', os.path.dirname(os.path.abspath(sys.argv[0])))
class InputStream(object):
    """
    File-like object representing FastCGI input streams (FCGI_STDIN and
    FCGI_DATA). Supports the minimum methods required by WSGI spec.

    Data arrives via add_data(); an empty chunk marks end-of-stream.
    Reads pump the connection (via _waitForData) until enough data has
    arrived or EOF has been signalled.

    Bug fix over the original: the iterator protocol method is now named
    ``__next__`` so iteration works on Python 3 (which this module
    otherwise explicitly supports); ``next`` is kept as an alias for
    Python 2 compatibility.
    """
    def __init__(self, conn):
        self._conn = conn
        # See Server.
        self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
        self._buf = b''
        self._bufList = []
        self._pos = 0 # Current read position.
        self._avail = 0 # Number of bytes currently available.
        self._eof = False # True when server has sent EOF notification.
    def _shrinkBuffer(self):
        """Gets rid of already read data (since we can't rewind)."""
        if self._pos >= self._shrinkThreshold:
            self._buf = self._buf[self._pos:]
            self._avail -= self._pos
            self._pos = 0
            assert self._avail >= 0
    def _waitForData(self):
        """Waits for more data to become available."""
        self._conn.process_input()
    def read(self, n=-1):
        """Read up to n bytes; n < 0 reads everything up to EOF."""
        if self._pos == self._avail and self._eof:
            return b''
        while True:
            if n < 0 or (self._avail - self._pos) < n:
                # Not enough data available.
                if self._eof:
                    # And there's no more coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more data.
                    self._waitForData()
                    continue
            else:
                newPos = self._pos + n
                break
        # Merge buffer list, if necessary.
        if self._bufList:
            self._buf += b''.join(self._bufList)
            self._bufList = []
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r
    def readline(self, length=None):
        """Read one newline-terminated line (capped at `length` bytes)."""
        if self._pos == self._avail and self._eof:
            return b''
        while True:
            # Unfortunately, we need to merge the buffer list early.
            if self._bufList:
                self._buf += b''.join(self._bufList)
                self._bufList = []
            # Find newline.
            i = self._buf.find(b'\n', self._pos)
            if i < 0:
                # Not found?
                if self._eof:
                    # No more data coming.
                    newPos = self._avail
                    break
                else:
                    if length is not None and len(self._buf) >= length + self._pos:
                        newPos = self._pos + length
                        break
                    # Wait for more to come.
                    self._waitForData()
                    continue
            else:
                newPos = i + 1
                break
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r
    def readlines(self, sizehint=0):
        """Read lines until EOF or until at least `sizehint` bytes are read."""
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
    def __iter__(self):
        return self
    def __next__(self):
        """Iterator protocol (Python 3); raises StopIteration at EOF."""
        r = self.readline()
        if not r:
            raise StopIteration
        return r
    # Alias for the Python 2 iterator protocol.
    next = __next__
    def add_data(self, data):
        """Feed a chunk from the connection; falsy data signals EOF."""
        if not data:
            self._eof = True
        else:
            self._bufList.append(data)
            self._avail += len(data)
class OutputStream(object):
    """
    FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
    write() or writelines() immediately result in Records being sent back
    to the server. Buffering should be done in a higher level!
    """
    def __init__(self, conn, req, type, buffered=False):
        self._conn = conn
        self._req = req
        self._type = type
        self._buffered = buffered
        self._bufList = [] # Used if buffered is True
        self.dataWritten = False
        self.closed = False
    def _write(self, data):
        """Split data into records no larger than the server's maxwrite."""
        length = len(data)
        while length:
            to_write = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN)
            rec = Record(self._type, self._req.requestId)
            rec.contentLength = to_write
            rec.contentData = data[:to_write]
            self._conn.writeRecord(rec)
            data = data[to_write:]
            length -= to_write
    def write(self, data):
        """Buffer or immediately send data; empty writes are ignored."""
        assert not self.closed
        if not data:
            return
        self.dataWritten = True
        if self._buffered:
            self._bufList.append(data)
        else:
            self._write(data)
    def writelines(self, lines):
        assert not self.closed
        for line in lines:
            self.write(line)
    def flush(self):
        # Only need to flush if this OutputStream is actually buffered.
        if self._buffered:
            data = b''.join(self._bufList)
            self._bufList = []
            self._write(data)
    # Though available, the following should NOT be called by WSGI apps.
    def close(self):
        """Sends end-of-stream notification, if necessary."""
        if not self.closed and self.dataWritten:
            self.flush()
            # An empty record of this stream type marks end-of-stream.
            rec = Record(self._type, self._req.requestId)
            self._conn.writeRecord(rec)
            self.closed = True
class TeeOutputStream(object):
    """
    Fan-out wrapper: every write/flush is duplicated to each of the
    wrapped file-like objects.
    """
    def __init__(self, streamList):
        self._sinks = streamList
    def write(self, data):
        for sink in self._sinks:
            sink.write(data)
    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)
    def flush(self):
        for sink in self._sinks:
            sink.flush()
class StdoutWrapper(object):
    """
    Wrapper for sys.stdout that records whether any non-empty payload has
    actually been written through it; all other attributes are delegated.
    """
    def __init__(self, stdout):
        self._file = stdout
        self.dataWritten = False
    def write(self, data):
        # Once any non-empty write happens, the flag stays set.
        self.dataWritten = self.dataWritten or bool(data)
        self._file.write(data)
    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)
    def __getattr__(self, name):
        # Delegate everything else to the wrapped stream.
        return getattr(self._file, name)
def decode_pair(s, pos=0):
    """
    Decodes a name/value pair.

    The number of bytes decoded as well as the name/value pair
    are returned.
    """
    nameLength = char_to_int(s[pos])
    if nameLength & 128:
        # High bit set: the length is a 4-byte big-endian value
        # (with the top bit masked off), per the FastCGI spec.
        nameLength = struct.unpack('!L', s[pos:pos + 4])[0] & 0x7fffffff
        pos += 4
    else:
        pos += 1
    valueLength = char_to_int(s[pos])
    if valueLength & 128:
        valueLength = struct.unpack('!L', s[pos:pos + 4])[0] & 0x7fffffff
        pos += 4
    else:
        pos += 1
    name = s[pos:pos + nameLength]
    pos += nameLength
    value = s[pos:pos + valueLength]
    pos += valueLength
    # when decoding, the fallback encoding must be one which can encode any binary value
    # i.e. it must be a code-page-based encoding with no undefined values - e.g. cp850.
    try:
        return pos, (name.decode(FCGI_PARAMS_ENCODING), value.decode(FCGI_PARAMS_ENCODING))
    except UnicodeError:
        return pos, (name.decode('cp850'), value.decode('cp850'))
def encode_pair(name, value):
    """
    Encodes a name/value pair.

    The encoded string is returned.
    """
    def _encode_length(length):
        # Lengths < 128 fit in one byte; otherwise four big-endian bytes
        # with the high bit set, per the FastCGI spec.
        if length < 128:
            return int_to_char(length)
        return struct.pack('!L', length | long_int('0x80000000'))
    header = _encode_length(len(name)) + _encode_length(len(value))
    # when encoding, the fallback encoding must be one which can encode any unicode code point
    # i.e. it must be a UTF-* encoding. since we're on the web the default choice is UTF-8.
    try:
        return header + name.encode(FCGI_PARAMS_ENCODING) + value.encode(FCGI_PARAMS_ENCODING)
    except UnicodeError:
        return header + name.encode('utf-8') + value.encode('utf-8')
class Record(object):
    """
    A FastCGI Record.

    Used for encoding/decoding records.
    """
    def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
        self.version = FCGI_VERSION_1
        self.type = type
        self.requestId = requestId
        self.contentLength = 0
        self.paddingLength = 0
        self.contentData = b''
    def _recvall(stream, length):
        """
        Attempts to receive length bytes from a socket, blocking if necessary.
        (Socket may be blocking or non-blocking.)
        """
        if FCGI_DEBUG: logging.debug('_recvall (%d)' % (length))
        dataList = []
        recvLen = 0
        while length:
            data = stream.read(length)
            if not data: # EOF
                break
            dataList.append(data)
            dataLen = len(data)
            recvLen += dataLen
            length -= dataLen
        # if FCGI_DEBUG: logging.debug('recived length = %d' % (recvLen))
        return b''.join(dataList), recvLen
    _recvall = staticmethod(_recvall)
    def read(self, stream):
        """Read and decode a Record from a socket."""
        try:
            header, length = self._recvall(stream, FCGI_HEADER_LEN)
        except:
            raise
            # NOTE(review): the line below is unreachable dead code - the bare
            # `raise` above re-raises first; it was likely meant to replace it.
            raise EOFError
        if length < FCGI_HEADER_LEN:
            raise EOFError
        self.version, self.type, self.requestId, self.contentLength, \
            self.paddingLength = struct.unpack(FCGI_Header, header)
        if FCGI_DEBUG:
            hex = ''
            for s in header:
                hex += '%x|' % (char_to_int(s))
            logging.debug('recv fcgi header: %s %s len: %d' % (
                FCGI_HEADER_NAMES[self.type] if self.type is not None and self.type < FCGI_MAXTYPE else
                FCGI_HEADER_NAMES[FCGI_MAXTYPE],
                hex, len(header)
            ))
        if self.contentLength:
            try:
                self.contentData, length = self._recvall(stream, self.contentLength)
            except:
                raise EOFError
            if length < self.contentLength:
                raise EOFError
        if self.paddingLength:
            # Padding bytes are read and discarded.
            try:
                self._recvall(stream, self.paddingLength)
            except:
                raise EOFError
    def _sendall(stream, data):
        """
        Writes data to a socket and does not return until all the data is sent.
        """
        if FCGI_DEBUG: logging.debug('_sendall: len=%d' % len(data))
        stream.write(data)
    _sendall = staticmethod(_sendall)
    def write(self, stream):
        """Encode and write a Record to a socket."""
        # Pad content to an 8-byte boundary (8 bytes when there is no content).
        if not self.contentLength:
            self.paddingLength = 8
        else:
            self.paddingLength = -self.contentLength & 7
        header = struct.pack(FCGI_Header, self.version, self.type,
                             self.requestId, self.contentLength,
                             self.paddingLength)
        if FCGI_DEBUG:
            # NOTE(review): the ternary binds looser than %, so when the type
            # is out of range only the bare name is logged, without the
            # 'send fcgi header: ' prefix - debug-output-only quirk.
            logging.debug(
                'send fcgi header: %s' %
                FCGI_HEADER_NAMES[self.type] if self.type is not None and self.type < FCGI_MAXTYPE else
                FCGI_HEADER_NAMES[FCGI_MAXTYPE]
            )
        self._sendall(stream, header)
        if self.contentLength:
            if FCGI_DEBUG: logging.debug('send CONTENT')
            self._sendall(stream, self.contentData)
        if self.paddingLength:
            if FCGI_DEBUG: logging.debug('send PADDING')
            self._sendall(stream, b'\x00' * self.paddingLength)
class Request(object):
    """
    Represents a single FastCGI request.
    These objects are passed to your handler and is the main interface
    between your handler and the fcgi module. The methods should not
    be called by your handler. However, server, params, stdin, stdout,
    stderr, and data are free for your handler's use.
    """
    def __init__(self, conn, inputStreamClass):
        """Bind the request to its Connection and create its streams."""
        self._conn = conn
        self.server = conn.server
        self.params = {}
        self.stdin = inputStreamClass(conn)
        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
        self.stderr = OutputStream(conn, self, FCGI_STDERR)
        self.data = inputStreamClass(conn)
    def run(self):
        """Runs the handler, flushes the streams, and ends the request."""
        try:
            protocolStatus, appStatus = self.server.handler(self)
        except Exception as instance:
            logging.exception(instance)  # just in case there's another error reporting the exception
            # TODO: this appears to cause FCGI timeouts sometimes. is it an exception loop?
            self.stderr.flush()
            if not self.stdout.dataWritten:
                self.server.error(self)
            # BUG FIX: this fallback assignment used to live inside the
            # ``if not self.stdout.dataWritten:`` branch, so a handler
            # that failed after writing some output left protocolStatus /
            # appStatus unbound and the lines below raised NameError.
            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0
        if FCGI_DEBUG: logging.debug('protocolStatus = %d, appStatus = %d' % (protocolStatus, appStatus))
        self._flush()
        self._end(appStatus, protocolStatus)
    def _end(self, appStatus=long_int('0'), protocolStatus=FCGI_REQUEST_COMPLETE):
        """Ask the Connection to emit FCGI_END_REQUEST for this request."""
        self._conn.end_request(self, appStatus, protocolStatus)
    def _flush(self):
        """Flush stdout before stderr so the response body lands first."""
        self.stdout.flush()
        self.stderr.flush()
class Connection(object):
    """
    A Connection with the web server.
    Each Connection is associated with a single socket (which is
    connected to the web server) and is responsible for handling all
    the FastCGI message processing for that socket.
    """
    _multiplexed = False
    _inputStreamClass = InputStream
    def __init__(self, stdin, stdout, server):
        """Wrap the already-opened binary streams to/from the web server."""
        self._stdin = stdin
        self._stdout = stdout
        self.server = server
        # Active Requests for this Connection, mapped by request ID.
        self._requests = {}
    def run(self):
        """Begin processing data from the socket."""
        self._keepGoing = True
        while self._keepGoing:
            try:
                self.process_input()
            except KeyboardInterrupt:
                break
    def process_input(self):
        """Attempt to read a single Record from the socket and process it."""
        # Currently, any children Request threads notify this Connection
        # that it is no longer needed by closing the Connection's socket.
        # We need to put a timeout on select, otherwise we might get
        # stuck in it indefinitely... (I don't like this solution.)
        if not self._keepGoing:
            return
        rec = Record()
        rec.read(self._stdin)
        if rec.type == FCGI_GET_VALUES:
            self._do_get_values(rec)
        elif rec.type == FCGI_BEGIN_REQUEST:
            self._do_begin_request(rec)
        elif rec.type == FCGI_ABORT_REQUEST:
            self._do_abort_request(rec)
        elif rec.type == FCGI_PARAMS:
            self._do_params(rec)
        elif rec.type == FCGI_STDIN:
            self._do_stdin(rec)
        elif rec.type == FCGI_DATA:
            self._do_data(rec)
        elif rec.requestId == FCGI_NULL_REQUEST_ID:
            self._do_unknown_type(rec)
        else:
            # Unknown record type for an active request id.
            # Need to complain about this.
            pass
    def writeRecord(self, rec):
        """
        Write a Record to the socket.
        """
        rec.write(self._stdout)
    def end_request(self, req, appStatus=long_int('0'), protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        """
        End a Request.
        Called by Request objects. An FCGI_END_REQUEST Record is
        sent to the web server. If the web server no longer requires
        the connection, the socket is closed, thereby ending this
        Connection (run() returns).
        """
        # Write an empty FCGI_STDOUT record to close the response stream.
        # (The previous comment incorrectly called this "stdin".)
        rec = Record(FCGI_STDOUT, req.requestId)
        rec.contentData = ''
        rec.contentLength = 0
        self.writeRecord(rec)
        # write end request
        rec = Record(FCGI_END_REQUEST, req.requestId)
        rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
                                      protocolStatus)
        rec.contentLength = FCGI_EndRequestBody_LEN
        self.writeRecord(rec)
        if remove:
            if FCGI_DEBUG: logging.debug('end_request: removing request from list')
            del self._requests[req.requestId]
        if FCGI_DEBUG: logging.debug('end_request: flags = %d' % req.flags)
        if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
            if FCGI_DEBUG: logging.debug('end_request: set _keepGoing = False')
            self._keepGoing = False
    def _do_get_values(self, inrec):
        """Handle an FCGI_GET_VALUES request from the web server."""
        outrec = Record(FCGI_GET_VALUES_RESULT)
        pos = 0
        while pos < inrec.contentLength:
            pos, (name, value) = decode_pair(inrec.contentData, pos)
            cap = self.server.capability.get(name)
            if cap is not None:
                outrec.contentData += encode_pair(name, str(cap))
        outrec.contentLength = len(outrec.contentData)
        self.writeRecord(outrec)
    def _do_begin_request(self, inrec):
        """Handle an FCGI_BEGIN_REQUEST from the web server."""
        role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)
        req = self.server.request_class(self, self._inputStreamClass)
        req.requestId, req.role, req.flags = inrec.requestId, role, flags
        req.aborted = False
        if not self._multiplexed and self._requests:
            # Can't multiplex requests.
            self.end_request(req, long_int(0), FCGI_CANT_MPX_CONN, remove=False)
        else:
            self._requests[inrec.requestId] = req
    def _do_abort_request(self, inrec):
        """
        Handle an FCGI_ABORT_REQUEST from the web server.
        We just mark a flag in the associated Request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.aborted = True
    def _start_request(self, req):
        """Run the request."""
        # Not multiplexed, so run it inline.
        req.run()
    def _do_params(self, inrec):
        """
        Handle an FCGI_PARAMS Record.
        If the last FCGI_PARAMS Record is received, start the request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            if inrec.contentLength:
                pos = 0
                while pos < inrec.contentLength:
                    pos, (name, value) = decode_pair(inrec.contentData, pos)
                    req.params[name] = value
    def _do_stdin(self, inrec):
        """Handle the FCGI_STDIN stream.

        An empty FCGI_STDIN record marks end-of-stream and starts the
        request.  BUG FIX: the empty-record branch was previously
        attached to the content-length check alone, so a record carrying
        an unknown request id called ``_start_request(None)`` and
        crashed; both branches are now guarded by ``req is not None``.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            if inrec.contentLength:
                req.stdin.add_data(inrec.contentData)
            else:
                self._start_request(req)
    def _do_data(self, inrec):
        """Handle the FCGI_DATA stream."""
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.data.add_data(inrec.contentData)
    def _do_unknown_type(self, inrec):
        """Handle an unknown request type. Respond accordingly."""
        outrec = Record(FCGI_UNKNOWN_TYPE)
        outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
        outrec.contentLength = FCGI_UnknownTypeBody_LEN
        self.writeRecord(outrec)
class FCGIServer(object):
    """FastCGI-to-WSGI gateway served over this process's standard handles.

    Instantiated with a WSGI *application*; ``run()`` speaks the FastCGI
    protocol on stdin/stdout (the launch model used by IIS) and
    ``handler()`` adapts each request into a WSGI call.
    """
    request_class = Request
    # Maximum bytes written per output record (see OutputStream elsewhere
    # in this module).
    maxwrite = 8192
    inputStreamShrinkThreshold = 102400 - 8192
    def __init__(self, application, environ=None,
                 multithreaded=False, multiprocess=False,
                 debug=False, roles=(FCGI_RESPONDER,),
                 app_root=None):
        """Store the WSGI *application* and server configuration.

        environ: extra variables merged into every request's WSGI environ.
        app_root: URL prefix stripped from the front of PATH_INFO.
        debug: when true, unhandled errors render a cgitb page.
        """
        if environ is None:
            environ = {}
        self.application = application
        self.environ = environ
        self.multithreaded = multithreaded
        self.multiprocess = multiprocess
        self.debug = debug
        self.roles = roles
        self._connectionClass = Connection
        self.capability = {
            # If threads aren't available, these are pretty much correct.
            FCGI_MAX_CONNS: 1,
            FCGI_MAX_REQS: 1,
            FCGI_MPXS_CONNS: 0
        }
        self.app_root = app_root
    def run(self):
        """Serve a single FastCGI connection over stdin/stdout until it ends."""
        # msvcrt is Windows-only; this module targets IIS.
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0)
        # NOTE(review): the write stream is opened on *stdin*'s file
        # descriptor.  Under IIS the stdin handle is a duplex pipe, so
        # this is presumably deliberate -- confirm before changing it to
        # sys.stdout.fileno().
        stdout = os.fdopen(sys.stdin.fileno(), 'wb', 0)
        conn = Connection(stdin, stdout, self)
        try:
            conn.run()
        except Exception as e:
            logging.exception(e)
            raise
    def handler(self, req):
        """Special handler for WSGI."""
        if req.role not in self.roles:
            return FCGI_UNKNOWN_ROLE, 0
        # Mostly taken from example CGI gateway.
        environ = req.params
        environ.update(self.environ)
        environ['wsgi.version'] = (1, 0)
        environ['wsgi.input'] = req.stdin
        # TODO - sys.stderr appears to be None here?? (on Windows/IIS)
        stderr = TeeOutputStream((sys.stderr, req.stderr))
        environ['wsgi.errors'] = stderr
        environ['wsgi.multithread'] = False
        environ['wsgi.multiprocess'] = False
        environ['wsgi.run_once'] = False
        if environ.get('HTTPS', 'off') in ('on', '1'):
            environ['wsgi.url_scheme'] = 'https'
        else:
            environ['wsgi.url_scheme'] = 'http'
        self._sanitizeEnv(environ)
        headers_set = []
        headers_sent = []
        result = None
        def write(data):
            # PEP 3333 write() callable: sends the (possibly deferred)
            # status/headers on first use, then the body bytes.
            assert type(data) is bytes_type, 'write() argument must be bytes'
            assert headers_set, 'write() before start_response()'
            if not headers_sent:
                status, responseHeaders = headers_sent[:] = headers_set
                found = False
                for header, value in responseHeaders:
                    if header.lower() == 'content-length':
                        found = True
                        break
                if not found and result is not None:
                    try:
                        # Only safe when the app returned exactly one
                        # chunk -- then this chunk's size is the body size.
                        if len(result) == 1:
                            responseHeaders.append(('Content-Length',
                                                    str(len(data))))
                    except:
                        pass
                s = 'Status: %s\r\n' % status
                for header in responseHeaders:
                    s += '%s: %s\r\n' % header
                s += '\r\n'
                req.stdout.write(s.encode(FCGI_CONTENT_ENCODING))
            req.stdout.write(data)
            req.stdout.flush()
        def start_response(status, response_headers, exc_info=None):
            # PEP 3333 start_response callable; validates its inputs and
            # defers sending headers until the first write().
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise if too late
                        raise_with_traceback(exc_info[0](exc_info[1]))
                finally:
                    exc_info = None  # avoid dangling circular ref
            else:
                assert not headers_set, 'Headers already set!'
            assert type(status) is str, 'Status must be a string'
            assert len(status) >= 4, 'Status must be at least 4 characters'
            assert int(status[:3]), 'Status must begin with 3-digit code'
            assert status[3] == ' ', 'Status must have a space after code'
            assert type(response_headers) is list, 'Headers must be a list'
            if FCGI_DEBUG:
                logging.debug('response headers:')
            for name, val in response_headers:
                assert type(name) is str, 'Header name "%s" must be a string' % name
                assert type(val) is str, 'Value of header "%s" must be a string' % name
                if FCGI_DEBUG:
                    logging.debug('%s: %s' % (name, val))
            headers_set[:] = [status, response_headers]
            return write
        try:
            try:
                result = self.application(environ, start_response)
                try:
                    for data in result:
                        if data:
                            write(make_bytes(data))
                    if not headers_sent:
                        write(b'')  # in case body was empty
                finally:
                    # NOTE(review): PEP 3333 says result.close() should be
                    # called here; it is commented out in this port --
                    # confirm whether that was deliberate before enabling.
                    # if hasattr(result, 'close'):
                    #     result.close()
                    pass
            # except socket.error, e:
            #     if e[0] != errno.EPIPE:
            #        raise # Don't let EPIPE propagate beyond server
            except:
                raise
        finally:
            pass
        return FCGI_REQUEST_COMPLETE, 0
    def _sanitizeEnv(self, environ):
        """Ensure certain values are present, if required by WSGI."""
        if FCGI_DEBUG:
            logging.debug('raw envs: {0}'.format(environ))
        # if not environ.has_key('SCRIPT_NAME'):
        #    environ['SCRIPT_NAME'] = ''
        # TODO: fix for django
        environ['SCRIPT_NAME'] = ''
        reqUri = None
        if 'REQUEST_URI' in environ:
            # Split off the query string once; both halves reused below.
            reqUri = environ['REQUEST_URI'].split('?', 1)
        if 'PATH_INFO' not in environ or not environ['PATH_INFO']:
            if reqUri is not None:
                environ['PATH_INFO'] = reqUri[0]
            else:
                environ['PATH_INFO'] = ''
        # convert %XX to python unicode
        environ['PATH_INFO'] = url_parse.unquote(environ['PATH_INFO'])
        # process app_root
        if self.app_root and environ['PATH_INFO'].startswith(self.app_root):
            environ['PATH_INFO'] = environ['PATH_INFO'][len(self.app_root):]
        if 'QUERY_STRING' not in environ or not environ['QUERY_STRING']:
            if reqUri is not None and len(reqUri) > 1:
                environ['QUERY_STRING'] = reqUri[1]
            else:
                environ['QUERY_STRING'] = ''
        # If any of these are missing, it probably signifies a broken
        # server...
        for name, default in [('REQUEST_METHOD', 'GET'),
                              ('SERVER_NAME', 'localhost'),
                              ('SERVER_PORT', '80'),
                              ('SERVER_PROTOCOL', 'HTTP/1.0')]:
            if name not in environ:
                message = '%s: missing FastCGI param %s required by WSGI!\n' % (
                    self.__class__.__name__, name)
                environ['wsgi.errors'].write(message.encode(FCGI_CONTENT_ENCODING))
                environ[name] = default
    def error(self, req):
        """
        Called by Request if an exception occurs within the handler. May and
        should be overridden.
        """
        if self.debug:
            import cgitb
            req.stdout.write(b'Status: 500 Internal Server Error\r\n' +
                             b'Content-Type: text/html\r\n\r\n' +
                             cgitb.html(sys.exc_info()).encode(FCGI_CONTENT_ENCODING))
        else:
            errorpage = b"""<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>Unhandled Exception</title>
</head><body>
<h1>Unhandled Exception</h1>
<p>An unhandled exception was thrown by the application.</p>
</body></html>
"""
            req.stdout.write(b'Status: 500 Internal Server Error\r\n' +
                             b'Content-Type: text/html\r\n\r\n' +
                             errorpage)
def example_application(environ, start_response):
    '''example wsgi app which outputs wsgi environment'''
    logging.debug('wsgi app started')
    data = ''
    # BUG FIX: ``environ.keys()`` returns a view object on Python 3,
    # which has no ``.sort()`` (AttributeError).  sorted() works on both
    # Python 2 and 3 and yields the same ordering.
    for e in sorted(environ):
        data += '%s: %s\n' % (e, environ[e])
    data += 'sys.version: ' + sys.version + '\n'
    start_response('200 OK', [('Content-Type', 'text/plain'), ('Content-Length', str(len(data)))])
    yield data.encode(FCGI_CONTENT_ENCODING)
def run_example_app():
    """Serve example_application over FastCGI on this process's stdio."""
    if FCGI_DEBUG:
        logging.info('run_fcgi: STARTED')
    server = FCGIServer(example_application)
    server.run()
    if FCGI_DEBUG:
        logging.info('run_fcgi: EXITED')
def run_django_app(django_settings_module, django_root):
    '''run django app by django_settings_module,
    django_settings_module can be python path or physical path
    '''
    if os.path.exists(django_settings_module):
        # this is physical path
        app_path, app_settings = os.path.split(django_settings_module)
        # add directory to PYTHONPATH -- the *parent* of the app package,
        # so that "<package>.<settings>" below is importable
        app_dir = os.path.dirname(app_path)
        if app_dir not in sys.path:
            sys.path.append(app_dir)
            if FCGI_DEBUG: logging.debug('%s added to PYTHONPATH' % app_dir)
        # cut .py extension in module
        if app_settings.endswith('.py'):
            app_settings = app_settings[:-3]
        # get python path to settings
        settings_module = '%s.%s' % (os.path.basename(app_path), app_settings)
    else:
        # consider that django_settings_module is valid python path
        settings_module = django_settings_module
    # Must be set before importing the Django WSGI handler below.
    os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
    if FCGI_DEBUG: logging.info('DJANGO_SETTINGS_MODULE set to %s' % settings_module)
    try:
        from django.core.handlers.wsgi import WSGIHandler
    except ImportError:
        if FCGI_DEBUG: logging.error(
            'Could not import django.core.handlers.wsgi module. Check that django is installed and in PYTHONPATH.')
        raise
    # Blocks until the FastCGI connection ends.
    FCGIServer(WSGIHandler(), app_root=django_root).run()
class Command(BaseCommand):
    """Django management command: serve this project over FastCGI."""
    args = '[root_path]'
    help = '''Run as a fcgi server'''
    def handle(self, *args, **options):
        """Entry point invoked by Django's management framework.

        Optional first positional argument is the URL root stripped from
        PATH_INFO (passed through as FCGIServer's app_root).
        """
        django_root = args[0] if args else None
        if FCGI_LOG:
            # One log file per process, timestamped, overwritten on restart.
            logging.basicConfig(
                filename=os.path.join(FCGI_LOG_PATH, 'fcgi_%s_%d.log' % (
                    datetime.datetime.now().strftime('%y%m%d_%H%M%S'), os.getpid())),
                filemode='w',
                format='%(asctime)s [%(levelname)-5s] %(message)s',
                level=logging.DEBUG)
        try:
            from django.core.handlers.wsgi import WSGIHandler
        except ImportError:
            if FCGI_DEBUG: logging.error(
                'Could not import django.core.handlers.wsgi module. Check that django is installed and in PYTHONPATH.')
            raise
        FCGIServer(WSGIHandler(), app_root=django_root, debug=settings.DEBUG).run()
if __name__ == '__main__':
    # compile self to speed up subsequent launches
    # NOTE(review): on Python 3 py_compile writes into __pycache__, so
    # this .pyc/.pyo existence check is presumably only effective on
    # Python 2 -- confirm.
    compiled = os.path.split(__file__)[-1].replace('.py', '.pyc' if FCGI_DEBUG else '.pyo')
    if not os.path.exists(compiled):
        import py_compile
        try:
            py_compile.compile(__file__)
        except:
            # Best-effort only; running uncompiled is fine.
            pass
    # enable logging
    if FCGI_DEBUG:
        logging.basicConfig(
            filename=os.path.join(FCGI_LOG_PATH,
                                  'fcgi_%s_%d.log' % (datetime.datetime.now().strftime('%y%m%d_%H%M%S'), os.getpid())),
            filemode='w',
            format='%(asctime)s [%(levelname)-5s] %(message)s',
            level=logging.DEBUG)
    # If we are inside a subdirectory of a django app, derive the default
    # DJANGO_SETTINGS_MODULE from the parent directory's settings.py.
    default_django_settings_module = None
    parent_settings_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'settings.py')
    if os.path.exists(parent_settings_file):
        default_django_settings_module = os.path.abspath(parent_settings_file)
        if FCGI_DEBUG:
            logging.info('default DJANGO_SETTINGS_MODULE set to %s' % default_django_settings_module)
    # parse options
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("", "--django-settings-module", dest="django_settings_module",
                      help="python or physical path to Django settings module")
    parser.add_option("", "--django-root", dest="django_root",
                      help="strip this string from the front of any URLs before matching them against your URLconf patterns.")
    # Environment variables win over the derived default.
    parser.set_defaults(
        django_settings_module=os.environ.get('DJANGO_SETTINGS_MODULE', default_django_settings_module),
        django_root=os.environ.get('django.root', None)
    )
    (options, args) = parser.parse_args()
    # check django
    if options.django_settings_module:
        run_django_app(options.django_settings_module, options.django_root)
    else:
        # run example app
        run_example_app()
| abide/django-windows-tools | django_windows_tools/management/commands/winfcgi.py | Python | bsd-2-clause | 36,737 |
from binding import *
from classes.Enum import *
def booleanDefinition(enum):
    """Render one GLboolean constant as '<name> = <value>'."""
    return "%s = %s" % (enumBID(enum), enum.value)
def booleanImportDefinition(api, enum):
    """Render a C++ using-declaration importing *enum* from namespace *api*."""
    return "using %s::%s;" % (api, enumBID(enum))
def forwardBoolean(enum):
    """Render the static forward declaration for one GLboolean constant."""
    name = enumBID(enum)
    return "static const GLboolean %s = GLboolean::%s;" % (name, name)
def genBooleans(api, enums, outputdir, outputfile, forward = False):
    """Generate the GLboolean constants header for *api*.

    The '?' in outputfile/outputdir is a placeholder: replaced by 'F' to
    pick the template file, by '' for the actual output path.  When
    *forward* is true only forward declarations are emitted.
    """
    of_all = outputfile.replace("?", "F")
    t = template(of_all).replace("%a", api)
    of = outputfile.replace("?", "")
    od = outputdir.replace("?", "")
    status(od + of)
    # Only enums typed GLboolean belong in this header.
    tgrouped  = groupEnumsByType(enums)
    pureBooleans = tgrouped["GLboolean"]
    if not os.path.exists(od):
        os.makedirs(od)
    with open(od + of, 'w') as file:
        if forward:
            file.write(t % (("\n").join([ forwardBoolean(e) for e in pureBooleans ])))
        else:
            file.write(t % (
                (",\n" + tab).join([ booleanDefinition(e) for e in pureBooleans ]),
                ("\n")       .join([ forwardBoolean(e) for e in pureBooleans ])))
def genFeatureBooleans(api, enums, feature, outputdir, outputfile, core = False, ext = False):
    """Generate the per-feature GLboolean header for one GL *feature*.

    core/ext select the core-profile or extension variant of the feature
    (reflected in the version string used for template and directory).
    The '?' placeholder in outputfile/outputdir works as in genBooleans,
    with the directory placeholder replaced by the version string.
    """
    of_all = outputfile.replace("?", "F")
    version = versionBID(feature, core, ext)
    t = template(of_all).replace("%f", version).replace("%a", api)
    of = outputfile.replace("?", "")
    od = outputdir.replace("?", version)
    status(od + of)
    tgrouped  = groupEnumsByType(enums)
    pureBooleans = tgrouped["GLboolean"]
    if not os.path.exists(od):
        os.makedirs(od)
    with open(od + of, 'w') as file:
        if not feature:
            # No feature: emit full definitions plus forward declarations.
            file.write(t % (
                (",\n" + tab).join([ booleanDefinition(e) for e in pureBooleans ]),
                ("\n")       .join([ forwardBoolean(e) for e in pureBooleans ])))
        else:
            # Feature header: just re-export the names from the api namespace.
            file.write(t % (("\n").join([ booleanImportDefinition(api, e) for e in pureBooleans ])))
def genBooleansFeatureGrouped(api, enums, features, outputdir, outputfile):
    """Generate per-feature boolean headers for every OpenGL feature."""
    for feature in features:
        # ToDo: probably separate for all apis
        if feature.api != "gl":
            continue
        genFeatureBooleans(api, enums, feature, outputdir, outputfile)
        # Core and extension variants exist only from OpenGL 3.2 onwards.
        if (feature.major, feature.minor) >= (3, 2):
            genFeatureBooleans(api, enums, feature, outputdir, outputfile, True)
            genFeatureBooleans(api, enums, feature, outputdir, outputfile, False, True)
| zesterer/nilts-oldish | extern-glbinding/codegeneration/scripts/gen_booleans.py | Python | gpl-2.0 | 2,482 |
from .context import revas
from revas import UnauthorizedToken
import os
import mock
import pytest
@pytest.fixture()
def assigner():
    """Yield a revas.Assigner with a dummy Udacity auth token in the env."""
    # Assigner reads UDACITY_AUTH_TOKEN from the environment, so provide
    # one before construction.
    os.environ['UDACITY_AUTH_TOKEN'] = 'some auth token'
    yield revas.Assigner()
@mock.patch('revas.reviewsapi.ReviewsAPI.certifications')
def test_retrieve_certifications_list(mock_certifications, assigner):
    """certifications() maps the API payload to a flat list of project ids."""
    projects_list = [{'project_id': 15, 'status': 'certified'},
                     {'project_id': 14, 'status': 'certified'}]
    # NOTE: the next line sets an attribute on the auto-created Mock that
    # the following assignment immediately replaces; kept as-is.
    mock_certifications.return_value.ok = True
    mock_certifications.return_value = projects_list
    expected_certifications_list = [15, 14]
    certifications_list = assigner.certifications()
    assert certifications_list == expected_certifications_list
@mock.patch('revas.reviewsapi.ReviewsAPI.certifications')
def test_retrieve_certification_list_searching_just_for_certified_projects(mock_certifications, assigner):
    """Only projects with status 'certified' make it into the result."""
    projects_list = [{'project_id': 145, 'status': 'certified'},
                     {'project_id': 15, 'status': 'applied'},
                     {'project_id': 14, 'status': 'certified'}]
    mock_certifications.return_value.ok = True
    mock_certifications.return_value = projects_list
    expected_certifications_list = [145, 14]
    certifications_list = assigner.certifications()
    assert certifications_list == expected_certifications_list
@mock.patch('revas.reviewsapi.ReviewsAPI.certifications')
def test_raise_an_exception_when_return_empty_certifications_list(mock_certifications, assigner):
    """An empty certifications payload is treated as an error condition."""
    mock_certifications.return_value.ok = True
    mock_certifications.return_value = []
    with pytest.raises(Exception):
        assigner.certifications()
@mock.patch('revas.assigner.Assigner.certified_languages')
def test_should_return_projects_with_certified_languages(mock_certified_languages, assigner):
    """projects_with_languages() builds the project x language cross product.

    Note this patches Assigner.certified_languages itself (not the
    ReviewsAPI layer like the other tests), and only compares lengths.
    """
    expected_languages = ['en-us', 'zh-cn', 'pt-br']
    mock_certified_languages.return_value.ok = True
    mock_certified_languages.return_value = expected_languages
    certifications_list = [1, 2, 3]
    expected_projects_with_languages = {'projects':
                                            [{'project_id': certifications_list[0], 'language': expected_languages[0]},
                                             {'project_id': certifications_list[0], 'language': expected_languages[1]},
                                             {'project_id': certifications_list[0], 'language': expected_languages[2]},
                                             {'project_id': certifications_list[1], 'language': expected_languages[0]},
                                             {'project_id': certifications_list[1], 'language': expected_languages[1]},
                                             {'project_id': certifications_list[1], 'language': expected_languages[2]},
                                             {'project_id': certifications_list[2], 'language': expected_languages[0]},
                                             {'project_id': certifications_list[2], 'language': expected_languages[1]},
                                             {'project_id': certifications_list[2], 'language': expected_languages[2]}]}
    projects_with_languages = assigner.projects_with_languages(certifications_list)
    assert len(projects_with_languages) == len(expected_projects_with_languages)
@mock.patch('revas.reviewsapi.ReviewsAPI.certified_languages')
def test_should_return_certified_languages(mock_certified_languages, assigner):
    """certified_languages() unwraps application.languages from the payload."""
    mock_certified_languages.return_value.ok = True
    mock_certified_languages.return_value = {'application': {'languages': ['en-us', 'zh-cn', 'pt-br']}}
    expected_certified_languages = ['en-us', 'zh-cn', 'pt-br']
    languages = assigner.certified_languages()
    assert languages == expected_certified_languages
@mock.patch('revas.reviewsapi.ReviewsAPI.certified_languages')
def test_raise_an_exception_when_return_empty_certified_languages_list(mock_certified_languages, assigner):
    """An empty languages list in the payload is an error condition."""
    mock_certified_languages.return_value.ok = True
    mock_certified_languages.return_value = {'application': {'languages': []}}
    with pytest.raises(Exception):
        assigner.certified_languages()
@mock.patch('revas.reviewsapi.ReviewsAPI.assigned_count')
def test_return_false_when_has_less_than_the_limit_of_projects_in_review(mock_assigned_count, assigner):
    """assigned_count of 2 is at the limit, so the check returns False."""
    mock_assigned_count.return_value = {'assigned_count': 2}
    answer = assigner.has_less_than_the_limit_of_projects_in_review()
    assert answer is False
@mock.patch('revas.reviewsapi.ReviewsAPI.assigned_count')
def test_return_true_when_has_less_than_the_limit_of_projects_in_review(mock_assigned_count, assigner):
    """assigned_count of 1 is below the limit, so the check returns True."""
    mock_assigned_count.return_value = {'assigned_count': 1}
    answer = assigner.has_less_than_the_limit_of_projects_in_review()
    assert answer is True
@mock.patch('revas.reviewsapi.ReviewsAPI.submission_requests')
def test_verify_which_active_submission_requests_the_reviewer_has(mock_submission_requests, assigner):
    """active_submission_requests() passes the API response through as-is."""
    expected_submission_request_response = [{
        'id': 29,
        'user_id': 1938,
        'status': 'fulfilled',
        'closed_at': '2016-03-16T10:35:58.841Z',
        'created_at': '2016-03-16T10:25:58.841Z',
        'submission_id': 109341,
        'updated_at': '2016-03-16T10:35:58.841Z',
        'submission_request_projects': [
            { 'project_id': 42, 'language': 'en-us' },
            { 'project_id': 57, 'language': 'pt-br' }
        ]
    }]
    mock_submission_requests.return_value = expected_submission_request_response
    active_requests = assigner.active_submission_requests()
    assert active_requests == expected_submission_request_response
@mock.patch('revas.reviewsapi.ReviewsAPI.submission_requests')
def test_return_an_empty_list_when_there_are_none_active_submission_requests(mock_submission_requests, assigner):
    """No active requests from the API yields an empty list."""
    expected_submission_request_response = []
    mock_submission_requests.return_value = expected_submission_request_response
    active_requests = assigner.active_submission_requests()
    assert active_requests == expected_submission_request_response
def test_return_true_if_user_was_assigned_to_a_new_review(assigner):
    """A request with status 'fulfilled' counts as an assignment."""
    active_requests = [{
        'id': 29,
        'user_id': 1938,
        'status': 'fulfilled',
        'closed_at': '2016-03-16T10:35:58.841Z',
        'created_at': '2016-03-16T10:25:58.841Z',
        'submission_id': 109341,
        'updated_at': '2016-03-16T10:35:58.841Z',
        'submission_request_projects': [
            { 'project_id': 42, 'language': 'en-us' },
            { 'project_id': 57, 'language': 'pt-br' }
        ]
    }]
    assigned = assigner.assigned_to_new_review(active_requests)
    assert assigned is True
def test_return_false_if_user_was_not_assigned_to_a_new_review(assigner):
    """A request with status 'available' does not count as an assignment."""
    active_requests = [{
        'id': 29,
        'user_id': 1938,
        'status': 'available',
        'closed_at': '2016-03-16T10:35:58.841Z',
        'created_at': '2016-03-16T10:25:58.841Z',
        'submission_id': 109341,
        'updated_at': '2016-03-16T10:35:58.841Z',
        'submission_request_projects': [
            { 'project_id': 42, 'language': 'en-us' },
            { 'project_id': 57, 'language': 'pt-br' }
        ]
    }]
    assigned = assigner.assigned_to_new_review(active_requests)
    assert assigned is False
| anapaulagomes/reviews-assigner | tests/test_assigner.py | Python | mit | 7,282 |
"""
FieldOverride that forces graded components to be only accessible to
students in the Unlocked Group of the ContentTypeGating partition.
"""
from __future__ import absolute_import
from django.conf import settings
from lms.djangoapps.courseware.field_overrides import FieldOverrideProvider
from openedx.features.content_type_gating.helpers import CONTENT_GATING_PARTITION_ID
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
class ContentTypeGatingFieldOverride(FieldOverrideProvider):
    """
    A concrete implementation of
    :class:`~courseware.field_overrides.FieldOverrideProvider` which forces
    graded content to only be accessible to the Full Access group
    """
    def get(self, block, name, default):
        """Override ``group_access`` for graded, scored, weighted blocks.

        Any other field -- or any block not eligible for content gating --
        gets *default* back unchanged.
        """
        if name != 'group_access':
            return default
        # A block is gated only when it is graded, contributes a score,
        # and carries non-zero weight.
        graded = getattr(block, 'graded', False)
        has_score = block.has_score
        weight_not_zero = getattr(block, 'weight', 0) != 0
        problem_eligible_for_content_gating = graded and has_score and weight_not_zero
        if not problem_eligible_for_content_gating:
            return default
        # We want to fetch the value set by course authors since it should take precedence.
        # We cannot simply call "block.group_access" to fetch that value even if we disable
        # field overrides since it will set the group access field to "dirty" with
        # the value read from the course content. Since most content does not have any
        # value for this field it will usually be the default empty dict. This field
        # override changes the value, however, resulting in the LMS thinking that the
        # field data needs to be written back out to the store. This doesn't work,
        # however, since this is a read-only setting in the LMS context. After this
        # call to get() returns, the _dirty_fields dict will be set correctly to contain
        # the value from this field override. This prevents the system from attempting
        # to save the overridden value when it thinks it has changed when it hasn't.
        original_group_access = None
        if self.fallback_field_data.has(block, 'group_access'):
            raw_value = self.fallback_field_data.get(block, 'group_access')
            group_access_field = block.fields.get('group_access')
            if group_access_field is not None:
                original_group_access = group_access_field.from_json(raw_value)
        if original_group_access is None:
            original_group_access = {}
        # For Feature Based Enrollments, we want to inherit group access configurations
        # from parent blocks. The use case is to allow granting access
        # to all graded problems in a unit at the unit level
        parent = block.get_parent()
        if parent is not None:
            merged_group_access = parent.merged_group_access
            if merged_group_access and CONTENT_GATING_PARTITION_ID in merged_group_access:
                # A parent already configures this partition; defer to it.
                return original_group_access
        # Default the content-gating partition to the Full Access group
        # without clobbering an author-supplied value.
        original_group_access.setdefault(
            CONTENT_GATING_PARTITION_ID,
            [settings.CONTENT_TYPE_GATE_GROUP_IDS['full_access']]
        )
        return original_group_access
    @classmethod
    def enabled_for(cls, course):
        """This simple override provider is always enabled"""
        # Enabled whenever content type gating is configured for the course.
        return ContentTypeGatingConfig.enabled_for_course(course_key=course.scope_ids.usage_id.course_key)
| ESOedX/edx-platform | openedx/features/content_type_gating/field_override.py | Python | agpl-3.0 | 3,469 |
__author__ = 'oskyar'
from TFG.apps.answer.forms import InlineAnswerFormSet, AnswerInline
from TFG.apps.topic.models import Topic, Subtopic
from TFG.apps.subject.models import Subject
from TFG.mixins import LoginRequiredMixin
from django.core.urlresolvers import reverse_lazy
from django.http import JsonResponse, HttpResponse
from django.core import serializers
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from vanilla import CreateView, TemplateView, RedirectView, ListView
from django.shortcuts import redirect
class SearchView(ListView):
    """Subject search page.

    Lists subjects whose name contains the URL token, excluding subjects
    the current user teaches and those he/she is already enrolled in.
    """
    template_name = "search/search.html"
    context_object_name = "subjects"
    def get_queryset(self):
        """Subjects matching the search token, minus the user's own ones."""
        return Subject.objects.filter(name__icontains=self.kwargs.get('search')).exclude(
            teacher__user_id=self.request.user.id).exclude(students__exact=self.request.user.userProfile)
    def get_context_data(self, **kwargs):
        """Add breadcrumbs, the user's subject lists and the newest subjects."""
        # FIX: forward **kwargs so context passed by callers is not dropped.
        context = super(SearchView, self).get_context_data(**kwargs)
        context['breadcrumbs'] = self.get_breadcrumbs()
        context['subjects_owner'] = self.request.user.userProfile.subjects.all()
        context['my_subjects'] = self.request.user.userProfile.my_subjects.all()
        context['token'] = self.kwargs['search']
        context['last_subjects'] = Subject.objects.all().order_by('created_on')[:10]
        return context
    def get(self, request, *args, **kwargs):
        # BUG FIX: previously ``super(...).get(request, args, kwargs)``
        # passed the args tuple and kwargs dict as two *positional*
        # arguments instead of unpacking them.
        return super(SearchView, self).get(request, *args, **kwargs)
    def get_breadcrumbs(self):
        """Return the breadcrumb trail shown on the search page."""
        breadcrumbs = list()
        breadcrumbs.append(
            {"url": "/", "title": "Inicio", "tooltip": "Inicio"})
        breadcrumbs.append(
            {"url": "#",
             "title": _("Buscador"),
             "tooltip": _("Buscador de asignaturas por nombre y autor")})
        return breadcrumbs
| oskyar/test-TFG | TFG/apps/search/views.py | Python | gpl-2.0 | 1,891 |
# Copyright (c) 2014-2021 Jan Kaliszewski (zuo) & others. All rights reserved.
#
# Licensed under the MIT License:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
*unittest_expander* is a Python library that provides flexible and
easy-to-use tools to parameterize your unit tests, especially those
based on :class:`unittest.TestCase` from the Python standard library.
The :mod:`unittest_expander` module provides the following tools:
* a test class decorator: :func:`expand`,
* a test method decorator: :func:`foreach`,
* two helper classes: :class:`param` and :class:`paramseq`.
Let's see how to use them...
.. _expand-and-foreach-basics:
Basic use of :func:`expand` and :func:`foreach`
===============================================
Assume we have a (somewhat trivial, in fact) function that checks
whether the given number is even or not:
>>> def is_even(n):
... return n % 2 == 0
Of course, in the real world the code we write is usually more
interesting... Anyway, most often we want to test how it works for
different parameters. At the same time, it is not the best idea to
test many cases in a loop within one test method -- because of lack
of test isolation (tests depend on other ones -- they may inherit some
state which can affect their results), less information on failures (a
test failure prevents subsequent tests from being run), less clear
result messages (you don't see at first glance which case is the
actual culprit) etc.
So let's write our tests in a smarter way:
>>> import unittest
>>> from unittest_expander import expand, foreach
>>>
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... @foreach(0, 2, -14) # call variant #1: parameter collection
... def test_even(self, n): # specified using multiple arguments
... self.assertTrue(is_even(n))
...
... @foreach([-1, 17]) # call variant #2: parameter collection as
... def test_odd(self, n): # one argument being a container (e.g. list)
... self.assertFalse(is_even(n))
...
... # just to demonstrate that test cases are really isolated
... def setUp(self):
... sys.stdout.write(' [DEBUG: separate test setUp] ')
As you see, it's fairly simple: you attach parameter collections to your
test methods with the :func:`foreach` decorator and decorate the whole
test case class with the :func:`expand` decorator. The latter does the
actual job, i.e. generates (and adds to the test case class)
parameterized versions of the methods.
Let's run this stuff...
>>> # a helper function that will run tests in our examples
>>> # -- NORMALLY YOU DON'T NEED IT, of course!
>>> import sys
>>> def run_tests(*test_case_classes):
... suite = unittest.TestSuite(
... unittest.TestLoader().loadTestsFromTestCase(cls)
... for cls in test_case_classes)
... unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_even__<-14> ... [DEBUG: separate test setUp] ok
test_even__<0> ... [DEBUG: separate test setUp] ok
test_even__<2> ... [DEBUG: separate test setUp] ok
test_odd__<-1> ... [DEBUG: separate test setUp] ok
test_odd__<17> ... [DEBUG: separate test setUp] ok
...Ran 5 tests...
OK
To test our *is_even()* function we created two test case methods --
each accepting one parameter value.
Another approach could be to define a method that accepts a couple of
arguments:
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... @foreach(
... (-14, True),
... (-1, False),
... (0, True),
... (2, True),
... (17, False),
... )
... def test_is_even(self, n, expected):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<-1,False> ... ok
test_is_even__<-14,True> ... ok
test_is_even__<0,True> ... ok
test_is_even__<17,False> ... ok
test_is_even__<2,True> ... ok
...Ran 5 tests...
OK
As you see, you can use a tuple to specify several parameter values for
a test call.
.. _param-basics:
More flexibility: :class:`param`
================================
Parameters can also be specified in a more descriptive way -- with
keyword arguments. It is possible when you use :class:`param` objects
instead of tuples:
>>> from unittest_expander import param
>>>
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... @foreach(
... param(-14, expected=True),
... param(-1, expected=False),
... param(0, expected=True),
... param(2, expected=True),
... param(17, expected=False),
... )
... def test_is_even(self, n, expected):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<-1,expected=False> ... ok
test_is_even__<-14,expected=True> ... ok
test_is_even__<0,expected=True> ... ok
test_is_even__<17,expected=False> ... ok
test_is_even__<2,expected=True> ... ok
...Ran 5 tests...
OK
Generated *labels* of our tests (attached to the names of the generated
test methods) became less cryptic. But what to do if we need to label
our parameters explicitly?
We can use the :meth:`~param.label` method of :class:`param` objects:
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... @foreach(
... param(sys.maxsize, expected=False).label('sys.maxsize'),
... param(-sys.maxsize, expected=False).label('-sys.maxsize'),
... )
... def test_is_even(self, n, expected):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<-sys.maxsize> ... ok
test_is_even__<sys.maxsize> ... ok
...Ran 2 tests...
OK
If a test method accepts the `label` keyword argument, the appropriate
label (either auto-generated from parameter values or explicitly
specified with :meth:`param.label`) will be passed in as that
argument:
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... @foreach(
... param(sys.maxsize, expected=False).label('sys.maxsize'),
... param(-sys.maxsize, expected=False).label('-sys.maxsize'),
... )
... def test_is_even(self, n, expected, label):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
... assert label in ('sys.maxsize', '-sys.maxsize')
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<-sys.maxsize> ... ok
test_is_even__<sys.maxsize> ... ok
...Ran 2 tests...
OK
.. _other-ways-to-label:
Other ways to label your tests explicitly
=========================================
You can also label particular tests by passing a dictionary directly
into :func:`foreach`:
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... @foreach({
... 'noninteger': (1.2345, False),
... 'horribleabuse': ('%s', False),
... })
... def test_is_even(self, n, expected, label):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
... assert label in ('noninteger', 'horribleabuse')
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<horribleabuse> ... ok
test_is_even__<noninteger> ... ok
...Ran 2 tests...
OK
...or just using keyword arguments:
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... @foreach(
... noninteger=(1.2345, False),
... horribleabuse=('%s', False),
... )
... def test_is_even(self, n, expected, label):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
... assert label in ('noninteger', 'horribleabuse')
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<horribleabuse> ... ok
test_is_even__<noninteger> ... ok
...Ran 2 tests...
OK
.. _paramseq-basics:
Smart parameter collection: :class:`paramseq`
=============================================
How to concatenate some separately created parameter collections?
Just transform them (or at least the first of them) into
:class:`paramseq` instances -- and then add one to another
(with the ``+`` operator):
>>> from unittest_expander import paramseq
>>>
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... basic_params1 = paramseq( # init variant #1: several parameters
... param(-14, expected=True),
... param(-1, expected=False),
... )
... basic_params2 = paramseq([ # init variant #2: one parameter collection
... param(0, expected=True).label('just zero, because why not?'),
... param(2, expected=True),
... param(17, expected=False),
... ])
... basic = basic_params1 + basic_params2
...
... huge = paramseq({ # explicit labelling by passing a dict
... 'sys.maxsize': param(sys.maxsize, expected=False),
... '-sys.maxsize': param(-sys.maxsize, expected=False),
... })
...
... other = paramseq(
... (-15, False),
... param(15, expected=False),
... # explicit labelling with keyword arguments:
... noninteger=param(1.2345, expected=False),
... horribleabuse=param('%s', expected=False),
... )
...
... just_dict = {
... '18->True': (18, True),
... }
...
... just_list = [
... param(12399999999999999, False),
... param(n=12399999999999998, expected=True),
... ]
...
... # just add them one to another (producing a new paramseq)
... all_params = basic + huge + other + just_dict + just_list
...
... @foreach(all_params)
... def test_is_even(self, n, expected):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<-1,expected=False> ... ok
test_is_even__<-14,expected=True> ... ok
test_is_even__<-15,False> ... ok
test_is_even__<-sys.maxsize> ... ok
test_is_even__<15,expected=False> ... ok
test_is_even__<17,expected=False> ... ok
test_is_even__<18->True> ... ok
test_is_even__<2,expected=True> ... ok
test_is_even__<<12399999999...>,False> ... ok
test_is_even__<expected=True,n=<12399999999...>> ... ok
test_is_even__<horribleabuse> ... ok
test_is_even__<just zero, because why not?> ... ok
test_is_even__<noninteger> ... ok
test_is_even__<sys.maxsize> ... ok
...Ran 14 tests...
OK
.. note::
Parameter collections (such as sequences, mappings, sets or
:class:`paramseq` instances) do not need to be created or bound
within the test case class body; you could, for example, import them
from a separate module. Obviously, it makes code reuse and
refactorization easier.
Also, note that the signatures of the :func:`foreach` decorator and
the :class:`paramseq` constructor are identical: you pass in either
exactly one positional argument which is a parameter collection or
several (more than one) positional and/or keyword arguments being
singular parameter values or tuples of parameter values, or
:class:`param` instances.
.. note::
We said that a parameter collection can be a sequence (among others;
    see the note above). To be more precise: it can be any sequence,
    except that it cannot be a text string.
A :class:`paramseq` instance can also be created from a callable object
that returns a sequence or another iterable (e.g. a generator):
>>> from random import randint
>>>
>>> @paramseq # <- yes, used as a decorator
... def randomized(test_case_cls):
... LO, HI = test_case_cls.LO, test_case_cls.HI
... print('DEBUG: LO = {0}; HI = {1}'.format(LO, HI))
... print('----')
... yield param(randint(LO, HI) * 2,
... expected=True).label('random even')
... yield param(randint(LO, HI) * 2 + 1,
... expected=False).label('random odd')
...
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... LO = -100
... HI = 100
...
... @foreach(randomized)
... def test_is_even(self, n, expected):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
... # reusing the same instance of paramseq to show that the underlying
... # callable is called separately for each use of @foreach:
... @foreach(randomized)
... def test_is_even_negated_when_incremented(self, n, expected):
... actual = (not is_even(n + 1))
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
DEBUG: LO = -100; HI = 100
----
DEBUG: LO = -100; HI = 100
----
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<random even> ... ok
test_is_even__<random odd> ... ok
test_is_even_negated_when_incremented__<random even> ... ok
test_is_even_negated_when_incremented__<random odd> ... ok
...Ran 4 tests...
OK
The callable object (such as the generator function in the above
example) that is passed into the :class:`paramseq` constructor can
accept no arguments or one positional argument -- in the latter case
the test case class is passed in.
.. note::
The callable object is called, and its iterable result is iterated
over, *when* the :func:`expand` decorator is being executed, that is,
*before* generating parameterized test methods.
What *should also be emphasized* is that those operations (the
aforementioned call and then iteration over its result) are performed
*separately* for each use of :func:`foreach` with our
:class:`paramseq` instance as its argument (or with another
:class:`paramseq` instance that includes our instance; see the
following code snippet in which the ``input_values_and_results``
instance includes the previously created ``randomized`` instance...).
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... LO = -999999
... HI = 999999
...
... # reusing the same, previously created, instance of paramseq
... # (`randomized`) to show that the underlying callable will
... # still be called separately for each use of @foreach...
... input_values_and_results = randomized + [
... param(-14, expected=True),
... param(-1, expected=False),
... param(0, expected=True),
... param(2, expected=True),
... param(17, expected=False),
... ]
...
... @foreach(input_values_and_results)
... def test_is_even(self, n, expected):
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
... @foreach(input_values_and_results)
... def test_is_even_negated_when_incremented(self, n, expected):
... actual = (not is_even(n + 1))
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
DEBUG: LO = -999999; HI = 999999
----
DEBUG: LO = -999999; HI = 999999
----
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<-1,expected=False> ... ok
test_is_even__<-14,expected=True> ... ok
test_is_even__<0,expected=True> ... ok
test_is_even__<17,expected=False> ... ok
test_is_even__<2,expected=True> ... ok
test_is_even__<random even> ... ok
test_is_even__<random odd> ... ok
test_is_even_negated_when_incremented__<-1,expected=False> ... ok
test_is_even_negated_when_incremented__<-14,expected=True> ... ok
test_is_even_negated_when_incremented__<0,expected=True> ... ok
test_is_even_negated_when_incremented__<17,expected=False> ... ok
test_is_even_negated_when_incremented__<2,expected=True> ... ok
test_is_even_negated_when_incremented__<random even> ... ok
test_is_even_negated_when_incremented__<random odd> ... ok
...Ran 14 tests...
OK
.. _foreach-cartesian:
Combining several :func:`foreach` to get Cartesian product
==========================================================
You can apply two or more :func:`foreach` decorators to the same test
method -- to combine several parameter collections, obtaining their
Cartesian product:
>>> @expand
... class Test_is_even(unittest.TestCase):
...
... # one param collection (7 items)
... @paramseq
... def randomized():
... yield param(randint(-(10 ** 6), 10 ** 6) * 2,
... expected=True).label('random even')
... yield param(randint(-(10 ** 6), 10 ** 6) * 2 + 1,
... expected=False).label('random odd')
... input_values_and_results = randomized + [
... param(-14, expected=True),
... param(-1, expected=False),
... param(0, expected=True),
... param(2, expected=True),
... param(17, expected=False),
... ]
...
... # another param collection (2 items)
... input_types = dict(
... integer=int,
... floating=float,
... )
...
... # let's combine them (7 * 2 -> 14 parameterized tests)
... @foreach(input_values_and_results)
... @foreach(input_types)
... def test_is_even(self, input_type, n, expected):
... n = input_type(n)
... actual = is_even(n)
... self.assertTrue(isinstance(actual, bool))
... self.assertEqual(actual, expected)
...
>>> run_tests(Test_is_even) # doctest: +ELLIPSIS
test_is_even__<floating, -1,expected=False> ... ok
test_is_even__<floating, -14,expected=True> ... ok
test_is_even__<floating, 0,expected=True> ... ok
test_is_even__<floating, 17,expected=False> ... ok
test_is_even__<floating, 2,expected=True> ... ok
test_is_even__<floating, random even> ... ok
test_is_even__<floating, random odd> ... ok
test_is_even__<integer, -1,expected=False> ... ok
test_is_even__<integer, -14,expected=True> ... ok
test_is_even__<integer, 0,expected=True> ... ok
test_is_even__<integer, 17,expected=False> ... ok
test_is_even__<integer, 2,expected=True> ... ok
test_is_even__<integer, random even> ... ok
test_is_even__<integer, random odd> ... ok
...Ran 14 tests...
OK
If parameters combined this way specify some conflicting keyword
arguments they are detected and an error is reported:
>>> params1 = [param(a=1, b=2, c=3)]
>>> params2 = [param(b=4, c=3, d=2)]
>>>
>>> @expand # doctest: +ELLIPSIS
... class TestSomething(unittest.TestCase):
...
... @foreach(params2)
... @foreach(params1)
... def test(self, **kw):
... "something"
...
Traceback (most recent call last):
...
ValueError: conflicting keyword arguments: 'b', 'c'
.. _context-basics:
Context-manager-based fixtures: :meth:`param.context`
=====================================================
When dealing with resources managed with `context managers`_, you can
specify a *context manager factory* and its arguments using the
:meth:`~param.context` method of a :class:`param` object -- then each
call of the resultant parameterized test will be enclosed in a dedicated
*context manager* instance (created by calling the *context manager
factory* with the given arguments).
.. _context managers: https://docs.python.org/reference/
datamodel.html#with-statement-context-managers
>>> from tempfile import NamedTemporaryFile
>>>
>>> @expand
... class TestSaveLoad(unittest.TestCase):
...
... data_with_contexts = [
... param(save='', load='').context(NamedTemporaryFile, 'w+t'),
... param(save='abc', load='abc').context(NamedTemporaryFile, 'w+t'),
... ]
...
... @foreach(data_with_contexts)
... def test_save_load(self, save, load, context_targets):
... file = context_targets[0]
... file.write(save)
... file.seek(0)
... load_actually = file.read()
... self.assertEqual(load_actually, load)
...
... # reusing the same params to show that a *new* context manager
... # instance is created for each test call:
... @foreach(data_with_contexts)
... def test_save_load_with_spaces(self, save, load, context_targets):
... file = context_targets[0]
... file.write(' ' + save + ' ')
... file.seek(0)
... load_actually = file.read()
... self.assertEqual(load_actually, ' ' + load + ' ')
...
>>> run_tests(TestSaveLoad) # doctest: +ELLIPSIS
test_save_load__<load='',save=''> ... ok
test_save_load__<load='abc',save='abc'> ... ok
test_save_load_with_spaces__<load='',save=''> ... ok
test_save_load_with_spaces__<load='abc',save='abc'> ... ok
...Ran 4 tests...
OK
>>>
>>> # repeating the tests to show that, really, a *new* context manager
... # instance is created for *each* test call:
... run_tests(TestSaveLoad) # doctest: +ELLIPSIS
test_save_load__<load='',save=''> ... ok
test_save_load__<load='abc',save='abc'> ... ok
test_save_load_with_spaces__<load='',save=''> ... ok
test_save_load_with_spaces__<load='abc',save='abc'> ... ok
...Ran 4 tests...
OK
*Additional feature:* you can see in the above example is that if a test
method accepts the `context_targets` keyword argument then a list of
context manager *as-targets* (i.e. objects returned by context managers'
:meth:`__enter__`) will be passed in as that argument.
It is a list because there can be more than one *context* per parameter
collection's item, e.g.:
>>> import contextlib
>>> @contextlib.contextmanager
... def debug_cm(tag=None):
... debug.append('enter' + (':{0}'.format(tag) if tag else ''))
... yield tag
... debug.append('exit' + (':{0}'.format(tag) if tag else ''))
...
>>> debug = []
>>>
>>> @expand
... class TestSaveLoad(unittest.TestCase):
...
... params_with_contexts = [
... (
... param(save='', load='', expected_tag='FOO')
... .context(NamedTemporaryFile, 'w+t') # (outer one)
... .context(debug_cm, tag='FOO') # (inner one)
... ),
... (
... param(save='abc', load='abc', expected_tag='BAR')
... .context(NamedTemporaryFile, 'w+t')
... .context(debug_cm, tag='BAR')
... ),
... ]
...
... @foreach(params_with_contexts)
... def test_save_load(self, save, load, expected_tag, context_targets):
... file, tag = context_targets
... assert tag == expected_tag
... file.write(save)
... file.seek(0)
... load_actually = file.read()
... self.assertEqual(load_actually, load)
... debug.append('test')
...
>>> debug == []
True
>>> run_tests(TestSaveLoad) # doctest: +ELLIPSIS
test_save_load__<expected_tag='BAR',load='abc',save='abc'> ... ok
test_save_load__<expected_tag='FOO',load='',save=''> ... ok
...Ran 2 tests...
OK
>>> debug == [
... 'enter:BAR', 'test', 'exit:BAR',
... 'enter:FOO', 'test', 'exit:FOO',
... ]
True
Contexts are properly handled (context managers' :meth:`__enter__` and
:meth:`__exit__` are properly called...) -- also when errors occur
(with some legitimate subtle reservations -- see:
:ref:`contexts-cannot-suppress-exceptions`):
>>> @contextlib.contextmanager
... def err_debug_cm(tag):
... if tag.endswith('context-enter-error'):
... debug.append('ERR-enter:' + tag)
... raise RuntimeError('error in __enter__')
... debug.append('enter:' + tag)
... try:
... yield tag
... if tag.endswith('context-exit-error'):
... raise RuntimeError('error in __exit__')
... except:
... debug.append('ERR-exit:' + tag)
... raise
... else:
... debug.append('exit:' + tag)
...
>>> debug = []
>>> err_params = [
... (
... param().label('no_error')
... .context(err_debug_cm, tag='outer')
... .context(err_debug_cm, tag='inner')
... ),
... (
... param().label('test_fail')
... .context(err_debug_cm, tag='outer')
... .context(err_debug_cm, tag='inner')
... ),
... (
... param().label('test_error')
... .context(err_debug_cm, tag='outer')
... .context(err_debug_cm, tag='inner')
... ),
... (
... param().label('inner_context_enter_error')
... .context(err_debug_cm, tag='outer')
... .context(err_debug_cm, tag='inner-context-enter-error')
... ),
... (
... param().label('inner_context_exit_error')
... .context(err_debug_cm, tag='outer')
... .context(err_debug_cm, tag='inner-context-exit-error')
... ),
... (
... param().label('outer_context_enter_error')
... .context(err_debug_cm, tag='outer-context-enter-error')
... .context(err_debug_cm, tag='inner')
... ),
... (
... param().label('outer_context_exit_error')
... .context(err_debug_cm, tag='outer-context-exit-error')
... .context(err_debug_cm, tag='inner')
... ),
... ]
>>>
>>> @expand
... class SillyTest(unittest.TestCase):
...
... def setUp(self):
... debug.append('setUp')
...
... def tearDown(self):
... debug.append('tearDown')
...
... @foreach(err_params)
... def test(self, label):
... if label == 'test_fail':
... debug.append('FAIL-test')
... self.fail()
... elif label == 'test_error':
... debug.append('ERROR-test')
... raise RuntimeError
... else:
... debug.append('test')
...
>>> run_tests(SillyTest) # doctest: +ELLIPSIS
test__<inner_context_enter_error> ... ERROR
test__<inner_context_exit_error> ... ERROR
test__<no_error> ... ok
test__<outer_context_enter_error> ... ERROR
test__<outer_context_exit_error> ... ERROR
test__<test_error> ... ERROR
test__<test_fail> ... FAIL
...Ran 7 tests...
FAILED (failures=1, errors=5)
>>> debug == [
... # inner_context_enter_error
... 'setUp',
... 'enter:outer',
... 'ERR-enter:inner-context-enter-error',
... 'ERR-exit:outer',
... 'tearDown',
...
... # inner_context_exit_error
... 'setUp',
... 'enter:outer',
... 'enter:inner-context-exit-error',
... 'test',
... 'ERR-exit:inner-context-exit-error',
... 'ERR-exit:outer',
... 'tearDown',
...
... # no_error
... 'setUp',
... 'enter:outer',
... 'enter:inner',
... 'test',
... 'exit:inner',
... 'exit:outer',
... 'tearDown',
...
... # outer_context_enter_error
... 'setUp',
... 'ERR-enter:outer-context-enter-error',
... 'tearDown',
...
... # outer_context_exit_error
... 'setUp',
... 'enter:outer-context-exit-error',
... 'enter:inner',
... 'test',
... 'exit:inner',
... 'ERR-exit:outer-context-exit-error',
... 'tearDown',
...
... # test_error
... 'setUp',
... 'enter:outer',
... 'enter:inner',
... 'ERROR-test',
... 'ERR-exit:inner',
... 'ERR-exit:outer',
... 'tearDown',
...
... # test_fail
... 'setUp',
... 'enter:outer',
... 'enter:inner',
... 'FAIL-test',
... 'ERR-exit:inner',
... 'ERR-exit:outer',
... 'tearDown',
... ]
True
Note that contexts attached to test *method* params (in contrast to
those attached to test *class* params -- see below:
:ref:`foreach-as-class-decorator`) are handled *directly* before (by
running :meth:`__enter__`) and after (by running :meth:`__exit__`) a
given parameterized test method call, that is, *after* :meth:`setUp`
and *before* :meth:`tearDown` calls -- so :meth:`setUp` and
:meth:`tearDown` are unaffected by any errors related to those
contexts.
On the other hand, an error in :meth:`setUp` prevents a test from
being called -- then contexts are not touched at all:
>>> def setUp(self):
... debug.append('setUp')
... raise ValueError
...
>>> SillyTest.setUp = setUp
>>> debug = []
>>> run_tests(SillyTest) # doctest: +ELLIPSIS
test__<inner_context_enter_error> ... ERROR
test__<inner_context_exit_error> ... ERROR
test__<no_error> ... ERROR
test__<outer_context_enter_error> ... ERROR
test__<outer_context_exit_error> ... ERROR
test__<test_error> ... ERROR
test__<test_fail> ... ERROR
...Ran 7 tests...
FAILED (errors=7)
>>> debug == ['setUp', 'setUp', 'setUp', 'setUp', 'setUp', 'setUp', 'setUp']
True
.. _paramseq-context:
Convenience shortcut: :meth:`paramseq.context`
==============================================
You can use the method :meth:`paramseq.context` to apply the given
context properties to *all* parameter items the :class:`paramseq`
instance aggregates:
>>> @contextlib.contextmanager
... def silly_cm():
... yield 42
...
>>> @expand
... class TestSaveLoad(unittest.TestCase):
...
... params_with_contexts = paramseq(
... param(save='', load=''),
... param(save='abc', load='abc'),
... ).context(NamedTemporaryFile, 'w+t').context(silly_cm)
...
... @foreach(params_with_contexts)
... def test_save_load(self, save, load, context_targets):
... file, silly_cm_target = context_targets
... assert silly_cm_target == 42
... file.write(save)
... file.seek(0)
... load_actually = file.read()
... self.assertEqual(load_actually, load)
...
>>> run_tests(TestSaveLoad) # doctest: +ELLIPSIS
test_save_load__<load='',save=''> ... ok
test_save_load__<load='abc',save='abc'> ... ok
...Ran 2 tests...
OK
.. note::
:meth:`paramseq.context` as well as :meth:`param.context` and
:meth:`param.label` methods create new objects (respectively
:class:`paramseq` or :class:`param` instances), *without* modifying
the existing ones.
>>> pseq1 = paramseq(1, 2, 3)
>>> pseq2 = pseq1.context(open, '/etc/hostname', 'rb')
>>> isinstance(pseq1, paramseq) and isinstance(pseq2, paramseq)
True
>>> pseq1 is not pseq2
True
>>> p1 = param(1, 2, c=3)
>>> p2 = p1.context(open, '/etc/hostname', 'rb')
>>> p3 = p2.label('one with label')
>>> isinstance(p1, param) and isinstance(p2, param) and isinstance(p3, param)
True
>>> p1 is not p2
True
>>> p2 is not p3
True
>>> p3 is not p1
True
Generally, instances of these types (:class:`param` and :class:`paramseq`)
should be considered immutable.
.. _foreach-as-class-decorator:
Deprecated feature: :func:`foreach` as a class decorator
========================================================
.. warning::
This is the description of a deprecated feature.
The parts of *unittest_expander* related to applying :func:`foreach`
**to classes** are broken by design and will be, in a future version
of the library, either revamped (in a backwards incompatible way)
or just completely removed.
:func:`foreach` can be used not only as a test case *method* decorator
but also as a test case *class* decorator -- to generate parameterized
test case *classes*.
That allows you to share each specified parameter/context/label across
all test methods. Parameters (and labels, and context targets) are
accessible as instance attributes (*not* as method arguments) from any
test method, as well as from the :meth:`setUp` and :meth:`tearDown`
methods.
>>> params_with_contexts = paramseq( # 2 param items
... param(save='', load=''),
... param(save='abc', load='abc'),
... ).context(NamedTemporaryFile, 'w+t')
>>> useless_data = [ # 2 param items
... param('foo', b=42),
... param('foo', b=433)]
>>>
>>> @expand(into=globals()) # note the 'into' keyword-only argument
... @foreach(params_with_contexts)
... @foreach(useless_data)
... class TestSaveLoad(unittest.TestCase):
...
... def setUp(self):
... self.file = self.context_targets[0]
... assert self.save == self.load
... assert self.params == ('foo',) # self.params <- *positional* ones
... assert self.b in (42, 433)
... assert 'foo' in self.label
... # (note: on Python 2.7+ we could resign from using contexts
... # and just use unittest.TestCase.addCleanup() here...)
...
... @foreach(param(suffix=' '), param(suffix='XX')) # 2 param items
... def test_save_load(self, suffix):
... self.file.write(self.save + suffix)
... self.file.seek(0)
... load_actually = self.file.read()
... self.assertEqual(load_actually, self.load + suffix)
...
>>> for name in dir(): # doctest: +ELLIPSIS
... if name.startswith('TestSaveLoad'):
... name
...
'TestSaveLoad'
"TestSaveLoad__<'foo',b=42, load='',save=''>"
"TestSaveLoad__<'foo',b=42, load='abc',save='abc'>"
"TestSaveLoad__<'foo',b=433, load='',save=''>"
"TestSaveLoad__<'foo',b=433, load='abc',save='abc'>"
>>>
>>> test_classes = [globals()[name] for name in dir()
... if name.startswith('TestSaveLoad__')]
>>> # (note: 2 * 2 * 2 param items -> 8 parameterized tests)
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test_save_load__<suffix=' '> (..._<'foo',b=42, load='',save=''>) ... ok
test_save_load__<suffix='XX'> (..._<'foo',b=42, load='',save=''>) ... ok
test_save_load__<suffix=' '> (..._<'foo',b=42, load='abc',save='abc'>) ... ok
test_save_load__<suffix='XX'> (..._<'foo',b=42, load='abc',save='abc'>) ... ok
test_save_load__<suffix=' '> (..._<'foo',b=433, load='',save=''>) ... ok
test_save_load__<suffix='XX'> (..._<'foo',b=433, load='',save=''>) ... ok
test_save_load__<suffix=' '> (..._<'foo',b=433, load='abc',save='abc'>) ... ok
test_save_load__<suffix='XX'> (..._<'foo',b=433, load='abc',save='abc'>) ... ok
...Ran 8 tests...
OK
As you see, you can combine :func:`foreach` as *class* decorator(s) with
:func:`foreach` as *method* decorator(s) -- you will obtain tests
parameterized with the Cartesian product of the involved parameter
collections.
*Important:* when using :func:`foreach` as a *class* decorator you must
remember to place :func:`expand` as the topmost (the outer) class
decorator (above all :func:`foreach` decorators).
The *into* keyword argument for the :func:`expand` decorator specifies
where the generated (parameterized) subclasses of the decorated test
case class should be placed; the attribute value should be either a
mapping (typically: the :func:`globals()` dictionary) or a
(non-read-only) Python module object, or a (possibly dotted) name of
such a module.
Below: an example with the *into* argument being a module object:
>>> import types
>>> module = types.ModuleType('_my_test_module')
>>>
>>> @expand(into=module)
... @foreach(params_with_contexts)
... class TestSaveLoad(unittest.TestCase):
...
... def setUp(self):
... self.file = self.context_targets[0]
...
... def test_save_load(self):
... self.file.write(self.save)
... self.file.seek(0)
... load_actually = self.file.read()
... self.assertEqual(load_actually, self.load)
...
>>> for name in dir(module):
... if not name.startswith('__'):
... name # doctest: +ELLIPSIS
...
"TestSaveLoad__<load='',save=''>"
"TestSaveLoad__<load='abc',save='abc'>"
>>>
>>> TestSaveLoad__1 = getattr(module, "TestSaveLoad__<load='',save=''>")
>>> TestSaveLoad__2 = getattr(module, "TestSaveLoad__<load='abc',save='abc'>")
>>>
>>> run_tests(TestSaveLoad__1, TestSaveLoad__2) # doctest: +ELLIPSIS
test_save_load (...TestSaveLoad__<load='',save=''>) ... ok
test_save_load (...TestSaveLoad__<load='abc',save='abc'>) ... ok
...Ran 2 tests...
OK
...and with *into* being an importable module name:
>>> module = types.ModuleType('_my_test_module')
>>> sys.modules['_my_test_module'] = module
>>>
>>> @expand(into='_my_test_module')
... @foreach(params_with_contexts)
... class TestSaveLoad(unittest.TestCase):
...
... def setUp(self):
... self.file = self.context_targets[0]
...
... def test_save_load(self):
... self.file.write(self.save)
... self.file.seek(0)
... load_actually = self.file.read()
... self.assertEqual(load_actually, self.load)
...
>>> for name in dir(module):
... if not name.startswith('__'):
... name # doctest: +ELLIPSIS
...
"TestSaveLoad__<load='',save=''>"
"TestSaveLoad__<load='abc',save='abc'>"
>>>
>>> TestSaveLoad__1 = getattr(module, "TestSaveLoad__<load='',save=''>")
>>> TestSaveLoad__2 = getattr(module, "TestSaveLoad__<load='abc',save='abc'>")
>>>
>>> run_tests(TestSaveLoad__1, TestSaveLoad__2) # doctest: +ELLIPSIS
test_save_load (...TestSaveLoad__<load='',save=''>) ... ok
test_save_load (...TestSaveLoad__<load='abc',save='abc'>) ... ok
...Ran 2 tests...
OK
...and with *into* not specified -- which has, generally, the same
effect as setting it to the :func:`globals` dictionary (however, this
implicit variant may not work with those Python implementations that do
not support stack frame introspection; *note:* CPython and PyPy do support
it perfectly ``:-)``):
.. doctest::
:hide:
>>> # magic needed only to run the next example in doctests environment
>>> # -- just ignore it
>>> __orig_expand = expand
>>> def expand(test_cls):
... global expand, __name__
... try:
... this = types.ModuleType('__my_weird_module')
... sys.modules['__my_weird_module'] = this
... orig_name = __name__
... try:
... __name__ = this.__name__
... result = __orig_expand(test_cls)
... globals().update(vars(this))
... return result
... finally:
... __name__ = orig_name
... finally:
... expand = __orig_expand
>>> @expand
... @foreach(params_with_contexts)
... class TestSaveLoadIt(unittest.TestCase):
...
... def setUp(self):
... self.file = self.context_targets[0]
...
... def test_save_load(self):
... self.file.write(self.save)
... self.file.seek(0)
... load_actually = self.file.read()
... self.assertEqual(load_actually, self.load)
...
>>> for name in dir():
... if name.startswith('TestSaveLoadIt'):
... name
...
'TestSaveLoadIt'
"TestSaveLoadIt__<load='',save=''>"
"TestSaveLoadIt__<load='abc',save='abc'>"
>>>
>>> TestSaveLoadIt__1 = globals()["TestSaveLoadIt__<load='',save=''>"]
>>> TestSaveLoadIt__2 = globals()["TestSaveLoadIt__<load='abc',save='abc'>"]
>>>
>>> run_tests(TestSaveLoadIt__1, TestSaveLoadIt__2) # doctest: +ELLIPSIS
test_save_load (...TestSaveLoadIt__<load='',save=''>) ... ok
test_save_load (...TestSaveLoadIt__<load='abc',save='abc'>) ... ok
...Ran 2 tests...
OK
Contexts are, obviously, properly handled -- also when errors occur
(with some legitimate subtle reservations -- see:
:ref:`contexts-cannot-suppress-exceptions`):
>>> debug = [] # see earlier definition of err_debug_cm()...
>>> err_params.extend([ # see earlier initialization of err_params...
... (
... param().label('setUp_error')
... .context(err_debug_cm, tag='outer')
... .context(err_debug_cm, tag='inner')
... ),
... (
... param().label('tearDown_error')
... .context(err_debug_cm, tag='outer')
... .context(err_debug_cm, tag='inner')
... ),
... ])
>>> into_dict = {} # this time we'll pass another mapping (not globals())
>>>
>>> @expand(into=into_dict)
... @foreach(err_params)
... class SillyTest(unittest.TestCase):
...
... def setUp(self):
... if self.label == 'setUp_error':
... debug.append('ERR-setUp')
... raise RuntimeError
... debug.append('setUp')
...
... def tearDown(self):
... if self.label == 'tearDown_error':
... debug.append('ERR-tearDown')
... raise RuntimeError
... debug.append('tearDown')
...
... def test(self):
... if self.label == 'test_fail':
... debug.append('FAIL-test')
... self.fail()
... elif self.label == 'test_error':
... debug.append('ERROR-test')
... raise RuntimeError
... else:
... debug.append('test')
...
>>> for name in sorted(into_dict):
... name
...
'SillyTest__<inner_context_enter_error>'
'SillyTest__<inner_context_exit_error>'
'SillyTest__<no_error>'
'SillyTest__<outer_context_enter_error>'
'SillyTest__<outer_context_exit_error>'
'SillyTest__<setUp_error>'
'SillyTest__<tearDown_error>'
'SillyTest__<test_error>'
'SillyTest__<test_fail>'
>>>
>>> test_classes = [into_dict[name] for name in sorted(into_dict)]
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test (...SillyTest__<inner_context_enter_error>) ... ERROR
test (...SillyTest__<inner_context_exit_error>) ... ERROR
test (...SillyTest__<no_error>) ... ok
test (...SillyTest__<outer_context_enter_error>) ... ERROR
test (...SillyTest__<outer_context_exit_error>) ... ERROR
test (...SillyTest__<setUp_error>) ... ERROR
test (...SillyTest__<tearDown_error>) ... ERROR
test (...SillyTest__<test_error>) ... ERROR
test (...SillyTest__<test_fail>) ... FAIL
...Ran 9 tests...
FAILED (failures=1, errors=7)
>>> debug == [
... # inner_context_enter_error
... 'enter:outer',
... 'ERR-enter:inner-context-enter-error',
... 'ERR-exit:outer',
...
... # inner_context_exit_error
... 'enter:outer',
... 'enter:inner-context-exit-error',
... 'setUp',
... 'test',
... 'tearDown',
... 'ERR-exit:inner-context-exit-error',
... 'ERR-exit:outer',
...
... # no_error
... 'enter:outer',
... 'enter:inner',
... 'setUp',
... 'test',
... 'tearDown',
... 'exit:inner',
... 'exit:outer',
...
... # outer_context_enter_error
... 'ERR-enter:outer-context-enter-error',
...
... # outer_context_exit_error
... 'enter:outer-context-exit-error',
... 'enter:inner',
... 'setUp',
... 'test',
... 'tearDown',
... 'exit:inner',
... 'ERR-exit:outer-context-exit-error',
...
... # setUp_error
... 'enter:outer',
... 'enter:inner',
... 'ERR-setUp',
... 'ERR-exit:inner',
... 'ERR-exit:outer',
...
... # tearDown_error
... 'enter:outer',
... 'enter:inner',
... 'setUp',
... 'test',
... 'ERR-tearDown',
... 'ERR-exit:inner',
... 'ERR-exit:outer',
...
... # test_error
... 'enter:outer',
... 'enter:inner',
... 'setUp',
... 'ERROR-test', # note:
... 'tearDown', # *not* ERR-tearDown
... 'exit:inner', # *not* ERR-exit:inner
... 'exit:outer', # *not* ERR-exit:outer
...
... # test_fail
... 'enter:outer',
... 'enter:inner',
... 'setUp',
... 'FAIL-test', # note:
... 'tearDown', # *not* ERR-tearDown
... 'exit:inner', # *not* ERR-exit:inner
... 'exit:outer', # *not* ERR-exit:outer
... ]
True
Note that contexts attached to test *class* params (in contrast to
those attached to test *method* params -- see: :ref:`context-basics`)
are automatically handled within :meth:`setUp` and (if applicable)
:meth:`tearDown` -- so :meth:`setUp` and :meth:`tearDown` *are*
affected by errors related to those contexts. On the other hand,
context finalization is *not* affected by any exceptions from actual
test methods (i.e. context managers' :meth:`__exit__` methods are
called with ``None, None, None`` arguments anyway -- unless
:meth:`setUp`/:meth:`tearDown` or an enclosed context manager's
:meth:`__enter__`/:meth:`__exit__` raises an exception).
Additional note about extending :meth:`setUp` and :meth:`tearDown`
------------------------------------------------------------------
.. warning::
This is the description of a deprecated feature.
As you can see in the above examples, you can, without any problem,
implement your own :meth:`setUp` and/or :meth:`tearDown` methods in
test classes decorated with :func:`foreach` and :func:`expand`; the
*unittest_expander* machinery, which provides its own version of these
methods, will incorporate your implementations automatically -- by
obtaining them with :func:`super` and calling them (*within* the scope
of any contexts that have been attached to your parameters with
:meth:`param.context` or :meth:`paramseq.context`).
However, if you need to create a subclass of one of the test classes
generated by :func:`expand` applied to a class decorated with
:func:`foreach` -- you need to obey the following rules:
* you shall not apply :func:`foreach` to that subclass or any class
that inherits from it (though you can still apply :func:`foreach` to
methods of the subclass);
* when extending :meth:`setUp` and/or :meth:`tearDown` methods:
* in :meth:`setUp`, calling :meth:`setUp` of the superclass should be
the first action;
* in :meth:`tearDown`, calling :meth:`tearDown` of the superclass
should be the last action -- and you shall ensure (by using a
``finally`` clause) that this action is *always* executed.
For example:
>>> # the SillyTest__<no_error> class from the previous code snippet
>>> base = into_dict['SillyTest__<no_error>']
>>>
>>> class SillyTestSubclass(base):
...
... def setUp(self):
... debug.append('*** before everything ***')
... # <- at this point no contexts are active (and there are
... # no self.params, self.label, self.context_targets etc.)
... super(SillyTestSubclass, self).setUp()
... # *HERE* is the place for your extension's implementation
... debug.append('*** SillyTestSubclass.setUp ***')
... assert hasattr(self, 'params')
... assert hasattr(self, 'label')
... assert hasattr(self, 'context_targets')
...
... def tearDown(self):
... try:
... # *HERE* is the place for your extension's implementation
... debug.append('*** SillyTestSubclass.tearDown ***')
... finally:
... super(SillyTestSubclass, self).tearDown()
... # <- at this point no contexts are active
... debug.append('*** after everything ***')
...
>>> debug = []
>>> run_tests(SillyTestSubclass) # doctest: +ELLIPSIS
test (...SillyTestSubclass) ... ok
...Ran 1 test...
OK
>>> debug == [
... '*** before everything ***',
... 'enter:outer',
... 'enter:inner',
... 'setUp',
... '*** SillyTestSubclass.setUp ***',
... 'test',
... '*** SillyTestSubclass.tearDown ***',
... 'tearDown',
... 'exit:inner',
... 'exit:outer',
... '*** after everything ***',
... ]
True
.. _contexts-cannot-suppress-exceptions:
Contexts cannot suppress exceptions unless you enable that explicitly
=====================================================================
The Python *context manager* protocol provides a way to suppress an
exception occurring in the code enclosed by a context: the exception is
*suppressed* (*not* propagated) if the context manager's
:meth:`__exit__` method returns a *true* value (such as :obj:`True`).
It does **not** apply to context managers declared with
:meth:`param.context` or :meth:`paramseq.context`: if :meth:`__exit__`
of such a context manager returns a *true* value it is ignored and the
exception (if any) is propagated anyway. The rationale of this
behavior is that suppressing exceptions is generally not a good idea
when dealing with testing (it could easily make your tests leaky and
useless).
However, if you **really** need to allow your context manager to
suppress exceptions, pass the keyword argument
``__enable_exc_suppress__=True`` to the :meth:`param.context` or
:meth:`paramseq.context` method (and, of course, make the
:meth:`__exit__` context manager's method return a *true* value).
>>> class SillySuppressingCM(object):
... def __enter__(self): return self
... def __exit__(self, exc_type, exc_val, exc_tb):
... if exc_type is not None:
... debug.append('suppressing {0}'.format(exc_type.__name__))
... return True # suppress any exception
...
>>> @expand
... class SillyExcTest(unittest.TestCase):
...
... @foreach(
... param(test_error=AssertionError)
... .context(SillySuppressingCM, __enable_exc_suppress__=True),
... param(test_error=KeyError)
... .context(SillySuppressingCM, __enable_exc_suppress__=True),
... )
... def test_it(self, test_error):
... debug.append('raising {0}'.format(test_error.__name__))
... raise test_error('ha!')
...
>>> debug = []
>>> run_tests(SillyExcTest) # doctest: +ELLIPSIS
test_it__... ok
test_it__... ok
...Ran 2 tests...
OK
>>> debug == [
... 'raising AssertionError',
... 'suppressing AssertionError',
... 'raising KeyError',
... 'suppressing KeyError',
... ]
True
Another example:
>>> class ErrorCM(object):
... def __init__(self, error): self.error = error
... def __enter__(self): return self
... def __exit__(self, exc_type, exc_val, exc_tb):
... if exc_type is not None:
... debug.append('replacing {0} with {1}'.format(
... exc_type.__name__, self.error.__name__))
... else:
... debug.append('raising {0}'.format(self.error.__name__))
... raise self.error('argh!')
...
>>> into_dict = {}
>>> @expand(into=into_dict)
... @foreach([
... param(setup_error=OSError)
... .context(SillySuppressingCM, __enable_exc_suppress__=True),
... param(setup_error=OSError)
... .context(SillySuppressingCM, __enable_exc_suppress__=True)
... .context(ErrorCM, error=TypeError),
... param(setup_error=None),
... ])
... class AnotherSillyExcTest(unittest.TestCase):
...
... def setUp(self):
... if self.setup_error is not None:
... debug.append('raising {0}'.format(self.setup_error.__name__))
... raise self.setup_error('ooops!')
...
... @foreach([
... param(test_error=AssertionError)
... .context(SillySuppressingCM, __enable_exc_suppress__=True),
... param(test_error=KeyError)
... .context(SillySuppressingCM, __enable_exc_suppress__=True)
... .context(ErrorCM, error=RuntimeError),
... ])
... def test_it(self, test_error):
... debug.append('raising {0}'.format(test_error.__name__))
... raise test_error('ha!')
...
>>> debug = []
>>> test_classes = [into_dict[name] for name in sorted(into_dict)]
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test_it__... ok
test_it__... ok
test_it__... ok
test_it__... ok
test_it__... ok
test_it__... ok
...Ran 6 tests...
OK
>>> debug == [
... 'raising OSError',
... 'suppressing OSError',
... 'raising AssertionError',
... 'suppressing AssertionError',
...
... 'raising OSError',
... 'suppressing OSError',
... 'raising KeyError',
... 'replacing KeyError with RuntimeError',
... 'suppressing RuntimeError',
...
... 'raising OSError',
... 'replacing OSError with TypeError',
... 'suppressing TypeError',
... 'raising AssertionError',
... 'suppressing AssertionError',
...
... 'raising OSError',
... 'replacing OSError with TypeError',
... 'suppressing TypeError',
... 'raising KeyError',
... 'replacing KeyError with RuntimeError',
... 'suppressing RuntimeError',
...
... 'raising AssertionError',
... 'suppressing AssertionError',
...
... 'raising KeyError',
... 'replacing KeyError with RuntimeError',
... 'suppressing RuntimeError',
... ]
True
Normally -- without ``__enable_exc_suppress__=True`` -- exceptions
*are* propagated even when :meth:`__exit__` returns a *true* value:
>>> into_dict = {}
>>> @expand(into=into_dict)
... @foreach([
... param(setup_error=OSError)
... .context(SillySuppressingCM),
... param(setup_error=OSError)
... .context(SillySuppressingCM)
... .context(ErrorCM, error=TypeError),
... param(setup_error=None),
... ])
... class AnotherSillyExcTest2(unittest.TestCase):
...
... def setUp(self):
... if self.setup_error is not None:
... raise self.setup_error('ooops!')
...
... @foreach([
... param(test_error=AssertionError)
... .context(SillySuppressingCM),
... param(test_error=KeyError)
... .context(SillySuppressingCM)
... .context(ErrorCM, error=RuntimeError),
... ])
... def test_it(self, test_error):
... raise test_error('ha!')
...
>>> test_classes = [into_dict[name] for name in sorted(into_dict)]
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test_it__... ERROR
test_it__... ERROR
test_it__... ERROR
test_it__... ERROR
test_it__... FAIL
test_it__... ERROR
...Ran 6 tests...
FAILED (failures=1, errors=5)
It is worth emphasizing that ``__enable_exc_suppress__=True`` changes
nothing when context manager's :meth:`__exit__` returns a false value:
>>> into_dict = {}
>>> @expand(into=into_dict)
... @foreach([
... param(setup_error=OSError)
... .context(SillySuppressingCM),
... param(setup_error=OSError)
... .context(SillySuppressingCM)
... .context(ErrorCM, error=TypeError,
... __enable_exc_suppress__=True),
... param(setup_error=None),
... ])
... class AnotherSillyExcTest3(unittest.TestCase):
...
... def setUp(self):
... if self.setup_error is not None:
... raise self.setup_error('ooops!')
...
... @foreach([
... param(test_error=AssertionError)
... .context(SillySuppressingCM),
... param(test_error=KeyError)
... .context(SillySuppressingCM)
... .context(ErrorCM, error=RuntimeError,
... __enable_exc_suppress__=True),
... ])
... def test_it(self, test_error):
... raise test_error('ha!')
...
>>> test_classes = [into_dict[name] for name in sorted(into_dict)]
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test_it__... ERROR
test_it__... ERROR
test_it__... ERROR
test_it__... ERROR
test_it__... FAIL
test_it__... ERROR
...Ran 6 tests...
FAILED (failures=1, errors=5)
.. _about-substitute:
:class:`Substitute` objects
===========================
One could ask: "What does the :func:`expand` decorator do with the
original objects (classes or methods) decorated with :func:`foreach`?"
>>> @expand
... @foreach(useless_data)
... class DummyTest(unittest.TestCase):
...
... @foreach(1, 2)
... def test_it(self, x):
... pass
...
... attr = [42]
... test_it.attr = [43, 44]
They cannot be left where they are because, without parameterization,
they are not valid tests (but rather kind of test templates). For that
reason, they are always replaced (by the :func:`expand`'s machinery)
with :class:`Substitute` instances:
>>> DummyTest # doctest: +ELLIPSIS
<...Substitute object at 0x...>
>>> DummyTest.actual_object # doctest: +ELLIPSIS
<class '...DummyTest'>
>>> DummyTest.attr
[42]
>>> DummyTest.attr is DummyTest.actual_object.attr
True
>>> (set(dir(DummyTest.actual_object)) - {'__call__'}
... ).issubset(dir(DummyTest))
True
>>> test_it = DummyTest.test_it
>>> test_it # doctest: +ELLIPSIS
<...Substitute object at 0x...>
>>> test_it.actual_object # doctest: +ELLIPSIS
<...test_it...>
>>> test_it.attr
[43, 44]
>>> test_it.attr is test_it.actual_object.attr
True
>>> (set(dir(test_it.actual_object)) - {'__call__'}
... ).issubset(dir(test_it))
True
As you see, such a :class:`Substitute` instance is kind of a
non-callable proxy to the original class or method (preventing it from
being included by test loaders but still keeping it available, e.g. for
introspection).
.. _custom-name-formatting:
Custom method/class name formatting
===================================
If you don't like how parameterized method/class names are formatted --
you can customize that globally by:
* setting :attr:`expand.global_name_pattern` to a :meth:`str.format`-like
formattable pattern containing zero or more of the following format
fields:
* ``{base_name}`` -- the name of the original test method or class,
* ``{base_obj}`` -- the original test method or class,
* ``{label}`` -- the test label (automatically generated or
explicitly specified with :meth:`param.label`),
* ``{count}`` -- consecutive number of a generated parameterized
method or class;
(in future versions of the library other format fields may be added)
and/or
* setting :attr:`expand.global_name_formatter` to an instance of a
  custom subclass of the :class:`string.Formatter` class from the
  Python standard library (or to any object whose :meth:`format`
  method acts similarly to :meth:`string.Formatter.format`).
For example:
>>> expand.global_name_pattern = '{base_name}__parameterized_{count}'
>>>
>>> into_dict = {}
>>>
>>> @expand(into=into_dict)
... @foreach(params_with_contexts)
... @foreach(useless_data)
... class TestSaveLoad(unittest.TestCase):
...
... def setUp(self):
... self.file = self.context_targets[0]
...
... @foreach(param(suffix=' '), param(suffix='XX'))
... def test_save_load(self, suffix):
... self.file.write(self.save + suffix)
... self.file.seek(0)
... load_actually = self.file.read()
... self.assertEqual(load_actually, self.load + suffix)
...
>>> for name in sorted(into_dict): # doctest: +ELLIPSIS
... name
...
'TestSaveLoad__parameterized_1'
'TestSaveLoad__parameterized_2'
'TestSaveLoad__parameterized_3'
'TestSaveLoad__parameterized_4'
>>>
>>> test_classes = [into_dict[name] for name in sorted(into_dict)]
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test_save_load__parameterized_1 (...TestSaveLoad__parameterized_1) ... ok
test_save_load__parameterized_2 (...TestSaveLoad__parameterized_1) ... ok
test_save_load__parameterized_1 (...TestSaveLoad__parameterized_2) ... ok
test_save_load__parameterized_2 (...TestSaveLoad__parameterized_2) ... ok
test_save_load__parameterized_1 (...TestSaveLoad__parameterized_3) ... ok
test_save_load__parameterized_2 (...TestSaveLoad__parameterized_3) ... ok
test_save_load__parameterized_1 (...TestSaveLoad__parameterized_4) ... ok
test_save_load__parameterized_2 (...TestSaveLoad__parameterized_4) ... ok
...Ran 8 tests...
OK
...or, let's say:
>>> import string
>>> class SillyFormatter(string.Formatter):
... def format(self, format_string, *args, **kwargs):
... label = kwargs['label']
... if '42' in label:
... return '!{0}!'.format(label)
... else:
... result = super(SillyFormatter,
... self).format(format_string, *args, **kwargs)
... if isinstance(kwargs['base_obj'], type):
... result = result.replace('_', '^')
... return result
...
>>> expand.global_name_formatter = SillyFormatter()
>>>
>>> into_dict = {}
>>>
>>> @expand(into=into_dict)
... @foreach(params_with_contexts)
... @foreach(*useless_data)
... class TestSaveLoad(unittest.TestCase):
...
... def setUp(self):
... self.file = self.context_targets[0]
...
... @foreach([param(suffix=' '), param(suffix='XX')])
... def test_save_load(self, suffix):
... self.file.write(self.save + suffix)
... self.file.seek(0)
... load_actually = self.file.read()
... self.assertEqual(load_actually, self.load + suffix)
...
>>> for name in sorted(into_dict): # doctest: +ELLIPSIS
... name
...
"!'foo',b=42, load='',save=''!"
"!'foo',b=42, load='abc',save='abc'!"
'TestSaveLoad^^parameterized^3'
'TestSaveLoad^^parameterized^4'
>>>
>>> test_classes = [into_dict[name] for name in sorted(into_dict)]
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test_save_load__parameterized_1 (...!'foo',b=42, load='',save=''!) ... ok
test_save_load__parameterized_2 (...!'foo',b=42, load='',save=''!) ... ok
test_save_load__parameterized_1 (...!'foo',b=42, load='abc',save='abc'!) ... ok
test_save_load__parameterized_2 (...!'foo',b=42, load='abc',save='abc'!) ... ok
test_save_load__parameterized_1 (...TestSaveLoad^^parameterized^3) ... ok
test_save_load__parameterized_2 (...TestSaveLoad^^parameterized^3) ... ok
test_save_load__parameterized_1 (...TestSaveLoad^^parameterized^4) ... ok
test_save_load__parameterized_2 (...TestSaveLoad^^parameterized^4) ... ok
...Ran 8 tests...
OK
Set those attributes to :obj:`None` to restore the default behavior:
>>> expand.global_name_pattern = None
>>> expand.global_name_formatter = None
.. _avoiding-name-clashes:
Name clashes avoided automatically
==================================
:func:`expand` tries to avoid name clashes: when it detects a clash it
adds a suffix to a newly formatted name, e.g.:
>>> def setting_attrs(attr_dict):
... def deco(cls):
... for k, v in attr_dict.items():
... setattr(cls, k, v)
... return cls
... return deco
...
>>> into_dict = {
... "Test_is_even__<'foo',b=42>": ('spam', 'spam', 'spam'),
... }
>>> extra_attrs = {
... 'test_even__<4>': 'something',
... 'test_even__<4>__2': None,
... }
>>>
>>> @expand(into=into_dict)
... @foreach(useless_data)
... @setting_attrs(extra_attrs)
... class Test_is_even(unittest.TestCase):
...
... @foreach(
... 0,
... 4,
... 0, # <- repeated parameter value
... 0, # <- repeated parameter value
... -16,
... 0, # <- repeated parameter value
... )
... def test_even(self, n):
... self.assertTrue(is_even(n))
...
>>> for name, obj in sorted(into_dict.items()): # doctest: +ELLIPSIS
... if obj != ('spam', 'spam', 'spam'):
... name
...
"Test_is_even__<'foo',b=42>__2"
"Test_is_even__<'foo',b=433>"
>>>
>>> test_classes = [into_dict[name] for name, obj in sorted(into_dict.items())
... if obj != ('spam', 'spam', 'spam')]
...
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test_even__<-16> (...Test_is_even__<'foo',b=42>__2) ... ok
test_even__<0> (...Test_is_even__<'foo',b=42>__2) ... ok
test_even__<0>__2 (...Test_is_even__<'foo',b=42>__2) ... ok
test_even__<0>__3 (...Test_is_even__<'foo',b=42>__2) ... ok
test_even__<0>__4 (...Test_is_even__<'foo',b=42>__2) ... ok
test_even__<4>__3 (...Test_is_even__<'foo',b=42>__2) ... ok
test_even__<-16> (...Test_is_even__<'foo',b=433>) ... ok
test_even__<0> (...Test_is_even__<'foo',b=433>) ... ok
test_even__<0>__2 (...Test_is_even__<'foo',b=433>) ... ok
test_even__<0>__3 (...Test_is_even__<'foo',b=433>) ... ok
test_even__<0>__4 (...Test_is_even__<'foo',b=433>) ... ok
test_even__<4>__3 (...Test_is_even__<'foo',b=433>) ... ok
...Ran 12 tests...
OK
Questions and answers about various details...
==============================================
"Can I omit :func:`expand` and then apply it to subclasses?"
------------------------------------------------------------
Yes, you can. Please consider the following example:
>>> debug = []
>>> into_dict = {}
>>>
>>> # see earlier definition of debug_cm()...
>>> class_params = paramseq(1, 2, 3).context(debug_cm, tag='C')
>>> method_params = paramseq(7, 8, 9).context(debug_cm, tag='M')
>>>
>>> @foreach(class_params)
... class MyTestMixIn(object):
...
... @foreach(method_params)
... def test(self, y):
... [x] = self.params
... debug.append((x, y, self.z))
...
>>> @expand(into=into_dict)
... class TestActual(MyTestMixIn, unittest.TestCase):
... z = 42
...
>>> for name in sorted(into_dict):
... name
...
'TestActual__<1>'
'TestActual__<2>'
'TestActual__<3>'
>>>
>>> test_classes = [into_dict[name] for name in sorted(into_dict)]
>>> run_tests(*test_classes) # doctest: +ELLIPSIS
test__<7> (...TestActual__<1>) ... ok
test__<8> (...TestActual__<1>) ... ok
test__<9> (...TestActual__<1>) ... ok
test__<7> (...TestActual__<2>) ... ok
test__<8> (...TestActual__<2>) ... ok
test__<9> (...TestActual__<2>) ... ok
test__<7> (...TestActual__<3>) ... ok
test__<8> (...TestActual__<3>) ... ok
test__<9> (...TestActual__<3>) ... ok
...Ran 9 tests...
OK
>>> type(MyTestMixIn) is type # MyTestMixIn not touched by @expand
True
>>> type(TestActual) is Substitute
True
>>> debug == [
... 'enter:C', 'enter:M', (1, 7, 42), 'exit:M', 'exit:C',
... 'enter:C', 'enter:M', (1, 8, 42), 'exit:M', 'exit:C',
... 'enter:C', 'enter:M', (1, 9, 42), 'exit:M', 'exit:C',
... 'enter:C', 'enter:M', (2, 7, 42), 'exit:M', 'exit:C',
... 'enter:C', 'enter:M', (2, 8, 42), 'exit:M', 'exit:C',
... 'enter:C', 'enter:M', (2, 9, 42), 'exit:M', 'exit:C',
... 'enter:C', 'enter:M', (3, 7, 42), 'exit:M', 'exit:C',
... 'enter:C', 'enter:M', (3, 8, 42), 'exit:M', 'exit:C',
... 'enter:C', 'enter:M', (3, 9, 42), 'exit:M', 'exit:C',
... ]
True
Note, however, that you probably should name such mix-in or "test
template" base classes in a way that will prevent the test loader you
use from including them; for the same reason, most often, it is
probably better not to make them subclasses of
:class:`unittest.TestCase`.
"Can I :func:`expand` a subclass of an already :func:`expand`-ed class?"
------------------------------------------------------------------------
As long as you do *not* apply :func:`foreach` to test classes (but
only to test methods) -- *yes, you can* (in past *unittest_expander*
versions it was broken but now it works perfectly):
>>> debug = []
>>> into_dict = {}
>>> parameters = paramseq(
... 1, 2, 3,
... ).context(debug_cm) # see earlier definition of debug_cm()...
>>>
>>> @expand
... class Test(unittest.TestCase):
...
... @foreach(parameters)
... def test(self, n):
... debug.append(n)
...
>>> @expand
... class TestSubclass(Test):
...
... @foreach(parameters)
... def test_another(self, n):
... debug.append(n)
...
>>> run_tests(TestSubclass) # doctest: +ELLIPSIS
test__<1> (...TestSubclass) ... ok
test__<2> (...TestSubclass) ... ok
test__<3> (...TestSubclass) ... ok
test_another__<1> (...TestSubclass) ... ok
test_another__<2> (...TestSubclass) ... ok
test_another__<3> (...TestSubclass) ... ok
...Ran 6 tests...
OK
>>> type(TestSubclass.test) is Substitute
True
>>> type(TestSubclass.test_another) is Substitute
True
But things get complicated when you apply :func:`foreach` to classes. For
such cases the answer is: *do not try this at home*.
As it was said earlier, the parts of *unittest_expander* related to
applying :func:`foreach` to classes are broken by design and will be
revamped (in a backwards incompatible way), or even completely
removed, in future versions of *unittest_expander*.
"Do my test classes need to inherit from :class:`unittest.TestCase`?"
---------------------------------------------------------------------
No, it doesn't matter from the point of view of the
*unittest_expander* machinery.
>>> debug = []
>>> into_dict = {}
>>> parameters = paramseq(
... 1, 2, 3,
... ).context(debug_cm) # see earlier definition of debug_cm()...
>>>
>>> @expand
... class Test(object): # not a unittest.TestCase subclass
...
... @foreach(parameters)
... def test(self, n):
... debug.append(n)
...
>>> # confirming that unittest_expander machinery acted properly:
>>> instance = Test()
>>> type(instance.test) is Substitute
True
>>> t1 = getattr(instance, 'test__<1>')
>>> t2 = getattr(instance, 'test__<2>')
>>> t3 = getattr(instance, 'test__<3>')
>>> t1()
>>> t2()
>>> t3()
>>> debug == [
... 'enter', 1, 'exit',
... 'enter', 2, 'exit',
... 'enter', 3, 'exit',
... ]
True
However, note that if you decorate your test class (and not only its
methods) with :func:`foreach` the test running tools you use are
expected to call :meth:`setUp` and :meth:`tearDown` methods
appropriately -- as *unittest*'s test running machinery does (though
your test class does not need to implement these methods by itself).
.. warning::
This is the description of a deprecated feature.
>>> debug = []
>>> into_dict = {}
>>>
>>> @expand(into=into_dict)
... @foreach(parameters)
... class Test(object): # not a unittest.TestCase subclass
...
... def test(self):
... assert len(self.params) == 1
... n = self.params[0]
... debug.append(n)
...
>>> # confirming that unittest_expander machinery acted properly:
>>> type(Test) is Substitute
True
>>> orig_cls = Test.actual_object
>>> type(orig_cls) is type
True
>>> orig_cls.__bases__ == (object,)
True
>>> orig_cls.__name__ == 'Test'
True
>>> not hasattr(orig_cls, 'setUp') and not hasattr(orig_cls, 'tearDown')
True
>>> cls1 = into_dict['Test__<1>']
>>> cls2 = into_dict['Test__<2>']
>>> cls3 = into_dict['Test__<3>']
>>> issubclass(cls1, orig_cls)
True
>>> issubclass(cls2, orig_cls)
True
>>> issubclass(cls3, orig_cls)
True
>>> hasattr(cls1, 'setUp') and hasattr(cls1, 'tearDown')
True
>>> hasattr(cls2, 'setUp') and hasattr(cls2, 'tearDown')
True
>>> hasattr(cls3, 'setUp') and hasattr(cls3, 'tearDown')
True
>>> instance1 = cls1()
>>> instance2 = cls2()
>>> instance3 = cls3()
>>> for inst in [instance1, instance2, instance3]:
... # doing what any reasonable test runner should do
... inst.setUp()
... try: inst.test()
... finally: inst.tearDown()
...
>>> debug == [
... 'enter', 1, 'exit',
... 'enter', 2, 'exit',
... 'enter', 3, 'exit',
... ]
True
"What happens if I apply :func:`expand` when there's no :func:`foreach`?"
-------------------------------------------------------------------------
Just nothing -- the test works as if :func:`expand` was not applied at
all:
>>> @expand
... class TestIt(unittest.TestCase):
...
... def test(self):
... sys.stdout.write(' [DEBUG: OK] ')
...
>>> run_tests(TestIt) # doctest: +ELLIPSIS
test ... [DEBUG: OK] ok
...Ran 1 test...
OK
>>> into_dict = {}
>>> @expand(into=into_dict)
... class TestIt2(unittest.TestCase):
...
... def test(self):
... sys.stdout.write(' [DEBUG: OK] ')
...
>>> run_tests(TestIt2) # doctest: +ELLIPSIS
test ... [DEBUG: OK] ok
...Ran 1 test...
OK
>>> into_dict
{}
"To what objects can :func:`foreach` be applied?"
-------------------------------------------------
The :func:`foreach` decorator is designed to be applied *only* to:
* functions being members of test (or test mix-in) classes (that is,
functions that define regular test case instance methods);
* test (or test mix-in) classes themselves
(as noted above, the latter is a deprecated feature).
You should *not* apply it to anything else (especially, not to static
or class methods). If you do, the effect is undefined: an error may
occur (immediately or, for example, when applying :func:`expand`) or,
e.g., the whole thing may be just ignored.
.. doctest::
:hide:
Other checks
============
For completeness, let's also check some other usage cases and
error conditions...
>>> isinstance(paramseq(), paramseq)
True
>>> isinstance(paramseq(1, 2), paramseq)
True
>>> isinstance(paramseq(1, two=2), paramseq)
True
>>> isinstance(paramseq([1, 2]), paramseq)
True
>>> isinstance(paramseq({1, 2}), paramseq)
True
>>> isinstance(paramseq(a=3, b=4), paramseq)
True
>>> isinstance(paramseq(paramseq([1, 2])), paramseq)
True
>>> isinstance(paramseq([1, 2]) + {3, 4} + (5, 6), paramseq)
True
>>> isinstance({3, 4} + paramseq([1, 2]) + (5, 6), paramseq)
True
>>> paramseq([1, 2]) + 3 # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...
>>> 3 + paramseq([1, 2]) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...
>>> paramseq('123') # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...not a legal parameter collection...
>>> expand(illegal_arg='spam') # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...unexpected keyword arguments...
>>> @expand
... class Test(unittest.TestCase):
... @foreach(42) # <- single arg that is not a proper param collection
... def test(self):
... pass # doctest: +ELLIPSIS
...
Traceback (most recent call last):
TypeError: ...not a legal parameter collection...
>>> @expand(into=['badtype']) # doctest: +ELLIPSIS
... @foreach(1, 2)
... class Test(unittest.TestCase):
... pass
...
Traceback (most recent call last):
TypeError: ...resolved 'into' argument is not a mutable mapping...
>>> class Some1(object): pass
>>> not_a_method = Some1()
>>> @expand # doctest: +ELLIPSIS
... class Test(unittest.TestCase):
... wrong = foreach([1, 2])(not_a_method)
...
Traceback (most recent call last):
TypeError: ...is not a...
>>> class Some2(object): __dir__ = lambda self: []
>>> not_a_class = Some2()
>>> expand(foreach([1, 2])(not_a_class)
... ) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...is not a class...
>>> debug = []
>>> @expand
... class Test(unittest.TestCase):
... @foreach([
... param(),
... ])
... def test(self, **kwargs):
... # **kwargs means accepting `label` and `context_targets`
... debug.append(sorted(kwargs.keys()))
...
>>> run_tests(Test) # doctest: +ELLIPSIS
test__<> ... ok
...Ran 1 test...
OK
>>> debug == [
... ['context_targets', 'label'],
... ]
True
>>> type(Test.test) is Substitute
True
>>> import sys
>>> no_qn = sys.version_info < (3, 3)
>>> no_qn or getattr(Test, 'test__<>').__qualname__ == 'Test.test__<>'
True
>>> @expand
... class Test(unittest.TestCase):
... @foreach([
... param(),
... ])
... def test(self):
... pass
... test.__qualname__ = 'Test.test_foo'
...
>>> no_qn or getattr(Test, 'test__<>').__qualname__ == '<...>.test__<>'
True
>>> into_dict = {}
>>> @expand(into=into_dict)
... @foreach([42])
... class Test(unittest.TestCase):
... pass
...
>>> no_qn or into_dict['Test__<42>'].__qualname__ == 'Test__<42>'
True
>>> @expand(into=into_dict)
... @foreach(42) # <- single arg that is not a proper param collection
... class Test(unittest.TestCase):
... pass # doctest: +ELLIPSIS
...
Traceback (most recent call last):
TypeError: ...not a legal parameter collection...
>>> @expand
... class Test(unittest.TestCase):
... @foreach([123])
... class TestNested(unittest.TestCase):
... pass
...
>>> issubclass(Test.TestNested, unittest.TestCase)
True
>>> sorted(k for k in vars(Test).keys() if not k.startswith('__'))
['TestNested']
>>> @expand
... class Test(unittest.TestCase):
... into_dict = {}
... @expand(into=into_dict)
... @foreach([123])
... class TestNested(unittest.TestCase):
... pass
...
>>> type(Test.TestNested) is Substitute
True
>>> sorted(k for k in vars(Test).keys() if not k.startswith('__'))
['TestNested', 'into_dict']
>>> sorted(Test.into_dict.keys())
['TestNested__<123>']
>>> into_dict = {}
>>> @expand(into=into_dict)
... @foreach([42])
... class Test(unittest.TestCase):
... def setUp(self):
... sys.stdout.write(' | .context_targets={0!r} | '.format(
... self.context_targets))
... def test(self):
... pass
...
>>> test_cls = into_dict.popitem()[1]
>>> run_tests(test_cls) # doctest: +ELLIPSIS
test... | .context_targets=[] | ok
...Ran 1 test...
OK
"""
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import functools
import inspect
import itertools
import string
import sys
import types
import warnings
# Names exported by `from unittest_expander import *`.
__all__ = (
    'foreach',
    'expand',
    'param',
    'paramseq',
    'Substitute',
)
# True when running under Python 3 (several code paths below differ for Python 2).
_PY3 = sys.version_info[0] >= 3
# Types regarded as "classes" (Python 2 also has old-style classes).
_CLASS_TYPES = (type,) if _PY3 else (type, types.ClassType)
# Types regarded as "strings" (Python 2 also has `unicode`).
_STRING_TYPES = (str,) if _PY3 else (str, unicode)
# Attribute name used to attach `paramseq` objects to @foreach-decorated objects.
_PARAMSEQ_OBJS_ATTR = '__attached_paramseq_objs'
# Optional generic keyword arguments that parametrized tests may accept.
_GENERIC_KWARGS = 'context_targets', 'label'
# Default pattern/formatter used to build names of parametrized tests.
_DEFAULT_PARAMETRIZED_NAME_PATTERN = '{base_name}__<{label}>'
_DEFAULT_PARAMETRIZED_NAME_FORMATTER = string.Formatter()
if _PY3:
    def _get_context_manager_enter_and_exit(cm):
        """Return (enter, exit) callables bound to context manager `cm`."""
        # for similarity with the `with` statement's behaviour:
        # *first* get the __exit__ attribute, *then* the __enter__ attribute
        # (get both from the class and bind to the instance)
        cm_type = type(cm)
        exit = types.MethodType(cm_type.__exit__, cm)
        enter = types.MethodType(cm_type.__enter__, cm)
        return enter, exit
else:
    def _get_context_manager_enter_and_exit(cm):
        """Return (enter, exit) callables bound to context manager `cm`."""
        ### TODO: update this behavior to match modern Python versions...
        # for similarity with the `with` statement's behaviour,
        # *first* get the __exit__ attribute, *then* the __enter__ attribute
        cm_type = type(cm)
        if cm_type is types.InstanceType:
            # (old-style class -> get from the instance)
            exit = cm.__exit__
            enter = cm.__enter__
        else:
            # (new-style class -> get from the class and bind to the instance)
            exit = types.MethodType(cm_type.__exit__.__func__, cm, cm_type)
            enter = types.MethodType(cm_type.__enter__.__func__, cm, cm_type)
        return enter, exit
class _DisabledExcSuppressContextManagerWrapper(object):
    """Wrap a context manager so that it can never suppress exceptions.

    The wrapped manager's ``__enter__``/``__exit__`` are looked up once,
    at construction time, the way the ``with`` statement would look them up.
    """
    def __init__(self, cm):
        (self._do_enter,
         self._do_exit) = _get_context_manager_enter_and_exit(cm)
    def __enter__(self):
        return self._do_enter()
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Call the wrapped __exit__ but deliberately ignore its return
        # value: exceptions are *never* suppressed by this wrapper.
        self._do_exit(exc_type, exc_val, exc_tb)
        return False
class _Context(object):
def __init__(self, context_manager_factory, *args, **kwargs):
self._context_manager_factory = context_manager_factory
self._enable_exc_suppress = kwargs.pop(
'__enable_exc_suppress__', False)
self._args = args
self._kwargs = kwargs
def _make_context_manager(self):
cm = self._context_manager_factory(*self._args, **self._kwargs)
if self._enable_exc_suppress:
return cm
else:
return _DisabledExcSuppressContextManagerWrapper(cm)
class Substitute(object):
    """A transparent proxy standing in for an already-expanded object.

    Attribute access (except for a few special names) is delegated to the
    wrapped object, which is available as the `actual_object` attribute.
    """
    def __init__(self, actual_object):
        self.actual_object = actual_object
    def __getattribute__(self, name):
        if name not in ('actual_object', '__class__', '__call__'):
            # Everything else is delegated to the wrapped object.
            return getattr(self.actual_object, name)
        return super(Substitute, self).__getattribute__(name)
    def __dir__(self):
        delegated = (name for name in dir(self.actual_object)
                     if name not in ('actual_object', '__call__'))
        names = ['actual_object']
        names.extend(delegated)
        return names
class param(object):
    """A single test-parameter record: positional/keyword argument values
    plus optional contexts and label fragments.

    `context()` and `label()` return modified *clones* rather than
    mutating the instance in place.
    """
    def __init__(self, *args, **kwargs):
        # Values to be passed to the parametrized test call.
        self._args = args
        self._kwargs = kwargs
        # _Context instances to be entered around the test call.
        self._context_list = []
        # Label fragments; joined by _get_label().
        self._label_list = []
    def context(self, context_manager_factory, *args, **kwargs):
        """Return a clone with an additional context attached."""
        context = _Context(context_manager_factory, *args, **kwargs)
        return self._clone_adding(context_list=[context])
    def label(self, text):
        """Return a clone with an additional label fragment attached."""
        return self._clone_adding(label_list=[text])
    @classmethod
    def _from_param_item(cls, param_item):
        # Normalize a raw collection item (param / tuple / single value)
        # to a `param` instance.
        if isinstance(param_item, param):
            return param_item
        if isinstance(param_item, tuple):
            return cls(*param_item)
        return cls(param_item)
    @classmethod
    def _combine_instances(cls, param_instances):
        # Merge several param instances (one per parameter source) into a
        # single one, accumulating args/kwargs/contexts/labels.
        new = cls()
        for param_inst in param_instances:
            new = new._clone_adding(
                args=param_inst._args,
                kwargs=param_inst._kwargs,
                context_list=param_inst._context_list,
                # note: calling _get_label() here
                label_list=[param_inst._get_label()])
        return new
    def _clone_adding(self, args=None, kwargs=None,
                      context_list=None, label_list=None):
        # Return a copy of this instance, extended with the given items.
        new = self.__class__(*self._args, **self._kwargs)
        new._context_list.extend(self._context_list)
        new._label_list.extend(self._label_list)
        if args:
            new._args += tuple(args)
        if kwargs:
            # Keyword arguments must not silently overwrite each other.
            conflicting = frozenset(new._kwargs).intersection(kwargs)
            if conflicting:
                raise ValueError(
                    'conflicting keyword arguments: ' +
                    ', '.join(sorted(map(repr, conflicting))))
            new._kwargs.update(kwargs)
        if context_list:
            new._context_list.extend(context_list)
        if label_list:
            new._label_list.extend(label_list)
        return new
    def _get_context_manager_factory(self):
        """Return a factory combining all attached contexts (or None).

        The result is cached on the instance (name-mangled attribute).
        """
        try:
            return self.__cached_cm_factory
        except AttributeError:
            if self._context_list:
                # we need to combine several context managers (from the
                # contexts) but Python 2 does not have contextlib.ExitStack,
                # and contextlib.nested() is deprecated (for good reasons)
                # -- so we will just generate and execute the code:
                src_code = (
                    'import contextlib\n'
                    '@contextlib.contextmanager\n'
                    'def cm_factory():\n'
                    '    context_targets = [None] * len(context_list)\n'
                    '    {0}'
                    'yield context_targets\n'.format(''.join(
                        ('with context_list[{0}]._make_context_manager() '
                         'as context_targets[{0}]:\n{next_indent}'
                         ).format(i, next_indent=((8 + 4 * i) * ' '))
                        for i in range(len(self._context_list)))))
                # Py2+Py3-compatible substitute of exec in a given namespace
                code = compile(src_code, '<string>', 'exec')
                namespace = {'context_list': self._context_list}
                eval(code, namespace)
                self.__cached_cm_factory = namespace['cm_factory']
            else:
                self.__cached_cm_factory = None
            return self.__cached_cm_factory
    def _get_label(self):
        """Return the label: explicit fragments or a repr-based fallback."""
        if self._label_list:
            return ', '.join(label for label in self._label_list)
        else:
            short_repr = self._short_repr
            args_reprs = (short_repr(val) for val in self._args)
            kwargs_reprs = ('{0}={1}'.format(key, short_repr(val))
                            for key, val in sorted(self._kwargs.items()))
            return ','.join(itertools.chain(args_reprs, kwargs_reprs))
    @staticmethod
    def _short_repr(obj, max_len=16):
        # repr() truncated to at most `max_len` characters.
        r = repr(obj)
        if len(r) > max_len:
            r = '<{0}...>'.format(r.lstrip('<')[:max_len-5])
        return r
class paramseq(object):
    """A lazy, composable sequence of test parameters.

    Instances can be combined with ``+`` and extended with shared
    contexts via :meth:`context`.
    """
    def __init__(*self_and_args, **kwargs):
        # `self` is taken positionally -- presumably so that arbitrary
        # keyword arguments (even one named 'self') can act as labeled
        # parameters; TODO confirm.
        self = self_and_args[0]
        args = self_and_args[1:]
        if len(args) == 1 and not kwargs:
            # the sole positional argument is a parameter collection
            # (being a collection of: parameter values, tuples of such
            # values, or `param` instances)
            self._init_with_param_collections(args[0])
        else:
            # each argument is a parameter value, or a tuple of such
            # values, or a `param` instance -- explicitly labeled if
            # the given argument is a keyword one
            self._init_with_param_collections(args, kwargs)
    def __add__(self, other):
        # `paramseq(...) + <collection>` -> combined paramseq.
        if self._is_legal_param_collection(other):
            return self._from_param_collections(self, other)
        return NotImplemented
    def __radd__(self, other):
        # `<collection> + paramseq(...)` -> combined paramseq.
        if self._is_legal_param_collection(other):
            return self._from_param_collections(other, self)
        return NotImplemented
    def context(self, context_manager_factory, *args, **kwargs):
        """Return a clone with an additional context applied to every param."""
        context = _Context(context_manager_factory, *args, **kwargs)
        new = self._from_param_collections(self)
        new._context_list.append(context)
        return new
    @classmethod
    def _from_param_collections(cls, *param_collections):
        # Alternate constructor bypassing __init__'s argument juggling.
        self = cls.__new__(cls)
        self._init_with_param_collections(*param_collections)
        return self
    def _init_with_param_collections(self, *param_collections):
        # Validate and store the given collections (consumed lazily later).
        for param_col in param_collections:
            if not self._is_legal_param_collection(param_col):
                raise TypeError(
                    '{0!r} is not a legal parameter '
                    'collection'.format(param_col))
        self._param_collections = param_collections
        self._context_list = []
    @staticmethod
    def _is_legal_param_collection(obj):
        # Legal: a paramseq, a non-string sequence/set/mapping, or a
        # callable producing an iterable of parameters.
        return (
            isinstance(obj, (
                paramseq,
                collections_abc.Sequence,
                collections_abc.Set,
                collections_abc.Mapping)
            ) and
            not isinstance(obj, _STRING_TYPES)
        ) or callable(obj)
    def _generate_params(self, test_cls):
        # Yield `param` instances with this paramseq's contexts attached.
        for param_inst in self._generate_raw_params(test_cls):
            if self._context_list:
                param_inst = param_inst._clone_adding(
                    context_list=self._context_list)
            yield param_inst
    def _generate_raw_params(self, test_cls):
        # Yield `param` instances from each stored collection, in order.
        for param_col in self._param_collections:
            if isinstance(param_col, paramseq):
                for param_inst in param_col._generate_params(test_cls):
                    yield param_inst
            elif isinstance(param_col, collections_abc.Mapping):
                # Mapping: keys become explicit labels.
                for label, param_item in param_col.items():
                    yield param._from_param_item(param_item).label(label)
            else:
                if callable(param_col):
                    # Callable: call it to obtain the actual iterable.
                    param_col = self._param_collection_callable_to_iterable(
                        param_col,
                        test_cls)
                else:
                    assert isinstance(param_col, (collections_abc.Sequence,
                                                  collections_abc.Set))
                for param_item in param_col:
                    yield param._from_param_item(param_item)
    @staticmethod
    def _param_collection_callable_to_iterable(param_col, test_cls):
        # Prefer calling with the test class; fall back to no arguments.
        try:
            return param_col(test_cls)
        except TypeError:
            return param_col()
# test case *method* or *class* decorator...
def foreach(*args, **kwargs):
    """Attach a parameter collection to a test method (or, deprecated, class).

    The arguments are passed straight to the `paramseq` constructor; the
    resulting object is recorded on the decorated object, to be consumed
    later by `expand()`.
    """
    attached_seq = paramseq(*args, **kwargs)
    def decorator(func_or_cls):
        existing = getattr(func_or_cls, _PARAMSEQ_OBJS_ATTR, None)
        if existing is None:
            existing = []
            setattr(func_or_cls, _PARAMSEQ_OBJS_ATTR, existing)
        assert isinstance(existing, list)
        existing.append(attached_seq)
        if isinstance(func_or_cls, _CLASS_TYPES):
            # Applying @foreach to whole classes is a deprecated feature.
            warnings.warn(
                'decorating test *classes* with @foreach() will not be '
                'supported in future versions of unittest_expander.',
                DeprecationWarning)
        return func_or_cls
    return decorator
# test case *class* decorator...
def expand(test_cls=None, **kwargs):
    """Generate the actual parametrized tests for `test_cls`.

    May be used directly (``@expand``) or with the optional keyword
    argument ``into`` (``@expand(into=...)``); any other keyword
    argument raises TypeError.
    """
    into = kwargs.pop('into', None)
    if kwargs:
        unexpected = ', '.join(sorted(map(repr, kwargs)))
        raise TypeError(
            'expand() got unexpected keyword arguments: ' + unexpected)
    if test_cls is None:
        # Used as `@expand(into=...)` -- return the actual decorator.
        return functools.partial(expand, into=into)
    _expand_test_methods(test_cls)
    return _expand_test_cls(test_cls, into)
def _expand_test_methods(test_cls):
    """Replace @foreach-decorated methods of `test_cls` with Substitute
    placeholders and add the generated parametrized methods."""
    to_substitute, to_add = _get_attrs_to_substitute_and_add(test_cls)
    for attr_name, orig_obj in to_substitute.items():
        setattr(test_cls, attr_name, Substitute(orig_obj))
    for attr_name, generated_func in to_add.items():
        setattr(test_cls, attr_name, generated_func)
def _get_attrs_to_substitute_and_add(test_cls):
    """Return two dicts: {name: original @foreach-decorated attr} and
    {name: generated parametrized function} for `test_cls`."""
    attr_names = dir(test_cls)
    seen_names = set(attr_names)
    to_substitute = {}
    to_add = {}
    for base_name in attr_names:
        obj = getattr(test_cls, base_name, None)
        base_func = _get_base_func(obj)
        if base_func is None:
            continue
        paramseq_objs = _get_paramseq_objs(base_func)
        accepted_generic_kwargs = _get_accepted_generic_kwargs(base_func)
        generated = _generate_parametrized_functions(
            test_cls, paramseq_objs,
            base_name, base_func, seen_names,
            accepted_generic_kwargs)
        for func in generated:
            to_add[func.__name__] = func
        to_substitute[base_name] = obj
    return to_substitute, to_add
def _get_base_func(obj):
    """Return the underlying function of a @foreach-decorated attribute,
    or None if `obj` is not subject to method expansion."""
    if getattr(obj, _PARAMSEQ_OBJS_ATTR, None) is None:
        # Not decorated with @foreach at all.
        return None
    if isinstance(obj, (Substitute, type)):
        # Already expanded, or a (nested) class -- not a method.
        return None
    base_func = _obtain_base_func_from(obj)
    assert inspect.isfunction(base_func)
    return base_func
def _get_paramseq_objs(base_func):
    """Return the list of paramseq objects attached by @foreach."""
    objs = getattr(base_func, _PARAMSEQ_OBJS_ATTR)
    assert isinstance(objs, list)
    return objs
def _get_accepted_generic_kwargs(base_func):
    """Return the subset of generic kwargs that `base_func` can accept."""
    accepted = _obtain_accepted_generic_kwargs_from(base_func)
    # XXX: here additional stuff from `@takes_...()` decorators?
    assert isinstance(accepted, set)
    return accepted
if _PY3:
    def _obtain_base_func_from(obj):
        """Return `obj` itself if it is a plain function; raise otherwise."""
        # no unbound methods in Python 3
        if not isinstance(obj, types.FunctionType):
            raise TypeError('{0!r} is not a function'.format(obj))
        return obj
    def _obtain_accepted_generic_kwargs_from(base_func):
        """Determine which of _GENERIC_KWARGS the function can accept:
        all of them if it takes **kwargs, otherwise only those named
        among its positional or keyword-only parameters."""
        spec = inspect.getfullargspec(base_func)
        accepted_generic_kwargs = set(
            _GENERIC_KWARGS if spec.varkw is not None
            else (kw for kw in _GENERIC_KWARGS
                  if kw in (spec.args + spec.kwonlyargs)))
        return accepted_generic_kwargs
else:
    def _obtain_base_func_from(obj):
        """Python 2: unwrap an unbound method to its underlying function."""
        if not isinstance(obj, types.MethodType):
            raise TypeError('{0!r} is not a method'.format(obj))
        return obj.__func__
    def _obtain_accepted_generic_kwargs_from(base_func):
        # Python 2 has no keyword-only arguments, hence getargspec().
        spec = inspect.getargspec(base_func)
        accepted_generic_kwargs = set(
            _GENERIC_KWARGS if spec.keywords is not None
            else (kw for kw in _GENERIC_KWARGS
                  if kw in spec.args))
        return accepted_generic_kwargs
def _expand_test_cls(base_test_cls, into):
    """Generate parametrized classes for a @foreach-decorated test class.

    NOTE: this must be called directly by `expand()` -- the frame depth
    passed to `_resolve_the_into_arg()` depends on the call stack shape.
    """
    paramseq_objs = getattr(base_test_cls, _PARAMSEQ_OBJS_ATTR, None)
    if paramseq_objs is None:
        # Nothing attached by @foreach -> nothing to expand.
        return base_test_cls
    assert isinstance(paramseq_objs, list)
    if not isinstance(base_test_cls, _CLASS_TYPES):
        raise TypeError('{0!r} is not a class'.format(base_test_cls))
    into = _resolve_the_into_arg(into, globals_frame_depth=3)
    seen_names = set(into.keys())
    seen_names.add(base_test_cls.__name__)
    for cls in _generate_parametrized_classes(
            base_test_cls, paramseq_objs, seen_names):
        into[cls.__name__] = cls
    return Substitute(base_test_cls)
def _resolve_the_into_arg(into, globals_frame_depth):
    """Resolve the `into` argument of expand() to a mutable mapping.

    Resolution cascade (order matters):
    None -> name of the caller's module (via the frame stack);
    a string -> the imported module of that name;
    a module -> its attribute dict;
    anything else must already be a mutable mapping, or TypeError.
    """
    orig_into = into
    if into is None:
        # Default: the module that called expand() (frame-depth sensitive).
        into = sys._getframe(globals_frame_depth).f_globals['__name__']
    if isinstance(into, _STRING_TYPES):
        into = __import__(into, globals(), locals(), ['*'], 0)
    if inspect.ismodule(into):
        into = vars(into)
    if not isinstance(into, collections_abc.MutableMapping):
        raise TypeError(
            "resolved 'into' argument is not a mutable mapping "
            "({0!r} given, resolved to {1!r})".format(orig_into, into))
    return into
def _generate_parametrized_functions(test_cls, paramseq_objs,
                                     base_name, base_func, seen_names,
                                     accepted_generic_kwargs):
    """Yield one generated test function per combined `param` instance."""
    param_instances = _generate_params_from_sources(paramseq_objs, test_cls)
    count = 0
    for param_inst in param_instances:
        count += 1
        yield _make_parametrized_func(base_name, base_func, count, param_inst,
                                      seen_names, accepted_generic_kwargs)
def _generate_parametrized_classes(base_test_cls, paramseq_objs, seen_names):
    """Yield one generated test class per combined `param` instance."""
    param_instances = _generate_params_from_sources(paramseq_objs,
                                                    base_test_cls)
    count = 0
    for param_inst in param_instances:
        count += 1
        yield _make_parametrized_cls(base_test_cls, count,
                                     param_inst, seen_names)
def _generate_params_from_sources(paramseq_objs, test_cls):
    """Yield `param` instances: one per element of the Cartesian product
    of all attached parameter sources."""
    per_source_iterables = [ps._generate_params(test_cls)
                            for ps in paramseq_objs]
    for row in itertools.product(*per_source_iterables):
        yield param._combine_instances(row)
def _make_parametrized_func(base_name, base_func, count, param_inst,
                            seen_names, accepted_generic_kwargs):
    """Build one concrete test function for the given `param` instance."""
    p_args = param_inst._args
    p_kwargs = param_inst._kwargs
    label = param_inst._get_label()
    cm_factory = param_inst._get_context_manager_factory()
    if cm_factory is None:
        # No contexts attached -> plain wrapper injecting the params.
        @functools.wraps(base_func)
        def generated_func(*args, **kwargs):
            args += p_args
            kwargs.update(**p_kwargs)
            if 'context_targets' in accepted_generic_kwargs:
                kwargs.setdefault('context_targets', [])
            if 'label' in accepted_generic_kwargs:
                kwargs.setdefault('label', label)
            return base_func(*args, **kwargs)
    else:
        # Contexts attached -> enter them around the actual call.
        @functools.wraps(base_func)
        def generated_func(*args, **kwargs):
            args += p_args
            kwargs.update(**p_kwargs)
            with cm_factory() as context_targets:
                if 'context_targets' in accepted_generic_kwargs:
                    kwargs.setdefault('context_targets', context_targets)
                if 'label' in accepted_generic_kwargs:
                    kwargs.setdefault('label', label)
                return base_func(*args, **kwargs)
    # functools.wraps copied the paramseq-marker attribute from base_func;
    # remove it so the generated function is not expanded again.
    delattr(generated_func, _PARAMSEQ_OBJS_ATTR)
    generated_func.__name__ = _format_name_for_parametrized(
        base_name, base_func, label, count, seen_names)
    _set_qualname(base_func, generated_func)
    return generated_func
def _make_parametrized_cls(base_test_cls, count, param_inst, seen_names):
    """Build one concrete test class (a subclass of `base_test_cls`)
    for the given `param` instance, wiring the attached contexts into
    setUp()/tearDown()."""
    cm_factory = param_inst._get_context_manager_factory()
    label = param_inst._get_label()
    class generated_test_cls(base_test_cls):
        def setUp(self):
            # Expose the parametrization to the test instance.
            self.label = label
            self.params = param_inst._args
            for name, obj in param_inst._kwargs.items():
                setattr(self, name, obj)
            ready_exit = None
            try:
                if cm_factory is None:
                    self.context_targets = []
                else:
                    cm = cm_factory()
                    enter, exit = _get_context_manager_enter_and_exit(cm)
                    self.context_targets = enter()
                    ready_exit = exit
                # NOTE: `self.__exit` is name-mangled, so it cannot clash
                # with attributes defined by the base class.
                self.__exit = ready_exit
                try:
                    super_setUp = super(generated_test_cls, self).setUp
                except AttributeError:
                    # The base class may define no setUp() at all.
                    r = None
                else:
                    r = super_setUp()
                return r
            except:
                # Roll back the already-entered context, then re-raise
                # (unless the context's __exit__ suppressed the exception).
                suppress_exc = False
                if ready_exit is not None:
                    try:
                        suppress_exc = ready_exit(*sys.exc_info())
                    finally:
                        self.__exit = None
                if not suppress_exc:
                    raise
        def tearDown(self):
            try:
                try:
                    super_tearDown = super(generated_test_cls, self).tearDown
                except AttributeError:
                    # The base class may define no tearDown() at all.
                    r = None
                else:
                    r = super_tearDown()
            except:
                # Base tearDown failed: still exit the context, passing
                # the exception info so it may be suppressed.
                suppress_exc = False
                exit = self.__exit
                if exit is not None:
                    suppress_exc = exit(*sys.exc_info())
                if not suppress_exc:
                    raise
            else:
                # Clean exit of the context on the success path.
                exit = self.__exit
                if exit is not None:
                    exit(None, None, None)
                return r
            finally:
                self.__exit = None
    generated_test_cls.__module__ = base_test_cls.__module__
    generated_test_cls.__name__ = _format_name_for_parametrized(
        base_test_cls.__name__, base_test_cls, label, count, seen_names)
    _set_qualname(base_test_cls, generated_test_cls)
    return generated_test_cls
def _format_name_for_parametrized(base_name, base_obj,
                                  label, count, seen_names):
    """Build a unique name for a parametrized test, recording it in
    `seen_names` (which is mutated)."""
    pattern, formatter = _get_name_pattern_and_formatter()
    stem_name = formatter.format(
        pattern,
        base_name=base_name,
        base_obj=base_obj,
        label=label,
        count=count)
    name = stem_name
    suffix = 2
    while name in seen_names:
        # Disambiguate clashing names with a growing numeric suffix.
        name = '{0}__{1}'.format(stem_name, suffix)
        suffix += 1
    seen_names.add(name)
    return name
def _get_name_pattern_and_formatter():
    """Return (pattern, formatter), honoring the optional global
    overrides `expand.global_name_pattern`/`expand.global_name_formatter`."""
    pattern = getattr(expand, 'global_name_pattern', None)
    formatter = getattr(expand, 'global_name_formatter', None)
    if pattern is None:
        pattern = _DEFAULT_PARAMETRIZED_NAME_PATTERN
    if formatter is None:
        formatter = _DEFAULT_PARAMETRIZED_NAME_FORMATTER
    return pattern, formatter
def _set_qualname(base_obj, target_obj):
# relevant to Python 3
base_qualname = getattr(base_obj, '__qualname__', None)
if base_qualname is not None:
base_name = base_obj.__name__
qualname_prefix = (
base_qualname[:(len(base_qualname) - len(base_name))]
if (base_qualname == base_name or
base_qualname.endswith('.' + base_name))
else '<...>.')
target_obj.__qualname__ = qualname_prefix + target_obj.__name__
| zuo/unittest_expander | unittest_expander.py | Python | mit | 99,220 |
from django.conf import settings
# Point django-image at the dummy backend used by the test suite.
settings.IMAGES_BACKEND = 'django_image.tests.backend.DummyBackend'
| adamcharnock/django-image | django_image/tests/__init__.py | Python | mit | 103 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import MySQLdb

# Open the database connection (host, user, password, database).
# NOTE(review): credentials are hard-coded; consider reading them from
# the environment or a configuration file instead.
db = MySQLdb.connect("localhost", "root", "root", "stockanalyse")
try:
    # Obtain a cursor and run a trivial query to verify connectivity.
    cursor = db.cursor()
    cursor.execute("SELECT VERSION()")
    # fetchone() returns a single result row (a tuple).
    data = cursor.fetchone()
    # Parenthesized print works under both Python 2 and Python 3.
    print("Database version : %s " % data)
finally:
    # Always release the connection, even if the query fails.
    db.close()
| hecomlilong/basic | python/hello.py | Python | bsd-3-clause | 436 |
from ..errors import ErrorFolderNotFound, ErrorInvalidOperation, ErrorNoPublicFolderReplicaAvailable
from ..util import MNS, create_element
from .common import EWSAccountService, folder_ids_element, parse_folder_elem, shape_element
class GetFolder(EWSAccountService):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getfolder-operation"""

    SERVICE_NAME = "GetFolder"
    element_container_name = f"{{{MNS}}}Folders"
    ERRORS_TO_CATCH_IN_RESPONSE = EWSAccountService.ERRORS_TO_CATCH_IN_RESPONSE + (
        ErrorFolderNotFound,
        ErrorNoPublicFolderReplicaAvailable,
        ErrorInvalidOperation,
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A hack to communicate parsing args to _elems_to_objs()
        self.folders = []

    def call(self, folders, additional_fields, shape):
        """Take a folder ID and return the full information for that folder.

        :param folders: a list of Folder objects
        :param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects
        :param shape: The set of attributes to return
        :return: XML elements for the folders, in stable order
        """
        # We can't easily find the correct folder class from the returned
        # XML, so remember the requested Folder instances and pair each
        # result with its request positionally in _elems_to_objs().
        # 'folders' may be a generator and we iterate it twice -> listify.
        self.folders = list(folders)
        elems = self._chunked_get_elements(
            self.get_payload,
            items=self.folders,
            additional_fields=additional_fields,
            shape=shape,
        )
        return self._elems_to_objs(elems)

    def _elems_to_objs(self, elems):
        # Pair each returned element with the Folder it was requested for.
        for folder, elem in zip(self.folders, elems):
            if isinstance(elem, Exception):
                yield elem
            else:
                yield parse_folder_elem(elem=elem, folder=folder, account=self.account)

    def get_payload(self, folders, additional_fields, shape):
        # Build the <m:GetFolder> request: folder shape, then folder IDs.
        shape_elem = shape_element(
            tag="m:FolderShape", shape=shape, additional_fields=additional_fields, version=self.account.version
        )
        ids_elem = folder_ids_element(folders=folders, version=self.account.version)
        payload = create_element(f"m:{self.SERVICE_NAME}")
        payload.append(shape_elem)
        payload.append(ids_elem)
        return payload
| ecederstrand/exchangelib | exchangelib/services/get_folder.py | Python | bsd-2-clause | 2,503 |
from django.contrib import admin
from django.urls import path
from .urls import urlpatterns
# Extend the base URL patterns with the Django admin site (test-only URLconf).
urlpatterns += [
    path('admin/', admin.site.urls),
]
| Bouke/django-two-factor-auth | tests/urls_admin.py | Python | mit | 150 |
from touchworks.logger import Logger
import json
import uuid
import requests
import time
logger = Logger.get_logger(__name__)
class TouchWorksException(Exception):
    """Base exception raised for TouchWorks web-service errors."""
    pass
class TouchWorksErrorMessages(object):
    """Canned error-message texts used when raising TouchWorksException."""
    GET_TOKEN_FAILED_ERROR = 'unable to acquire the token from web service'
    MAGIC_JSON_FAILED = 'magic json api failed'
class SecurityToken(object):
    """A TouchWorks security token plus the time it was acquired."""
    def __init__(self, token, acquired_time=None):
        """
        :param token: the token string; must be non-empty
        :param acquired_time: epoch seconds when the token was obtained;
            defaults to the current time when falsy or omitted
        """
        if not token:
            raise Exception('token can not be empty')
        self.token = token
        self.acquired_time = acquired_time if acquired_time else time.time()
class TouchWorksEndPoints(object):
    """Relative URL paths of the TouchWorks JSON web-service endpoints."""
    GET_TOKEN = 'json/GetToken'
    MAGIC_JSON = 'json/MagicJson'
class TouchWorksMagicConstants(object):
    """Action names and matching result keys for the MagicJson endpoint.

    Each ACTION_* value is sent as the 'Action' field of a MagicJson
    request; the corresponding RESULT_* value is the key under which the
    server returns that action's payload.
    """
    ACTION_SEARCH_PATIENTS = 'SearchPatients'
    RESULT_SEARCH_PATIENTS = 'searchpatientsinfo'
    ACTION_GET_DOCUMENTS = 'GetDocuments'
    RESULT_GET_DOCUMENTS = 'getdocumentsinfo'
    ACTION_GET_SCHEDULE = 'GetSchedule'
    RESULT_GET_SCHEDULE = 'getscheduleinfo'
    ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT = 'GetEncounterListForPatient'
    RESULT_GET_ENCOUNTER_LIST_FOR_PATIENT = 'getencounterlistforpatientinfo'
    ACTION_GET_PATIENT_INFO = 'GetPatient'
    RESULT_GET_PATIENT_INFO = 'getpatientinfo'
    ACTION_GET_ENCOUNTER = 'GetEncounter'
    RESULT_GET_ENCOUNTER = 'getencounterinfo'
    ACTION_SAVE_UNSTRUCTURED_DATA = 'SaveUnstructuredDocument'
    RESULT_SAVE_UNSTRUCTURED_DATA = 'saveunstructureddocument'
    ACTION_GET_DOCUMENT_TYPE = 'GetDocumentType'
    RESULT_GET_DOCUMENT_TYPE = 'getdocumenttypeinfo'
    ACTION_GET_DICTIONARY = 'GetDictionary'
    RESULT_GET_DICTIONARY = 'getdictionaryinfo'
    ACTION_SAVE_NOTE = 'SaveNote'
    RESULT_SAVE_NOTE = 'savenoteinfo'
    ACTION_GET_TASKLIST_BY_VIEW = 'GetTaskListByView'
    RESULT_GET_TASKLISTBY_VIEW = 'gettasklistbyviewinfo'
    ACTION_GET_DELEGATES = 'GetDelegates'
    RESULT_GET_DELEGATES = 'getdelegatesinfo'
    ACTION_GET_TASK_COMMENTS = 'GetTaskComments'
    RESULT_GET_TASK_COMMENTS = 'gettaskcommentsinfo'
    ACTION_SAVE_TASK = 'savetask'
    RESULT_SAVE_TASK = 'savetaskinfo'
    ACTION_SEARCH_TASK_VIEWS = 'SearchTaskViews'
    RESULT_SEARCH_TASK_VIEWS = 'searchtaskviewsinfo'
    ACTION_SAVE_TASK_STATUS = 'SaveTaskStatus'
    RESULT_SAVE_TASK_STATUS = 'savetaskstatusinfo'
    ACTION_GET_TASK = 'GetTask'
    RESULT_GET_TASK = 'gettaskinfo'
    # NOTE(review): 'SaveTaskComent' looks like a typo for 'SaveTaskComment'
    # — confirm against the Unity API before changing; the server may expect
    # this exact spelling.
    ACTION_SAVE_TASK_COMMENT = 'SaveTaskComent'
    RESULT_SAVE_TASK_COMMENT = 'savetaskcommentinfo'
    ACTION_SAVE_MSG_FROM_PAT_PORTAL = 'SaveMsgFromPatPortal'
    RESULT_SAVE_MSG_FROM_PAT_PORTAL = 'savemsgfrompatportalinfo'
    ACTION_GET_TASK_LIST = 'GetTaskList'
    RESULT_GET_TASK_LIST = 'gettasklistinfo'
    ACTION_SET_PATIENT_LOCATION_AND_STATUS = 'SetPatientLocationAndStatus'
    RESULT_SET_PATIENT_LOCATION_AND_STATUS = 'setpatientlocationandstatusinfo'
    ACTION_GET_CLINICAL_SUMMARY = 'GetClinicalSummary'
    RESULT_GET_CLINICAL_SUMMARY = 'getclinicalsummaryinfo'
    ACTION_GET_PATIENT_ACTIVITY = 'GetPatientActivity'
    RESULT_GET_PATIENT_ACTIVITY = 'getpatientactivityinfo'
    # NOTE(review): several ACTION_* values below carry a trailing space
    # ('GetPatientPharmacies ', etc.) — verify whether the server tolerates
    # or requires it before normalizing.
    ACTION_GET_PATIENT_PHARAMCIES = 'GetPatientPharmacies '
    RESULT_GET_PATIENT_PHARAMCIES = 'getpatientpharmaciesinfo'
    ACTION_SET_PATIENT_MEDHX_FLAG = 'SetPatientMedHXFlag '
    RESULT_SET_PATIENT_MEDHX_FLAG = 'setpatientmedhxflaginfo'
    ACTION_GET_CHANGED_PATIENTS = 'GetChangedPatients '
    RESULT_GET_CHANGED_PATIENTS = 'getchangedpatientsinfo'
    ACTION_GET_PATIENT_LOCATIONS = 'GetPatientLocations '
    # NOTE(review): 'getpatienlLocationsinfo' looks misspelled — confirm the
    # exact result key the server returns before correcting.
    RESULT_GET_PATIENT_LOCATIONS = 'getpatienlLocationsinfo'
    ACTION_GET_USER_ID = 'GetUserID '
    RESULT_GET_USER_ID = 'getuseridinfo'
    ACTION_GET_PROVIDER = 'GetProvider'
    RESULT_GET_PROVIDER = 'getproviderinfo'
    ACTION_GET_PROVIDER_INFO = 'GetProviderInfo'
    RESULT_GET_PROVIDER_INFO = 'getproviderinfoinfo'
    ACTION_GET_PROVIDERS = 'GetProviders'
    RESULT_GET_PROVIDERS = 'getprovidersinfo'
    ACTION_GET_USER_PREFERENCES = 'GetUserPreferences'
    RESULT_GET_USER_PREFERENCES = 'getuserpreferencesinfo'
class TouchWorks(object):
TOKEN_DEFAULT_TIMEOUT_IN_SECS = 20 * 60
    def __init__(self, base_url, username,
                 password, app_name, cache_token=True,
                 token_timeout=TOKEN_DEFAULT_TIMEOUT_IN_SECS,
                 app_username=None):
        """
        creates an instance of TouchWorks, connects to the TouchWorks Web Service
        and caches username, password, app_name
        :param base_url: required - root URL of the TouchWorks web service
        :param username: required - web-service account username
        :param password: required - web-service account password
        :param app_name: required - application name registered with TouchWorks
        :param cache_token: optional - when True, reuse the token until it expires
        :param token_timeout: optional - token lifetime in seconds
        :param app_username: optional - EHR user to act as (sent as AppUserID)
        :return:
        """
        if not base_url:
            raise ValueError('base_url can not be null')
        if not username:
            raise ValueError('username can not be null')
        if not password:
            raise ValueError('password can not be null')
        if not app_name:
            raise ValueError('app_name can not be null')
        self._base_url = base_url
        self._app_name = app_name
        self._username = username
        # FIXME: store username, password only if user decided to cache token
        self._password = password
        self._token_timeout = token_timeout
        self._ehr_username = app_username
        self._cache_token = cache_token
        # Acquire an initial token eagerly so construction fails fast on
        # bad credentials.
        self._token = self.get_token(self._app_name, self._username, self._password)
    def get_token(self, appname, username, password):
        """
        get the security token by connecting to TouchWorks API
        :param appname: application name (NOTE(review): currently unused —
            only username/password are sent; confirm whether GetToken needs it)
        :param username: web-service account username
        :param password: web-service account password
        :return: SecurityToken wrapping the UUID string returned by the service
        :raises TouchWorksException: when the service response is empty or
            not a valid UUID
        """
        ext_exception = TouchWorksException(
            TouchWorksErrorMessages.GET_TOKEN_FAILED_ERROR)
        data = {'Username': username,
                'Password': password}
        resp = self._http_request(TouchWorksEndPoints.GET_TOKEN, data)
        try:
            logger.debug('token : %s' % resp)
            if not resp.text:
                raise ext_exception
            try:
                # A valid token is a version-4 UUID string; anything else is
                # treated as a failed login.
                uuid.UUID(resp.text, version=4)
                return SecurityToken(resp.text)
            except ValueError:
                logger.error('response was not valid uuid string. %s' % resp.text)
                raise ext_exception
        except Exception as ex:
            logger.exception(ex)
            raise ext_exception
def _token_valid(self):
"""
checks if the token cached is valid or has expired by comparing
the time token was created with current time
:return: True if token has not expired yet and False is token is empty or
it has expired
"""
if not self._cache_token:
return False
now = time.time()
if now - self._token.acquired_time > self._token_timeout:
logger.debug('token needs to be reset')
return False
return True
def _http_request(self, api, data, headers=None):
"""
internal method for handling request and response
and raising an exception is http return status code is not success
:rtype : response object from requests.post()
"""
if not headers:
headers = {'Content-Type': 'application/json'}
if not self._token_valid:
self._token = self.get_token(self._app_name, self._username, self._password)
response = requests.post(self._base_url + '/' + api, data=json.dumps(data),
headers=headers)
# raise an exception if the status was not 200
logger.debug(json.dumps(data))
logger.debug(response.text)
response.raise_for_status()
return response
def save_note(self, note_text, patient_id,
document_type,
document_status='Unsigned', wrapped_in_rtf='N'):
"""
invokes TouchWorksMagicConstants.ACTION_SAVE_NOTE action
:return: JSON response
"""
allowed_document_status = ['Unsigned', 'Final']
if document_status not in ['Unsigned', 'Final']:
raise ValueError('document_status was invalid. allowed values are %s' %
allowed_document_status)
magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_SAVE_NOTE,
patient_id=patient_id,
parameter1=note_text,
parameter2=document_type,
parameter3=document_status,
parameter4=wrapped_in_rtf)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SAVE_NOTE)
return result
    def search_patients(self, search_criteria,
                        include_picture='N', organization_id=None):
        """
        invokes TouchWorksMagicConstants.ACTION_SEARCH_PATIENTS action
        :param search_criteria: free-text patient search string
        :param include_picture: 'Y'/'N'; coerced to '' when falsy
        :param organization_id: optional organization filter; '' when omitted
        :return: JSON response
        """
        include_picture = include_picture or ''
        organization_id = organization_id or ''
        magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_SEARCH_PATIENTS,
                                 app_name=self._app_name,
                                 token=self._token.token,
                                 parameter1=search_criteria,
                                 parameter2=include_picture,
                                 parameter3=organization_id)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SEARCH_PATIENTS)
        return result
    def get_document_type(self, ehr_username, doc_type):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_DOCUMENT_TYPE action
        :param ehr_username: EHR user to act as (sent as AppUserID)
        :param doc_type: document type to look up
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_DOCUMENT_TYPE,
            app_name=self._app_name,
            user_id=ehr_username,
            token=self._token.token,
            parameter1=doc_type
        )
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_DOCUMENT_TYPE)
        return result
    def get_patient(self, ehr_username, patient_id):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_PATIENT_INFO action
        :param ehr_username: EHR user to act as (sent as AppUserID)
        :param patient_id: patient identifier
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_PATIENT_INFO,
            app_name=self._app_name,
            user_id=ehr_username,
            token=self._token.token,
            patient_id=patient_id
        )
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_PATIENT_INFO)
        return result
    def get_encounter(self, ehr_username, patient_id):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER action
        :param ehr_username: EHR user to act as (sent as AppUserID)
        :param patient_id: patient identifier
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_ENCOUNTER,
            app_name=self._app_name,
            user_id=ehr_username,
            token=self._token.token,
            patient_id=patient_id
        )
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_ENCOUNTER)
        return result
    def get_dictionary(self, dictionary_name):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_DICTIONARY action
        :param dictionary_name: name of the TouchWorks dictionary to fetch
            (e.g. 'Document_Type_DE')
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_DICTIONARY,
            parameter1=dictionary_name,
            app_name=self._app_name,
            token=self._token.token)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_DICTIONARY)
        return result
def find_document_type_by_name(self, entity_name, active='Y',
match_case=True):
"""
search document types by name and active(Y/N) status
:param entity_name: entity name
:return:
"""
all_types = self.get_dictionary('Document_Type_DE')
if match_case:
filtered = filter(
lambda x: x['Active'] == active and x['EntryName'].find(entity_name) >= 0,
all_types)
else:
token = entity_name.lower()
filtered = filter(
lambda x: x['Active'] == active and x['EntryName'].lower().find(token) >= 0,
all_types)
return filtered
    def get_encounter_list_for_patient(self, patient_id):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT action
        :param patient_id: patient identifier
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT,
            app_name=self._app_name,
            token=self._token.token,
            patient_id=patient_id)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_ENCOUNTER_LIST_FOR_PATIENT)
        return result
def save_unstructured_document(self, ehr_username,
patient_id,
encounter_id,
document_content):
"""
invokes TouchWorksMagicConstants.ACTION_SAVE_UNSTRUCTURED_DATA action
:return: JSON response
"""
doc_xml = "<docParams><item name='documentCommand' value='I'/>" + \
"<item name='documentType' value='Chart'/>" + \
"<item name='authorCode' value='ResLet'/>" + \
"<item name='ahsEncounterID' value='@@ENCOUNTERID@@'/>" + \
"<item name='OrganizationID' value=''/>" + \
"<item name='accessionValue' value=''/>" + \
"<item name='appGroup' value='TouchWorks'/></docParams>"
doc_xml = doc_xml.replace("@@ENCOUNTERID@@", str(encounter_id))
print(doc_xml)
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SAVE_UNSTRUCTURED_DATA,
patient_id=patient_id,
user_id=ehr_username,
parameter1=doc_xml,
parameter2=document_content)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SAVE_UNSTRUCTURED_DATA)
return result
    def set_patient_location_and_status(self, patient_id,
                                        encounter_status,
                                        patient_location):
        """
        invokes TouchWorksMagicConstants.ACTION_SET_PATIENT_LOCATION_AND_STATUS action
        :param patient_id: patient identifier
        :param encounter_status - EntryName from the Encounter_Status_DE dictionary.
        The desired entryname can be looked up with the GetDictionary action.
        :param patient_location - EntryName from the Site_Location_DE dictionary.
        The desired entryname can be looked up with the GetDictionary action.
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_SET_PATIENT_LOCATION_AND_STATUS,
            patient_id=patient_id,
            parameter1=encounter_status,
            parameter2=patient_location)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SET_PATIENT_LOCATION_AND_STATUS)
        return result
    def get_clinical_summary(self, patient_id,
                             section,
                             encounter_id_identifer,
                             verbose=''):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_CLINICAL_SUMMARY action
        :param patient_id:
        :param section - if one of the following values is specified, Section indicates
        which section of clinical data to return. If no Section is specified,
        all sections with data are returned. You can specify multiple sections
        using a pipe-delimited list. For example, "Vitals|Results."
            List
            ChiefComplaint
            Vitals
            Activities
            Alerts
            Problems
            Results
            History
            Medications
            Allergies
            Immunizations
            Orders
        :param encounter_id_identifer - identifier for the encounter. Used in conjunction with
        the "ChiefComplaint" when called in Parameter1. EncounterID can be acquired
        with the Unity call GetEncounterList.
        :param verbose: verbosity flag passed through as Parameter3
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_CLINICAL_SUMMARY,
            patient_id=patient_id,
            parameter1=section,
            parameter2=encounter_id_identifer,
            parameter3=verbose)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_CLINICAL_SUMMARY)
        return result
    def get_patient_activity(self, patient_id, since=''):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_PATIENT_ACTIVITY action
        :param patient_id: patient identifier
        :param since: optional datetime; only activity after this point
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_PATIENT_ACTIVITY,
            patient_id=patient_id,
            parameter1=since)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_PATIENT_ACTIVITY)
        return result
    def set_patient_medhx_flag(self, patient_id,
                               medhx_status):
        """
        invokes TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG action
        :param patient_id
        :param medhx_status - Field in EEHR expects U, G, or D. SP defaults to Null and
            errors out if included.
            U=Unknown
            G=Granted
            D=Declined
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG,
            patient_id=patient_id,
            parameter1=medhx_status
        )
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SET_PATIENT_MEDHX_FLAG)
        return result
    def get_changes_patients(self, patient_id,
                             since,
                             clinical_data_only='Y',
                             verbose='Y',
                             quick_scan='Y',
                             which_field='',
                             what_value=''):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_CHANGED_PATIENTS action
        :param patient_id: patient identifier
        :param since: datetime; only changes after this point
        :param clinical_data_only: 'Y'/'N'
        :param verbose: 'Y'/'N'
        :param quick_scan: 'Y'/'N'
        :param which_field: optional field filter
        :param what_value: optional value for which_field
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_CHANGED_PATIENTS,
            patient_id=patient_id,
            parameter1=since,
            parameter2=clinical_data_only,
            parameter3=verbose,
            parameter4=quick_scan,
            parameter5=which_field,
            parameter6=what_value
        )
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_CHANGED_PATIENTS)
        return result
    def get_patients_locations(self, patient_id):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_PATIENT_LOCATIONS action
        :param patient_id: injected into the request's 'User' item.
            NOTE(review): the parameter is named patient_id but is sent as
            'User' — confirm which identifier the API actually expects.
        :return: JSON response
        """
        doc_xml = "<docParams><item name='User' value='@@USER@@'/></docParams>"
        doc_xml = doc_xml.replace("@@USER@@", str(patient_id))
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_PATIENT_LOCATIONS,
            parameter1=doc_xml)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_PATIENT_LOCATIONS)
        return result
    def get_patient_pharmacies(self, patient_id,
                               patients_favorite_only='N'):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_PATIENT_PHARAMCIES action
        :param patient_id: patient identifier
        :param patients_favorite_only: 'Y'/'N'
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_PATIENT_PHARAMCIES,
            patient_id=patient_id,
            parameter1=patients_favorite_only)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_PATIENT_PHARAMCIES)
        return result
    def get_user_id(self):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_USER_ID action
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_USER_ID)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_USER_ID)
        return result
    def get_provider(self, provider_id, provider_username=''):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_PROVIDER action
        :param provider_id: provider identifier
        :param provider_username: optional provider username
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_PROVIDER,
            parameter1=provider_id,
            parameter2=provider_username)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_PROVIDER)
        return result
    def get_provider_info(self, sought_user):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_PROVIDER_INFO action
        :param sought_user: username of the provider being looked up
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_PROVIDER_INFO,
            app_name=self._app_name,
            token=self._token.token,
            parameter1=sought_user)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_PROVIDER_INFO)
        return result
    def get_providers(self, security_filter,
                      name_filter='%',
                      only_providers_flag='Y',
                      internal_external='I',
                      ordering_authority='',
                      real_provider='N'):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_PROVIDERS action
        :param security_filter - This is the EntryCode of the Security_Code_DE dictionary
        for the providers being sought. A list of valid security codes can be obtained from
        GetDictionary on the Security_Code_DE dictionary.
        :param name_filter: SQL-style name pattern ('%' matches all)
        :param only_providers_flag: 'Y'/'N'
        :param internal_external: 'I' internal / 'E' external
        :param ordering_authority
        :param real_provider: 'Y'/'N'
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_PROVIDERS,
            parameter1=security_filter,
            parameter2=name_filter,
            parameter3=only_providers_flag,
            parameter4=internal_external,
            parameter5=ordering_authority,
            parameter6=real_provider)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_PROVIDERS)
        return result
def get_task_list(self, since='', task_types='', task_status=''):
"""
invokes TouchWorksMagicConstants.ACTION_GET_TASK_LIST action
:param since - If given a datetime, retrieves only tasks created (or last modified)
after that date and time. Defaults to 1/1/1900.
:param task_status - Optional list of pipe-delimited task status names.
For example, "Active|In Progress|Complete".
:param task_types - Optional list of pipe-delimited task type names.
For example, "Sign Note|Verify Result|MedRenewal"
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_TASK_LIST,
parameter1=since,
parameter2=task_types,
parameter3=task_status)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_ENCOUNTER_LIST_FOR_PATIENT)
return result
    def save_message_from_pat_portal(self, patient_id,
                                     p_vendor_name,
                                     p_message_id,
                                     p_practice_id,
                                     message,
                                     sent_date,
                                     transaction_type
                                     ):
        """
        invokes TouchWorksMagicConstants.ACTION_SAVE_MSG_FROM_PAT_PORTAL action
        :param patient_id: patient identifier
        :param p_vendor_name: portal vendor name (string, substituted into the msg XML)
        :param p_message_id: portal message id (string)
        :param p_practice_id: portal practice id (string)
        :param message
        :param sent_date
        :param transaction_type - type To register a patient with the portal,
        this should be 'Register Patient Request.'
        Valid types are stored in iHealth_TransCode_DE.
            Approve Online Consultation
            Custom Form Submitted
            Decline Online Consultation
            Deny Patient Registration
            Form Requested
            Health Remiders
            Register Patient
            Register Patient Request
            RenewRx
            Seek Appointment
            Seek Online Consultation
            Send Clinical Document
            Send General Message
            Send Notification Message
            Unregister Patient
        :return: JSON response
        """
        portal_info_xml = '<msg>' + \
                          '<ppvendor value="@@VENDOR@@" />' + \
                          '<ppmsgid value="@@MESSAGEID@@" />' + \
                          '<pppractice value="@@PRACTICE@@" />' + \
                          '</msg>'
        portal_info_xml = portal_info_xml.replace(
            '@@VENDOR@@', p_vendor_name).replace(
            '@@MESSAGEID@@', p_message_id).replace(
            '@@PRACTICE@@', p_practice_id)
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_SAVE_MSG_FROM_PAT_PORTAL,
            patient_id=patient_id,
            parameter1=portal_info_xml,
            parameter2=self._ehr_username,
            parameter3=message,
            parameter4=sent_date,
            parameter5=transaction_type)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SAVE_MSG_FROM_PAT_PORTAL)
        return result
    def save_task_comment(self, task_id, task_comment):
        """
        invokes TouchWorksMagicConstants.ACTION_SAVE_TASK_COMMENT action
        :param task_id: task identifier
        :param task_comment: comment text.
            NOTE(review): sent as Parameter6 (not Parameter2) — presumably
            what the SaveTaskComent action expects; confirm against the API.
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_SAVE_TASK_COMMENT,
            parameter1=task_id,
            parameter6=task_comment)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SAVE_TASK_COMMENT)
        return result
    def get_task(self, patient_id, task_id):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_TASK action
        :param patient_id: patient identifier
        :param task_id: task identifier
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_TASK,
            patient_id=patient_id,
            parameter1=task_id)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_TASK)
        return result
    def save_task_status(self, task_id,
                         task_action,
                         comment,
                         delegate_id=''):
        """
        invokes TouchWorksMagicConstants.ACTION_SAVE_TASK_STATUS action
        :param task_id: task identifier
        :param task_action - Task action, such as Approve, Complete, or Deny.
        :param comment: comment to record with the status change
        :param delegate_id: optional delegate to assign
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_SAVE_TASK_STATUS,
            parameter1=task_id,
            parameter2=task_action,
            parameter3=delegate_id,
            parameter4=comment)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SAVE_TASK_STATUS)
        return result
    def search_task_views(self, user, search_string):
        """
        invokes TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS action
        :param user: username whose task views are searched
        :param search_string: text to search task views for
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS,
            parameter1=user,
            parameter2=search_string)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SEARCH_TASK_VIEWS)
        return result
def save_task(self, patient_id,
task_type,
target_user,
work_object_id,
comments,
subject):
"""
invokes TouchWorksMagicConstants.ACTION_SAVE_TASK action
:param patient_id
:param task_type - EntryMnemonic value from IDX_TASK_ACTION_DE. Dictionary
values can be looked up using the GetDictionary action.
:param target_user - TargetUser Pass in the username of the individual who
will be assigned the task. Typical delegates can be found by calling GetDelegates.
It is also possible to assign a task to a team by passing in 'Team'+the ID
of the corresponding team from the Team_DE dictionary.
The team can be looked up using the GetDictionary action.
If the LoginUser is the same as the TargetUser, the task will be marked as
delegated (and therefore no longer available in GetTask for that LoginUser).
:param work_object_id - The ID of the item to link to the task,
such as the medication or note ID. If not needed, 0 can be passed instead.
:param comments - A comment to set for the task.
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SAVE_TASK,
patient_id=patient_id,
parameter1=task_type,
parameter2=target_user,
parameter3=work_object_id,
parameter4=comments,
parameter5=subject)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_ENCOUNTER_LIST_FOR_PATIENT)
return result
    def get_task_comments(self, patient_id, task_id):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_TASK_COMMENTS action
        :param patient_id: patient identifier
        :param task_id: task identifier
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_TASK_COMMENTS,
            patient_id=patient_id,
            parameter1=task_id)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_TASK_COMMENTS)
        return result
    def get_delegates(self, patient_id):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_DELEGATES action
        :param patient_id: patient identifier
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_DELEGATES,
            app_name=self._app_name,
            token=self._token.token,
            patient_id=patient_id)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_DELEGATES)
        return result
    def get_task_list_by_view(self, patient_id, task_view_id, org_id=''):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_TASKLIST_BY_VIEW action
        :param patient_id: patient identifier
        :param task_view_id: identifier of the task view to load
        :param org_id: optional organization filter
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_TASKLIST_BY_VIEW,
            patient_id=patient_id,
            parameter1=task_view_id,
            parameter2=org_id)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_TASKLISTBY_VIEW)
        return result
    def get_schedule(self, ehr_username, start_date,
                     changed_since, include_pix, other_user='All',
                     end_date='',
                     appointment_types=None, status_filter='All'):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_SCHEDULE action
        :param ehr_username: EHR user to act as (sent as AppUserID)
        :param start_date: required; combined with end_date as 'start|end'
            when end_date is given
        :param changed_since: only items changed after this point ('' for all)
        :param include_pix: include patient pictures flag
        :param other_user: schedule owner filter; defaults to 'All'
        :param end_date: optional end of the date range
        :param appointment_types: optional filter.
            NOTE(review): None is passed through unchanged (serialized as
            JSON null) — confirm the service accepts that.
        :param status_filter: appointment status filter; defaults to 'All'
        :return: JSON response
        """
        if not start_date:
            raise ValueError('start_date can not be null')
        if end_date:
            start_date = '%s|%s' % (start_date, end_date)
        if not changed_since:
            changed_since = ''
        magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_GET_SCHEDULE,
                                 app_name=self._app_name,
                                 user_id=ehr_username, token=self._token.token,
                                 parameter1=start_date,
                                 parameter2=changed_since,
                                 parameter3=include_pix,
                                 parameter4=other_user,
                                 parameter5=appointment_types,
                                 parameter6=status_filter)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_SCHEDULE)
        return result
def get_documents(self, ehr_username, patient_id, start_date=None,
end_date=None, document_id=None, doc_type=None,
newest_document='N'):
"""
invokes TouchWorksMagicConstants.ACTION_GET_DOCUMENTS action
:return: JSON response
"""
if not start_date:
start_date = ''
if not end_date:
end_date = ''
if not doc_type:
doc_type = ''
magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_GET_DOCUMENTS,
user_id=ehr_username, token=self._token.token,
patient_id=patient_id,
app_name=self._app_name,
parameter1=start_date,
parameter2=end_date,
parameter3=document_id,
parameter4=doc_type,
parameter5=newest_document)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_DOCUMENTS)
return result
def _magic_json(self, action='', user_id='', app_name='', patient_id='',
token='', parameter1='', parameter2='',
parameter3='', parameter4='', parameter5='',
parameter6='', data=''):
"""
utility method to create a magic json object needed to invoke TouchWorks APIs
:return: magic json
"""
if not token:
token = self._token.token
if not app_name:
app_name = self._app_name
if not user_id:
if self._ehr_username:
user_id = self._ehr_username
return {
'Action': action,
'AppUserID': user_id,
'Appname': app_name,
'PatientID': patient_id,
'Token': token,
'Parameter1': parameter1,
'Parameter2': parameter2,
'Parameter3': parameter3,
'Parameter4': parameter4,
'Parameter5': parameter5,
'Parameter6': parameter6,
'Data': data
}
def _get_results_or_raise_if_magic_invalid(self, magic, response, result_key):
try:
j_response = response.json()
if j_response:
if result_key in j_response[0]:
return j_response[0][result_key]
elif 'Error' in j_response[0]:
if magic and 'Action' in magic:
raise TouchWorksException(
magic['Action'] + ' API failed' + ' : ' +
j_response[0]['Error'])
else:
raise TouchWorksException(
TouchWorksErrorMessages.MAGIC_JSON_FAILED + ' : ' +
j_response[0]['Error'])
raise TouchWorksException(TouchWorksErrorMessages.MAGIC_JSON_FAILED)
except Exception as ex:
logger.exception(ex)
raise TouchWorksException(TouchWorksErrorMessages.MAGIC_JSON_FAILED)
| farshidce/touchworks-python | touchworks/api/http.py | Python | mit | 40,619 |
import os
import sys
import signal
import logging
import pytest
import latus.logger
import test_latus.tstutil
os.environ["PYTHONPATH"] = '.'
g_keep_running = True
def control_key_handler(signum, frame):
    """Signal handler: request a clean shutdown of the pytest retry loop.

    Installed for SIGINT/SIGTSTP; flips the module-level g_keep_running
    flag so the loop in run_pytest_until_error() exits after the current
    iteration. The original parameter was named 'signal', shadowing the
    imported signal module inside the handler; renamed to the conventional
    'signum' (handlers are invoked positionally, so this is safe).
    """
    global g_keep_running
    print('%s : ctrl-c detected - exiting' % __file__)
    g_keep_running = False
def run_pytest_until_error():
    """Run the pytest suite in a loop until a failure occurs or ctrl-c is hit.

    Configures latus logging, installs signal handlers that clear the
    module-level g_keep_running flag, then repeatedly invokes pytest,
    counting passing and failing iterations. A non-zero pytest return code
    stops the loop.
    """
    global g_keep_running
    test_latus.tstutil.set_cloud_config('aws', True)
    g_keep_running = True
    latus.logger.init(os.path.join('temp', __file__), 'log')
    latus.logger.set_console_log_level(logging.INFO)
    print('hit ctrl-c to exit')
    # NOTE(review): SIGTSTP does not exist on Windows — this registration
    # would raise AttributeError there; confirm the intended platforms.
    signal.signal(signal.SIGINT, control_key_handler)
    signal.signal(signal.SIGTSTP, control_key_handler)
    count = {'fail': 0, 'pass': 0}
    # target = os.path.join('test_latus', 'test_delete.py::test_delete')
    while g_keep_running:
        if len(sys.argv) > 1:
            # First CLI argument is forwarded to pytest (e.g. '-s' to see output).
            r = pytest.main(sys.argv[1]) # command line '-s' to see output
        else:
            r = pytest.main()
        if r != 0:
            count['fail'] += 1
            print('pytest got an error - exiting')
            g_keep_running = False
        else:
            count['pass'] += 1
        print('test iteration : %s : return=%s' % (str(count), str(r)))
def main():
    """Entry point: run the pytest loop until a failure or ctrl-c."""
    run_pytest_until_error()
# Allow running this watcher directly as a script.
if __name__ == '__main__':
    main()
| latusrepo/latus | run_pytest_until_error.py | Python | gpl-3.0 | 1,334 |
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
__version__ = 'trunk'
import struct
# Number of bits in an address for each IP version.
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
    """A ValueError raised when the address portion of an IP is invalid."""
class NetmaskValueError(ValueError):
    """A ValueError raised when the netmask portion of an IP is invalid."""
def IPAddress(address, version=None):
    """Take an IP string/int and return an object of the correct type.
    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        version: An Integer, 4 or 6. If set, don't try to automatically
          determine what the IP address type is. important for things
          like IPAddress(1), which could be IPv4, '0.0.0.1', or IPv6,
          '::1'.
    Returns:
        An IPv4Address or IPv6Address object.
    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address.
    """
    # An explicit version short-circuits auto-detection.
    if version == 4:
        return IPv4Address(address)
    if version == 6:
        return IPv6Address(address)
    # Auto-detect: try v4 first, then v6.
    for address_class in (IPv4Address, IPv6Address):
        try:
            return address_class(address)
        except (AddressValueError, NetmaskValueError):
            pass
    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
                     address)
def IPNetwork(address, version=None, strict=False):
    """Take an IP string/int and return an object of the correct type.
    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        version: An Integer, if set, don't try to automatically
          determine what the IP address type is. important for things
          like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
          '::1/128'.
        strict: A boolean; passed through to the network constructor.
    Returns:
        An IPv4Network or IPv6Network object.
    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address. Or if a strict network was requested and a strict
          network wasn't given.
    """
    # An explicit version short-circuits auto-detection.
    if version == 4:
        return IPv4Network(address, strict)
    if version == 6:
        return IPv6Network(address, strict)
    # Auto-detect: try v4 first, then v6.
    for network_class in (IPv4Network, IPv6Network):
        try:
            return network_class(address, strict)
        except (AddressValueError, NetmaskValueError):
            pass
    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
                     address)
def v4_int_to_packed(address):
    """Pack an integer IPv4 address into 4 network-order bytes.
    Args:
        address: An integer representation of an IPv4 IP address.
    Returns:
        The binary representation of this address (a Bytes object).
    Raises:
        ValueError: If the integer is too large to be an IPv4 IP
          address.
    """
    max_value = _BaseV4._ALL_ONES
    if address > max_value:
        raise ValueError('Address too large for IPv4')
    packed = struct.pack('!I', address)
    return Bytes(packed)
def v6_int_to_packed(address):
    """Pack an integer IPv6 address into 16 network-order bytes.
    Args:
        address: An integer representation of an IPv6 IP address.
    Returns:
        The binary representation of this address (a Bytes object).
    """
    # Split the 128-bit value into two 64-bit halves for struct.
    high = address >> 64
    low = address & ((1 << 64) - 1)
    return Bytes(struct.pack('!QQ', high, low))
def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence,
and the index of the last IP address in the sequence.
"""
first = last = addresses[0]
last_index = 0
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
last_index += 1
else:
break
return (first, last, last_index)
def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) % 2:
return i
def summarize_address_range(first, last):
    """Summarize a network range given the first and last IP addresses.
    Example:
        >>> summarize_address_range(IPv4Address('1.1.1.0'),
            IPv4Address('1.1.1.130'))
        [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
        IPv4Network('1.1.1.130/32')]
    Args:
        first: the first IPv4Address or IPv6Address in the range.
        last: the last IPv4Address or IPv6Address in the range.
    Returns:
        The address range collapsed to a list of IPv4Network's or
        IPv6Network's.
    Raise:
        TypeError:
            If the first and last objects are not IP addresses.
            If the first and last objects are not the same version.
        ValueError:
            If the last object is not greater than the first.
            If the version is not 4 or 6.
    """
    if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
        raise TypeError('first and last must be IP addresses, not networks')
    if first.version != last.version:
        raise TypeError("%s and %s are not of the same version" % (
                        str(first), str(last)))
    if first > last:
        raise ValueError('last IP address must be greater than first')
    networks = []
    if first.version == 4:
        ip = IPv4Network
    elif first.version == 6:
        ip = IPv6Network
    else:
        raise ValueError('unknown IP version')
    ip_bits = first._max_prefixlen
    first_int = first._ip
    last_int = last._ip
    # Greedily emit the largest aligned power-of-two block that starts at
    # first_int and does not overshoot last_int, then advance past it.
    while first_int <= last_int:
        nbits = _count_righthand_zero_bits(first_int, ip_bits)
        current = None
        while nbits >= 0:
            # Candidate block covers [first_int, first_int + 2**nbits - 1].
            addend = 2**nbits - 1
            current = first_int + addend
            nbits -= 1
            if current <= last_int:
                break
        prefix = _get_prefix_length(first_int, current, ip_bits)
        net = ip('%s/%d' % (str(first), prefix))
        networks.append(net)
        if current == ip._ALL_ONES:
            # Avoid advancing past the top of the address space.
            break
        first_int = current + 1
        first = IPAddress(first_int, version=first._version)
    return networks
def _collapse_address_list_recursive(addresses):
    """Loops through the addresses, collapsing concurrent netblocks.
    Example:
        ip1 = IPv4Network('1.1.0.0/24')
        ip2 = IPv4Network('1.1.1.0/24')
        ip3 = IPv4Network('1.1.2.0/24')
        ip4 = IPv4Network('1.1.3.0/24')
        ip5 = IPv4Network('1.1.4.0/24')
        ip6 = IPv4Network('1.1.0.1/22')
        _collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
          [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
    This shouldn't be called directly; it is called via
      collapse_address_list([]).
    Args:
        addresses: A list of IPv4Network's or IPv6Network's
    Returns:
        A list of IPv4Network's or IPv6Network's depending on what we were
        passed.
    """
    collapsed = []
    changed = False
    for addr in addresses:
        if not collapsed:
            collapsed.append(addr)
            continue
        tail = collapsed[-1]
        if addr in tail:
            # Already covered by the previous block; drop it.
            changed = True
        elif addr == tail.supernet().subnet()[1]:
            # addr is the sibling half of the previous block; merge the
            # two halves into their common supernet.
            collapsed[-1] = tail.supernet()
            changed = True
        else:
            collapsed.append(addr)
    if changed:
        # A merge may enable further merges; iterate until stable.
        return _collapse_address_list_recursive(collapsed)
    return collapsed
def collapse_address_list(addresses):
    """Collapse a list of IP objects.
    Example:
        collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
          [IPv4('1.1.0.0/23')]
    Args:
        addresses: A list of IPv4Network or IPv6Network objects.
    Returns:
        A list of IPv4Network or IPv6Network objects depending on what we
        were passed.
    Raises:
        TypeError: If passed a list of mixed version objects.
    """
    i = 0
    addrs = []
    ips = []
    nets = []
    # split IP addresses and networks
    for ip in addresses:
        if isinstance(ip, _BaseIP):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            # A single-host network (/32 or /128) is treated as its address.
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip.ip)
        else:
            if nets and nets[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(nets[-1])))
            nets.append(ip)
    # sort and dedup
    ips = sorted(set(ips))
    nets = sorted(set(nets))
    # Turn each run of consecutive addresses into summarized networks.
    while i < len(ips):
        (first, last, last_index) = _find_address_range(ips[i:])
        i += last_index + 1
        addrs.extend(summarize_address_range(first, last))
    return _collapse_address_list_recursive(sorted(
        addrs + nets, key=_BaseNet._get_networks_key))
# backwards compatibility
CollapseAddrList = collapse_address_list
# We need to distinguish between the string and packed-bytes representations
# of an IP address. For example, b'0::1' is the IPv4 address 48.58.58.49,
# while '0::1' is an IPv6 address.
#
# In Python 3, the native 'bytes' type already provides this functionality,
# so we use it directly. For earlier implementations where bytes is not a
# distinct type, we create a subclass of str to serve as a tag.
#
# Usage example (Python 2):
#   ip = ipaddr.IPAddress(ipaddr.Bytes('xxxx'))
#
# Usage example (Python 3):
#   ip = ipaddr.IPAddress(b'xxxx')
try:
    # On Python 3 `bytes` is already distinct from `str`; reuse it directly.
    if bytes is str:
        raise TypeError("bytes is not a distinct type")
    Bytes = bytes
except (NameError, TypeError):
    # Python 2 fallback: a str subclass used purely as a "this is packed
    # binary data" marker for the address constructors.
    class Bytes(str):
        def __repr__(self):
            return 'Bytes(%s)' % str.__repr__(self)
def get_mixed_type_key(obj):
    """Return a key suitable for sorting between networks and addresses.
    Address and Network objects are not sortable by default; they're
    fundamentally different, so the expression
        IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
    doesn't make any sense.  There are some times however, where you may
    wish to have ipaddr sort these for you anyway. If you need to do this,
    you can use this function as the key= argument to sorted().
    Args:
        obj: either a Network or Address object.
    Returns:
        appropriate key.
    """
    if isinstance(obj, _BaseNet):
        return obj._get_networks_key()
    if isinstance(obj, _BaseIP):
        return obj._get_address_key()
    # Neither kind of IP object; let the caller's sort deal with it.
    return NotImplemented
class _IPAddrBase(object):
    """The mother class."""
    # Subclasses are expected to set self._ip (an integer) and to provide
    # _explode_shorthand_ip_string().
    def __index__(self):
        return self._ip
    def __int__(self):
        return self._ip
    def __hex__(self):
        # Python 2 hex() protocol.
        return hex(self._ip)
    @property
    def exploded(self):
        """Return the longhand version of the IP address as a string."""
        return self._explode_shorthand_ip_string()
    @property
    def compressed(self):
        """Return the shorthand version of the IP address as a string."""
        return str(self)
class _BaseIP(_IPAddrBase):
    """A generic IP object.
    This IP class contains the version independent methods which are
    used by single IP addresses.
    """
    def __eq__(self, other):
        try:
            return (self._ip == other._ip
                    and self._version == other._version)
        except AttributeError:
            return NotImplemented
    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return NotImplemented
        return not eq
    def __le__(self, other):
        gt = self.__gt__(other)
        if gt is NotImplemented:
            return NotImplemented
        return not gt
    def __ge__(self, other):
        lt = self.__lt__(other)
        if lt is NotImplemented:
            return NotImplemented
        return not lt
    def __lt__(self, other):
        # NOTE(review): the version check precedes the isinstance check, so
        # comparing against an object without a _version attribute raises
        # AttributeError rather than TypeError.
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseIP):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        if self._ip != other._ip:
            return self._ip < other._ip
        return False
    def __gt__(self, other):
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseIP):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        if self._ip != other._ip:
            return self._ip > other._ip
        return False
    # Shorthand for Integer addition and subtraction. This is not
    # meant to ever support addition/subtraction of addresses.
    def __add__(self, other):
        if not isinstance(other, int):
            return NotImplemented
        return IPAddress(int(self) + other, version=self._version)
    def __sub__(self, other):
        if not isinstance(other, int):
            return NotImplemented
        return IPAddress(int(self) - other, version=self._version)
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))
    def __str__(self):
        return '%s' % self._string_from_ip_int(self._ip)
    def __hash__(self):
        # long() keeps the hash consistent between int and long on Python 2.
        return hash(hex(long(self._ip)))
    def _get_address_key(self):
        # Sort key: version first, then the address itself.
        return (self._version, self)
    @property
    def version(self):
        raise NotImplementedError('BaseIP has no version')
class _BaseNet(_IPAddrBase):
    """A generic IP object.
    This IP class contains the version independent methods which are
    used by networks.
    """
    def __init__(self, address):
        # Per-instance memo for derived addresses (network/broadcast/hostmask).
        self._cache = {}
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, str(self))
    def iterhosts(self):
        """Generate Iterator over usable hosts in a network.
        This is like __iter__ except it doesn't return the network
        or broadcast addresses.
        """
        cur = int(self.network) + 1
        bcast = int(self.broadcast) - 1
        while cur <= bcast:
            cur += 1
            yield IPAddress(cur - 1, version=self._version)
    def __iter__(self):
        cur = int(self.network)
        bcast = int(self.broadcast)
        while cur <= bcast:
            cur += 1
            yield IPAddress(cur - 1, version=self._version)
    def __getitem__(self, n):
        # Positive n indexes from the network address; negative n indexes
        # back from the broadcast address (Python-style negative indexing).
        network = int(self.network)
        broadcast = int(self.broadcast)
        if n >= 0:
            if network + n > broadcast:
                raise IndexError
            return IPAddress(network + n, version=self._version)
        else:
            n += 1
            if broadcast + n < network:
                raise IndexError
            return IPAddress(broadcast + n, version=self._version)
    def __lt__(self, other):
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseNet):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        if self.network != other.network:
            return self.network < other.network
        if self.netmask != other.netmask:
            return self.netmask < other.netmask
        return False
    def __gt__(self, other):
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                str(self), str(other)))
        if not isinstance(other, _BaseNet):
            raise TypeError('%s and %s are not of the same type' % (
                str(self), str(other)))
        if self.network != other.network:
            return self.network > other.network
        if self.netmask != other.netmask:
            return self.netmask > other.netmask
        return False
    def __le__(self, other):
        gt = self.__gt__(other)
        if gt is NotImplemented:
            return NotImplemented
        return not gt
    def __ge__(self, other):
        lt = self.__lt__(other)
        if lt is NotImplemented:
            return NotImplemented
        return not lt
    def __eq__(self, other):
        try:
            return (self._version == other._version
                    and self.network == other.network
                    and int(self.netmask) == int(other.netmask))
        except AttributeError:
            # Allow comparing a single-host network against a bare address.
            # NOTE(review): for unrelated types this falls through and
            # implicitly returns None (falsy) instead of NotImplemented.
            if isinstance(other, _BaseIP):
                return (self._version == other._version
                        and self._ip == other._ip)
    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return NotImplemented
        return not eq
    def __str__(self):
        return '%s/%s' % (str(self.ip),
                          str(self._prefixlen))
    def __hash__(self):
        return hash(int(self.network) ^ int(self.netmask))
    def __contains__(self, other):
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if isinstance(other, _BaseNet):
            return (self.network <= other.network and
                    self.broadcast >= other.broadcast)
        # dealing with another address
        else:
            return (int(self.network) <= int(other._ip) <=
                    int(self.broadcast))
    def overlaps(self, other):
        """Tell if self is partly contained in other."""
        return self.network in other or self.broadcast in other or (
            other.network in self or other.broadcast in self)
    @property
    def network(self):
        # Cached: mask off the host bits of self._ip.
        x = self._cache.get('network')
        if x is None:
            x = IPAddress(self._ip & int(self.netmask), version=self._version)
            self._cache['network'] = x
        return x
    @property
    def broadcast(self):
        # Cached: set all host bits of self._ip.
        x = self._cache.get('broadcast')
        if x is None:
            x = IPAddress(self._ip | int(self.hostmask), version=self._version)
            self._cache['broadcast'] = x
        return x
    @property
    def hostmask(self):
        # Cached: bitwise complement of the netmask.
        x = self._cache.get('hostmask')
        if x is None:
            x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
                          version=self._version)
            self._cache['hostmask'] = x
        return x
    @property
    def with_prefixlen(self):
        return '%s/%d' % (str(self.ip), self._prefixlen)
    @property
    def with_netmask(self):
        return '%s/%s' % (str(self.ip), str(self.netmask))
    @property
    def with_hostmask(self):
        return '%s/%s' % (str(self.ip), str(self.hostmask))
    @property
    def numhosts(self):
        """Number of hosts in the current subnet."""
        return int(self.broadcast) - int(self.network) + 1
    @property
    def version(self):
        raise NotImplementedError('BaseNet has no version')
    @property
    def prefixlen(self):
        return self._prefixlen
    def address_exclude(self, other):
        """Remove an address from a larger block.
        For example:
            addr1 = IPNetwork('10.1.1.0/24')
            addr2 = IPNetwork('10.1.1.0/26')
            addr1.address_exclude(addr2) =
                [IPNetwork('10.1.1.64/26'), IPNetwork('10.1.1.128/25')]
        or IPv6:
            addr1 = IPNetwork('::1/32')
            addr2 = IPNetwork('::1/128')
            addr1.address_exclude(addr2) = [IPNetwork('::0/128'),
                IPNetwork('::2/127'),
                IPNetwork('::4/126'),
                IPNetwork('::8/125'),
                ...
                IPNetwork('0:0:8000::/33')]
        Args:
            other: An IPvXNetwork object of the same type.
        Returns:
            A sorted list of IPvXNetwork objects addresses which is self
            minus other.
        Raises:
            TypeError: If self and other are of difffering address
              versions, or if other is not a network object.
            ValueError: If other is not completely contained by self.
        """
        if not self._version == other._version:
            raise TypeError("%s and %s are not of the same version" % (
                str(self), str(other)))
        if not isinstance(other, _BaseNet):
            raise TypeError("%s is not a network object" % str(other))
        if other not in self:
            raise ValueError('%s not contained in %s' % (str(other),
                                                         str(self)))
        if other == self:
            return []
        ret_addrs = []
        # Make sure we're comparing the network of other.
        other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
                          version=other._version)
        # Repeatedly halve self, descending into the half that contains
        # `other` and keeping the sibling half as part of the result.
        s1, s2 = self.subnet()
        while s1 != other and s2 != other:
            if other in s1:
                ret_addrs.append(s2)
                s1, s2 = s1.subnet()
            elif other in s2:
                ret_addrs.append(s1)
                s1, s2 = s2.subnet()
            else:
                # If we got here, there's a bug somewhere.
                assert True == False, ('Error performing exclusion: '
                                      's1: %s s2: %s other: %s' %
                                      (str(s1), str(s2), str(other)))
        if s1 == other:
            ret_addrs.append(s2)
        elif s2 == other:
            ret_addrs.append(s1)
        else:
            # If we got here, there's a bug somewhere.
            assert True == False, ('Error performing exclusion: '
                                  's1: %s s2: %s other: %s' %
                                  (str(s1), str(s2), str(other)))
        return sorted(ret_addrs, key=_BaseNet._get_networks_key)
    def compare_networks(self, other):
        """Compare two IP objects.
        This is only concerned about the comparison of the integer
        representation of the network addresses.  This means that the
        host bits aren't considered at all in this method.  If you want
        to compare host bits, you can easily enough do a
        'HostA._ip < HostB._ip'
        Args:
            other: An IP object.
        Returns:
            If the IP versions of self and other are the same, returns:
            -1 if self < other:
              eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
              IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
            0 if self == other
              eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
              IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
            1 if self > other
              eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
              IPv6('1080::1:200C:417A/112') >
              IPv6('1080::0:200C:417A/112')
            If the IP versions of self and other are different, returns:
            -1 if self._version < other._version
              eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
            1 if self._version > other._version
              eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
        """
        if self._version < other._version:
            return -1
        if self._version > other._version:
            return 1
        # self._version == other._version below here:
        if self.network < other.network:
            return -1
        if self.network > other.network:
            return 1
        # self.network == other.network below here:
        if self.netmask < other.netmask:
            return -1
        if self.netmask > other.netmask:
            return 1
        # self.network == other.network and self.netmask == other.netmask
        return 0
    def _get_networks_key(self):
        """Network-only key function.
        Returns an object that identifies this address' network and
        netmask. This function is a suitable "key" argument for sorted()
        and list.sort().
        """
        return (self._version, self.network, self.netmask)
    def _ip_int_from_prefix(self, prefixlen):
        """Turn the prefix length into a bitwise netmask.
        Args:
            prefixlen: An integer, the prefix length.
        Returns:
            An integer.
        """
        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
    def _prefix_from_ip_int(self, ip_int):
        """Return prefix length from a bitwise netmask.
        Args:
            ip_int: An integer, the netmask in expanded bitwise format.
        Returns:
            An integer, the prefix length.
        Raises:
            NetmaskValueError: If the input is not a valid netmask.
        """
        # Strip trailing zero bits, then verify the remainder is all ones.
        prefixlen = self._max_prefixlen
        while prefixlen:
            if ip_int & 1:
                break
            ip_int >>= 1
            prefixlen -= 1
        if ip_int == (1 << prefixlen) - 1:
            return prefixlen
        else:
            raise NetmaskValueError('Bit pattern does not match /1*0*/')
    def _prefix_from_prefix_int(self, prefixlen):
        """Validate and return a prefix length integer.
        Args:
            prefixlen: An integer containing the prefix length.
        Returns:
            The input, possibly converted from long to int.
        Raises:
            NetmaskValueError: If the input is not an integer, or out of range.
        """
        if not isinstance(prefixlen, (int, long)):
            raise NetmaskValueError('%r is not an integer' % prefixlen)
        prefixlen = int(prefixlen)
        if not (0 <= prefixlen <= self._max_prefixlen):
            raise NetmaskValueError('%d is not a valid prefix length' %
                                    prefixlen)
        return prefixlen
    def _prefix_from_prefix_string(self, prefixlen_str):
        """Turn a prefix length string into an integer.
        Args:
            prefixlen_str: A decimal string containing the prefix length.
        Returns:
            The prefix length as an integer.
        Raises:
            NetmaskValueError: If the input is malformed or out of range.
        """
        try:
            # Reject signs, whitespace, etc. that int() would accept.
            if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
                raise ValueError
            prefixlen = int(prefixlen_str)
        except ValueError:
            raise NetmaskValueError('%s is not a valid prefix length' %
                                    prefixlen_str)
        return self._prefix_from_prefix_int(prefixlen)
    def _prefix_from_ip_string(self, ip_str):
        """Turn a netmask/hostmask string into a prefix length.
        Args:
            ip_str: A netmask or hostmask, formatted as an IP address.
        Returns:
            The prefix length as an integer.
        Raises:
            NetmaskValueError: If the input is not a netmask or hostmask.
        """
        # Parse the netmask/hostmask like an IP address.
        try:
            ip_int = self._ip_int_from_string(ip_str)
        except AddressValueError:
            raise NetmaskValueError('%s is not a valid netmask' % ip_str)
        # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
        # Note that the two ambiguous cases (all-ones and all-zeroes) are
        # treated as netmasks.
        try:
            return self._prefix_from_ip_int(ip_int)
        except NetmaskValueError:
            pass
        # Invert the bits, and try matching a /0+1+/ hostmask instead.
        ip_int ^= self._ALL_ONES
        try:
            return self._prefix_from_ip_int(ip_int)
        except NetmaskValueError:
            raise NetmaskValueError('%s is not a valid netmask' % ip_str)
    def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
        """The subnets which join to make the current subnet.
        In the case that self contains only one IP
        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
        for IPv6), return a list with just ourself.
        Args:
            prefixlen_diff: An integer, the amount the prefix length
              should be increased by. This should not be set if
              new_prefix is also set.
            new_prefix: The desired new prefix length. This must be a
              larger number (smaller prefix) than the existing prefix.
              This should not be set if prefixlen_diff is also set.
        Returns:
            An iterator of IPv(4|6) objects.
        Raises:
            ValueError: The prefixlen_diff is too small or too large.
                OR
            prefixlen_diff and new_prefix are both set or new_prefix
              is a smaller number than the current prefix (smaller
              number means a larger network)
        """
        if self._prefixlen == self._max_prefixlen:
            # Single-host network: it is its own only subnet.
            yield self
            return
        if new_prefix is not None:
            if new_prefix < self._prefixlen:
                raise ValueError('new prefix must be longer')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = new_prefix - self._prefixlen
        if prefixlen_diff < 0:
            raise ValueError('prefix length diff must be > 0')
        new_prefixlen = self._prefixlen + prefixlen_diff
        if new_prefixlen > self._max_prefixlen:
            raise ValueError(
                'prefix length diff %d is invalid for netblock %s' % (
                    new_prefixlen, str(self)))
        first = IPNetwork('%s/%s' % (str(self.network),
                                     str(self._prefixlen + prefixlen_diff)),
                          version=self._version)
        yield first
        current = first
        # Walk forward one subnet at a time until we hit our own broadcast.
        while True:
            broadcast = current.broadcast
            if broadcast == self.broadcast:
                return
            new_addr = IPAddress(int(broadcast) + 1, version=self._version)
            current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
                                version=self._version)
            yield current
    def masked(self):
        """Return the network object with the host bits masked out."""
        return IPNetwork('%s/%d' % (self.network, self._prefixlen),
                         version=self._version)
    def subnet(self, prefixlen_diff=1, new_prefix=None):
        """Return a list of subnets, rather than an iterator."""
        return list(self.iter_subnets(prefixlen_diff, new_prefix))
    def supernet(self, prefixlen_diff=1, new_prefix=None):
        """The supernet containing the current network.
        Args:
            prefixlen_diff: An integer, the amount the prefix length of
              the network should be decreased by.  For example, given a
              /24 network and a prefixlen_diff of 3, a supernet with a
              /21 netmask is returned.
        Returns:
            An IPv4 network object.
        Raises:
            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a
              negative prefix length.
                OR
            If prefixlen_diff and new_prefix are both set or new_prefix is a
              larger number than the current prefix (larger number means a
              smaller network)
        """
        if self._prefixlen == 0:
            # Already the whole address space; no larger supernet exists.
            return self
        if new_prefix is not None:
            if new_prefix > self._prefixlen:
                raise ValueError('new prefix must be shorter')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = self._prefixlen - new_prefix
        if self.prefixlen - prefixlen_diff < 0:
            raise ValueError(
                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                (self.prefixlen, prefixlen_diff))
        return IPNetwork('%s/%s' % (str(self.network),
                                    str(self.prefixlen - prefixlen_diff)),
                         version=self._version)
    # backwards compatibility
    Subnet = subnet
    Supernet = supernet
    AddressExclude = address_exclude
    CompareNetworks = compare_networks
    Contains = __contains__
class _BaseV4(object):
    """Base IPv4 object.
    The following methods are used by IPv4 objects in both single IP
    addresses and networks.
    """
    # Equivalent to 255.255.255.255 or 32 bits of 1's.
    _ALL_ONES = (2**IPV4LENGTH) - 1
    _DECIMAL_DIGITS = frozenset('0123456789')
    def __init__(self, address):
        self._version = 4
        self._max_prefixlen = IPV4LENGTH
    def _explode_shorthand_ip_string(self):
        # IPv4 has no shorthand notation; the exploded form is just str(self).
        return str(self)
    def _ip_int_from_string(self, ip_str):
        """Turn the given IP string into an integer for comparison.
        Args:
            ip_str: A string, the IP ip_str.
        Returns:
            The IP ip_str as an integer.
        Raises:
            AddressValueError: if ip_str isn't a valid IPv4 Address.
        """
        octets = ip_str.split('.')
        if len(octets) != 4:
            raise AddressValueError(ip_str)
        packed_ip = 0
        for oc in octets:
            try:
                packed_ip = (packed_ip << 8) | self._parse_octet(oc)
            except ValueError:
                raise AddressValueError(ip_str)
        return packed_ip
    def _parse_octet(self, octet_str):
        """Convert a decimal octet into an integer.
        Args:
            octet_str: A string, the number to parse.
        Returns:
            The octet as an integer.
        Raises:
            ValueError: if the octet isn't strictly a decimal from [0..255].
        """
        # Whitelist the characters, since int() allows a lot of bizarre stuff.
        if not self._DECIMAL_DIGITS.issuperset(octet_str):
            raise ValueError
        octet_int = int(octet_str, 10)
        # Disallow leading zeroes, because no clear standard exists on
        # whether these should be interpreted as decimal or octal.
        if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):
            raise ValueError
        return octet_int
    def _string_from_ip_int(self, ip_int):
        """Turns a 32-bit integer into dotted decimal notation.
        Args:
            ip_int: An integer, the IP address.
        Returns:
            The IP address as a string in dotted decimal notation.
        """
        # xrange: this module targets Python 2.
        octets = []
        for _ in xrange(4):
            octets.insert(0, str(ip_int & 0xFF))
            ip_int >>= 8
        return '.'.join(octets)
    @property
    def max_prefixlen(self):
        return self._max_prefixlen
    @property
    def packed(self):
        """The binary representation of this address."""
        return v4_int_to_packed(self._ip)
    @property
    def version(self):
        return self._version
    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.
        Returns:
            A boolean, True if the address is within the
            reserved IPv4 Network range.
        """
        return self in IPv4Network('240.0.0.0/4')
    @property
    def is_private(self):
        """Test if this address is allocated for private networks.
        Returns:
            A boolean, True if the address is reserved per RFC 1918.
        """
        return (self in IPv4Network('10.0.0.0/8') or
                self in IPv4Network('172.16.0.0/12') or
                self in IPv4Network('192.168.0.0/16'))
    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.
        Returns:
            A boolean, True if the address is multicast.
        See RFC 3171 for details.
        """
        return self in IPv4Network('224.0.0.0/4')
    @property
    def is_unspecified(self):
        """Test if the address is unspecified.
        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 5735 3.
        """
        return self in IPv4Network('0.0.0.0')
    @property
    def is_loopback(self):
        """Test if the address is a loopback address.
        Returns:
            A boolean, True if the address is a loopback per RFC 3330.
        """
        return self in IPv4Network('127.0.0.0/8')
    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.
        Returns:
            A boolean, True if the address is link-local per RFC 3927.
        """
        return self in IPv4Network('169.254.0.0/16')
class IPv4Address(_BaseV4, _BaseIP):
    """Represent and manipulate single IPv4 Addresses."""
    def __init__(self, address):
        """
        Args:
            address: A string or integer representing the IP
              '192.168.1.1'
              Additionally, an integer can be passed, so
              IPv4Address('192.168.1.1') == IPv4Address(3232235777).
              or, more generally
              IPv4Address(int(IPv4Address('192.168.1.1'))) ==
                IPv4Address('192.168.1.1')
        Raises:
            AddressValueError: If ipaddr isn't a valid IPv4 address.
        """
        _BaseV4.__init__(self, address)
        # Branch order matters: copy, then int, then packed bytes, and only
        # then fall back to string parsing.
        # Efficient copy constructor.
        if isinstance(address, IPv4Address):
            self._ip = address._ip
            return
        # Efficient constructor from integer.
        if isinstance(address, (int, long)):
            self._ip = address
            if address < 0 or address > self._ALL_ONES:
                raise AddressValueError(address)
            return
        # Constructing from a packed address
        if isinstance(address, Bytes):
            try:
                self._ip, = struct.unpack('!I', address)
            except struct.error:
                raise AddressValueError(address)  # Wrong length.
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP string.
        addr_str = str(address)
        self._ip = self._ip_int_from_string(addr_str)
class IPv4Network(_BaseV4, _BaseNet):
    """This class represents and manipulates 32-bit IPv4 networks.
    Attributes: [examples for IPv4Network('1.2.3.4/27')]
        ._ip: 16909060
        .ip: IPv4Address('1.2.3.4')
        .network: IPv4Address('1.2.3.0')
        .hostmask: IPv4Address('0.0.0.31')
        .broadcast: IPv4Address('1.2.3.31')
        .netmask: IPv4Address('255.255.255.224')
        .prefixlen: 27
    """
    def __init__(self, address, strict=False):
        """Instantiate a new IPv4 network object.
        Args:
            address: The IPv4 network as a string, 2-tuple, or any format
              supported by the IPv4Address constructor.
              Strings typically use CIDR format, such as '192.0.2.0/24'.
              If a dotted-quad is provided after the '/', it is treated as
              a netmask if it starts with a nonzero bit (e.g. 255.0.0.0 == /8)
              or a hostmask if it starts with a zero bit
              (e.g. /0.0.0.255 == /8), with the single exception of an all-zero
              mask which is treated as /0.
              The 2-tuple format consists of an (ip, prefixlen), where ip is any
              format recognized by the IPv4Address constructor, and prefixlen is
              an integer from 0 through 32.
              A plain IPv4 address (in any format) will be forwarded to the
              IPv4Address constructor, with an implied prefixlen of 32.
              For example, the following inputs are equivalent:
                  IPv4Network('192.0.2.1/32')
                  IPv4Network('192.0.2.1/255.255.255.255')
                  IPv4Network('192.0.2.1')
                  IPv4Network(0xc0000201)
                  IPv4Network(IPv4Address('192.0.2.1'))
                  IPv4Network(('192.0.2.1', 32))
                  IPv4Network((0xc0000201, 32))
                  IPv4Network((IPv4Address('192.0.2.1'), 32))
            strict: A boolean. If true, ensure that we have been passed
              a true network address, eg, 192.168.1.0/24 and not an
              IP address on a network, eg, 192.168.1.1/24.
        Raises:
            AddressValueError: If ipaddr isn't a valid IPv4 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv4 address.
            ValueError: If strict was True and a network address was not
              supplied.
        """
        _BaseNet.__init__(self, address)
        _BaseV4.__init__(self, address)
        # Constructing from a single IP address (int, packed bytes, or
        # IPv4Address): implied prefix length is the full 32 bits.
        if isinstance(address, (int, long, Bytes, IPv4Address)):
            self.ip = IPv4Address(address)
            self._ip = self.ip._ip
            self._prefixlen = self._max_prefixlen
            self.netmask = IPv4Address(self._ALL_ONES)
            return
        # Constructing from an (ip, prefixlen) tuple.
        if isinstance(address, tuple):
            try:
                ip, prefixlen = address
            except ValueError:
                # Not a 2-tuple.
                raise AddressValueError(address)
            self.ip = IPv4Address(ip)
            self._ip = self.ip._ip
            self._prefixlen = self._prefix_from_prefix_int(prefixlen)
        else:
            # Assume input argument to be string or any object representation
            # which converts into a formatted IP prefix string.
            addr = str(address).split('/')
            if len(addr) > 2:
                raise AddressValueError(address)
            self._ip = self._ip_int_from_string(addr[0])
            self.ip = IPv4Address(self._ip)
            if len(addr) == 2:
                try:
                    # Check for a netmask in prefix length form.
                    self._prefixlen = self._prefix_from_prefix_string(addr[1])
                except NetmaskValueError:
                    # Check for a netmask or hostmask in dotted-quad form.
                    # This may raise NetmaskValueError.
                    self._prefixlen = self._prefix_from_ip_string(addr[1])
            else:
                # No '/': a plain address, so the prefix is the full 32 bits.
                self._prefixlen = self._max_prefixlen
        # Derive the netmask object from the final prefix length.
        self.netmask = IPv4Address(self._ip_int_from_prefix(self._prefixlen))
        if strict:
            # In strict mode the supplied address must equal the network
            # address (no host bits set).
            if self.ip != self.network:
                raise ValueError('%s has host bits set' % self.ip)
        # For /31 networks, iterate over both addresses as hosts.
        if self._prefixlen == (self._max_prefixlen - 1):
            self.iterhosts = self.__iter__
    # backwards compatibility
    IsRFC1918 = lambda self: self.is_private
    IsMulticast = lambda self: self.is_multicast
    IsLoopback = lambda self: self.is_loopback
    IsLinkLocal = lambda self: self.is_link_local
class _BaseV6(object):
    """Base IPv6 object.
    The following methods are used by IPv6 objects in both single IP
    addresses and networks.
    """
    # 128 one-bits: the largest value representable in an IPv6 address.
    _ALL_ONES = (2**IPV6LENGTH) - 1
    # An IPv6 address consists of 8 16-bit groups ("hextets").
    _HEXTET_COUNT = 8
    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
    def __init__(self, address):
        # Only version/prefix metadata here; subclasses parse `address`.
        self._version = 6
        self._max_prefixlen = IPV6LENGTH
    def _ip_int_from_string(self, ip_str):
        """Turn an IPv6 ip_str into an integer.
        Args:
            ip_str: A string, the IPv6 ip_str.
        Returns:
            A long, the IPv6 ip_str.
        Raises:
            AddressValueError: if ip_str isn't a valid IPv6 Address.
        """
        parts = ip_str.split(':')
        # An IPv6 address needs at least 2 colons (3 parts).
        if len(parts) < 3:
            raise AddressValueError(ip_str)
        # If the address has an IPv4-style suffix, convert it to hexadecimal.
        if '.' in parts[-1]:
            ipv4_int = IPv4Address(parts.pop())._ip
            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
            parts.append('%x' % (ipv4_int & 0xFFFF))
        # An IPv6 address can't have more than 8 colons (9 parts).
        if len(parts) > self._HEXTET_COUNT + 1:
            raise AddressValueError(ip_str)
        # Disregarding the endpoints, find '::' with nothing in between.
        # This indicates that a run of zeroes has been skipped.
        try:
            # Tuple-unpacking a list whose length isn't exactly 1 raises
            # ValueError, which is how multiple '::' runs are rejected.
            skip_index, = (
                [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
                [None])
        except ValueError:
            # Can't have more than one '::'
            raise AddressValueError(ip_str)
        # parts_hi is the number of parts to copy from above/before the '::'
        # parts_lo is the number of parts to copy from below/after the '::'
        if skip_index is not None:
            # If we found a '::', then check if it also covers the endpoints.
            parts_hi = skip_index
            parts_lo = len(parts) - skip_index - 1
            if not parts[0]:
                parts_hi -= 1
                if parts_hi:
                    raise AddressValueError(ip_str) # ^: requires ^::
            if not parts[-1]:
                parts_lo -= 1
                if parts_lo:
                    raise AddressValueError(ip_str) # :$ requires ::$
            parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
            if parts_skipped < 1:
                raise AddressValueError(ip_str)
        else:
            # Otherwise, allocate the entire address to parts_hi. The endpoints
            # could still be empty, but _parse_hextet() will check for that.
            if len(parts) != self._HEXTET_COUNT:
                raise AddressValueError(ip_str)
            parts_hi = len(parts)
            parts_lo = 0
            parts_skipped = 0
        try:
            # Now, parse the hextets into a 128-bit integer.
            ip_int = 0L
            for i in xrange(parts_hi):
                ip_int <<= 16
                ip_int |= self._parse_hextet(parts[i])
            ip_int <<= 16 * parts_skipped
            for i in xrange(-parts_lo, 0):
                ip_int <<= 16
                ip_int |= self._parse_hextet(parts[i])
            return ip_int
        except ValueError:
            raise AddressValueError(ip_str)
    def _parse_hextet(self, hextet_str):
        """Convert an IPv6 hextet string into an integer.
        Args:
            hextet_str: A string, the number to parse.
        Returns:
            The hextet as an integer.
        Raises:
            ValueError: if the input isn't strictly a hex number from [0..FFFF].
        """
        # Whitelist the characters, since int() allows a lot of bizarre stuff.
        if not self._HEX_DIGITS.issuperset(hextet_str):
            raise ValueError
        if len(hextet_str) > 4:
            raise ValueError
        # Note: int('', 16) raises ValueError, so empty hextets (e.g. from a
        # stray leading/trailing ':') are rejected here as well.
        hextet_int = int(hextet_str, 16)
        if hextet_int > 0xFFFF:
            raise ValueError
        return hextet_int
    def _compress_hextets(self, hextets):
        """Compresses a list of hextets.
        Compresses a list of strings, replacing the longest continuous
        sequence of "0" in the list with "" and adding empty strings at
        the beginning or at the end of the string such that subsequently
        calling ":".join(hextets) will produce the compressed version of
        the IPv6 address.
        Args:
            hextets: A list of strings, the hextets to compress.
        Returns:
            A list of strings.
        """
        best_doublecolon_start = -1
        best_doublecolon_len = 0
        doublecolon_start = -1
        doublecolon_len = 0
        for index in range(len(hextets)):
            if hextets[index] == '0':
                doublecolon_len += 1
                if doublecolon_start == -1:
                    # Start of a sequence of zeros.
                    doublecolon_start = index
                if doublecolon_len > best_doublecolon_len:
                    # This is the longest sequence of zeros so far.
                    best_doublecolon_len = doublecolon_len
                    best_doublecolon_start = doublecolon_start
            else:
                doublecolon_len = 0
                doublecolon_start = -1
        # Only runs of two or more zero groups are compressed ('::' never
        # stands in for a single '0' group).
        if best_doublecolon_len > 1:
            best_doublecolon_end = (best_doublecolon_start +
                                    best_doublecolon_len)
            # For zeros at the end of the address.
            if best_doublecolon_end == len(hextets):
                hextets += ['']
            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
            # For zeros at the beginning of the address.
            if best_doublecolon_start == 0:
                hextets = [''] + hextets
        return hextets
    def _string_from_ip_int(self, ip_int=None):
        """Turns a 128-bit integer into hexadecimal notation.
        Args:
            ip_int: An integer, the IP address.
        Returns:
            A string, the hexadecimal representation of the address.
        Raises:
            ValueError: The address is bigger than 128 bits of all ones.
        """
        # Fall back to this object's own address when no integer is given;
        # an explicit 0 is a valid argument and is NOT replaced.
        if not ip_int and ip_int != 0:
            ip_int = int(self._ip)
        if ip_int > self._ALL_ONES:
            raise ValueError('IPv6 address is too large')
        hex_str = '%032x' % ip_int
        hextets = []
        # Slice the 32 hex digits into 8 groups of 4; '%x' re-formatting
        # drops each group's leading zeros.
        for x in range(0, 32, 4):
            hextets.append('%x' % int(hex_str[x:x+4], 16))
        hextets = self._compress_hextets(hextets)
        return ':'.join(hextets)
    def _explode_shorthand_ip_string(self):
        """Expand a shortened IPv6 address.
        Args:
            ip_str: A string, the IPv6 address.
        Returns:
            A string, the expanded IPv6 address.
        """
        # Networks stringify with a '/prefix' suffix, so take the bare ip.
        if isinstance(self, _BaseNet):
            ip_str = str(self.ip)
        else:
            ip_str = str(self)
        ip_int = self._ip_int_from_string(ip_str)
        parts = []
        # Emit all 8 hextets zero-padded to 4 digits, least-significant
        # first, then reverse into network order.
        for i in xrange(self._HEXTET_COUNT):
            parts.append('%04x' % (ip_int & 0xFFFF))
            ip_int >>= 16
        parts.reverse()
        if isinstance(self, _BaseNet):
            return '%s/%d' % (':'.join(parts), self.prefixlen)
        return ':'.join(parts)
    @property
    def max_prefixlen(self):
        # Always 128 for IPv6.
        return self._max_prefixlen
    @property
    def packed(self):
        """The binary representation of this address."""
        return v6_int_to_packed(self._ip)
    @property
    def version(self):
        # Always 6 for this class hierarchy.
        return self._version
    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.
        Returns:
            A boolean, True if the address is a multicast address.
        See RFC 2373 2.7 for details.
        """
        return self in IPv6Network('ff00::/8')
    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.
        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.
        """
        return (self in IPv6Network('::/8') or
                self in IPv6Network('100::/8') or
                self in IPv6Network('200::/7') or
                self in IPv6Network('400::/6') or
                self in IPv6Network('800::/5') or
                self in IPv6Network('1000::/4') or
                self in IPv6Network('4000::/3') or
                self in IPv6Network('6000::/3') or
                self in IPv6Network('8000::/3') or
                self in IPv6Network('A000::/3') or
                self in IPv6Network('C000::/3') or
                self in IPv6Network('E000::/4') or
                self in IPv6Network('F000::/5') or
                self in IPv6Network('F800::/6') or
                self in IPv6Network('FE00::/9'))
    @property
    def is_unspecified(self):
        """Test if the address is unspecified.
        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.
        """
        # For networks only '::/128' qualifies (hence the prefixlen check);
        # plain addresses have no _prefixlen, so the default of 128 applies.
        return self._ip == 0 and getattr(self, '_prefixlen', 128) == 128
    @property
    def is_loopback(self):
        """Test if the address is a loopback address.
        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.
        """
        return self._ip == 1 and getattr(self, '_prefixlen', 128) == 128
    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.
        Returns:
            A boolean, True if the address is reserved per RFC 4291.
        """
        return self in IPv6Network('fe80::/10')
    @property
    def is_site_local(self):
        """Test if the address is reserved for site-local.
        Note that the site-local address space has been deprecated by RFC 3879.
        Use is_private to test if this address is in the space of unique local
        addresses as defined by RFC 4193.
        Returns:
            A boolean, True if the address is reserved per RFC 3513 2.5.6.
        """
        return self in IPv6Network('fec0::/10')
    @property
    def is_private(self):
        """Test if this address is allocated for private networks.
        Returns:
            A boolean, True if the address is reserved per RFC 4193.
        """
        return self in IPv6Network('fc00::/7')
    @property
    def ipv4_mapped(self):
        """Return the IPv4 mapped address.
        Returns:
            If the IPv6 address is a v4 mapped address, return the
            IPv4 mapped address. Return None otherwise.
        """
        # Mapped addresses have the form ::ffff:a.b.c.d, i.e. bits 32-47
        # are all ones and everything above is zero.
        if (self._ip >> 32) != 0xFFFF:
            return None
        return IPv4Address(self._ip & 0xFFFFFFFF)
    @property
    def teredo(self):
        """Tuple of embedded teredo IPs.
        Returns:
            Tuple of the (server, client) IPs or None if the address
            doesn't appear to be a teredo address (doesn't start with
            2001::/32)
        """
        if (self._ip >> 96) != 0x20010000:
            return None
        # The client address is stored bit-inverted in the low 32 bits.
        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
                IPv4Address(~self._ip & 0xFFFFFFFF))
    @property
    def sixtofour(self):
        """Return the IPv4 6to4 embedded address.
        Returns:
            The IPv4 6to4-embedded address if present or None if the
            address doesn't appear to contain a 6to4 embedded address.
        """
        if (self._ip >> 112) != 0x2002:
            return None
        return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
class IPv6Address(_BaseV6, _BaseIP):
    """Represent and manipulate single IPv6 Addresses.
    """
    def __init__(self, address):
        """Instantiate a new IPv6 address object.
        Args:
            address: A string (e.g. '2001:4860::'), an integer in
              [0, 2**128), a packed 16-byte Bytes value, or an existing
              IPv6Address (which is copied), so that
              IPv6Address(IPv6Address('2001:4860::')._ip) ==
              IPv6Address('2001:4860::')
        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
        """
        _BaseV6.__init__(self, address)
        if isinstance(address, IPv6Address):
            # Copy constructor: reuse the already-parsed integer.
            self._ip = address._ip
        elif isinstance(address, (int, long)):
            # Integer form: store it, then range-check the 128-bit space.
            self._ip = address
            if address < 0 or address > self._ALL_ONES:
                raise AddressValueError(address)
        elif isinstance(address, Bytes):
            # Packed form: two unsigned 64-bit halves in network order.
            try:
                hi, lo = struct.unpack('!QQ', address)
            except struct.error:
                raise AddressValueError(address) # Wrong length.
            self._ip = (hi << 64) | lo
        else:
            # Anything else is parsed via its string representation.
            self._ip = self._ip_int_from_string(str(address))
class IPv6Network(_BaseV6, _BaseNet):
    """This class represents and manipulates 128-bit IPv6 networks.
    Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
        .ip: IPv6Address('2001:658:22a:cafe:200::1')
        .network: IPv6Address('2001:658:22a:cafe::')
        .hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
        .broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
        .netmask: IPv6Address('ffff:ffff:ffff:ffff::')
        .prefixlen: 64
    """
    def __init__(self, address, strict=False):
        """Instantiate a new IPv6 network object.
        Args:
            address: The IPv6 network as a string, 2-tuple, or any format
              supported by the IPv6Address constructor.
              Strings should be in CIDR format, such as '2001:db8::/32'.
              The 2-tuple format consists of an (ip, prefixlen), where ip is any
              format recognized by the IPv6Address constructor, and prefixlen is
              an integer from 0 through 128.
              A plain IPv6 address (in any format) will be forwarded to the
              IPv6Address constructor, with an implied prefixlen of 128.
              For example, the following inputs are equivalent:
                  IPv6Network('2001:db8::/128')
                  IPv6Network('2001:db8:0:0:0:0:0:0/128')
                  IPv6Network('2001:db8::')
                  IPv6Network(0x20010db8 << 96)
                  IPv6Network(IPv6Address('2001:db8::'))
                  IPv6Network(('2001:db8::', 128))
                  IPv6Network((0x20010db8 << 96, 128))
                  IPv6Network((IPv6Address('2001:db8::'), 128))
            strict: A boolean. If true, ensure that we have been passed
              a true network address, eg, 2001:db8::/32 and not an
              IP address on a network, eg, 2001:db8::1/32.
        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv6 address.
            ValueError: If strict was True and a network address was not
              supplied.
        """
        _BaseNet.__init__(self, address)
        _BaseV6.__init__(self, address)
        # Constructing from a single IP address (int, packed bytes, or
        # IPv6Address): implied prefix length is the full 128 bits.
        if isinstance(address, (int, long, Bytes, IPv6Address)):
            self.ip = IPv6Address(address)
            self._ip = self.ip._ip
            self._prefixlen = self._max_prefixlen
            self.netmask = IPv6Address(self._ALL_ONES)
            return
        # Constructing from an (ip, prefixlen) tuple.
        if isinstance(address, tuple):
            try:
                ip, prefixlen = address
            except ValueError:
                # Not a 2-tuple.
                raise AddressValueError(address)
            self.ip = IPv6Address(ip)
            self._ip = self.ip._ip
            self._prefixlen = self._prefix_from_prefix_int(prefixlen)
        else:
            # Assume input argument to be string or any object representation
            # which converts into a formatted IP prefix string.
            addr = str(address).split('/')
            if len(addr) > 2:
                raise AddressValueError(address)
            self._ip = self._ip_int_from_string(addr[0])
            self.ip = IPv6Address(self._ip)
            if len(addr) == 2:
                # This may raise NetmaskValueError
                self._prefixlen = self._prefix_from_prefix_string(addr[1])
            else:
                # No '/': a plain address; the prefix is the full 128 bits.
                self._prefixlen = self._max_prefixlen
        # Derive the netmask object from the final prefix length.
        self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
        if strict:
            # In strict mode the supplied address must equal the network
            # address (no host bits set).
            if self.ip != self.network:
                raise ValueError('%s has host bits set' %
                                 self.ip)
        # For /127 networks, iterate over both addresses as hosts.
        if self._prefixlen == (self._max_prefixlen - 1):
            self.iterhosts = self.__iter__
    @property
    def with_netmask(self):
        # IPv6 netmasks are conventionally written as prefix lengths.
        return self.with_prefixlen
| objectsoul/ipaddr-py | ipaddr.py | Python | apache-2.0 | 61,896 |
"""
Notes:
- Brugia protein sequences: https://www.ncbi.nlm.nih.gov/bioproject/PRJNA10729
- wBm protein sequences: https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=292805
- BLASTP against Reference proteins (refseq protein) from Human, using BLOSUM45 matrix.
- BLASTP against nr proteins from O. volvulus and wOv, using BLOSUM45 matrix.
- Caution about the Oncho results; I'm not sure how many protein sequences have been annotated.
- The ChEMBL search results were performed under the "Target Search" tab on their website. Downloaded as a tab-delimited file.
"""
import os, cPickle, pandas, re
from molecbio import sequ
from cobra.flux_analysis import single_reaction_deletion, double_reaction_deletion
from model_tools import load_model, id_bottleneck_metabolites
import xml.etree.ElementTree as ET
def get_rxns_to_delete(model):
    """Collect knockout candidates from the model.
    A reaction qualifies when it has gene annotations and its ID carries one
    of the recognized metabolic-reaction prefixes.
    Returns a dict mapping reaction ID -> list of stripped gene names.
    """
    candidate_prefixes = ('R', 'ACYLCOA', 'N00001')
    mapping = {}
    for rxn in model.reactions:
        if rxn.gene_names and rxn.id.startswith(candidate_prefixes):
            mapping[rxn.id] = [g.strip() for g in rxn.gene_names.split(';')]
    return mapping
def do_deletions(rxn_data, model, rxn_to_genes, do_double_ko=False, obj_fraction=0.0):
    """Knock out each candidate reaction and record the significant ones.
    For every reaction in rxn_to_genes, runs a single-reaction deletion; if
    the resulting objective drops to <= obj_fraction of the original, an
    entry is added to rxn_data (mutated in place) with keys 'objective'
    (percent of original flux), 'deficiencies', and 'genes'.
    """
    # Tolerance for treating near-zero fluxes / ratios as zero.
    fraction_epsilon = 0.0001
    orig_f = float(model.optimize().f)
    s_rates, s_stats = single_reaction_deletion(model, list(rxn_to_genes.keys()))
    print('Original objective %.1f; %i reactions knocked out.' % (orig_f, len(s_stats)))
    print('Calculating model deficiencies for each knockout...')
    for r_id, new_f in s_rates.items():
        # Clamp numerical noise to an exact zero flux.
        if abs(new_f) < fraction_epsilon:
            new_f = 0.0
        stat = s_stats[r_id]
        if new_f/orig_f <= obj_fraction+fraction_epsilon:
            # Only solvable knockouts can be probed for bottlenecks; the
            # rest are recorded as infeasible.
            if stat == 'optimal':
                deficiencies = find_model_deficiencies(model, orig_f, new_f, r_id)
            else:
                deficiencies = 'infeasible'
            rxn_data[r_id] = {'objective':round(new_f/orig_f*100, 1), 'deficiencies':deficiencies, 'genes':rxn_to_genes[r_id]}
    if do_double_ko:
        # Double knockouts are attempted only on reactions that were not
        # already significant on their own.
        double_rxn_ids = [r for r in list(rxn_to_genes.keys()) if r not in rxn_data]
        print('Performing double knockouts on %i candidates...' % len(double_rxn_ids))
        # NOTE(review): only the first 5 candidates are run, and the results
        # below are unpacked but never used or returned -- this branch looks
        # unfinished; confirm before relying on double-KO output.
        double_ko_data = double_reaction_deletion(model, double_rxn_ids[:5], number_of_processes=3)
        d_r1, d_r2, d_rates = double_ko_data['y'], double_ko_data['x'], double_ko_data['data']
def find_model_deficiencies(model, orig_f, new_f, r_id):
    """Describe which biomass components recover flux after a knockout.
    Temporarily closes reaction r_id, asks id_bottleneck_metabolites which
    biomass precursors (and their sub-components) restore growth, and
    formats them as 'MTB (pct) [sub(pct), ...]' strings. Restores the
    reaction's original bounds before returning. Returns 'unrecoverable'
    when no single metabolite restores flux.
    """
    rxn = model.reactions.get_by_id(r_id)
    saved_bounds = rxn.bounds
    rxn.bounds = (0,0)
    found = []
    for recovered_f, mtb_id in id_bottleneck_metabolites(model, new_f, 'BIOMASS', threshold=1.0):
        entry = '%s (%.1f)' % (mtb_id, recovered_f/orig_f*100)
        # Drill one level deeper into the recovered metabolite's own pseudo-reaction.
        sub_defs = ['%s(%.1f)' % (sub_mtb_id, sub_f/orig_f*100)
                    for sub_f, sub_mtb_id in id_bottleneck_metabolites(model, new_f, mtb_id.upper(), threshold=1.0)]
        if sub_defs:
            entry += ' [%s]' % ', '.join(sub_defs)
        found.append(entry)
    rxn.bounds = saved_bounds
    if found:
        return ', '.join(found)
    return 'unrecoverable'
def process_gene_data(rxn_data):
    """Invert reaction-keyed knockout results into gene-keyed entries.
    Each gene receives one entry per significant reaction it participates
    in; entries default to num_reactions == 1, which is corrected afterwards
    for genes appearing in several reactions.
    """
    by_gene = {}
    for r_id, data in rxn_data.items():
        for gene in set(data['genes']):
            by_gene.setdefault(gene, []).append(generate_gene_entry(data, r_id, gene))
    # Fix up the reaction count for multi-reaction genes.
    for entries in by_gene.values():
        count = len(entries)
        if count > 1:
            for entry in entries:
                entry['num_reactions'] = count
    return by_gene
def generate_gene_entry(r_data, r_id, gene):
    """Build one per-gene record from a reaction knockout record.
    'other_genes' lists the reaction's remaining genes (comma-joined,
    sorted), or '' when this gene is the only one annotated.
    """
    gene_set = set(r_data['genes'])
    if len(gene_set) == 1:
        others = ''
    else:
        others = ','.join(sorted(gene_set - set([gene])))
    return {
        'other_genes': others,
        'reaction': r_id,
        'objective': r_data['objective'],
        'deficiencies': r_data['deficiencies'],
        'num_reactions': 1,
    }
# # # Save/load functions
def save_data_object(data_obj, file_path):
with open(file_path, 'wb') as f:
cPickle.dump(data_obj, f, protocol=0)
print('Saved data to %s' % file_path)
def load_data_object(file_path):
    """Unpickle and return the object stored at file_path.
    Counterpart of save_data_object(); only use on trusted files, since
    unpickling can execute arbitrary code.
    """
    with open(file_path, 'rb') as f:
        data_obj = cPickle.load(f)
    print('Loaded data from %s' % file_path)
    return data_obj
def save_data_to_excel(gene_data, gene_data_out_file, expression_headings):
    """Write the per-gene knockout summary to a formatted Excel workbook.
    One row per (gene, reaction) entry: knockout stats, human/O. volvulus
    homolog counts, ChEMBL hits, and expression levels. Rows are sorted by
    deficiency string (ties broken by gene name); the header row and column
    formats are written manually via xlsxwriter.
    """
    min_column_width = 10
    header_bg = '#DEEDED'
    sheet_name = 'Single knockouts'
    gene_header = 'Gene ID'
    # (column header, gene-entry attribute) pairs, in output order.
    headers_atts = [('# Reactions','num_reactions'), ('Reaction','reaction'), ('Associated genes','other_genes'), ('Objective %','objective'), ('Biomass deficiencies','deficiencies')]
    ortho_headers = ['Human homologs\n(#|% identity|% coverage)', 'O. volvulus homologs\n(#|% identity|% coverage)']
    chembl_headers = ['# ChEMBL hits', 'ChEMBL hits\n(% identity|species)']
    # data maps column header -> list of cell values (one per output row).
    data = {h[0]:[] for h in headers_atts+expression_headings}
    for h in [gene_header] + ortho_headers + chembl_headers:
        data[h] = []
    # Alphabetical sort first, then a stable sort by deficiencies, so genes
    # with equal deficiency strings stay alphabetical.
    gene_order = sorted(list(gene_data.keys()))
    gene_order.sort(key=lambda g:gene_data[g][0]['deficiencies'])
    for gene in gene_order:
        for g_data in gene_data[gene]:
            data[gene_header].append(gene)
            for h, att in headers_atts:
                data[h].append(g_data.get(att, 'NOT FOUND'))
            # Homolog columns collapse to a blank cell when no hits exist.
            human_hlogs = '%i | %.1f | %.1f' % (g_data['num_human_prots'], g_data['human_prot_identity'],g_data['human_prot_coverage']) if g_data['num_human_prots'] else ' '
            data[ortho_headers[0]].append(human_hlogs)
            oncho_hlogs = '%i | %.1f | %.1f' % (g_data['num_oncho_prots'], g_data['oncho_prot_identity'],g_data['oncho_prot_coverage']) if g_data['num_oncho_prots'] else ' '
            data[ortho_headers[1]].append(oncho_hlogs)
            data[chembl_headers[0]].append(g_data.get('num_chembl_hits', 0))
            data[chembl_headers[1]].append(g_data.get('chembl_hits', ''))
            if '_max_observed_expression' in g_data['expression_levels']:
                max_expression = round(g_data['expression_levels']['_max_observed_expression'], 1)
            else:
                max_expression = " "
            data[expression_headings[0][0]].append(max_expression)
            # Remaining expression headings carry (header, condition-keys) pairs.
            for h, ls in expression_headings[1:]:
                exp_levels = [g_data['expression_levels'].get(l) for l in ls]
                data[h].append(' | '.join(exp_levels))
    col_headers = [gene_header] + [h[0] for h in headers_atts] + [i for i in ortho_headers+chembl_headers] + [j[0] for j in expression_headings]
    writer = pandas.ExcelWriter(gene_data_out_file, engine='xlsxwriter')
    df = pandas.DataFrame(data)[col_headers] # The [] specifies the order of the columns.
    df.to_excel(writer, sheet_name=sheet_name, index=False, startrow=1, header=False)
    worksheet = writer.sheets[sheet_name]
    header_format = writer.book.add_format({'bold': True, 'text_wrap': True, 'align': 'center', 'valign': 'top', 'bg_color': header_bg, 'border': 1})
    for i, h in enumerate(col_headers):
        # Width fits the longest line of the (possibly multi-line) header.
        col_w = max(len(line.strip()) for line in h.splitlines())
        col_width = max(col_w+1, min_column_width)
        if i in (0, 2, 3, 5, 9):
            col_format = writer.book.add_format({'align': 'left'})
        elif i == 10:
            # NOTE(review): this branch is identical to the else branch, so
            # it has no effect; possibly a leftover from a different format.
            col_format = writer.book.add_format({'align': 'center'})
        else:
            col_format = writer.book.add_format({'align': 'center'})
        worksheet.set_column(i, i, col_width, col_format)
        worksheet.write(0, i, h, header_format) # Header added manually.
    worksheet.freeze_panes(1, 0) # Freezes header row.
    writer.save()
    print('Data saved to %s' % gene_data_out_file)
# # # Getting protein names and sequences
def save_prot_names_list(gene_data):
prot_list_file = 'utility/b_mal_4.5-wip_single_ko_prot_names.txt'
prot_list = sorted(gene_data.keys())
with open(prot_list_file, 'w') as f:
f.write('\n'.join(prot_list))
print('Saved protein list to %s' % prot_list_file)
def get_prot_name_translations(gene_data, gen_pept_file):
    """Map GenPept protein accessions to the model's gene names.
    Scans the GenPept flat file for VERSION lines (the accession) followed
    by a /standard_name qualifier; when the standard name is one of our
    genes, records accession -> gene. Genes never seen in the file are
    assumed to use the accession '<gene>.1'.
    """
    print('Parsing %s...' % gen_pept_file)
    prot_to_std = {}
    found_names = set()
    with open(gen_pept_file, 'r') as f:
        current_acc = None
        for line in f:
            if current_acc is None:
                if line.startswith('VERSION'):
                    current_acc = line.strip().split()[1]
            elif "/standard_name=" in line:
                # Strip the surrounding quotes from the qualifier value.
                std = line.partition('=')[2].strip()[1:-1]
                if std in gene_data:
                    prot_to_std[current_acc] = std
                    found_names.add(std)
                current_acc = None
    # Fallback for genes absent from the GenPept file.
    for gene in gene_data:
        if gene not in found_names:
            prot_to_std['%s.1' % gene] = gene
    return prot_to_std
def save_prot_sequences(gene_data, prot_to_std, prot_sequences_file):
    """Extract the protein sequences for our genes and save them as FASTA.
    Reads the combined B. malayi + wBm FASTA, renames each matching record
    to its gene name via prot_to_std, and writes the result to
    prot_sequences_file. Exits the program if a gene maps to multiple
    sequences or if any gene's sequence is missing.
    """
    prots_fasta_file = 'utility/b_malayi_and_wBm_prots.fasta'
    all_seqs = sequ.loadfasta(prots_fasta_file)
    prots, found_genes = [], set()
    for seq in all_seqs:
        gene = prot_to_std.get(seq.name)
        if not gene: continue
        if gene in found_genes:
            # NOTE(review): the message cites seq.name, but the duplicate is
            # the mapped gene name -- two accessions mapping to one gene.
            print('Error: multiple sequences were found matching "%s".' % seq.name)
            exit()
        prots.append(sequ.Sequence(name=gene, sequence=seq.seq))
        found_genes.add(gene)
    if len(prots) != len(gene_data):
        print('Warning: only found sequences for %i of %i genes. Missing genes:' % (len(prots), len(gene_data)))
        for g in set(gene_data) - found_genes:
            print(g)
        exit()
    sequ.savefasta(prots, prot_sequences_file, spaces=False, numbers=False)
    print('Saved %i sequences to %s' % (len(prots), prot_sequences_file))
    return prots
# # # Parsing BLAST output
def parse_blast_xml(gene_data, blast_xml_file, taxon_name, spc_str):
    """Annotate gene entries with homolog statistics from a BLAST XML file.
    taxon_name is used to name the properties saved in gene_data (e.g.
    'human' produces 'num_human_prots', 'human_prot_id', etc.); spc_str
    restricts hits to the matching species.
    """
    min_e_val = 1E-30
    property_strs = ['num_%s_prots', '%s_prot_id', '%s_prot_identity', '%s_prot_coverage']
    gi_split_regex = re.compile('\s?>gi\|\S+\|\S+\|\S+\|\s?')
    gene_spc_regex = re.compile('(.+) \[(.+)\]$')
    isoform_regex = re.compile('(.+) (isoform \S+)(.*)$')
    root = ET.parse(blast_xml_file).getroot()
    for query in root.find('BlastOutput_iterations'):
        gene = query.find('Iteration_query-def').text
        if gene not in gene_data:
            continue
        query_len = float(query.find('Iteration_query-len').text)
        subject_hits = query.find('Iteration_hits')
        hit_names, best_id, _best_e, best_ident, best_cov = get_good_hits(subject_hits, min_e_val, spc_str.lower(), gi_split_regex, gene_spc_regex, isoform_regex)
        # Coverage is reported as a percentage of the query length.
        values = [len(hit_names), best_id, best_ident, round(best_cov/query_len*100.0, 1)]
        for g_data in gene_data[gene]:
            for p_str, val in zip(property_strs, values):
                g_data[p_str % taxon_name] = val
def get_good_hits(s_hits, min_e_val, spc_str, gi_split_regex, gene_spc_regex, isoform_regex):
    """Counts based on the 'Hit_def' field in the subject hits, which is the name. Attempts to remove isoforms and predicted proteins from the count.
    Returns (hit_names, best_hit_id, best_e_val, best_identity,
    best_coverage); when no hit passes min_e_val the tuple is
    (empty set, None, None, 0, 0).
    """
    # Seed "best" trackers with a sentinel E-value just above the cutoff.
    best_hit_id, best_e_val, best_ident, best_coverage = None, min_e_val + 1, 0, 0
    hit_names = set()
    for s_hit in s_hits:
        hit_e_val, hit_ident, hit_coverage = min_e_val + 1, 0, 0
        # Keep the stats of this subject's best-scoring HSP.
        for hsp in s_hit.find('Hit_hsps'):
            e_val = float(hsp.find('Hsp_evalue').text)
            if e_val < hit_e_val:
                hit_e_val = e_val
                hit_ident = round(float(hsp.find('Hsp_identity').text)/float(hsp.find('Hsp_align-len').text)*100, 1)
                # NOTE(review): query-to minus query-from gives an
                # inclusive span minus one residue -- confirm whether a +1
                # was intended for true coverage.
                hit_coverage = int(hsp.find('Hsp_query-to').text) - int(hsp.find('Hsp_query-from').text)
        if hit_e_val < min_e_val:
            name = parse_name_from_hit(s_hit, spc_str, gi_split_regex, gene_spc_regex, isoform_regex)
            if not name:
                continue # A hit was found, but it did not match the spc_str
            hit_names.add(name)
            if hit_e_val < best_e_val:
                best_hit_id = s_hit.find('Hit_accession').text.strip()
                best_ident = hit_ident
                best_e_val, best_coverage = hit_e_val, hit_coverage
    if not hit_names:
        return hit_names, None, None, 0, 0
    return hit_names, best_hit_id, best_e_val, best_ident, best_coverage
def parse_name_from_hit(s_hit, spc_str, gi_split_regex, gene_spc_regex, isoform_regex):
    """Extract a normalized protein name from one BLAST subject hit.
    Returns False when no name matches the requested species; otherwise
    strips 'isoform ...' fragments and a leading 'PREDICTED: ' marker so
    variants of one protein collapse to a single name.
    """
    name = find_gene_from_species(s_hit, spc_str, gi_split_regex, gene_spc_regex)
    if not name:
        return False
    if 'isoform' in name:
        # Drop the 'isoform X' fragment, keeping any trailing text.
        prefix, _isoform, suffix = isoform_regex.match(name).groups()
        name = prefix + suffix
    if name.lower().startswith('predicted: '):
        name = name[11:]
    return name
def find_gene_from_species(s_hit, spc_str, gi_split_regex, gene_spc_regex):
    """Scan the hit's concatenated Hit_def names for one whose bracketed
    species annotation contains spc_str; return that gene name, else False."""
    for candidate in gi_split_regex.split(s_hit.find('Hit_def').text):
        match = gene_spc_regex.match(candidate)
        if match:
            name, species = match.groups()
            if spc_str in species.lower():
                return name
    return False
# # # Getting expression data
def get_expression_data(gene_data, expression_file, sheetnames, conditions):
    """Annotate every gene entry with normalized expression levels.
    Parses each sheet of the expression workbook into the entries (via
    parse_expression_sheet), then fills a '--' placeholder level for every
    condition on entries that received no expression data.
    Args:
        gene_data: dict mapping gene -> list of entry dicts (mutated).
        expression_file: path to the expression workbook.
        sheetnames: iterable of sheet names to parse.
        conditions: condition column prefixes (e.g. 'L3', 'F30').
    """
    for sheetname in sheetnames:
        parse_expression_sheet(gene_data, expression_file, sheetname, conditions)
    for gene, entries in gene_data.items():
        for e in entries:
            if 'expression_levels' not in e:
                # Build a fresh dict per entry. The previous code shared one
                # placeholder dict across all entries, so mutating one
                # entry's levels silently changed every other entry.
                e['expression_levels'] = {c:'--' for c in conditions}
def parse_expression_sheet(gene_data, filename, sheetname, conditions):
    """Read one sheet of FPKM data and attach per-condition levels to genes.
    For each row whose 'Sequence Name' matches a gene, averages the
    replicate columns of every condition (e.g. 'L3a'/'L3b'/'L3c'), scales
    each average to 0-100 relative to the row's maximum, and stores the
    result (plus '_max_observed_expression') on every entry of that gene.
    Exits the program on duplicate column headers.
    """
    seq_name_key = 'Sequence Name'
    replicate_inds = ['a', 'b', 'c']
    frame = pandas.read_excel(filename, sheetname)
    if len(frame.columns) != len(set(frame.columns)):
        print('Error: at least one column header was not unique in sheet %s.' % sheetname)
        exit()
    # For each condition, keep whichever replicate columns actually exist.
    # NOTE(review): a condition with no replicate columns would divide by
    # zero below -- assumed not to occur in these workbooks; confirm.
    cond_keys = [[cond+ind for ind in replicate_inds if cond+ind in frame.columns] for cond in conditions]
    for i in frame.index:
        # .loc replaces the long-deprecated (and since-removed) .ix; since
        # i comes from frame.index, label-based lookup matches the old
        # behavior exactly.
        row = frame.loc[i]
        seq_name = row[seq_name_key]
        if seq_name not in gene_data:
            continue
        avgs = [sum(row[k] for k in ck)/float(len(ck)) for ck in cond_keys]
        max_expression = max(avgs)
        exp = {c:'%i'%(round(a/max_expression*100.0, 0) if max_expression else 0) for a,c in zip(avgs, conditions)}
        exp['_max_observed_expression'] = max_expression
        for entry in gene_data[seq_name]:
            entry['expression_levels'] = exp
# # # Parse ChEMBL search file
def parse_chembl_results(gene_data, chembl_results_file):
max_e_val = 1E-30
chembl_data = {}
total_hits, sig_hits = 0, 0
with open(chembl_results_file) as f:
f.readline() # Header
for line in f:
if not line.strip():
continue
total_hits += 1
gene, chembl_id, tid, description, uniprot_id, target_type, species, _, _, identity, blast_score, e_value = line.split('\t')
identity, e_value = float(identity), float(e_value)
if e_value > max_e_val:
continue
sig_hits += 1
hit_data = {'chembl_id':chembl_id, 'species':species, 'identity':identity, 'e_value':e_value}
chembl_data.setdefault(gene, []).append(hit_data)
print('%i of the %i ChEMBL hits were below the E-value threshold of %.1e' % (sig_hits, total_hits, max_e_val))
for gene, data_list in chembl_data.items():
if gene not in gene_data:
continue
data_list.sort(key=lambda d: d['e_value'])
chembl_hits = ', '.join('%s (%i | %s)' % (d['chembl_id'], round(d['identity'], 0), d['species']) for d in data_list)
for g_data in gene_data[gene]:
g_data['num_chembl_hits'] = len(data_list)
g_data['chembl_hits'] = chembl_hits
# # # Misc functions
def print_deficiencies(rxn_data):
r_list = sorted(list(rxn_data.keys()))
r_list.sort(key=lambda r:rxn_data[r]['deficiencies'])
print('%i reactions with significant impact:' % len(r_list))
for r_id in r_list:
print('%s %.1f%% of objective value.' % (r_id, rxn_data[r_id]['objective']))
print('\t%s' % rxn_data[r_id]['deficiencies'])
print('\t%s' % ', '.join(rxn_data[r_id]['genes']))
# # # Main paths
files_dir = '/mnt/hgfs/win_projects/brugia_project'
utility_dir = '/home/dave/Desktop/projects/brugia_project/utility'
# # # Main run options
model_file = 'model_b_mal_4.5-wip.xlsx'
run_str = 'bm_4.5-lo_ox-lo_glu'  # Tag used in all generated file names.
wolbachia_ratio = 0.1
objective_threshold_fraction = 0.25 # Considered significant if resulting objective function is less than 0.25 (25%) of the original.
do_double_ko = False
expression_conditions = ['L3', 'L3D6', 'L3D9', 'L4', 'F30', 'M30', 'F42', 'M42', 'F120', 'M120']
# First heading is the max-expression column; the rest pair a column header
# with the condition keys whose values it displays.
expression_headings = [('Max\nexpression',), ('Larval expression\n(L3|L3D6|L3D9|L4)', ('L3','L3D6','L3D9','L4')), ('Adult female expression\n(F30|F42|F120)', ('F30','F42','F120')), ('Adult male expression\n(M30|M42|M120)', ('M30','M42','M120'))]
gene_data_out_file = os.path.join(files_dir, '%s_gene_info.xlsx'%(run_str))
# # # Required files
expression_file = os.path.join(files_dir, 'All_Stages_Brugia_Wolbachia_FPKMs.xlsx')
expression_sheets = ('Brugia_FPKMs', 'Wolbachia_FPKMs')
gen_pept_file = os.path.join(utility_dir, 'b_malayi_genpept.gp')
human_blast_xml_file = os.path.join(utility_dir, '%s_human_blast.xml'%(run_str))
oncho_blast_xml_file = os.path.join(utility_dir, '%s_oncho_blast.xml'%(run_str))
chembl_results_file = os.path.join(utility_dir, '%s_chembl.txt'%(run_str))
# # # Intermediate files created
prot_sequences_file = os.path.join(utility_dir, '%s_prots.fa'%(run_str))
rxn_ko_data_file = os.path.join(utility_dir, '%s_rxns.pkl'%(run_str))
gene_ko_data_file = os.path.join(utility_dir, '%s_genes.pkl'%(run_str))
# # # Run steps
# Step 1: run (or reload cached) single-reaction knockouts.
if not os.path.isfile(rxn_ko_data_file):
    rxn_data = {}
    model_path = os.path.join(files_dir, model_file)
    model = load_model(model_path, wolbachia_ratio)
    rxn_to_genes = get_rxns_to_delete(model)
    do_deletions(rxn_data, model, rxn_to_genes, do_double_ko, objective_threshold_fraction) # Fills out 'objective', 'deficiencies', and 'genes' of reactions in rxn_data.
    save_data_object(rxn_data, rxn_ko_data_file)
else:
    rxn_data = load_data_object(rxn_ko_data_file)
#print_deficiencies(rxn_data)
# Step 2: build (or reload cached) per-gene data with expression, BLAST
# homology, and ChEMBL annotations.
if not os.path.isfile(gene_ko_data_file):
    gene_data = process_gene_data(rxn_data)
    get_expression_data(gene_data, expression_file, expression_sheets, expression_conditions) # Fills out 'expression_levels'
    if not os.path.isfile(prot_sequences_file):
        prot_to_std = get_prot_name_translations(gene_data, gen_pept_file)
        prots = save_prot_sequences(gene_data, prot_to_std, prot_sequences_file)
    else:
        prots = sequ.loadfasta(prot_sequences_file)
    # The BLAST searches are run externally; their XML output must exist.
    for blast_file in [human_blast_xml_file, oncho_blast_xml_file]:
        if not os.path.isfile(blast_file):
            print('Error: no BLAST results found at %s' % blast_file)
            exit()
    parse_blast_xml(gene_data, human_blast_xml_file, 'human', 'homo sapiens')
    parse_blast_xml(gene_data, oncho_blast_xml_file, 'oncho', 'onchocerca volvulus')
    if not os.path.isfile(chembl_results_file):
        print('Error: no ChEMBL results found at %s' % chembl_results_file)
        exit()
    # parse_chembl_results(gene_data, chembl_results_file) # Where it should be called.
    save_data_object(gene_data, gene_ko_data_file)
else:
    gene_data = load_data_object(gene_ko_data_file)
    parse_chembl_results(gene_data, chembl_results_file) # # # Temp place to be called from.
# Step 3: write the final per-gene spreadsheet.
save_data_to_excel(gene_data, gene_data_out_file, expression_headings)
| dave-the-scientist/brugia_project | get_knockout_info.py | Python | gpl-3.0 | 20,007 |
import unittest
import cassandranames
from dnstypeconstants import *
# Running this *will destroy* data in Cassandra.
class TestCassandraNames(unittest.TestCase):
    """Exercise CassandraNames insert/lookup/remove round trips.

    WARNING: setUp() drops and recreates the schema, destroying any
    data already stored in Cassandra.
    """

    def setUp(self):
        # Start each test from a freshly installed, empty schema.
        cassandranames.install_schema(drop_first=True, rf=1)
        self.names = cassandranames.CassandraNames()

    def test_names(self):
        domain = "pantheon.example.com"
        names = self.names

        # An empty store answers every lookup with an empty dict.
        self.assertEqual(names.lookup(domain), {})
        self.assertEqual(names.lookup(domain, A), {})

        # A single "A" record appears (with the default 900s TTL) in both
        # generic and type-filtered lookups, but not for an unrelated type.
        names.insert(domain, A, "192.168.0.1")
        self.assertEqual(names.lookup(domain), {A: {"192.168.0.1": {"ttl": 900}}})
        self.assertEqual(names.lookup(domain, A), {A: {"192.168.0.1": {"ttl": 900}}})
        self.assertEqual(names.lookup(domain, MX), {})

        # A second "A" record with an explicit TTL joins the first.
        names.insert(domain, A, "192.168.0.2", 60)
        a_records = {"192.168.0.1": {"ttl": 900}, "192.168.0.2": {"ttl": 60}}
        self.assertEqual(names.lookup(domain), {A: a_records})
        self.assertEqual(names.lookup(domain, A), {A: a_records})
        self.assertEqual(names.lookup(domain, MX), {})

        # "MX" records carry a preference alongside the TTL.
        names.insert(domain, MX, "192.168.0.3", preference=10)
        mx_records = {"192.168.0.3": {"preference": 10, "ttl": 900}}
        self.assertEqual(names.lookup(domain), {A: a_records, MX: mx_records})
        self.assertEqual(names.lookup(domain, MX), {MX: mx_records})

        # Removing a single "A" record leaves the other "A" and the "MX".
        names.remove(domain, A, "192.168.0.1")
        self.assertEqual(names.lookup(domain, A), {A: {"192.168.0.2": {"ttl": 60}}})
        self.assertEqual(names.lookup(domain, MX), {MX: mx_records})

        # Removing by type drops every record of that type, nothing else.
        names.remove(domain, MX)
        self.assertEqual(names.lookup(domain, MX), {})
        self.assertEqual(names.lookup(domain, A), {A: {"192.168.0.2": {"ttl": 60}}})

        # Removing the bare domain wipes everything under it.
        names.remove(domain)
        self.assertEqual(names.lookup(domain), {})

        # Leave a few records behind, just for fun / manual inspection.
        names.insert(domain, A, "10.0.0.1", 60)
        names.insert(domain, A, "10.0.0.2", 60)
        names.insert(domain, MX, "10.0.0.3", 60, 10)
        names.insert(domain, MX, "10.0.0.4", 60, 20)
# Running this module directly executes the (data-destroying) test suite.
if __name__ == '__main__':
    unittest.main()
| pantheon-systems/cassandra-dns | cassandranames-test.py | Python | mit | 3,496 |
# Legacy:
def save_to_disfeval_file(p, g, w, f, filename, incremental=False):
    '''Write predictions in the format expected by the disfluency
    evaluation scripts (precision/recall/F1 computation).

    INPUT:
    p :: predictions; one tag sequence per utterance, or, when
         ``incremental`` is True, one list of prefix predictions per
         utterance (the i-th prefix covers the first i words, and the
         final prefix is padded below to also cover the utterance-end
         position)
    g :: groundtruth tag sequences, one per utterance
    w :: corresponding words, one sequence per utterance
    f :: unused; retained for backward compatibility (historically the
         original gold-standard input file)
    filename :: path the formatted predictions are written to
    incremental :: when True, emit increco-style output: one
         BOS-prefixed block per prediction prefix of each utterance; the
         last block covers an extra end-of-utterance position ('EOS'
         word, 'O' gold tag, trivially-unevaluated 'O' prediction) and
         may revise the tags of the penultimate block

    OUTPUT:
    filename :: the path that was written

    Fixes over the legacy version (output is byte-identical):
    no longer mutates the caller's ``g``/``p``/``w`` lists, no longer
    shadows the ``w`` and ``f`` parameters, closes the output file via a
    context manager, and builds the text with a single join instead of
    quadratic string concatenation.
    '''
    lines = []
    if not incremental:
        for gold_tags, pred_tags, words in zip(g, p, w):
            lines.append('BOS O O\n')
            for gold, pred, word in zip(gold_tags, pred_tags, words):
                lines.append(word + ' ' + gold + ' ' + pred + '\n')
            lines.append('EOS O O\n\n')
    else:
        for gold_tags, prefix_preds, words in zip(g, p, w):
            # Work on copies so the caller's lists are left untouched
            # (the legacy code appended to them in place).
            words = words + ['EOS']      # extra end-of-utterance position
            gold_tags = gold_tags + ['O']
            prefix_preds = [list(pp) for pp in prefix_preds]
            prefix_preds[-1].append('O')  # pad final prefix for the EOS slot
            prefix = []  # growing "word gold " prefix of the utterance
            for gold, preds, word in zip(gold_tags, prefix_preds, words):
                prefix.append(word + ' ' + gold + ' ')
                # Each stored prefix prediction must cover exactly the
                # words seen so far.
                assert len(prefix) == len(preds), str(prefix) + str(preds)
                lines.append('BOS O O\n')
                for pfx, pred in zip(prefix, preds):
                    lines.append(pfx + " " + pred + "\n")
                # The last block for the prefix ends here; the final one
                # for the utterance carries the EOS position.
                lines.append("\n")
    # Single write; the context manager closes the file even on error.
    with open(filename, 'w') as out_file:
        out_file.write(''.join(lines))
    return filename
| dsg-bielefeld/deep_disfluency | deep_disfluency/utils/accuracy.py | Python | mit | 2,054 |
from django import template
from django.utils.translation import ugettext_lazy as _
register = template.Library()
@register.filter
def percentage(value):
    """Render *value*, a fraction (e.g. 0.25), as a percentage string."""
    scaled = 100 * value
    return _('{value} %').format(value=scaled)
| Clarity-89/clarityv2 | src/clarityv2/utils/templatetags/math.py | Python | mit | 275 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.