| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀) |
|---|---|---|---|---|
mick-d/nipype
|
refs/heads/master
|
nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py
|
1
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brains import BRAINSTrimForegroundInDirection
def test_BRAINSTrimForegroundInDirection_inputs():
input_map = dict(BackgroundFillValue=dict(argstr='--BackgroundFillValue %s',
),
args=dict(argstr='%s',
),
closingSize=dict(argstr='--closingSize %d',
),
directionCode=dict(argstr='--directionCode %d',
),
environ=dict(nohash=True,
usedefault=True,
),
headSizeLimit=dict(argstr='--headSizeLimit %f',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = BRAINSTrimForegroundInDirection.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_BRAINSTrimForegroundInDirection_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = BRAINSTrimForegroundInDirection.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
daill/OPy
|
refs/heads/master
|
opy/client/o_db_set.py
|
1
|
# Copyright 2015 Christian Kramer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from inspect import isclass
import io
import logging
from opy.client.o_db_base import BaseEntity, BaseEdge, BaseVertex
from opy.client.o_db_utils import escapeclassname, retrieveclassname
from opy.common.o_db_constants import OBinaryType, OPlainClass, OSQLOperationType, OSQLIndexType
from opy.common.o_db_exceptions import SQLCommandException, WrongTypeException, OPyClientException
__author__ = 'daill'
class QueryType(object):
def parse(self):
raise NotImplementedError("You have to implement the parse method")
def getclass(self):
raise NotImplementedError("You have to implement the getclass method")
class QueryElement(object):
def __init__(self):
self._query = ""
def __str__(self):
return self._query
class GraphType(QueryType):
def __init__(self):
self.fetchplan = ""
self.operationtype = None
class WhereType(QueryElement):
def __init__(self):
super().__init__()
class Prefixed(object):
def __init__(self, clazz:BaseEntity, prefix:str):
self.clazz = clazz
self.prefix = prefix
class Move(QueryType):
"""
MOVE VERTEX <source> TO <destination> [SET [<field>=<value>]* [,]] [MERGE <JSON>]
"""
def __init__(self, src, dest:GraphType, *queryelements):
super().__init__()
self.__src = src
self.__dest = dest
self.__dest.operationtype = OSQLOperationType.MOVE
self.__queryelements = queryelements
def parse(self):
try:
query_string = io.StringIO()
query_string.write("move vertex ")
if isinstance(self.__src, str):
# just one rid
query_string.write(self.__src)
elif isinstance(self.__src, list):
# list of rids
query_string.write('[')
for i, rid in enumerate(self.__src):
query_string.write(rid)
if i < len(self.__src)-1:
query_string.write(",")
query_string.write(']')
elif isinstance(self.__src, Select):
query_string.write('(')
query_string.write(self.__src.parse())
query_string.write(')')
query_string.write(" to ")
if isinstance(self.__dest, Class):
query_string.write("class: ")
query_string.write(self.__dest.parse())
elif isinstance(self.__dest, Cluster):
query_string.write("cluster: ")
query_string.write(self.__dest.parse())
for element in self.__queryelements:
if isinstance(element, Set) or isinstance(element, Merge):
query_string.write(str(element))
result_string = query_string.getvalue()
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
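# Illustrative usage sketch (not part of the original module; "Person" is a
# hypothetical BaseVertex subclass, the rid is a placeholder, and Class/Set are
# defined elsewhere in this module):
#
#   Move('#9:1', Class(Person), Set({'name': 'Bob'})).parse()
#   # builds roughly: "move vertex #9:1 to class: Person set name = 'Bob'"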
class Delete(QueryType):
"""
Class to provide deletion of vertices and edges
DELETE EDGE <rid>|FROM <rid>|TO <rid>|[<class>] [WHERE <conditions>]> [LIMIT <MaxRecords>]
DELETE VERTEX <rid>|[<class>] [WHERE <conditions>]> [LIMIT <MaxRecords>]
"""
def __init__(self, entity, *elements:QueryElement):
self.__query_rule_index = ["Where", "Limit"]
self.__elements = elements
self.__entity = entity
self.__entity.operationtype = OSQLOperationType.DELETE
self.__fromrid = None
self.__torid = None
self.__rid = None
def fromRID(self, obj):
if isinstance(obj, str):
self.__fromrid = obj
elif isinstance(obj, BaseEntity):
self.__fromrid = obj.getRID()
return self
def toRID(self, obj):
if isinstance(obj, str):
self.__torid = obj
elif isinstance(obj, BaseEntity):
self.__torid = obj.getRID()
return self
def byRID(self, rid:str=None):
if rid:
self.__rid = rid
elif isinstance(self.__entity, BaseEntity):
self.__rid = self.__entity.getobject().getRID()
return self
def parse(self):
try:
# inner method for building the query string based on the given information
# it's the same action for edges as well as vertices
def _decidemethod(name:str=None):
try:
query_string = io.StringIO()
if self.__rid:
return " {} ".format(self.__rid)
elif self.__fromrid or self.__torid:
if self.__fromrid:
query_string.write(" from ")
query_string.write(self.__fromrid)
if self.__torid:
query_string.write(" to ")
query_string.write(self.__torid)
query_string.write(" ")
result = query_string.getvalue()
return result
else:
return " {} ".format(name)
except Exception as err:
logging.error(err)
finally:
query_string.close()
# inner method to build the query string based on the given Where and Limit clause
def _parseelements():
try:
query_string = io.StringIO()
for element in self.__elements:
self.__query_dict[element.__class__.__name__] = str(element)
for key in self.__query_rule_index:
if key in self.__query_dict:
query_string.write(" ")
query_string.write(self.__query_dict[key])
result = query_string.getvalue()
return result
except Exception as err:
logging.error(err)
finally:
query_string.close()
query_string = io.StringIO()
self.__query_dict = dict()
if isclass(self.__entity):
if issubclass(self.__entity, GraphType):
if issubclass(self.__entity, Vertex):
# make sure from and to are None
if self.__fromrid or self.__torid:
logging.warning("it's not allowed to use from and/or to on vertices")
self.__fromrid = None
self.__torid = None
query_string.write("delete vertex ")
elif issubclass(self.__entity, Edge):
query_string.write("delete edge ")
query_string.write(_decidemethod())
elif isinstance(self.__entity, GraphType):
query_string.write(self.__entity.parse())
if not isinstance(self.__entity, Class):
if isinstance(self.__entity, Vertex):
# make sure from and to are None
if self.__fromrid or self.__torid:
logging.warning("it's not allowed to use from and/or to on vertices")
self.__fromrid = None
self.__torid = None
query_string.write(_decidemethod(self.__entity.getobject().__class__.__name__))
elif isinstance(self.__entity, BaseEntity):
query_string.write("delete ")
# if there is an object use it and get the RID
if isinstance(self.__entity, BaseVertex):
query_string.write(" vertex ")
elif isinstance(self.__entity, BaseEdge):
query_string.write(" edge ")
query_string.write(self.__entity.getRID())
query_string.write(" ")
query_string.write(_parseelements())
result_string = query_string.getvalue()
logging.debug("parsed sql string: '{}' from data '{}'".format(result_string, self.__query_dict))
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
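# Illustrative usage sketch (an illustration only; the record ids are
# placeholders, and Where/Limit elements may be passed as additional
# constructor arguments):
#
#   Delete(Vertex).byRID('#12:0').parse()
#   # builds roughly: "delete vertex #12:0"
#
#   Delete(Edge).fromRID('#11:2').toRID('#12:3').parse()
#   # builds roughly: "delete edge from #11:2 to #12:3"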
class Create(QueryType):
def __init__(self, type:GraphType):
self.type = type
self.type.operationtype = OSQLOperationType.CREATE
pass
def parse(self):
return self.type.parse()
class Drop(QueryType):
"""
DROP CLASS <class>
"""
def __init__(self, type:GraphType):
self.type = type
self.type.operationtype = OSQLOperationType.DROP
def parse(self):
return self.type.parse()
class Property(GraphType):
def __init__(self, persistent_class:BaseEntity, property_name:str, property_type:OBinaryType=None, linked_type:OBinaryType=None, linked_class:str=None):
super().__init__()
self.__persistent_class = persistent_class
self.__class_name = getattr(persistent_class, '__name__')
self.__property_name = property_name
self.__property_type = property_type
self.__linked_type = linked_type
self.__linked_class = linked_class
def parse(self):
try:
query_string = io.StringIO()
if self.operationtype == OSQLOperationType.CREATE:
query_string.write("create property ")
query_string.write(self.__class_name)
query_string.write(".")
query_string.write(self.__property_name)
query_string.write(" ")
query_string.write(self.__property_type.name)
if self.__linked_type and self.__linked_class:
raise OPyClientException("two concurrent parameters set")
if self.__linked_type:
query_string.write(" ")
query_string.write(self.__linked_type.name)
elif self.__linked_class:
query_string.write(" ")
query_string.write(self.__linked_class)
elif self.operationtype == OSQLOperationType.DROP:
query_string.write("drop property ")
query_string.write(self.__class_name)
query_string.write(".")
query_string.write(self.__property_name)
result_string = query_string.getvalue()
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
@classmethod
def withlinkedclass(cls, persistent_class:BaseEntity, property_name:str, property_type:OBinaryType, linked_class:str=None):
return cls(persistent_class, property_name, property_type, None, linked_class)
@classmethod
def withlinkedtype(cls, persistent_class:BaseEntity, property_name:str, property_type:OBinaryType, linked_type:OBinaryType):
return cls(persistent_class, property_name, property_type, linked_type, None)
class Cluster(GraphType):
def __init__(self, name:str):
super().__init__()
self.name = name
def parse(self):
return self.name
class Class(GraphType):
def __init__(self, persistent_class:BaseEntity, class_type:OPlainClass=None):
super().__init__()
self.__persistent_class = persistent_class
self.__class_name = getattr(persistent_class, '__name__')
if(class_type):
self.__class_type = class_type.value
# save module to database to identify on later following selection process
self.__class_module = persistent_class.__module__
@classmethod
def field(cls, persistent_class:BaseEntity, fieldname:str):
obj = cls(persistent_class)
return "{}.{}".format(obj.__class_name, fieldname)
def classname(self):
return self.__class_name
def parse(self):
try:
query_string = io.StringIO()
if self.operationtype == OSQLOperationType.CREATE:
query_string.write("create class ") # start off with writing the common part of the command
query_string.write(escapeclassname(self.__class_name)) # append class name
query_string.write(" extends ")
query_string.write(self.__class_type)
elif self.operationtype == OSQLOperationType.DROP:
query_string.write("drop class ")
query_string.write(self.__class_name)
elif self.operationtype == OSQLOperationType.DELETE:
query_string.write("delete ")
if issubclass(self.__persistent_class, BaseEdge):
query_string.write(" edge ")
elif issubclass(self.__persistent_class, BaseVertex):
query_string.write(" vertex ")
query_string.write(self.__class_name)
elif self.operationtype == OSQLOperationType.MOVE:
query_string.write(self.__class_name)
result_query = query_string.getvalue()
return result_query
except Exception as err:
logging.error(err)
finally:
query_string.close()
class Edges(GraphType):
def __init__(self, object):
super().__init__()
self.__object = object
def getobject(self):
return self.__object
class Edge(GraphType):
"""
CREATE EDGE <class> [CLUSTER <cluster>] FROM <rid>|(<query>)|[<rid>]* TO <rid>|(<query>)|[<rid>]*
[SET <field> = <expression>[,]*]|CONTENT {<JSON>}
[RETRY <retry> [WAIT <pauseBetweenRetriesInMs]]
TODO: implement properties, specific cluster, select
"""
def __init__(self, object:BaseEdge=None):
super().__init__()
self.__object = object
self.__class_name = self.__object.__class__.__name__
def getobject(self):
return self.__object
def parse(self):
result_query = None
try:
if isinstance(self.__object, BaseEdge):
query_string = io.StringIO()
if self.operationtype == OSQLOperationType.CREATE:
query_string.write("create edge ") # start off with writing the common part of the command
query_string.write(escapeclassname(self.__class_name)) # append class name
query_string.write(" from ")
query_string.write(self.__object.in_vertex.getrid())
query_string.write(" to ")
query_string.write(self.__object.out_vertex.getrid())
elif self.operationtype == OSQLOperationType.DELETE:
query_string.write("delete edge ") # start off with writing the common part of the command
result_query = query_string.getvalue()
logging.debug("result query: {}".format(result_query))
else:
raise WrongTypeException("object must implement BaseEdge class")
except Exception as err:
logging.error(err)
finally:
query_string.close()
return result_query
class Vertices(GraphType):
def __init__(self, object):
super().__init__()
self.__object = object
def getobject(self):
return self.__object
class Vertex(GraphType):
"""
Creates a new vertex by using the given class
"""
def __init__(self, object:BaseVertex=None):
super().__init__()
self.__object = object
self.__class_name = object.__class__.__name__
pass
def getobject(self):
return self.__object
def parse(self):
result_query = None
try:
if isinstance(self.__object, BaseVertex):
query_string = io.StringIO()
if self.operationtype == OSQLOperationType.CREATE:
query_string.write("create vertex ") # start off with writing the common part of the command
query_string.write(escapeclassname(self.__class_name)) # append class name
# read "marked" attributes
data_to_store = self.__object.persistentattributes()
count = len(data_to_store)
if count > 0:
query_string.write(" set ")
for i in range(count):
attr_name = data_to_store[i]
query_string.write(attr_name)
query_string.write(" = \"")
query_string.write(str(self.__object.__getattribute__(attr_name)))
query_string.write("\"")
if i < count-1:
query_string.write(" , ")
elif self.operationtype == OSQLOperationType.DELETE:
query_string.write("delete vertex ") # start off with writing the common part of the command
result_query = query_string.getvalue()
logging.debug("result query: {}".format(result_query))
else:
raise WrongTypeException("object must implement BaseVertex class")
except Exception as err:
logging.error(err)
finally:
query_string.close()
return result_query
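# Illustrative usage sketch (an illustration only; "person" is assumed to be an
# instance of a BaseVertex subclass named Person whose persistentattributes()
# returns ['name']):
#
#   Create(Vertex(person)).parse()
#   # builds roughly: 'create vertex Person set name = "Alice"'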
class Insert(QueryType):
"""
Factory to build an insert statement out of the given data and class types
INSERT INTO [class:]<class>|cluster:<cluster>|index:<index>
[(<field>[,]*) VALUES (<expression>[,]*)[,]*]|
[SET <field> = <expression>|<sub-command>[,]*]|
[CONTENT {<JSON>}]|
[FROM <query>]
"""
def __init__(self, persistent_object:BaseVertex):
self.__clazz_name = getattr(persistent_object.__class__,'__name__')
self.__object = persistent_object
def parse(self):
try:
query_string = io.StringIO()
query_string.write("insert into ")
query_string.write(escapeclassname(self.__clazz_name))
query_string.write(" ")
persistent_attributes = self.__object.persistentattributes()
if len(persistent_attributes) > 0:
query_string.write("( ")
for attribute in persistent_attributes:
query_string.write(attribute)
if persistent_attributes.index(attribute) != len(persistent_attributes)-1:
query_string.write(", ")
query_string.write(" )")
query_string.write(" values ")
query_string.write("(")
if len(persistent_attributes) > 0:
for attribute in persistent_attributes:
query_string.write("'")
query_string.write(str(self.__object.__getattribute__(attribute)))
query_string.write("'")
if persistent_attributes.index(attribute) != len(persistent_attributes)-1:
query_string.write(",")
query_string.write(")")
result_query = query_string.getvalue()
return result_query
except Exception as err:
logging.error(err)
finally:
query_string.close()
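# Illustrative usage sketch (an illustration only; "person" is assumed to be an
# instance of a BaseVertex subclass named Person with persistent attributes
# 'name' and 'age'):
#
#   Insert(person).parse()
#   # builds roughly: "insert into Person ( name, age ) values ('Alice','42')"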
class Truncate(QueryType):
"""
"""
def __init__(self, target):
super().__init__()
self.__target = target
def parse(self):
try:
query_string = io.StringIO()
query_string.write("truncate class ")
if isinstance(self.__target, Class):
query_string.write(self.__target.classname())
result_string = query_string.getvalue()
logging.debug("parsed sql string: '{}'".format(result_string))
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
def getclass(self):
return self.__target
class Traverse(QueryType):
"""
TRAVERSE <[class.]field>|*|any()|all()
[FROM <target>]
[LET <Assignment>*]
WHILE <condition>
[LIMIT <max-records>]
[STRATEGY <strategy>]
"""
def __init__(self, target, props:list=None, *elements:QueryElement):
super().__init__()
self.__target = target
self.__props = props
self.__elements = elements
self.__query_rule_index = ["Let", "While", "Limit", "Strategy"]
self.__query_dict = dict()
self.fetchplan = ""
def parse(self):
try:
query_string = io.StringIO()
query_string.write("traverse ")
if isinstance(self.__props, str):
# this is the case for *, any(), all() or one class with field
query_string.write(self.__props)
elif isinstance(self.__props, list):
for i, field in enumerate(self.__props):
query_string.write(field)
if i < len(self.__props)-1:
query_string.write(", ")
query_string.write(" ")
query_string.write(" from ")
# a class, a cluster, list of clusters, a rid, list of rids, traverse or select
if isinstance(self.__target, Class):
query_string.write(self.__target.classname())
elif isinstance(self.__target, Traverse) or isinstance(self.__target, Select):
query_string.write(" ( ")
query_string.write(self.__target.parse())
query_string.write(" ) ")
elif isinstance(self.__target, Cluster):
query_string.write(self.__target.parse())
elif isinstance(self.__target, Prefixed):
query_string.write(getattr(self.__target.clazz,'__name__'))
query_string.write(self.__target.prefix)
elif isinstance(self.__target, list):
for i, target in enumerate(self.__target):
if isinstance(target, str):
query_string.write(target)
elif isinstance(target, Cluster):
query_string.write(target.parse())
if i < len(self.__props)-1:
query_string.write(", ")
elif isinstance(self.__target, str):
query_string.write(self.__target)
else:
logging.error("can't use traverse on type '{}'".format(type(self.__target)))
if self.__elements:
query_string.write(" ")
for element in self.__elements:
self.__query_dict[element.__class__.__name__] = str(element)
for key in self.__query_rule_index:
if key in self.__query_dict:
query_string.write(" ")
query_string.write(self.__query_dict[key])
result_string = query_string.getvalue()
logging.debug("parsed sql string: '{}' from data '{}'".format(result_string, self.__query_dict))
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
def getclass(self):
return self.__target
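# Illustrative usage sketch (an illustration only; "Person" is a hypothetical
# BaseVertex subclass; While and Condition are defined further below in this
# module):
#
#   Traverse(Class(Person), 'out()', While(Condition('$depth').islt(3))).parse()
#   # builds roughly: "traverse out() from Person while $depth < 3"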
class Select(QueryType):
"""
Select statement which can be used to easily create a complete query
SELECT [<Projections>] [FROM <Target> [LET <Assignment>*]]
[WHERE <Condition>*]
[GROUP BY <Field>*]
[ORDER BY <Fields>* [ASC|DESC] *]
[SKIP <SkipRecords>]
[LIMIT <MaxRecords>]
[FETCHPLAN <FetchPlan>]
[TIMEOUT <Timeout> [<STRATEGY>]
[LOCK default|record]
[PARALLEL]
"""
def __init__(self, obj, props:list=None, *elements:QueryElement):
self.__elements = elements
self.__query_dict = dict()
self.__prefix = None
self.fetchplan = ""
if obj:
if isinstance(obj, Prefixed):
self.__clazz = obj.clazz
self.__prefix = obj.prefix
else:
self.__clazz = obj
else:
self.__clazz = None
self.__clazz_name = retrieveclassname(self.__clazz)
self.__query_rule_index = ["Let", "Where", "GroupBy", "OrderBy", "Skip", "Limit", "Fetchplan", "Timeout", "Lock", "Parallel"]
self.__props = props
@classmethod
def withfetchplan(cls, obj, fetchplan:str, props:list=None, *elements:QueryElement):
return_obj = cls(obj, props, *elements)
return_obj.fetchplan = fetchplan
return return_obj
def parse(self):
try:
query_string = io.StringIO()
query_string.write("select ")
if len(self.__props) > 0:
for i, projection in enumerate(self.__props):
query_string.write(projection)
if i < len(self.__props)-1:
query_string.write(", ")
query_string.write(" ")
if self.__clazz:
query_string.write("from ")
query_string.write(escapeclassname(self.__clazz_name))
if self.__prefix:
query_string.write(" ")
query_string.write(self.__prefix)
for element in self.__elements:
self.__query_dict[element.__class__.__name__] = str(element)
for key in self.__query_rule_index:
if key in self.__query_dict:
query_string.write(" ")
query_string.write(self.__query_dict[key])
result_string = query_string.getvalue()
logging.debug("parsed sql string: '{}' from data '{}'".format(result_string, self.__query_dict))
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
def getclass(self):
return self.__clazz
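# Illustrative usage sketch (an illustration only; "Person" is a hypothetical
# BaseVertex subclass; Where, Condition and Limit are defined further below in
# this module):
#
#   Select(Person, ['name', 'age'], Where(Condition('age').isgt(21)), Limit(10)).parse()
#   # builds roughly: "select name, age from Person where age > 21 limit 10"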
class Let(QueryElement):
def __init__(self, name:str, assignment:str):
self.__name = name
self.__assignment = assignment
@classmethod
def byselect(cls, name:str, element:QueryElement):
try:
if not isinstance(element, Select):
raise SQLCommandException("currently only select is supported in let statements")
query_string = io.StringIO()
query_string.write("( ")
query_string.write(str(element))
query_string.write(" ) ")
result_query = query_string.getvalue()
except Exception as err:
logging.error(err)
finally:
query_string.close()
return cls(name, result_query)
@classmethod
def byfield(cls, name:str, assignment:str):
return cls(name, assignment)
def __str__(self):
try:
query_string = io.StringIO()
query_string.write(" let ")
query_string.write(self.__name)
query_string.write(" = ")
query_string.write(self.__assignment)
query_string.write(" ")
result_string = query_string.getvalue()
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
class Update(QueryType):
"""
Defines the update statement
UPDATE <class>|cluster:<cluster>|<recordID>
[SET|INCREMENT|ADD|REMOVE|PUT <field-name> = <field-value>[,]*]|[CONTENT|MERGE <JSON>]
[UPSERT]
[RETURN <returning> [<returning-expression>]]
[WHERE <conditions>]
[LOCK default|record]
[LIMIT <max-records>] [TIMEOUT <timeout>]
"""
def __init__(self, object, updateaction, *elements:QueryElement):
self.__query_rule_index = ["Upsert", "Return", "Where", "Lock", "Limit"]
self.__updateaction = updateaction
self.__elements = elements
self.__object = object
def parse(self):
try:
self.__query_dict = dict()
query_string = io.StringIO()
query_string.write("update ")
if isinstance(self.__object, str):
# its a rid
query_string.write(self.__object)
elif isinstance(self.__object, BaseEntity):
query_string.write(self.__object.getRID())
else:
# its a class
query_string.write(self.__object.__name__)
query_string.write(" ")
query_string.write(str(self.__updateaction))
for element in self.__elements:
self.__query_dict[element.__class__.__name__] = str(element)
for key in self.__query_rule_index:
if key in self.__query_dict:
query_string.write(" ")
query_string.write(self.__query_dict[key])
result_string = query_string.getvalue()
logging.debug("parsed sql string: '{}' from data '{}'".format(result_string, self.__query_dict))
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
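# Illustrative usage sketch (an illustration only; "Person" is a hypothetical
# BaseVertex subclass; Set, Where and Condition are defined elsewhere in this
# module):
#
#   Update(Person, Set({'name': 'Bob'}), Where(Condition('name').iseq('Alice'))).parse()
#   # builds roughly: "update Person set name = 'Bob' where name = 'Alice'"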
class Upsert(QueryElement):
def __init__(self):
super().__init__()
self._query = " upsert "
class Lock(QueryElement):
def __init__(self, type:str):
super().__init__()
self._query = " lock {} ".format(type)
@classmethod
def default(cls):
return cls("default")
@classmethod
def record(cls):
return cls("record")
class Skip(QueryElement):
def __init__(self, count:int):
super().__init__()
self._query = " skip {} ".format(count)
class Limit(QueryElement):
def __init__(self, count:int, timeout:int=None):
super().__init__()
if timeout:
self._query = " limit {} timeout {} ".format(count, timeout)
else:
self._query = " limit {} ".format(count)
@classmethod
def withTimeout(cls, count:int, timeout:int):
return cls(count, timeout)
class Return(QueryElement):
"""
RETURN: If a field has been specified, then only this field will be returned
otherwise the whole record will be returned
"""
def __init__(self, type:str, field:str=None):
super().__init__()
if field:
self._query = " return {} {}".format(type, field)
else:
self._query = " return {}".format(type)
@classmethod
def count(cls):
return cls("count")
@classmethod
def after(cls, field:str):
return cls("after", field)
@classmethod
def before(cls, field:str):
return cls("before", field)
class From(QueryElement):
def __init__(self, rid:str):
self.rid = rid
class To(QueryElement):
def __init__(self, rid:str):
self.rid = rid
class ActionElement(QueryElement):
def __init__(self, type:str, fields:dict):
self.__type = type
self.__fields = fields
def __str__(self):
try:
query_string = io.StringIO()
query_string.write(" ")
query_string.write(self.__type)
query_string.write(" ")
count = 1
for field_name in self.__fields:
query_string.write(field_name)
query_string.write(" = ")
if isinstance(self.__fields[field_name], str):
query_string.write("'")
query_string.write(str(self.__fields[field_name]))
query_string.write("'")
else:
query_string.write(str(self.__fields[field_name]))
if count < len(self.__fields):
query_string.write(", ")
count += 1
result_string = query_string.getvalue()
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
class Set(ActionElement):
def __init__(self, fields:dict):
super().__init__("set", fields)
class Increment(ActionElement):
def __init__(self, fields:dict):
super().__init__("increment", fields)
class Add(ActionElement):
def __init__(self, fields:dict):
super().__init__("add", fields)
class Remove(ActionElement):
def __init__(self, fields:dict):
super().__init__("remove", fields)
class Put(ActionElement):
def __init__(self, fields:dict):
super().__init__("put", fields)
class Content(ActionElement):
def __init__(self):
pass
class Merge(ActionElement):
def __init__(self):
pass
class Condition(WhereType):
def __init__(self, attribute_name:str):
super().__init__()
self.__attribute_name = attribute_name
def isle(self, value:object):
self._query = "{} <= {}".format(self.__attribute_name, self.__valuestring(value))
return self
def islt(self, value:object):
self._query = "{} < {}".format(self.__attribute_name, self.__valuestring(value))
return self
def isge(self, value:object):
self._query = "{} >= {}".format(self.__attribute_name, self.__valuestring(value))
return self
def isgt(self, value:object):
self._query = "{} > {}".format(self.__attribute_name, self.__valuestring(value))
return self
def iseq(self, value:object):
self._query = "{} = {}".format(self.__attribute_name, self.__valuestring(value))
return self
def isin(self, value:object):
pass
def __valuestring(self, value):
if isinstance(value, str):
return "'{}'".format(value)
elif isinstance(value, int):
return "{}".format(value)
class While(QueryElement):
def __init__(self, *objects):
super().__init__()
self.__elements = objects
def __str__(self):
try:
query_string = io.StringIO()
query_string.write(" while ")
for element in self.__elements:
if isinstance(element, Select):
query_string.write("(")
query_string.write(element.parse())
query_string.write(")")
else:
query_string.write(str(element))
query_string.write(" ")
result_string = query_string.getvalue()
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
class Where(QueryElement):
def __init__(self, *objects):
super().__init__()
self.__elements = objects
def __str__(self):
try:
query_string = io.StringIO()
query_string.write(" where ")
for element in self.__elements:
if isinstance(element, Select):
query_string.write("(")
query_string.write(element.parse())
query_string.write(")")
else:
query_string.write(str(element))
query_string.write(" ")
result_string = query_string.getvalue()
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
class GroupBy(QueryElement):
def __init__(self, grouping_field:str):
self.__grouping_field = grouping_field
def __str__(self):
try:
query_string = io.StringIO()
query_string.write(" group by ")
query_string.write(self.__grouping_field)
query_string.write(" ")
result_query = query_string.getvalue()
return result_query
except Exception as err:
logging.error(err)
finally:
query_string.close()
class OrderBy(QueryElement):
def __init__(self, attribute_name:str, order:str="asc"):
self.__attribute_name = attribute_name
self.__order = order
@classmethod
def asc(cls, attribute_name:str):
return cls(attribute_name)
@classmethod
def desc(cls, attribute_name:str):
return cls(attribute_name, "desc")
def __str__(self):
try:
query_string = io.StringIO()
query_string.write(" order by ")
query_string.write(self.__attribute_name)
query_string.write(" ")
query_string.write(self.__order)
query_string.write(" ")
result_string = query_string.getvalue()
return result_string
except Exception as err:
logging.error(err)
finally:
query_string.close()
class Or(WhereType):
def __init__(self, *types:WhereType):
super().__init__()
try:
query_string = io.StringIO()
query_string.write(" ( ")
for i,type in enumerate(types):
query_string.write(str(type))
query_string.write(" ")
if i < len(types)-1:
query_string.write(" or ")
query_string.write(" ) ")
self._query = query_string.getvalue()
except Exception as err:
logging.error(err)
finally:
query_string.close()
class And(WhereType):
def __init__(self, *types:WhereType):
super().__init__()
try:
query_string = io.StringIO()
query_string.write(" ( ")
for i,type in enumerate(types):
query_string.write(str(type))
query_string.write(" ")
if i < len(types)-1:
query_string.write(" and ")
query_string.write(" ) ")
self._query = query_string.getvalue()
except Exception as err:
logging.error(err)
finally:
query_string.close()
class Index(GraphType):
"""
CREATE INDEX <name> [ON <class-name> (prop-names)] <type> [<key-type>] METADATA [{<json-metadata>}]
If the property is of type LINKMAP or EMBEDDEDMAP you can add "by key" or "by value" to the property name
"""
def __init__(self, obj):
super().__init__()
self.__obj = obj
self.__metadata = None
self.__type = None
self.__clazz = None
self.__properties = None
def on(self, clazz:BaseEntity, properties:list=None, type:OSQLIndexType=None):
self.__clazz = clazz
self.__properties = properties
self.__type = type
return self
def withmeta(self, metadata:str):
self.__metadata = metadata
return self
def parse(self):
try:
query_string = io.StringIO()
if self.operationtype == OSQLOperationType.CREATE:
query_string.write("create index ")
if isinstance(self.__obj, str):
if self.__properties:
query_string.write(self.__obj)
query_string.write(" on ")
query_string.write(self.__clazz.__name__)
query_string.write(" (")
for i,prop in enumerate(self.__properties):
query_string.write(prop)
if i < len(self.__properties)-1:
query_string.write(", ")
query_string.write(") ")
else:
query_string.write(self.__clazz.__name__)
query_string.write(".")
query_string.write(self.__obj)
if self.__type:
query_string.write(" ")
query_string.write(self.__type.value)
if self.__metadata:
query_string.write(" metadata ")
query_string.write(self.__metadata)
result_query = query_string.getvalue()
return result_query
except Exception as err:
logging.error(err)
finally:
query_string.close()
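# Illustrative usage sketch (an illustration only; "Person" is a hypothetical
# BaseVertex subclass and "some_index_type" stands for a member of
# OSQLIndexType, whose member names are not shown in this module):
#
#   Create(Index('Person_name_idx').on(Person, ['name'], some_index_type)).parse()
#   # builds roughly: "create index Person_name_idx on Person (name) <type.value>"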
|
SaptakS/pune.pycon.org
|
refs/heads/master
|
symposion/schedule/migrations/0002_auto_20150723_0856.py
|
4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('symposion_schedule', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='presentation',
name='assets_url',
field=models.URLField(default=b'', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='presentation',
name='slides_url',
field=models.URLField(default=b'', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='presentation',
name='video_url',
field=models.URLField(default=b'', blank=True),
preserve_default=True,
),
]
|
rhndg/openedx
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_conditional.py
|
21
|
import json
import unittest
from fs.memoryfs import MemoryFS
from mock import Mock, patch
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.error_module import NonStaffErrorDescriptor
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore, CourseLocationManager
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.tests import DATA_DIR, get_test_system, get_test_descriptor_system
from xmodule.x_module import STUDENT_VIEW
ORG = 'test_org'
COURSE = 'conditional' # name of directory with course data
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", source_dirs=[], load_error_modules=load_error_modules)
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=SlashSeparatedCourseKey(ORG, COURSE, 'test_run'),
course_dir='test_dir',
error_tracker=Mock(),
load_error_modules=load_error_modules,
)
def render_template(self, template, context):
raise Exception("Shouldn't be called")
class ConditionalFactory(object):
"""
A helper class to create a conditional module and associated source and child modules
to allow for testing.
"""
@staticmethod
def create(system, source_is_error_module=False):
"""
return a dict of modules: the conditional with a single source and a single child.
Keys are 'cond_module', 'source_module', and 'child_module'.
if the source_is_error_module flag is set, create a real ErrorModule for the source.
"""
descriptor_system = get_test_descriptor_system()
# construct source descriptor and module:
source_location = Location("edX", "conditional_test", "test_run", "problem", "SampleProblem", None)
if source_is_error_module:
# Make an error descriptor and module
source_descriptor = NonStaffErrorDescriptor.from_xml(
'some random xml data',
system,
id_generator=CourseLocationManager(source_location.course_key),
error_msg='random error message'
)
else:
source_descriptor = Mock(name='source_descriptor')
source_descriptor.location = source_location
source_descriptor.runtime = descriptor_system
source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)
# construct other descriptors:
child_descriptor = Mock(name='child_descriptor')
child_descriptor._xmodule.student_view.return_value.content = u'<p>This is a secret</p>'
child_descriptor.student_view = child_descriptor._xmodule.student_view
child_descriptor.displayable_items.return_value = [child_descriptor]
child_descriptor.runtime = descriptor_system
child_descriptor.xmodule_runtime = get_test_system()
child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
child_descriptor.location = source_location.replace(category='html', name='child')
descriptor_system.load_item = {
child_descriptor.location: child_descriptor,
source_location: source_descriptor
}.get
system.descriptor_runtime = descriptor_system
# construct conditional module:
cond_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'attempted': 'true'},
'children': [child_descriptor.location],
})
cond_descriptor = ConditionalDescriptor(
descriptor_system,
field_data,
ScopeIds(None, None, cond_location, cond_location)
)
cond_descriptor.xmodule_runtime = system
system.get_module = lambda desc: desc
cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])
# return dict:
return {'cond_module': cond_descriptor,
'source_module': source_descriptor,
'child_module': child_descriptor}
class ConditionalModuleBasicTest(unittest.TestCase):
"""
Make sure that conditional module works, using mocks for
other modules.
"""
def setUp(self):
super(ConditionalModuleBasicTest, self).setUp()
self.test_system = get_test_system()
def test_icon_class(self):
'''verify that get_icon_class works independent of condition satisfaction'''
modules = ConditionalFactory.create(self.test_system)
for attempted in ["false", "true"]:
for icon_class in ['other', 'problem', 'video']:
modules['source_module'].is_attempted = attempted
modules['child_module'].get_icon_class = lambda: icon_class
self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)
def test_get_html(self):
modules = ConditionalFactory.create(self.test_system)
# because get_test_system returns the repr of the context dict passed to render_template,
# we reverse it here
html = modules['cond_module'].render(STUDENT_VIEW).content
expected = modules['cond_module'].xmodule_runtime.render_template('conditional_ajax.html', {
'ajax_url': modules['cond_module'].xmodule_runtime.ajax_url,
'element_id': u'i4x-edX-conditional_test-conditional-SampleConditional',
'depends': u'i4x-edX-conditional_test-problem-SampleProblem',
})
self.assertEquals(expected, html)
def test_handle_ajax(self):
modules = ConditionalFactory.create(self.test_system)
modules['source_module'].is_attempted = "false"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# now change state of the capa problem to make it completed
modules['source_module'].is_attempted = "true"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_error_as_source(self):
'''
Check that handle_ajax works properly if the source is really an ErrorModule,
and that the condition is not satisfied.
'''
modules = ConditionalFactory.create(self.test_system, source_is_error_module=True)
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
class ConditionalModuleXmlTest(unittest.TestCase):
"""
Make sure ConditionalModule works, by loading data in from an XML-defined course.
"""
@staticmethod
def get_system(load_error_modules=True):
'''Get a dummy system'''
return DummySystem(load_error_modules)
def setUp(self):
super(ConditionalModuleXmlTest, self).setUp()
self.test_system = get_test_system()
def get_course(self, name):
"""Get a test course by directory name. If there's more than one, error."""
print "Importing {0}".format(name)
modulestore = XMLModuleStore(DATA_DIR, source_dirs=[name])
courses = modulestore.get_courses()
self.modulestore = modulestore
self.assertEquals(len(courses), 1)
return courses[0]
def test_conditional_module(self):
"""Make sure that conditional module works"""
print "Starting import"
course = self.get_course('conditional_and_poll')
print "Course: ", course
print "id: ", course.id
def inner_get_module(descriptor):
if isinstance(descriptor, Location):
location = descriptor
descriptor = self.modulestore.get_item(location, depth=None)
descriptor.xmodule_runtime = get_test_system()
descriptor.xmodule_runtime.descriptor_runtime = descriptor._runtime # pylint: disable=protected-access
descriptor.xmodule_runtime.get_module = inner_get_module
return descriptor
# edx - HarvardX
# cond_test - ER22x
location = Location("HarvardX", "ER22x", "2013_Spring", "conditional", "condone")
def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None):
return text
self.test_system.replace_urls = replace_urls
self.test_system.get_module = inner_get_module
module = inner_get_module(location)
print "module: ", module
print "module children: ", module.get_children()
print "module display items (children): ", module.get_display_items()
html = module.render(STUDENT_VIEW).content
print "html type: ", type(html)
print "html: ", html
html_expect = module.xmodule_runtime.render_template(
'conditional_ajax.html',
{
# Test ajax url is just usage-id / handler_name
'ajax_url': '{}/xmodule_handler'.format(location.to_deprecated_string()),
'element_id': u'i4x-HarvardX-ER22x-conditional-condone',
'depends': u'i4x-HarvardX-ER22x-problem-choiceprob'
}
)
self.assertEqual(html, html_expect)
gdi = module.get_display_items()
print "gdi=", gdi
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# Now change state of the capa problem to make it completed
inner_module = inner_get_module(location.replace(category="problem", name='choiceprob'))
inner_module.attempts = 1
# Save our modifications to the underlying KeyValueStore so they can be persisted
inner_module.save()
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_conditional_module_with_empty_sources_list(self):
"""
If a ConditionalDescriptor is initialized with an empty sources_list, we assert that the sources_list is set
via generating UsageKeys from the values in xml_attributes['sources']
"""
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.sources_list[0],
conditional.location.course_key.make_usage_key_from_deprecated_string(conditional.xml_attributes['sources'])
)
def test_conditional_module_parse_sources(self):
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll;i4x://HarvardX/ER22x/poll_question/T16_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.parse_sources(conditional.xml_attributes),
['i4x://HarvardX/ER22x/poll_question/T15_poll', 'i4x://HarvardX/ER22x/poll_question/T16_poll']
)
|
aleaf/pest_tools
|
refs/heads/master
|
par_sen.py
|
2
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class ParSen:
def __init__(self, jco_df, obs_dict, pars_dict, drop_regul = False, drop_groups = None, keep_groups = None):
''' Create ParSen class
Parameters
----------
jco_df : Pandas dataframe
Pandas data frame of the Jacobian returned from pest_tools.load_jco
obs_dict: dict
Dictionary of the observations returned from pest_tools.load_obs
pars_dict: dict
Dictionary of the parameters returned from pest_tools.load_pars
drop_regul: {False, True}, optional
Flag to drop regularization information in calculating parameter
sensitivity. Will set weight to zero for all observations with
'regul' in the observation group name
drop_groups: list, optional
List of observation groups to drop when calculating parameter
sensitivity. If all groups are part of regularization it may
be easier to use the drop_regul flag
keep_groups: list, optional
List of observation groups to include in calculating parameter
sensitivity. Sometimes easier to use when looking at sensitivity
to a single, or a small number of, observation groups
Attributes
----------
df : Pandas DataFrame
DataFrame of parameter sensitivity. Index entries of the DataFrame
are the parameter names. The DataFrame has two columns:
1) Parameter Group and 2) Sensitivity
Notes
------
For drop_regul = True the regularization information could alternatively be
removed from jco_df, but no easy way to do so has been found, particularly
with a large jco
'''
# Build weights array
weights = []
ob_groups = []
for ob in jco_df.index:
weight = float(obs_dict[ob][1])
ob_group = obs_dict[ob][2]
# Set weights for regularization info to zero if drop_regul == True
if drop_regul == True and 'regul' in ob_group.lower():
weight = 0.0
# Set weights for obs in drop_groups to zero
if drop_groups != None:
# set all groups in drop_groups to lower case
drop_groups = [item.lower() for item in drop_groups]
if ob_group.lower() in drop_groups:
weight = 0.0
# Set weights for obs not in keep_group to zero
if keep_groups != None:
# set all groups in keep_groups to lower case
keep_groups = [item.lower() for item in keep_groups]
if ob_group.lower() not in keep_groups:
weight = 0.0
weights.append(weight)
ob_groups.append(ob_group)
# Get count of non-zero weights
n_nonzero_weights = np.count_nonzero(weights)
# Calculate sensitivities
sensitivities = []
for col in jco_df:
sen = np.linalg.norm(np.asarray(jco_df[col])*weights)/n_nonzero_weights
sensitivities.append(sen)
# Build Group Array
par_groups = []
for par in jco_df.columns:
par_group = pars_dict[par][5]
par_groups.append(par_group)
# Build pandas data frame of parameter sensitivities
sen_data = {'Sensitivity' : sensitivities, 'Parameter Group' : par_groups}
par_sen_df = pd.DataFrame(sen_data, index = jco_df.columns)
self.df = par_sen_df
def tail(self, n_tail):
''' Get the least sensitive parameters
Parameters
----------
n_tail: int
Number of parameters to get
Returns
---------
pandas Series
Series of n_tail least sensitive parameters
'''
return self.df.sort(columns = 'Sensitivity', ascending = False).tail(n=n_tail)['Sensitivity']
def head(self, n_head):
''' Get the most sensitive parameters
Parameters
----------
n_head: int
Number of parameters to get
Returns
-------
pandas Series
Series of n_head most sensitive parameters
'''
return self.df.sort(columns = 'Sensitivity', ascending = False).head(n=n_head)['Sensitivity']
def par(self, parameter):
'''Return the sensitivity of a single parameter
Parameters
----------
parameter: string
Returns
---------
float
sensitivity of parameter
'''
return self.df.xs(parameter)['Sensitivity']
def group(self, group, n = None):
'''Return the sensitivities of a parameter group
Parameters
----------
group: string
n: {None, int}, optional
If None then return all parameters from group, else n is the number
of parameters to return.
If n is less than 0 then return the least sensitive parameters
If n is greater than 0 then return the most sensitive parameters
Returns
-------
Pandas DataFrame
'''
group = group.lower()
if n == None:
n_head = len(self.df.index)
else:
n_head = n
if n_head > 0:
#pars = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group, 'Sensitivity'].head(n=n_head).index
#sensitivity = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group, 'Sensitivity'].head(n=n_head)
sensitivity = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group].head(n=n_head)
if n_head < 0:
n_head = abs(n_head)
#pars = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group, 'Sensitivity'].tail(n=n_head).index
#sensitivity = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group, 'Sensitivity'].tail(n=n_head)
sensitivity = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group].tail(n=n_head)
sensitivity.index.name = 'Parameter'
return sensitivity
def sum_group (self):
''' Return sum of all parameters sensitivity by group
Returns
-------
Pandas DataFrame
'''
sen_grouped = self.df.groupby(['Parameter Group']).aggregate(np.sum).sort(columns = 'Sensitivity', ascending = False)
return sen_grouped
def plot(self, n = None, group = None):
''' Generate plot of parameter sensitivity
Parameters
----------
n: {None, int}, optional
If None then plot all parameters, else n is the number to plot.
If n is less than 0 then plot least sensitive parameters
If n is greater than 0 then plot most sensitive parameters
group: {None, str}, optional
Parameter group to plot
If None plot all parameter groups
Returns
-------
Matplotlib plot
Bar plot of parameter sensitivity
'''
plt.figure() ### Make New figure
if group == None:
if n == None:
n_head = len(self.df.index)
else:
n_head = n
if n_head > 0:
pars = self.df.sort(columns = 'Sensitivity', ascending = False).head(n=n_head)['Sensitivity'].index
sensitivity = self.df.sort(columns = 'Sensitivity', ascending = False).head(n=n_head)['Sensitivity'].values
par_groups = self.df.sort(columns = 'Sensitivity', ascending = False).head(n=n_head)['Parameter Group'].values
if n_head < 0:
n_head = abs(n_head)
pars = self.df.sort(columns = 'Sensitivity', ascending = False).tail(n=n_head)['Sensitivity'].index
sensitivity = self.df.sort(columns = 'Sensitivity', ascending = False).tail(n=n_head)['Sensitivity'].values
par_groups = self.df.sort(columns = 'Sensitivity', ascending = False).tail(n=n_head)['Parameter Group'].values
# Assign colors for each group
color_map = plt.get_cmap('Spectral')
color_dict = dict()
unique_par_groups = np.asarray(self.df.drop_duplicates(cols = 'Parameter Group')['Parameter Group'])
for i in range(len(unique_par_groups)):
color = color_map(1.*i/len(unique_par_groups))
color_dict[unique_par_groups[i]] = color
colors = []
for par_group in par_groups:
colors.append(color_dict[par_group])
plt.barh(np.arange(len(pars)), sensitivity, color = colors, align = 'center')
plt.yticks(np.arange(len(pars)), pars)
plt.ylim(-1, len(pars))
plt.xlabel('Parameter Sensitivity')
plt.ylabel('Parameter')
plt.grid(True, axis = 'x')
plt.tight_layout()
if group != None:
group = group.lower()
if n == None:
n_head = len(self.df.index)
else:
n_head = n
if n_head > 0:
pars = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group, 'Sensitivity'].head(n=n_head).index
sensitivity = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group, 'Sensitivity'].head(n=n_head).values
if n_head < 0:
n_head = abs(n_head)
pars = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group, 'Sensitivity'].tail(n=n_head).index
sensitivity = self.df.sort(columns = 'Sensitivity', ascending = False).ix[self.df['Parameter Group'] == group, 'Sensitivity'].tail(n=n_head).values
plt.barh(np.arange(len(pars)), sensitivity, align = 'center')
plt.yticks(np.arange(len(pars)), pars)
plt.ylim(-1, len(pars))
plt.xlabel('Parameter Sensitivity')
plt.ylabel('Parameter')
plt.tight_layout()
def plot_sum_group (self):
''' Plot sum of all parameters sensitivity by group
Returns
-------
Matplotlib plot
Bar plot of sum of sensitivity by parameter group
'''
plt.figure() ## Make New Figure
sen_grouped = self.df.groupby(['Parameter Group']).aggregate(np.sum).sort(columns = 'Sensitivity', ascending = False)
pars = sen_grouped.index
sensitivity = sen_grouped.values
plt.barh(np.arange(len(pars)), sensitivity, align = 'center')
plt.yticks(np.arange(len(pars)), pars)
plt.ylim(-1, len(pars))
plt.xlabel('Sum of Parameter Sensitivity')
plt.ylabel('Parameter Group')
plt.grid(True, axis = 'x')
plt.tight_layout()
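# Illustrative usage sketch (the loader calls mirror those referenced in the
# class docstring, pest_tools.load_jco / load_obs / load_pars; their exact
# signatures are an assumption):
#
#   jco_df = pest_tools.load_jco('example.jco')
#   obs_dict = pest_tools.load_obs('example.pst')
#   pars_dict = pest_tools.load_pars('example.pst')
#   par_sen = ParSen(jco_df, obs_dict, pars_dict, drop_regul=True)
#   par_sen.head(10)     # ten most sensitive parameters
#   par_sen.plot(n=20)   # bar plot of the 20 most sensitive parameters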
|
truekonrads/tweepy
|
refs/heads/master
|
tweepy/__init__.py
|
32
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.4.0'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError, RateLimitError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
from six.moves.http_client import HTTPConnection
HTTPConnection.debuglevel = level
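# Illustrative usage sketch (standard OAuth flow; the credential variables are
# placeholders):
#
#   auth = OAuthHandler(consumer_key, consumer_secret)
#   auth.set_access_token(access_token, access_token_secret)
#   user_api = API(auth)
#   for status in Cursor(user_api.user_timeline).items(10):
#       print(status.text)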
|
sethc23/iTerm2
|
refs/heads/master
|
tools/ply/ply-3.4/test/yacc_notfunc.py
|
174
|
# -----------------------------------------------------------------------------
# yacc_notfunc.py
#
# p_rule not defined as a function
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
p_statement_assign = "Blah"
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
|
routeflow/AutomaticConfigurationRouteFlow
|
refs/heads/master
|
POX_CONTROLLER/debug-pox.py
|
47
|
#!/bin/sh -
# Copyright 2011-2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If you have PyPy 1.6+ in a directory called pypy alongside pox.py, we
# use it.
# Otherwise, we try to use a Python interpreter called python2.7, which
# is a good idea if you're using Python from MacPorts, for example.
# We fall back to just "python" and hope that works.
''''true
#export OPT="-u -O"
export OPT="-u"
export FLG=""
if [ "$(basename $0)" = "debug-pox.py" ]; then
export OPT=""
export FLG="--debug"
fi
if [ -x pypy/bin/pypy ]; then
exec pypy/bin/pypy $OPT "$0" $FLG "$@"
fi
if type python2.7 > /dev/null 2> /dev/null; then
exec python2.7 $OPT "$0" $FLG "$@"
fi
exec python $OPT "$0" $FLG "$@"
'''
from pox.boot import boot
if __name__ == '__main__':
boot()
|
zstewar1/autotank
|
refs/heads/master
|
web/ioControl/xmlrpc.py
|
2
|
from django.http import HttpResponse
from django.utils import simplejson
from django.template import RequestContext, loader
from ioControl.models import Motor
import xmlrpclib
s = xmlrpclib.ServerProxy('http://192.168.0.108:8000', allow_none=True)
def changeTurretDirection(*args):
s.changeTurretDirection(*args)
def setTurretDirection(*args):
s.setTurretDirection(*args)
def setTurretSpeed(*args):
s.setTurretSpeed(*args)
def setTreadSpeedDir(*args):
s.setTreadSpeedDir(*args)
|
beiko-lab/gengis
|
refs/heads/master
|
bin/Lib/site-packages/wx-2.8-msw-unicode/wxPython/lib/ClickableHtmlWindow.py
|
6
|
## This file imports items from the wx package into the wxPython package for
## backwards compatibility. Some names will also have a 'wx' added on if
## that is how they used to be named in the old wxPython package.
import wx.lib.ClickableHtmlWindow
__doc__ = wx.lib.ClickableHtmlWindow.__doc__
wxPyClickableHtmlWindow = wx.lib.ClickableHtmlWindow.PyClickableHtmlWindow
|
lachtanek/pb138-address-visualisation
|
refs/heads/master
|
address_visualisation/visualisers/square_count.py
|
1
|
#!/usr/bin/python3
"""
Module with a visualiser of the number of squares per area in the database.
"""
from address_visualisation import Visualiser
from address_visualisation.transform_to_feature_collection import feature_collection_from_areas
class SquareCountVisualiser(Visualiser):
"""
Visualiser which finds the towns with the most squares in the database XML and turns information about them into GeoJSON format.
...
Methods
-------
run()
Finds the areas with the most squares and converts information about them to a geojson FeatureCollection.
"""
def run(self):
"""
Finds the areas with the most squares and converts information about them to a geojson FeatureCollection.
For each region, it searches the XML tree for streets with 'nám' or 'Nám' in the name and counts them.
The counts and information about the areas are saved into 'okresy' and converted into a geojson FeatureCollection.
Returns
-------
geojson.FeatureCollection
FeatureCollection containing Polygons of the towns with the most squares.
"""
root = self.db_tree.getroot()
okresy = {okres.get("kod"): (0, okres.get("kod"), okres.find("Nazev").text) for okres in root.iter('Okres')}
obce_okresy = {obec.get("kod"): obec.get("okres") for obec in root.iter('Obec')}
for ulice in root.iter('Ulice'):
kod_obce = ulice.get("obec")
kod_okresu = obce_okresy[kod_obce]
nazev = ulice.find("Nazev").text
if "nám" in nazev or "Nám" in nazev:
(stary_pocet, _, nazev_okresu) = okresy[kod_okresu]
okresy[kod_okresu] = (stary_pocet + 1, kod_okresu, nazev_okresu)
return feature_collection_from_areas(okresy.values(), self.db_tree, 'Square count in area')
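# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how this visualiser might be driven from a script. The
# constructor signature of Visualiser (taking a parsed ElementTree of the
# database XML) and the file names below are assumptions for demonstration
# only; consult the Visualiser base class for the real interface.
if __name__ == "__main__":
    import xml.etree.ElementTree as ElementTree
    import geojson

    db_tree = ElementTree.parse("db.xml")              # assumed input path
    visualiser = SquareCountVisualiser(db_tree)        # assumed constructor
    collection = visualiser.run()
    with open("square_count.geojson", "w") as output:  # assumed output path
        geojson.dump(collection, output)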
|
JrGoodle/clowder
|
refs/heads/master
|
clowder/cli/repo.py
|
1
|
"""Clowder command line repo controller
.. codeauthor:: Joe DeCapo <joe@polka.cat>
"""
import argparse
from clowder.clowder_controller import print_clowder_name
from clowder.environment import clowder_git_repo_required, clowder_repo_required, ENVIRONMENT
from clowder.git.clowder_repo import ClowderRepo, print_clowder_repo_status, print_clowder_repo_status_fetch
from clowder.util.connectivity import network_connection_required
from .util import add_parser_arguments
def add_repo_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('repo', help='Manage clowder repo')
repo_subparsers = parser.add_subparsers()
add_repo_add_parser(repo_subparsers)
add_repo_checkout_parser(repo_subparsers)
add_repo_clean_parser(repo_subparsers)
add_repo_commit_parser(repo_subparsers)
add_repo_pull_parser(repo_subparsers)
add_repo_push_parser(repo_subparsers)
add_repo_run_parser(repo_subparsers)
add_repo_status_parser(repo_subparsers)
def add_repo_add_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo add parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('add', help='Add files in clowder repo')
parser.set_defaults(func=add)
add_parser_arguments(parser, [
(['files'], dict(nargs='+', metavar='<file>', help='files to add'))
])
def add_repo_checkout_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo checkout parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('checkout', help='Checkout ref in clowder repo')
parser.set_defaults(func=checkout)
add_parser_arguments(parser, [
(['ref'], dict(nargs=1, metavar='<ref>', help='git ref to checkout'))
])
def add_repo_clean_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo clean parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('clean', help='Discard changes in clowder repo')
parser.set_defaults(func=clean)
def add_repo_commit_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo commit parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('commit', help='Commit current changes in clowder repo yaml files')
parser.set_defaults(func=commit)
add_parser_arguments(parser, [
(['message'], dict(nargs=1, metavar='<message>', help='commit message'))
])
def add_repo_pull_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo pull parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('pull', help='Pull upstream changes in clowder repo')
parser.set_defaults(func=pull)
def add_repo_push_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo push parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('push', help='Push changes in clowder repo')
parser.set_defaults(func=push)
def add_repo_run_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo run parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('run', help='Run command in clowder repo')
parser.set_defaults(func=run)
add_parser_arguments(parser, [
(['command'], dict(nargs=1, metavar='<command>', help='command to run in clowder repo directory'))
])
def add_repo_status_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder repo status parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
parser = subparsers.add_parser('status', help='Print clowder repo git status')
parser.set_defaults(func=status)
@print_clowder_name
@clowder_git_repo_required
@print_clowder_repo_status
def add(args) -> None:
"""Clowder repo add command private implementation"""
ClowderRepo(ENVIRONMENT.clowder_git_repo_dir).add(args.files)
@print_clowder_name
@clowder_git_repo_required
@print_clowder_repo_status_fetch
def checkout(args) -> None:
"""Clowder repo checkout command private implementation"""
ClowderRepo(ENVIRONMENT.clowder_git_repo_dir).checkout(args.ref[0])
@print_clowder_name
@clowder_git_repo_required
@print_clowder_repo_status
def clean(_) -> None:
"""Clowder repo clean command private implementation"""
ClowderRepo(ENVIRONMENT.clowder_git_repo_dir).clean()
@print_clowder_name
@clowder_git_repo_required
@print_clowder_repo_status
def commit(args) -> None:
"""Clowder repo commit command private implementation"""
ClowderRepo(ENVIRONMENT.clowder_git_repo_dir).commit(args.message[0])
@print_clowder_name
@clowder_git_repo_required
@network_connection_required
@print_clowder_repo_status_fetch
def pull(_) -> None:
"""Clowder repo pull command private implementation"""
ClowderRepo(ENVIRONMENT.clowder_git_repo_dir).pull()
@print_clowder_name
@clowder_git_repo_required
@network_connection_required
@print_clowder_repo_status_fetch
def push(_) -> None:
"""Clowder repo push command private implementation"""
ClowderRepo(ENVIRONMENT.clowder_git_repo_dir).push()
@print_clowder_name
@clowder_repo_required
@print_clowder_repo_status
def run(args) -> None:
"""Clowder repo run command private implementation"""
ClowderRepo(ENVIRONMENT.clowder_repo_dir).run_command(args.command[0])
@print_clowder_name
@clowder_repo_required
@print_clowder_repo_status
def status(_) -> None:
"""Clowder repo status command entry point"""
if ENVIRONMENT.clowder_git_repo_dir is not None:
ClowderRepo(ENVIRONMENT.clowder_repo_dir).git_status()
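# --- Illustrative wiring sketch (not part of the original module) ---
# Demonstrates how add_repo_parser() above could be attached to a top-level
# argparse parser and dispatched; every subcommand registered in this module
# sets a 'func' default, which is what the dispatch below relies on. The
# program name and this helper itself are assumptions for demonstration only.
def _example_main() -> None:
    """Hypothetical entry point showing parser wiring and dispatch."""
    parser = argparse.ArgumentParser(prog='clowder')
    subparsers = parser.add_subparsers(dest='subcommand')
    add_repo_parser(subparsers)
    args = parser.parse_args()
    if hasattr(args, 'func'):
        args.func(args)
    else:
        parser.print_help()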
|
spotify/annoy
|
refs/heads/master
|
test/examples_test.py
|
4
|
import unittest
def execfile(fn):
with open(fn) as f:
exec(f.read())
def simple_test():
execfile('examples/simple_test.py')
def mmap_test():
execfile('examples/mmap_test.py')
def precision_test():
execfile('examples/precision_test.py')
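# --- Illustrative variant (not part of the original module) ---
# exec(f.read()) above runs each example inside execfile()'s local scope. If a
# test ever needed the names an example defines to persist for inspection, a
# variant that passes an explicit namespace could be used; this helper is only
# a sketch and its name is an assumption.
def execfile_with_namespace(fn):
    """Hypothetical variant: run a file and return the namespace it produced."""
    namespace = {"__name__": "__main__", "__file__": fn}
    with open(fn) as f:
        exec(compile(f.read(), fn, "exec"), namespace)
    return namespace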
|
theguardian/headphones
|
refs/heads/master
|
lib/unidecode/x010.py
|
252
|
data = (
'k', # 0x00
'kh', # 0x01
'g', # 0x02
'gh', # 0x03
'ng', # 0x04
'c', # 0x05
'ch', # 0x06
'j', # 0x07
'jh', # 0x08
'ny', # 0x09
'nny', # 0x0a
'tt', # 0x0b
'tth', # 0x0c
'dd', # 0x0d
'ddh', # 0x0e
'nn', # 0x0f
'tt', # 0x10
'th', # 0x11
'd', # 0x12
'dh', # 0x13
'n', # 0x14
'p', # 0x15
'ph', # 0x16
'b', # 0x17
'bh', # 0x18
'm', # 0x19
'y', # 0x1a
'r', # 0x1b
'l', # 0x1c
'w', # 0x1d
's', # 0x1e
'h', # 0x1f
'll', # 0x20
'a', # 0x21
'[?]', # 0x22
'i', # 0x23
'ii', # 0x24
'u', # 0x25
'uu', # 0x26
'e', # 0x27
'[?]', # 0x28
'o', # 0x29
'au', # 0x2a
'[?]', # 0x2b
'aa', # 0x2c
'i', # 0x2d
'ii', # 0x2e
'u', # 0x2f
'uu', # 0x30
'e', # 0x31
'ai', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'N', # 0x36
'\'', # 0x37
':', # 0x38
'', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'0', # 0x40
'1', # 0x41
'2', # 0x42
'3', # 0x43
'4', # 0x44
'5', # 0x45
'6', # 0x46
'7', # 0x47
'8', # 0x48
'9', # 0x49
' / ', # 0x4a
' // ', # 0x4b
'n*', # 0x4c
'r*', # 0x4d
'l*', # 0x4e
'e*', # 0x4f
'sh', # 0x50
'ss', # 0x51
'R', # 0x52
'RR', # 0x53
'L', # 0x54
'LL', # 0x55
'R', # 0x56
'RR', # 0x57
'L', # 0x58
'LL', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'A', # 0xa0
'B', # 0xa1
'G', # 0xa2
'D', # 0xa3
'E', # 0xa4
'V', # 0xa5
'Z', # 0xa6
'T`', # 0xa7
'I', # 0xa8
'K', # 0xa9
'L', # 0xaa
'M', # 0xab
'N', # 0xac
'O', # 0xad
'P', # 0xae
'Zh', # 0xaf
'R', # 0xb0
'S', # 0xb1
'T', # 0xb2
'U', # 0xb3
'P`', # 0xb4
'K`', # 0xb5
'G\'', # 0xb6
'Q', # 0xb7
'Sh', # 0xb8
'Ch`', # 0xb9
'C`', # 0xba
'Z\'', # 0xbb
'C', # 0xbc
'Ch', # 0xbd
'X', # 0xbe
'J', # 0xbf
'H', # 0xc0
'E', # 0xc1
'Y', # 0xc2
'W', # 0xc3
'Xh', # 0xc4
'OE', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'a', # 0xd0
'b', # 0xd1
'g', # 0xd2
'd', # 0xd3
'e', # 0xd4
'v', # 0xd5
'z', # 0xd6
't`', # 0xd7
'i', # 0xd8
'k', # 0xd9
'l', # 0xda
'm', # 0xdb
'n', # 0xdc
'o', # 0xdd
'p', # 0xde
'zh', # 0xdf
'r', # 0xe0
's', # 0xe1
't', # 0xe2
'u', # 0xe3
'p`', # 0xe4
'k`', # 0xe5
'g\'', # 0xe6
'q', # 0xe7
'sh', # 0xe8
'ch`', # 0xe9
'c`', # 0xea
'z\'', # 0xeb
'c', # 0xec
'ch', # 0xed
'x', # 0xee
'j', # 0xef
'h', # 0xf0
'e', # 0xf1
'y', # 0xf2
'w', # 0xf3
'xh', # 0xf4
'oe', # 0xf5
'f', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
' // ', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
'[?]', # 0xff
)
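# --- Illustrative lookup sketch (not part of the original table) ---
# Unidecode-style tables like the tuple above hold one ASCII approximation per
# low byte of a code point, with the page (0x10 here) encoded in the file name.
# This helper only demonstrates that indexing scheme; it is not part of the
# library's API, and the function name is an assumption.
def _example_transliterate_char(char):
    """Return the ASCII approximation for a single U+10xx character."""
    codepoint = ord(char)
    if not 0x1000 <= codepoint <= 0x10FF:
        raise ValueError("this table only covers U+1000..U+10FF")
    index = codepoint & 0xFF
    return data[index] if index < len(data) else '[?]'
# e.g. _example_transliterate_char(u'\u10D0') -> 'a' (Georgian letter AN)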
|
nagyistoce/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor_task/tests/test_subtasks.py
|
146
|
"""
Unit tests for instructor_task subtasks.
"""
from uuid import uuid4
from mock import Mock, patch
from student.models import CourseEnrollment
from instructor_task.subtasks import queue_subtasks_for_query
from instructor_task.tests.factories import InstructorTaskFactory
from instructor_task.tests.test_base import InstructorTaskCourseTestCase
class TestSubtasks(InstructorTaskCourseTestCase):
"""Tests for subtasks."""
def setUp(self):
super(TestSubtasks, self).setUp()
self.initialize_course()
def _enroll_students_in_course(self, course_id, num_students):
"""Create and enroll some students in the course."""
for _ in range(num_students):
random_id = uuid4().hex[:8]
self.create_student(username='student{0}'.format(random_id))
def _queue_subtasks(self, create_subtask_fcn, items_per_task, initial_count, extra_count):
"""Queue subtasks while enrolling more students into course in the middle of the process."""
task_id = str(uuid4())
instructor_task = InstructorTaskFactory.create(
course_id=self.course.id,
task_id=task_id,
task_key='dummy_task_key',
task_type='bulk_course_email',
)
self._enroll_students_in_course(self.course.id, initial_count)
task_querysets = [CourseEnrollment.objects.filter(course_id=self.course.id)]
def initialize_subtask_info(*args): # pylint: disable=unused-argument
"""Instead of initializing subtask info enroll some more students into course."""
self._enroll_students_in_course(self.course.id, extra_count)
return {}
with patch('instructor_task.subtasks.initialize_subtask_info') as mock_initialize_subtask_info:
mock_initialize_subtask_info.side_effect = initialize_subtask_info
queue_subtasks_for_query(
entry=instructor_task,
action_name='action_name',
create_subtask_fcn=create_subtask_fcn,
item_querysets=task_querysets,
item_fields=[],
items_per_task=items_per_task,
total_num_items=initial_count,
)
def test_queue_subtasks_for_query1(self):
"""Test queue_subtasks_for_query() if the last subtask only needs to accommodate < items_per_tasks items."""
mock_create_subtask_fcn = Mock()
self._queue_subtasks(mock_create_subtask_fcn, 3, 7, 1)
# Check number of items for each subtask
mock_create_subtask_fcn_args = mock_create_subtask_fcn.call_args_list
self.assertEqual(len(mock_create_subtask_fcn_args[0][0][0]), 3)
self.assertEqual(len(mock_create_subtask_fcn_args[1][0][0]), 3)
self.assertEqual(len(mock_create_subtask_fcn_args[2][0][0]), 2)
def test_queue_subtasks_for_query2(self):
"""Test queue_subtasks_for_query() if the last subtask needs to accommodate > items_per_task items."""
mock_create_subtask_fcn = Mock()
self._queue_subtasks(mock_create_subtask_fcn, 3, 8, 3)
# Check number of items for each subtask
mock_create_subtask_fcn_args = mock_create_subtask_fcn.call_args_list
self.assertEqual(len(mock_create_subtask_fcn_args[0][0][0]), 3)
self.assertEqual(len(mock_create_subtask_fcn_args[1][0][0]), 3)
self.assertEqual(len(mock_create_subtask_fcn_args[2][0][0]), 5)
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/building/general/shared_cloning_facility_general.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/general/shared_cloning_facility_general.iff"
result.attribute_template_id = -1
result.stfName("building_name","cloning_facility_general")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
carsongee/edx-platform
|
refs/heads/master
|
lms/lib/comment_client/utils.py
|
20
|
from contextlib import contextmanager
from dogapi import dog_stats_api
import logging
import requests
from django.conf import settings
from time import time
from uuid import uuid4
from django.utils.translation import get_language
log = logging.getLogger(__name__)
def strip_none(dic):
return dict([(k, v) for k, v in dic.iteritems() if v is not None])
def strip_blank(dic):
def _is_blank(v):
return isinstance(v, str) and len(v.strip()) == 0
return dict([(k, v) for k, v in dic.iteritems() if not _is_blank(v)])
def extract(dic, keys):
if isinstance(keys, str):
return strip_none({keys: dic.get(keys)})
else:
return strip_none({k: dic.get(k) for k in keys})
def merge_dict(dic1, dic2):
return dict(dic1.items() + dic2.items())
@contextmanager
def request_timer(request_id, method, url, tags=None):
start = time()
with dog_stats_api.timer('comment_client.request.time', tags=tags):
yield
end = time()
duration = end - start
log.info(
"comment_client_request_log: request_id={request_id}, method={method}, "
"url={url}, duration={duration}".format(
request_id=request_id,
method=method,
url=url,
duration=duration
)
)
def perform_request(method, url, data_or_params=None, raw=False,
metric_action=None, metric_tags=None, paged_results=False):
if metric_tags is None:
metric_tags = []
metric_tags.append(u'method:{}'.format(method))
if metric_action:
metric_tags.append(u'action:{}'.format(metric_action))
if data_or_params is None:
data_or_params = {}
headers = {
'X-Edx-Api-Key': getattr(settings, "COMMENTS_SERVICE_KEY", None),
'Accept-Language': get_language(),
}
request_id = uuid4()
request_id_dict = {'request_id': request_id}
if method in ['post', 'put', 'patch']:
data = data_or_params
params = request_id_dict
else:
data = None
params = merge_dict(data_or_params, request_id_dict)
with request_timer(request_id, method, url, metric_tags):
response = requests.request(
method,
url,
data=data,
params=params,
headers=headers,
timeout=5
)
metric_tags.append(u'status_code:{}'.format(response.status_code))
if response.status_code > 200:
metric_tags.append(u'result:failure')
else:
metric_tags.append(u'result:success')
dog_stats_api.increment('comment_client.request.count', tags=metric_tags)
if 200 < response.status_code < 500:
raise CommentClientRequestError(response.text, response.status_code)
# Heroku returns a 503 when an application is in maintenance mode
elif response.status_code == 503:
raise CommentClientMaintenanceError(response.text)
elif response.status_code == 500:
raise CommentClient500Error(response.text)
else:
if raw:
return response.text
else:
try:
data = response.json()
except ValueError:
raise CommentClientError(
u"Comments service returned invalid JSON for request {request_id}; first 100 characters: '{content}'".format(
request_id=request_id,
content=response.text[:100]
)
)
if paged_results:
dog_stats_api.histogram(
'comment_client.request.paged.result_count',
value=len(data.get('collection', [])),
tags=metric_tags
)
dog_stats_api.histogram(
'comment_client.request.paged.page',
value=data.get('page', 1),
tags=metric_tags
)
dog_stats_api.histogram(
'comment_client.request.paged.num_pages',
value=data.get('num_pages', 1),
tags=metric_tags
)
return data
class CommentClientError(Exception):
def __init__(self, msg):
self.message = msg
def __str__(self):
return repr(self.message)
class CommentClientRequestError(CommentClientError):
def __init__(self, msg, status_code=400):
super(CommentClientRequestError, self).__init__(msg)
self.status_code = status_code
class CommentClient500Error(CommentClientError):
pass
class CommentClientMaintenanceError(CommentClientError):
pass
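# --- Illustrative usage sketch (not part of the original module) ---
# Shows how perform_request() above might be called by a higher-level client.
# The service URL, resource path, and metric_action value are assumptions made
# for demonstration; real callers build their URLs from Django settings.
def example_fetch_threads(course_id, page=1):
    """Hypothetical helper: fetch one page of discussion threads."""
    url = "http://localhost:4567/api/v1/threads"  # assumed comments service endpoint
    return perform_request(
        'get',
        url,
        data_or_params={'course_id': course_id, 'page': page},
        metric_action='thread.list',  # assumed metric label
        paged_results=True,
    )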
|
healthchecks/healthchecks
|
refs/heads/master
|
hc/front/tests/test_filtering_rules.py
|
2
|
from hc.api.models import Check
from hc.test import BaseTestCase
class FilteringRulesTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.check = Check.objects.create(project=self.project)
self.url = "/checks/%s/filtering_rules/" % self.check.code
self.redirect_url = "/checks/%s/details/" % self.check.code
def test_it_works(self):
payload = {
"subject": "SUCCESS",
"subject_fail": "ERROR",
"methods": "POST",
"manual_resume": "1",
"filter_by_subject": "yes",
}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertEqual(self.check.subject, "SUCCESS")
self.assertEqual(self.check.subject_fail, "ERROR")
self.assertEqual(self.check.methods, "POST")
self.assertTrue(self.check.manual_resume)
def test_it_clears_method(self):
self.check.method = "POST"
self.check.save()
payload = {"subject": "SUCCESS", "methods": "", "filter_by_subject": "yes"}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertEqual(self.check.methods, "")
def test_it_clears_subject(self):
self.check.subject = "SUCCESS"
self.check.subject_fail = "ERROR"
self.check.save()
payload = {
"methods": "",
"filter_by_subject": "no",
"subject": "foo",
"subject_fail": "bar",
}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertEqual(self.check.subject, "")
self.assertEqual(self.check.subject_fail, "")
def test_it_clears_manual_resume_flag(self):
self.check.manual_resume = True
self.check.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data={"filter_by_subject": "no"})
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertFalse(self.check.manual_resume)
def test_it_requires_rw_access(self):
self.bobs_membership.rw = False
self.bobs_membership.save()
payload = {
"subject": "SUCCESS",
"subject_fail": "ERROR",
"methods": "POST",
"manual_resume": "1",
"filter_by_subject": "yes",
}
self.client.login(username="bob@example.org", password="password")
r = self.client.post(self.url, payload)
self.assertEqual(r.status_code, 403)
|
caotianwei/django
|
refs/heads/master
|
tests/project_template/test_settings.py
|
274
|
import unittest
from django.test import TestCase
from django.utils import six
@unittest.skipIf(six.PY2,
'Python 2 cannot import the project template because '
'django/conf/project_template doesn\'t have an __init__.py file.')
class TestStartProjectSettings(TestCase):
def test_middleware_classes_headers(self):
"""
Ensure headers sent by the default MIDDLEWARE_CLASSES do not
inadvertently change. For example, we never want "Vary: Cookie" to
appear in the list since it prevents the caching of responses.
"""
from django.conf.project_template.project_name.settings import MIDDLEWARE_CLASSES
with self.settings(
MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
ROOT_URLCONF='project_template.urls',
):
response = self.client.get('/empty/')
headers = sorted(response.serialize_headers().split(b'\r\n'))
self.assertEqual(headers, [
b'Content-Type: text/html; charset=utf-8',
b'X-Frame-Options: SAMEORIGIN',
])
|
cedk/odoo
|
refs/heads/8.0
|
openerp/addons/test_workflow/models.py
|
337
|
# -*- coding: utf-8 -*-
import openerp.osv.orm
class m(openerp.osv.orm.Model):
""" A model for which we will define a workflow (see data.xml). """
_name = 'test.workflow.model'
def print_(self, cr, uid, ids, s, context=None):
print ' Running activity `%s` for record %s' % (s, ids)
return True
def print_a(self, cr, uid, ids, context=None):
return self.print_(cr, uid, ids, 'a', context)
def print_b(self, cr, uid, ids, context=None):
return self.print_(cr, uid, ids, 'b', context)
def print_c(self, cr, uid, ids, context=None):
return self.print_(cr, uid, ids, 'c', context)
def condition(self, cr, uid, ids, context=None):
m = self.pool['test.workflow.trigger']
for r in m.browse(cr, uid, [1], context=context):
if not r.value:
return False
return True
def trigger(self, cr, uid, context=None):
return openerp.workflow.trg_trigger(uid, 'test.workflow.trigger', 1, cr)
class n(openerp.osv.orm.Model):
""" A model used for the trigger feature. """
_name = 'test.workflow.trigger'
_columns = { 'value': openerp.osv.fields.boolean('Value') }
_defaults = { 'value': False }
class a(openerp.osv.orm.Model):
_name = 'test.workflow.model.a'
_columns = { 'value': openerp.osv.fields.integer('Value') }
_defaults = { 'value': 0 }
class b(openerp.osv.orm.Model):
_name = 'test.workflow.model.b'
_inherit = 'test.workflow.model.a'
class c(openerp.osv.orm.Model):
_name = 'test.workflow.model.c'
_inherit = 'test.workflow.model.a'
class d(openerp.osv.orm.Model):
_name = 'test.workflow.model.d'
_inherit = 'test.workflow.model.a'
class e(openerp.osv.orm.Model):
_name = 'test.workflow.model.e'
_inherit = 'test.workflow.model.a'
for name in 'bcdefghijkl':
#
# Do not use type() to create the class here, but use the class construct.
# This is because the __module__ of the new class would be the one of the
# metaclass that provides method __new__!
#
class NewModel(openerp.osv.orm.Model):
_name = 'test.workflow.model.%s' % name
_inherit = 'test.workflow.model.a'
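# For contrast with the comment above: a type()-based construction would look
# roughly like the sketch below. It is shown only as a comment (so no extra
# model gets registered) and the attribute values are purely illustrative.
#
#   NewModel = type('NewModel', (openerp.osv.orm.Model,), {
#       '_name': 'test.workflow.model.%s' % name,
#       '_inherit': 'test.workflow.model.a',
#   })
#
# As the comment above notes, with type() the resulting __module__ would be
# that of the metaclass providing __new__ rather than this file, which is why
# the class statement is used instead.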
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pidydx/grr
|
refs/heads/master
|
grr/gui/api_plugins/report_plugins/report_plugins_test.py
|
1
|
#!/usr/bin/env python
"""Tests for report plugins."""
import itertools
import math
import os
from grr.gui.api_plugins import stats as stats_api
from grr.gui.api_plugins.report_plugins import client_report_plugins
from grr.gui.api_plugins.report_plugins import filestore_report_plugins
from grr.gui.api_plugins.report_plugins import rdf_report_plugins
from grr.gui.api_plugins.report_plugins import report_plugins
from grr.gui.api_plugins.report_plugins import report_plugins_test_mocks
from grr.gui.api_plugins.report_plugins import server_report_plugins
from grr.lib import aff4
from grr.lib import client_fixture
from grr.lib import events
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.aff4_objects import filestore_test_lib
from grr.lib.flows.cron import filestore_stats
from grr.lib.flows.cron import system as cron_system
from grr.lib.flows.general import audit
from grr.lib.rdfvalues import paths as rdf_paths
class ReportPluginsTest(test_lib.GRRBaseTest):
def testGetAvailableReportPlugins(self):
"""Ensure GetAvailableReportPlugins lists ReportPluginBase's subclasses."""
with report_plugins_test_mocks.MockedReportPlugins():
self.assertTrue(report_plugins_test_mocks.FooReportPlugin in
report_plugins.GetAvailableReportPlugins())
self.assertTrue(report_plugins_test_mocks.BarReportPlugin in
report_plugins.GetAvailableReportPlugins())
def testGetReportByName(self):
"""Ensure GetReportByName instantiates correct subclasses based on name."""
with report_plugins_test_mocks.MockedReportPlugins():
report_object = report_plugins.GetReportByName("BarReportPlugin")
self.assertTrue(
isinstance(report_object, report_plugins_test_mocks.BarReportPlugin))
def testGetReportDescriptor(self):
"""Ensure GetReportDescriptor returns a correctly filled in proto."""
desc = report_plugins_test_mocks.BarReportPlugin.GetReportDescriptor()
self.assertEqual(desc.type,
rdf_report_plugins.ApiReportDescriptor.ReportType.SERVER)
self.assertEqual(desc.title, "Bar Activity")
self.assertEqual(desc.summary,
"Reports bars' activity in the given time range.")
self.assertEqual(desc.requires_time_range, True)
def AddFakeAuditLog(description=None,
client=None,
user=None,
token=None,
**kwargs):
events.Events.PublishEventInline(
"Audit",
events.AuditEvent(
description=description, client=client, user=user, **kwargs),
token=token)
class ReportUtilsTest(test_lib.GRRBaseTest):
def setUp(self):
super(ReportUtilsTest, self).setUp()
audit.AuditEventListener.created_logs.clear()
def testAuditLogsForTimespan(self):
two_weeks_ago = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("2w")
with test_lib.FakeTime(two_weeks_ago):
AddFakeAuditLog("Fake outdated audit log.", token=self.token)
AddFakeAuditLog("Fake audit description foo.", token=self.token)
AddFakeAuditLog("Fake audit description bar.", token=self.token)
audit_events = {
ev.description: ev
for fd in audit.AuditLogsForTimespan(
rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1d"),
rdfvalue.RDFDatetime.Now(),
token=self.token) for ev in fd.GenerateItems()
}
self.assertIn("Fake audit description foo.", audit_events)
self.assertIn("Fake audit description bar.", audit_events)
self.assertNotIn("Fake outdated audit log.", audit_events)
class ClientReportPluginsTest(test_lib.GRRBaseTest):
def MockClients(self):
# We are only interested in the client object (path = "/" in client VFS)
fixture = test_lib.FilterFixture(regex="^/$")
# Make 10 windows clients
for i in range(0, 10):
test_lib.ClientFixture("C.0%015X" % i, token=self.token, fixture=fixture)
with aff4.FACTORY.Open(
"C.0%015X" % i, mode="rw", token=self.token) as client:
client.AddLabels("Label1", "Label2", owner="GRR")
client.AddLabels("UserLabel", owner="jim")
# Make 10 linux clients 12 hours apart.
for i in range(0, 10):
test_lib.ClientFixture(
"C.1%015X" % i,
token=self.token,
fixture=client_fixture.LINUX_FIXTURE)
def testGRRVersionReportPlugin(self):
self.MockClients()
# Scan for activity to be reported.
for _ in test_lib.TestFlowHelper(
cron_system.GRRVersionBreakDown.__name__, token=self.token):
pass
report = report_plugins.GetReportByName(
client_report_plugins.GRRVersion30ReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__, client_label="All"),
token=self.token)
self.assertEqual(
api_report_data.representation_type,
rdf_report_plugins.ApiReportData.RepresentationType.LINE_CHART)
self.assertEqual(len(api_report_data.line_chart.data), 1)
self.assertEqual(api_report_data.line_chart.data[0].label, "GRR Monitor 1")
self.assertEqual(len(api_report_data.line_chart.data[0].points), 1)
self.assertEqual(api_report_data.line_chart.data[0].points[0].y, 20)
def testGRRVersionReportPluginWithNoActivityToReport(self):
# Scan for activity to be reported.
for _ in test_lib.TestFlowHelper(
cron_system.GRRVersionBreakDown.__name__, token=self.token):
pass
report = report_plugins.GetReportByName(
client_report_plugins.GRRVersion30ReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__, client_label="All"),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.LINE_CHART,
line_chart=rdf_report_plugins.ApiLineChartReportData(data=[])))
def testLastActiveReportPlugin(self):
self.MockClients()
# Scan for activity to be reported.
for _ in test_lib.TestFlowHelper(
cron_system.LastAccessStats.__name__, token=self.token):
pass
report = report_plugins.GetReportByName(
client_report_plugins.LastActiveReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__, client_label="All"),
token=self.token)
self.assertEqual(
api_report_data.representation_type,
rdf_report_plugins.ApiReportData.RepresentationType.LINE_CHART)
labels = [
"60 day active", "30 day active", "7 day active", "3 day active",
"1 day active"
]
ys = [20, 20, 0, 0, 0]
for series, label, y in itertools.izip(api_report_data.line_chart.data,
labels, ys):
self.assertEqual(series.label, label)
self.assertEqual(len(series.points), 1)
self.assertEqual(series.points[0].y, y)
def testLastActiveReportPluginWithNoActivityToReport(self):
# Scan for activity to be reported.
for _ in test_lib.TestFlowHelper(
cron_system.LastAccessStats.__name__, token=self.token):
pass
report = report_plugins.GetReportByName(
client_report_plugins.LastActiveReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__, client_label="All"),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.LINE_CHART,
line_chart=rdf_report_plugins.ApiLineChartReportData(data=[])))
def testOSBreakdownReportPlugin(self):
# Add a client to be reported.
self.SetupClients(1)
# Scan for clients to be reported (the one we just added).
for _ in test_lib.TestFlowHelper(
cron_system.OSBreakDown.__name__, token=self.token):
pass
report = report_plugins.GetReportByName(
client_report_plugins.OSBreakdown30ReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__, client_label="All"),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
pie_chart=rdf_report_plugins.ApiPieChartReportData(data=[
rdf_report_plugins.ApiReportDataPoint1D(label="Unknown", x=1)
]),
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.PIE_CHART))
def testOSBreakdownReportPluginWithNoDataToReport(self):
report = report_plugins.GetReportByName(
client_report_plugins.OSBreakdown30ReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__, client_label="All"),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
pie_chart=rdf_report_plugins.ApiPieChartReportData(data=[]),
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.PIE_CHART))
def testOSReleaseBreakdownReportPlugin(self):
# Add a client to be reported.
self.SetupClients(1)
# Scan for clients to be reported (the one we just added).
for _ in test_lib.TestFlowHelper(
cron_system.OSBreakDown.__name__, token=self.token):
pass
report = report_plugins.GetReportByName(
client_report_plugins.OSReleaseBreakdown30ReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__, client_label="All"),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
pie_chart=rdf_report_plugins.ApiPieChartReportData(data=[
rdf_report_plugins.ApiReportDataPoint1D(label="Unknown", x=1)
]),
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.PIE_CHART))
def testOSReleaseBreakdownReportPluginWithNoDataToReport(self):
report = report_plugins.GetReportByName(
client_report_plugins.OSReleaseBreakdown30ReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__, client_label="All"),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
pie_chart=rdf_report_plugins.ApiPieChartReportData(data=[]),
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.PIE_CHART))
class FileStoreReportPluginsTest(test_lib.GRRBaseTest):
def checkStaticData(self, api_report_data):
self.assertEqual(
api_report_data.representation_type,
rdf_report_plugins.ApiReportData.RepresentationType.STACK_CHART)
labels = [
"0 B - 2 B", "2 B - 50 B", "50 B - 100 B", "100 B - 1000 B",
"1000 B - 9.8 KiB", "9.8 KiB - 97.7 KiB", "97.7 KiB - 488.3 KiB",
"488.3 KiB - 976.6 KiB", "976.6 KiB - 4.8 MiB", "4.8 MiB - 9.5 MiB",
"9.5 MiB - 47.7 MiB", "47.7 MiB - 95.4 MiB", "95.4 MiB - 476.8 MiB",
"476.8 MiB - 953.7 MiB", "953.7 MiB - 4.7 GiB", "4.7 GiB - 9.3 GiB",
u"9.3 GiB - \u221E"
]
xs = [0.] + [
math.log10(x)
for x in [
2, 50, 100, 1e3, 10e3, 100e3, 500e3, 1e6, 5e6, 10e6, 50e6, 100e6,
500e6, 1e9, 5e9, 10e9
]
]
for series, label, x in itertools.izip(api_report_data.stack_chart.data,
labels, xs):
self.assertEqual(series.label, label)
self.assertAlmostEqual([p.x for p in series.points], [x])
self.assertEqual(api_report_data.stack_chart.bar_width, .2)
self.assertEqual([t.label for t in api_report_data.stack_chart.x_ticks], [
"1 B", "32 B", "1 KiB", "32 KiB", "1 MiB", "32 MiB", "1 GiB", "32 GiB",
"1 TiB", "32 TiB", "1 PiB", "32 PiB", "1024 PiB", "32768 PiB",
"1048576 PiB"
])
self.assertAlmostEqual(api_report_data.stack_chart.x_ticks[0].x, 0.)
for diff in (
t2.x - t1.x
for t1, t2 in itertools.izip(api_report_data.stack_chart.x_ticks[:-1],
api_report_data.stack_chart.x_ticks[1:])):
self.assertAlmostEqual(math.log10(32), diff)
def testFileSizeDistributionReportPlugin(self):
filename = "winexec_img.dd"
client_id, = self.SetupClients(1)
# Add a file to be reported.
filestore_test_lib.AddFileToFileStore(
rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=os.path.join(self.base_path, filename)),
client_id=client_id,
token=self.token)
# Scan for files to be reported (the one we just added).
for _ in test_lib.TestFlowHelper(
filestore_stats.FilestoreStatsCronFlow.__name__, token=self.token):
pass
report = report_plugins.GetReportByName(
filestore_report_plugins.FileSizeDistributionReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(name=report.__class__.__name__),
token=self.token)
self.checkStaticData(api_report_data)
for series in api_report_data.stack_chart.data:
if series.label == "976.6 KiB - 4.8 MiB":
self.assertEqual([p.y for p in series.points], [1])
else:
self.assertEqual([p.y for p in series.points], [0])
def testFileSizeDistributionReportPluginWithNothingToReport(self):
# Scan for files to be reported.
for _ in test_lib.TestFlowHelper(
filestore_stats.FilestoreStatsCronFlow.__name__, token=self.token):
pass
report = report_plugins.GetReportByName(
filestore_report_plugins.FileSizeDistributionReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(name=report.__class__.__name__),
token=self.token)
self.checkStaticData(api_report_data)
for series in api_report_data.stack_chart.data:
self.assertEqual([p.y for p in series.points], [0])
class ServerReportPluginsTest(test_lib.GRRBaseTest):
def setUp(self):
super(ServerReportPluginsTest, self).setUp()
audit.AuditEventListener.created_logs.clear()
def testClientApprovalsReportPlugin(self):
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(
action=events.AuditEvent.Action.CLIENT_APPROVAL_BREAK_GLASS_REQUEST,
user="User123",
description="Approval request description.",
token=self.token)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22"), increment=1):
for i in xrange(10):
AddFakeAuditLog(
action=events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST,
user="User%d" % i,
description="Approval request.",
token=self.token)
AddFakeAuditLog(
action=events.AuditEvent.Action.CLIENT_APPROVAL_GRANT,
user="User456",
description="Grant.",
token=self.token)
report = report_plugins.GetReportByName(
server_report_plugins.ClientApprovalsReportPlugin.__name__)
start = rdfvalue.RDFDatetime.FromHumanReadable("2012/12/15")
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=start,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data.representation_type,
rdf_report_plugins.ApiReportData.RepresentationType.AUDIT_CHART)
self.assertEqual(api_report_data.audit_chart.used_fields,
["action", "client", "description", "timestamp", "user"])
self.assertEqual([(row.action, row.client, row.description, row.user)
for row in api_report_data.audit_chart.rows],
[(events.AuditEvent.Action.CLIENT_APPROVAL_GRANT, None,
"Grant.", "User456"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User9"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User8"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User7"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User6"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User5"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User4"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User3"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User2"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User1"),
(events.AuditEvent.Action.CLIENT_APPROVAL_REQUEST, None,
"Approval request.", "User0")]) # pyformat: disable
def testClientApprovalsReportPluginWithNoActivityToReport(self):
report = report_plugins.GetReportByName(
server_report_plugins.ClientApprovalsReportPlugin.__name__)
now = rdfvalue.RDFDatetime().Now()
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=now - month_duration,
duration=month_duration),
token=self.token)
self.assertEqual(api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.AUDIT_CHART,
audit_chart=rdf_report_plugins.ApiAuditChartReportData(
used_fields=[
"action", "client", "description", "timestamp",
"user"
],
rows=[])))
def testHuntActionsReportPlugin(self):
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(
action=events.AuditEvent.Action.HUNT_CREATED,
user="User123",
flow_name="Flow123",
token=self.token)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22"), increment=1):
for i in xrange(10):
AddFakeAuditLog(
action=events.AuditEvent.Action.HUNT_MODIFIED,
user="User%d" % i,
flow_name="Flow%d" % i,
token=self.token)
AddFakeAuditLog(
action=events.AuditEvent.Action.HUNT_PAUSED,
user="User456",
flow_name="Flow456",
token=self.token)
report = report_plugins.GetReportByName(
server_report_plugins.HuntActionsReportPlugin.__name__)
start = rdfvalue.RDFDatetime.FromHumanReadable("2012/12/15")
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=start,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data.representation_type,
rdf_report_plugins.ApiReportData.RepresentationType.AUDIT_CHART)
self.assertEqual(
api_report_data.audit_chart.used_fields,
["action", "description", "flow_name", "timestamp", "urn", "user"])
self.assertEqual([(row.action, row.description, row.flow_name,
row.timestamp.Format("%Y/%m/%d"), row.urn, row.user)
for row in api_report_data.audit_chart.rows],
[(events.AuditEvent.Action.HUNT_PAUSED, "", "Flow456",
"2012/12/22", None, "User456"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow9",
"2012/12/22", None, "User9"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow8",
"2012/12/22", None, "User8"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow7",
"2012/12/22", None, "User7"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow6",
"2012/12/22", None, "User6"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow5",
"2012/12/22", None, "User5"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow4",
"2012/12/22", None, "User4"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow3",
"2012/12/22", None, "User3"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow2",
"2012/12/22", None, "User2"),
(events.AuditEvent.Action.HUNT_MODIFIED, "", "Flow1",
"2012/12/22", None, "User1"),
(events.AuditEvent.Action.HUNT_MODIFIED, "",
"Flow0", "2012/12/22", None, "User0")
]) # pyformat: disable
def testHuntActionsReportPluginWithNoActivityToReport(self):
report = report_plugins.GetReportByName(
server_report_plugins.HuntActionsReportPlugin.__name__)
now = rdfvalue.RDFDatetime().Now()
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=now - month_duration,
duration=month_duration),
token=self.token)
self.assertEqual(api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.AUDIT_CHART,
audit_chart=rdf_report_plugins.ApiAuditChartReportData(
used_fields=[
"action", "description", "flow_name",
"timestamp", "urn", "user"
],
rows=[])))
def testHuntApprovalsReportPlugin(self):
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(
action=events.AuditEvent.Action.HUNT_APPROVAL_GRANT,
user="User123",
description="Approval grant description.",
token=self.token)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22"), increment=1):
for i in xrange(10):
AddFakeAuditLog(
action=events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
user="User%d" % i,
description="Approval request.",
token=self.token)
AddFakeAuditLog(
action=events.AuditEvent.Action.HUNT_APPROVAL_GRANT,
user="User456",
description="Another grant.",
token=self.token)
report = report_plugins.GetReportByName(
server_report_plugins.HuntApprovalsReportPlugin.__name__)
start = rdfvalue.RDFDatetime.FromHumanReadable("2012/12/15")
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=start,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data.representation_type,
rdf_report_plugins.ApiReportData.RepresentationType.AUDIT_CHART)
self.assertEqual(api_report_data.audit_chart.used_fields,
["action", "description", "timestamp", "urn", "user"])
self.assertEqual([(row.action, row.description,
row.timestamp.Format("%Y/%m/%d"), row.urn, row.user)
for row in api_report_data.audit_chart.rows],
[(events.AuditEvent.Action.HUNT_APPROVAL_GRANT,
"Another grant.", "2012/12/22", None, "User456"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User9"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User8"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User7"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User6"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User5"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User4"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User3"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User2"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User1"),
(events.AuditEvent.Action.HUNT_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User0")
]) # pyformat: disable
def testHuntApprovalsReportPluginWithNoActivityToReport(self):
report = report_plugins.GetReportByName(
server_report_plugins.HuntApprovalsReportPlugin.__name__)
now = rdfvalue.RDFDatetime().Now()
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=now - month_duration,
duration=month_duration),
token=self.token)
self.assertEqual(api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.AUDIT_CHART,
audit_chart=rdf_report_plugins.ApiAuditChartReportData(
used_fields=[
"action", "description", "timestamp", "urn",
"user"
],
rows=[])))
def testCronApprovalsReportPlugin(self):
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(
action=events.AuditEvent.Action.CRON_APPROVAL_GRANT,
user="User123",
description="Approval grant description.",
token=self.token)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22"), increment=1):
for i in xrange(10):
AddFakeAuditLog(
action=events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
user="User%d" % i,
description="Approval request.",
token=self.token)
AddFakeAuditLog(
action=events.AuditEvent.Action.CRON_APPROVAL_GRANT,
user="User456",
description="Another grant.",
token=self.token)
report = report_plugins.GetReportByName(
server_report_plugins.CronApprovalsReportPlugin.__name__)
start = rdfvalue.RDFDatetime.FromHumanReadable("2012/12/15")
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=start,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data.representation_type,
rdf_report_plugins.ApiReportData.RepresentationType.AUDIT_CHART)
self.assertEqual(api_report_data.audit_chart.used_fields,
["action", "description", "timestamp", "urn", "user"])
self.assertEqual([(row.action, row.description,
row.timestamp.Format("%Y/%m/%d"), row.urn, row.user)
for row in api_report_data.audit_chart.rows],
[(events.AuditEvent.Action.CRON_APPROVAL_GRANT,
"Another grant.", "2012/12/22", None, "User456"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User9"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User8"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User7"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User6"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User5"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User4"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User3"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User2"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User1"),
(events.AuditEvent.Action.CRON_APPROVAL_REQUEST,
"Approval request.", "2012/12/22", None, "User0")
]) # pyformat: disable
def testCronApprovalsReportPluginWithNoActivityToReport(self):
report = report_plugins.GetReportByName(
server_report_plugins.CronApprovalsReportPlugin.__name__)
now = rdfvalue.RDFDatetime().Now()
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=now - month_duration,
duration=month_duration),
token=self.token)
self.assertEqual(api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.AUDIT_CHART,
audit_chart=rdf_report_plugins.ApiAuditChartReportData(
used_fields=[
"action", "description", "timestamp", "urn",
"user"
],
rows=[])))
def testMostActiveUsersReportPlugin(self):
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(
"Fake audit description 14 Dec.",
"C.123",
"User123",
token=self.token)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22")):
for _ in xrange(10):
AddFakeAuditLog(
"Fake audit description 22 Dec.",
"C.123",
"User123",
token=self.token)
AddFakeAuditLog(
"Fake audit description 22 Dec.",
"C.456",
"User456",
token=self.token)
report = report_plugins.GetReportByName(
server_report_plugins.MostActiveUsersReportPlugin.__name__)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/31")):
now = rdfvalue.RDFDatetime().Now()
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=now - month_duration,
duration=month_duration),
token=self.token)
# pyformat: disable
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.PIE_CHART,
pie_chart=rdf_report_plugins.ApiPieChartReportData(
data=[
rdf_report_plugins.ApiReportDataPoint1D(
label="User123",
x=11
),
rdf_report_plugins.ApiReportDataPoint1D(
label="User456",
x=1
)
]
)))
# pyformat: enable
def testMostActiveUsersReportPluginWithNoActivityToReport(self):
report = report_plugins.GetReportByName(
server_report_plugins.MostActiveUsersReportPlugin.__name__)
now = rdfvalue.RDFDatetime().Now()
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=now - month_duration,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.PIE_CHART,
pie_chart=rdf_report_plugins.ApiPieChartReportData(data=[])))
def testSystemFlowsReportPlugin(self):
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(
action=events.AuditEvent.Action.RUN_FLOW,
user="GRR",
flow_name="Flow123",
token=self.token)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22")):
for _ in xrange(10):
AddFakeAuditLog(
action=events.AuditEvent.Action.RUN_FLOW,
user="GRR",
flow_name="Flow123",
token=self.token)
AddFakeAuditLog(
action=events.AuditEvent.Action.RUN_FLOW,
user="GRR",
flow_name="Flow456",
token=self.token)
report = report_plugins.GetReportByName(
server_report_plugins.SystemFlowsReportPlugin.__name__)
start = rdfvalue.RDFDatetime.FromHumanReadable("2012/12/15")
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=start,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.STACK_CHART,
stack_chart=rdf_report_plugins.ApiStackChartReportData(
x_ticks=[],
data=[
rdf_report_plugins.ApiReportDataSeries2D(
label=u"Flow123\u2003Run By: GRR (10)",
points=[
rdf_report_plugins.ApiReportDataPoint2D(x=0, y=10)
]), rdf_report_plugins.ApiReportDataSeries2D(
label=u"Flow456\u2003Run By: GRR (1)",
points=[
rdf_report_plugins.ApiReportDataPoint2D(
x=1, y=1)
])
])))
def testSystemFlowsReportPluginWithNoActivityToReport(self):
report = report_plugins.GetReportByName(
server_report_plugins.SystemFlowsReportPlugin.__name__)
now = rdfvalue.RDFDatetime().Now()
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=now - month_duration,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.STACK_CHART,
stack_chart=rdf_report_plugins.ApiStackChartReportData(x_ticks=[])))
def testUserActivityReportPlugin(self):
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(
"Fake audit description 14 Dec.",
"C.123",
"User123",
token=self.token)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22")):
for _ in xrange(10):
AddFakeAuditLog(
"Fake audit description 22 Dec.",
"C.123",
"User123",
token=self.token)
AddFakeAuditLog(
"Fake audit description 22 Dec.",
"C.456",
"User456",
token=self.token)
report = report_plugins.GetReportByName(
server_report_plugins.UserActivityReportPlugin.__name__)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/31")):
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(name=report.__class__.__name__),
token=self.token)
# pyformat: disable
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.STACK_CHART,
stack_chart=rdf_report_plugins.ApiStackChartReportData(
data=[
rdf_report_plugins.ApiReportDataSeries2D(
label=u"User123",
points=[
rdf_report_plugins.ApiReportDataPoint2D(
x=-10, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-9, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-8, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-7, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-6, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-5, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-4, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-3, y=1),
rdf_report_plugins.ApiReportDataPoint2D(
x=-2, y=10),
rdf_report_plugins.ApiReportDataPoint2D(
x=-1, y=0)
]
),
rdf_report_plugins.ApiReportDataSeries2D(
label=u"User456",
points=[
rdf_report_plugins.ApiReportDataPoint2D(
x=-10, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-9, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-8, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-7, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-6, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-5, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-4, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-3, y=0),
rdf_report_plugins.ApiReportDataPoint2D(
x=-2, y=1),
rdf_report_plugins.ApiReportDataPoint2D(
x=-1, y=0)
])])))
# pyformat: enable
def testUserActivityReportPluginWithNoActivityToReport(self):
report = report_plugins.GetReportByName(
server_report_plugins.UserActivityReportPlugin.__name__)
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(name=report.__class__.__name__),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.STACK_CHART,
stack_chart=rdf_report_plugins.ApiStackChartReportData(data=[])))
def testUserFlowsReportPlugin(self):
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
AddFakeAuditLog(
action=events.AuditEvent.Action.RUN_FLOW,
user="User123",
flow_name="Flow123",
token=self.token)
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22")):
for _ in xrange(10):
AddFakeAuditLog(
action=events.AuditEvent.Action.RUN_FLOW,
user="User123",
flow_name="Flow123",
token=self.token)
AddFakeAuditLog(
action=events.AuditEvent.Action.RUN_FLOW,
user="User456",
flow_name="Flow456",
token=self.token)
report = report_plugins.GetReportByName(
server_report_plugins.UserFlowsReportPlugin.__name__)
start = rdfvalue.RDFDatetime.FromHumanReadable("2012/12/15")
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=start,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.STACK_CHART,
stack_chart=rdf_report_plugins.ApiStackChartReportData(
x_ticks=[],
data=[
rdf_report_plugins.ApiReportDataSeries2D(
label=u"Flow123\u2003Run By: User123 (10)",
points=[
rdf_report_plugins.ApiReportDataPoint2D(x=0, y=10)
]), rdf_report_plugins.ApiReportDataSeries2D(
label=u"Flow456\u2003Run By: User456 (1)",
points=[
rdf_report_plugins.ApiReportDataPoint2D(
x=1, y=1)
])
])))
def testUserFlowsReportPluginWithNoActivityToReport(self):
report = report_plugins.GetReportByName(
server_report_plugins.UserFlowsReportPlugin.__name__)
now = rdfvalue.RDFDatetime().Now()
month_duration = rdfvalue.Duration("30d")
api_report_data = report.GetReportData(
stats_api.ApiGetReportArgs(
name=report.__class__.__name__,
start_time=now - month_duration,
duration=month_duration),
token=self.token)
self.assertEqual(
api_report_data,
rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.
RepresentationType.STACK_CHART,
stack_chart=rdf_report_plugins.ApiStackChartReportData(x_ticks=[])))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
kcsry/lippukala
|
refs/heads/master
|
lippukala/models/code.py
|
1
|
from random import randint, choice
from string import digits
from django.db import models
from django.utils.timezone import now
from lippukala.consts import CODE_STATUS_CHOICES, UNUSED, USED
from lippukala.excs import CantUseException
from lippukala.models import Order
import lippukala.settings as settings
class Code(models.Model):
""" Encapsulates a single code, belonging to an order, that may be used to claim one or more products, as described in product_text. """
order = models.ForeignKey(Order, on_delete=models.CASCADE)
created_on = models.DateTimeField(auto_now_add=True)
status = models.IntegerField(choices=CODE_STATUS_CHOICES, default=UNUSED)
used_on = models.DateTimeField(blank=True, null=True)
used_at = models.CharField(max_length=64, blank=True, help_text="Station at which code was used")
prefix = models.CharField(max_length=16, blank=True, editable=False)
code = models.CharField(max_length=64, unique=True, editable=False)
literate_code = models.CharField(max_length=256, blank=True, editable=False)
product_text = models.CharField(max_length=512, blank=True, editable=False)
full_code = property(lambda self: f"{self.prefix}{self.code}")
is_used = property(lambda self: self.status == USED)
def __str__(self):
return f"Code {self.full_code} ({self.literate_code}) ({self.get_status_display()})"
def _generate_code(self):
qs = self.__class__.objects
for attempt in range(500): # 500 attempts really REALLY should be enough.
n_digits = randint(settings.CODE_MIN_N_DIGITS, settings.CODE_MAX_N_DIGITS + 1)
code = ("".join(choice(digits) for x in range(n_digits)))
if not settings.CODE_ALLOW_LEADING_ZEROES:
code = code.lstrip("0")
# Leading zeroes could have dropped digits off the code, so recheck that.
if settings.CODE_MIN_N_DIGITS <= len(code) <= settings.CODE_MAX_N_DIGITS:
if not qs.filter(code=code).exists():
return code
raise ValueError("Unable to find an unused code! Is the keyspace exhausted?")
def _generate_literate_code(self):
keyspace = (settings.LITERATE_KEYSPACES.get(self.prefix) or settings.LITERATE_KEYSPACES.get(None))
if not keyspace: # When absolutely no keyspaces can be found, assume (prefix+code) will do
return self.full_code
bits = []
val = int(self.code, 10)
n = len(keyspace)
assert val > 0
while val > 0:
val, digit = divmod(val, n)
bits.append(keyspace[digit])
        bits = bits[::-1]  # Reverse `bits` so the most significant digit becomes the first word.
if self.prefix: # Oh -- and if we had a prefix, add its literate counterpart now.
bits.insert(0, settings.PREFIXES[self.prefix])
return " ".join(bits).strip()
def _check_sanity(self):
if self.used_on and self.status != USED:
raise ValueError("Un-sane situation detected: saving Code with used status and no usage date")
if self.status != UNUSED and not self.pk:
raise ValueError("Un-sane situation detected: initial save of code with non-virgin status!")
if not all(c in digits for c in self.full_code):
raise ValueError("Un-sane situation detected: full_code contains non-digits. (This might mean a contaminated prefix configuration.)")
if not settings.PREFIX_MAY_BE_BLANK and not self.prefix:
raise ValueError("Un-sane situation detected: prefix may not be blank")
if self.prefix and self.prefix not in settings.PREFIXES:
raise ValueError(f"Un-sane situation detected: prefix {self.prefix!r} is not in PREFIXES")
def save(self, *args, **kwargs):
if not self.code:
self.code = self._generate_code()
if not self.literate_code:
self.literate_code = self._generate_literate_code()
self._check_sanity()
return super().save(*args, **kwargs)
def set_used(self, save=True, used_at=""):
if self.status != UNUSED:
raise CantUseException(f"Can't use a code in {self.get_status_display()} status!")
self.status = USED
self.used_on = now()
self.used_at = used_at
if save:
return self.save()
|
potash/scikit-learn
|
refs/heads/master
|
sklearn/datasets/lfw.py
|
11
|
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website,
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlations from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
        Labels associated with each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
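    # Each metadata line holds either 3 fields (name, img_idx_1, img_idx_2) for
    # a matching pair, or 4 fields (name_1, img_idx_1, name_2, img_idx_2) for a
    # mismatched pair; the image indices in the file are 1-based.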
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlations from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
        Labels associated with each pair of images: 0 means the two images
        show different persons, 1 means they show the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
|
paladin74/neural-network-animation
|
refs/heads/master
|
matplotlib/backends/qt_editor/__init__.py
|
118
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
|
xHeliotrope/injustice_dropper
|
refs/heads/master
|
env/lib/python3.4/site-packages/gunicorn/app/wsgiapp.py
|
96
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
from gunicorn.errors import ConfigError
from gunicorn.app.base import Application
from gunicorn import util
class WSGIApplication(Application):
def init(self, parser, opts, args):
        if opts.paste:
app_name = 'main'
path = opts.paste
if '#' in path:
path, app_name = path.split('#')
path = os.path.abspath(os.path.normpath(
os.path.join(util.getcwd(), path)))
if not os.path.exists(path):
raise ConfigError("%r not found" % path)
# paste application, load the config
self.cfgurl = 'config:%s#%s' % (path, app_name)
self.relpath = os.path.dirname(path)
from .pasterapp import paste_config
return paste_config(self.cfg, self.cfgurl, self.relpath)
if len(args) < 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
def chdir(self):
# chdir to the configured path before loading,
# default is the current dir
os.chdir(self.cfg.chdir)
# add the path to sys.path
sys.path.insert(0, self.cfg.chdir)
def load_wsgiapp(self):
self.chdir()
# load the app
return util.import_app(self.app_uri)
def load_pasteapp(self):
self.chdir()
# load the paste app
from .pasterapp import load_pasteapp
return load_pasteapp(self.cfgurl, self.relpath, global_conf=None)
def load(self):
if self.cfg.paste is not None:
return self.load_pasteapp()
else:
return self.load_wsgiapp()
def run():
"""\
The ``gunicorn`` command line runner for launching Gunicorn with
generic WSGI applications.
"""
from gunicorn.app.wsgiapp import WSGIApplication
WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()
if __name__ == '__main__':
run()
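# A typical command line invocation (module path and options are examples only):
#
#   gunicorn --workers 4 --bind 127.0.0.1:8000 myproject.wsgi:application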
|
simdeveloper/bitcoin
|
refs/heads/master
|
qa/rpc-tests/test_framework/authproxy.py
|
22
|
"""
Copyright 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import decimal
import json
import logging
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
Exception.__init__(self)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return round(o, 8)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy(object):
__id_count = 0
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None):
self.__service_url = service_url
self.__service_name = service_name
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
(user, passwd) = (self.__url.username, self.__url.password)
try:
user = user.encode('utf8')
except AttributeError:
pass
try:
passwd = passwd.encode('utf8')
except AttributeError:
pass
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
None, None, False,
timeout)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
False, timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self.__service_name is not None:
name = "%s.%s" % (self.__service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except httplib.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
def __call__(self, *args):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self.__service_name,
json.dumps(args, default=EncodeDecimal)))
postdata = json.dumps({'version': '1.1',
'method': self.__service_name,
'params': args,
'id': AuthServiceProxy.__id_count}, default=EncodeDecimal)
response = self._request('POST', self.__url.path, postdata)
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal)
log.debug("--> "+postdata)
return self._request('POST', self.__url.path, postdata)
def _get_response(self):
http_response = self.__conn.getresponse()
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
if "error" in response and response["error"] is None:
log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal)))
else:
log.debug("<-- "+responsedata)
return response
|
gangadharkadam/johnerp
|
refs/heads/develop
|
erpnext/patches/v4_0/fix_address_template.py
|
39
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
from __future__ import unicode_literals
import frappe
def execute():
missing_line = """{{ address_line1 }}<br>"""
for name, template in frappe.db.sql("select name, template from `tabAddress Template`"):
if missing_line not in template:
d = frappe.get_doc("Address Template", name)
d.template = missing_line + d.template
d.save()
|
muxi/grpc
|
refs/heads/master
|
src/python/grpcio_reflection/grpc_reflection/__init__.py
|
525
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
ahojjati/grr
|
refs/heads/master
|
client/vfs.py
|
6
|
#!/usr/bin/env python
"""This file implements a VFS abstraction on the client."""
from grr.client import client_utils
from grr.lib import config_lib
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import paths as rdf_paths
# A central Cache for vfs handlers. This can be used to keep objects alive
# for a limited time.
DEVICE_CACHE = utils.TimeBasedCache()
class VFSHandler(object):
"""Base class for handling objects in the VFS."""
supported_pathtype = -1
# Should this handler be auto-registered?
auto_register = False
size = 0
offset = 0
# This is the VFS path to this specific handler.
path = "/"
# This will be set by the VFSOpen factory to the pathspec of the final
# destination of this handler. This pathspec will be case corrected and
# updated to reflect any potential recursion.
pathspec = None
base_fd = None
__metaclass__ = registry.MetaclassRegistry
def __init__(self, base_fd, pathspec=None, progress_callback=None,
full_pathspec=None):
"""Constructor.
Args:
base_fd: A handler to the predecessor handler.
pathspec: The pathspec to open.
progress_callback: A callback to indicate that the open call is still
working but needs more time.
full_pathspec: The full pathspec we are trying to open.
Raises:
IOError: if this handler can not be instantiated over the
requested path.
"""
_ = pathspec
_ = full_pathspec
self.base_fd = base_fd
self.progress_callback = progress_callback
if base_fd is None:
self.pathspec = rdf_paths.PathSpec()
else:
# Make a copy of the base pathspec.
self.pathspec = base_fd.pathspec.Copy()
self.metadata = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.Close()
return False
def Seek(self, offset, whence=0):
"""Seek to an offset in the file."""
if whence == 0:
self.offset = offset
elif whence == 1:
self.offset += offset
elif whence == 2:
self.offset = self.size + offset
else:
raise RuntimeError("Illegal whence value %s" % whence)
def Read(self, length):
"""Reads some data from the file."""
raise NotImplementedError
def Stat(self):
"""Returns a StatEntry about this file."""
raise NotImplementedError
def IsDirectory(self):
"""Returns true if this object can contain other objects."""
raise NotImplementedError
def Tell(self):
return self.offset
def Close(self):
"""Close internal file descriptors."""
def OpenAsContainer(self):
"""Guesses a container from the current object."""
if self.IsDirectory():
return self
# TODO(user): Add support for more containers here (e.g. registries, zip
# files etc).
else: # For now just guess TSK.
return VFS_HANDLERS[rdf_paths.PathSpec.PathType.TSK](
self, rdf_paths.PathSpec(path="/",
pathtype=rdf_paths.PathSpec.PathType.TSK),
progress_callback=self.progress_callback)
def MatchBestComponentName(self, component):
"""Returns the name of the component which matches best our base listing.
In order to do the best case insensitive matching we list the files in the
base handler and return the base match for this component.
Args:
component: A component name which should be present in this directory.
Returns:
the best component name.
"""
fd = self.OpenAsContainer()
# Adjust the component casing
file_listing = set(fd.ListNames())
# First try an exact match
if component not in file_listing:
# Now try to match lower case
lower_component = component.lower()
for x in file_listing:
if lower_component == x.lower():
component = x
break
if fd.supported_pathtype != self.pathspec.pathtype:
new_pathspec = rdf_paths.PathSpec(path=component,
pathtype=fd.supported_pathtype)
else:
new_pathspec = self.pathspec.last.Copy()
new_pathspec.path = component
return new_pathspec
def ListFiles(self):
"""An iterator over all VFS files contained in this directory.
Generates a StatEntry for each file or directory.
Raises:
IOError: if this fails.
"""
def ListNames(self):
"""A generator for all names in this directory."""
return []
# These are file object conformant namings for library functions that
# grr uses, and that expect to interact with 'real' file objects.
read = utils.Proxy("Read")
seek = utils.Proxy("Seek")
stat = utils.Proxy("Stat")
tell = utils.Proxy("Tell")
close = utils.Proxy("Close")
@classmethod
def Open(cls, fd, component, pathspec=None, progress_callback=None,
full_pathspec=None):
"""Try to correct the casing of component.
This method is called when we failed to open the component directly. We try
to transform the component into something which is likely to work.
In this implementation, we correct the case of the component until we can
not open the path any more.
Args:
fd: The base fd we will use.
component: The component we should open.
pathspec: The rest of the pathspec object.
progress_callback: A callback to indicate that the open call is still
working but needs more time.
full_pathspec: The full pathspec we are trying to open.
Returns:
A file object.
Raises:
IOError: If nothing could be opened still.
"""
# The handler for this component
try:
handler = VFS_HANDLERS[component.pathtype]
except KeyError:
raise IOError(
"VFS handler %d not supported." % component.pathtype)
# We will not do any case folding unless requested.
if component.path_options == rdf_paths.PathSpec.Options.CASE_LITERAL:
return handler(base_fd=fd, pathspec=component)
path_components = client_utils.LocalPathToCanonicalPath(component.path)
path_components = ["/"] + filter(None, path_components.split("/"))
for i, path_component in enumerate(path_components):
try:
if fd:
new_pathspec = fd.MatchBestComponentName(path_component)
else:
new_pathspec = component
new_pathspec.path = path_component
# The handler for this component
try:
handler = VFS_HANDLERS[new_pathspec.pathtype]
except KeyError:
raise IOError(
"VFS handler %d not supported." % new_pathspec.pathtype)
fd = handler(base_fd=fd, pathspec=new_pathspec,
full_pathspec=full_pathspec,
progress_callback=progress_callback)
except IOError:
# Can not open the first component, we must raise here.
if i <= 1:
raise IOError("File not found")
# Insert the remaining path at the front of the pathspec.
pathspec.Insert(0, path=utils.JoinPath(*path_components[i:]),
pathtype=rdf_paths.PathSpec.PathType.TSK)
break
return fd
def GetMetadata(self):
return self.metadata
# A registry of all VFSHandler registered
VFS_HANDLERS = {}
# The paths we should use as virtual root for VFS operations.
VFS_VIRTUALROOTS = {}
class VFSInit(registry.InitHook):
"""Register all known vfs handlers to open a pathspec types."""
def Run(self):
VFS_HANDLERS.clear()
for handler in VFSHandler.classes.values():
if handler.auto_register:
VFS_HANDLERS[handler.supported_pathtype] = handler
VFS_VIRTUALROOTS.clear()
vfs_virtualroots = config_lib.CONFIG["Client.vfs_virtualroots"]
for vfs_virtualroot in vfs_virtualroots:
try:
handler_string, root = vfs_virtualroot.split(":", 1)
except ValueError:
raise ValueError(
"Badly formatted vfs virtual root: %s. Correct format is "
"os:/path/to/virtual_root" % vfs_virtualroot)
handler_string = handler_string.upper()
handler = rdf_paths.PathSpec.PathType.enum_dict.get(handler_string)
if handler is None:
raise ValueError("Unsupported vfs handler: %s." % handler_string)
# We need some translation here, TSK needs an OS virtual root base. For
# every other handler we can just keep the type the same.
base_types = {
rdf_paths.PathSpec.PathType.TSK: rdf_paths.PathSpec.PathType.OS
}
base_type = base_types.get(handler, handler)
VFS_VIRTUALROOTS[handler] = rdf_paths.PathSpec(
path=root, pathtype=base_type, is_virtualroot=True)
def VFSOpen(pathspec, progress_callback=None):
"""Expands pathspec to return an expanded Path.
A pathspec is a specification of how to access the file by recursively opening
each part of the path by different drivers. For example the following
pathspec:
pathtype: OS
path: "/dev/sda1"
nested_path {
pathtype: TSK
path: "/home/image2.img"
nested_path {
pathtype: TSK
path: "/home/a.txt"
}
}
Instructs the system to:
1) open /dev/sda1 using the OS driver.
2) Pass the obtained filelike object to the TSK driver to open
"/home/image2.img".
3) The obtained filelike object should be passed to the TSK driver to open
"/home/a.txt".
The problem remains how to get to this expanded path specification. Since the
server is not aware of all the files on the client, the server may request
this:
pathtype: OS
path: "/dev/sda1"
nested_path {
pathtype: TSK
path: "/home/image2.img/home/a.txt"
}
Or even this:
pathtype: OS
path: "/dev/sda1/home/image2.img/home/a.txt"
This function converts the pathspec requested by the server into an expanded
pathspec required to actually open the file. This is done by expanding each
component of the pathspec in turn.
Expanding the component is done by opening each leading directory in turn and
checking if it is a directory of a file. If its a file, we examine the file
headers to determine the next appropriate driver to use, and create a nested
pathspec.
Note that for some clients there might be a virtual root specified. This
is a directory that gets prepended to all pathspecs of a given
pathtype. For example if there is a virtual root defined as
["os:/virtualroot"], a path specification like
pathtype: OS
path: "/home/user/*"
will get translated into
pathtype: OS
path: "/virtualroot"
is_virtualroot: True
nested_path {
pathtype: OS
path: "/dev/sda1"
}
Args:
pathspec: A Path() protobuf to normalize.
progress_callback: A callback to indicate that the open call is still
working but needs more time.
Returns:
The open filelike object. This will contain the expanded Path() protobuf as
the member fd.pathspec.
Raises:
IOError: if one of the path components can not be opened.
"""
fd = None
# Adjust the pathspec in case we are using a vfs_virtualroot.
vroot = VFS_VIRTUALROOTS.get(pathspec.pathtype)
# If we have a virtual root for this vfs handler, we need to prepend
# it to the incoming pathspec except if the pathspec is explicitly
# marked as containing a virtual root already or if it isn't marked but
# the path already contains the virtual root.
if (not vroot or
pathspec.is_virtualroot or
pathspec.CollapsePath().startswith(vroot.CollapsePath())):
# No virtual root but opening changes the pathspec so we always work on a
# copy.
working_pathspec = pathspec.Copy()
else:
# We're in a virtual root, put the target pathspec inside the virtual root
# as a nested path.
working_pathspec = vroot.Copy()
working_pathspec.last.nested_path = pathspec.Copy()
# For each pathspec step, we get the handler for it and instantiate it with
# the old object, and the current step.
while working_pathspec:
component = working_pathspec.Pop()
try:
handler = VFS_HANDLERS[component.pathtype]
except KeyError:
raise IOError(
"VFS handler %d not supported." % component.pathtype)
try:
# Open the component.
fd = handler.Open(fd, component, pathspec=working_pathspec,
full_pathspec=pathspec,
progress_callback=progress_callback)
except IOError as e:
raise IOError("%s: %s" % (e, pathspec))
return fd
def ReadVFS(pathspec, offset, length, progress_callback=None):
"""Read from the VFS and return the contents.
Args:
pathspec: path to read from
offset: number of bytes to skip
length: number of bytes to read
progress_callback: A callback to indicate that the open call is still
working but needs more time.
Returns:
VFS file contents
"""
fd = VFSOpen(pathspec, progress_callback=progress_callback)
fd.Seek(offset)
return fd.Read(length)
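# Illustrative usage (the device path is an example only):
#
#   pathspec = rdf_paths.PathSpec(path="/dev/sda1",
#                                 pathtype=rdf_paths.PathSpec.PathType.OS)
#   data = ReadVFS(pathspec, offset=0, length=4096)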
|
aarchiba/scipy
|
refs/heads/master
|
scipy/linalg/special_matrices.py
|
4
|
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from scipy._lib.six import xrange
from scipy._lib.six import string_types
from numpy.lib.stride_tricks import as_strided
__all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel',
'hadamard', 'leslie', 'kron', 'block_diag', 'companion',
'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft',
'fiedler', 'fiedler_companion']
# -----------------------------------------------------------------------------
# matrix construction functions
# -----------------------------------------------------------------------------
#
# *Note*: tri{,u,l} is implemented in numpy, but an important bug was fixed in
# 2.0.0.dev-1af2f3, the following tri{,u,l} definitions are here for backwards
# compatibility.
def tri(N, M=None, k=0, dtype=None):
"""
Construct (N, M) matrix filled with ones at and below the k-th diagonal.
    The matrix has A[i,j] == 1 for j <= i + k
Parameters
----------
N : int
The size of the first dimension of the matrix.
M : int or None, optional
The size of the second dimension of the matrix. If `M` is None,
`M = N` is assumed.
k : int, optional
        The sub-diagonal at and below which the matrix is filled with ones.
`k` = 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0
superdiagonal.
dtype : dtype, optional
Data type of the matrix.
Returns
-------
tri : (N, M) ndarray
Tri matrix.
Examples
--------
>>> from scipy.linalg import tri
>>> tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> tri(3, 5, -1, dtype=int)
array([[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0]])
"""
if M is None:
M = N
if isinstance(M, string_types):
# pearu: any objections to remove this feature?
# As tri(N,'d') is equivalent to tri(N,dtype='d')
dtype = M
M = N
m = np.greater_equal.outer(np.arange(k, N+k), np.arange(M))
if dtype is None:
return m
else:
return m.astype(dtype)
def tril(m, k=0):
"""
Make a copy of a matrix with elements above the k-th diagonal zeroed.
Parameters
----------
m : array_like
Matrix whose elements to return
k : int, optional
Diagonal above which to zero elements.
`k` == 0 is the main diagonal, `k` < 0 subdiagonal and
`k` > 0 superdiagonal.
Returns
-------
tril : ndarray
Return is the same shape and type as `m`.
Examples
--------
>>> from scipy.linalg import tril
>>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = np.asarray(m)
out = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char) * m
return out
def triu(m, k=0):
"""
Make a copy of a matrix with elements below the k-th diagonal zeroed.
Parameters
----------
m : array_like
Matrix whose elements to return
k : int, optional
Diagonal below which to zero elements.
`k` == 0 is the main diagonal, `k` < 0 subdiagonal and
`k` > 0 superdiagonal.
Returns
-------
triu : ndarray
Return matrix with zeroed elements below the k-th diagonal and has
same shape and type as `m`.
Examples
--------
>>> from scipy.linalg import triu
>>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = np.asarray(m)
out = (1 - tri(m.shape[0], m.shape[1], k - 1, m.dtype.char)) * m
return out
def toeplitz(c, r=None):
"""
Construct a Toeplitz matrix.
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c : array_like
First column of the matrix. Whatever the actual shape of `c`, it
will be converted to a 1-D array.
r : array_like, optional
First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
in this case, if c[0] is real, the result is a Hermitian matrix.
r[0] is ignored; the first row of the returned matrix is
``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be
converted to a 1-D array.
Returns
-------
A : (len(c), len(r)) ndarray
The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
See Also
--------
circulant : circulant matrix
hankel : Hankel matrix
solve_toeplitz : Solve a Toeplitz system.
Notes
-----
The behavior when `c` or `r` is a scalar, or when `c` is complex and
`r` is None, was changed in version 0.8.0. The behavior in previous
versions was undocumented and is no longer supported.
Examples
--------
>>> from scipy.linalg import toeplitz
>>> toeplitz([1,2,3], [1,4,5,6])
array([[1, 4, 5, 6],
[2, 1, 4, 5],
[3, 2, 1, 4]])
>>> toeplitz([1.0, 2+3j, 4-1j])
array([[ 1.+0.j, 2.-3.j, 4.+1.j],
[ 2.+3.j, 1.+0.j, 2.-3.j],
[ 4.-1.j, 2.+3.j, 1.+0.j]])
"""
c = np.asarray(c).ravel()
if r is None:
r = c.conjugate()
else:
r = np.asarray(r).ravel()
# Form a 1D array containing a reversed c followed by r[1:] that could be
# strided to give us toeplitz matrix.
vals = np.concatenate((c[::-1], r[1:]))
out_shp = len(c), len(r)
n = vals.strides[0]
return as_strided(vals[len(c)-1:], shape=out_shp, strides=(-n, n)).copy()
def circulant(c):
"""
Construct a circulant matrix.
Parameters
----------
c : (N,) array_like
1-D array, the first column of the matrix.
Returns
-------
A : (N, N) ndarray
A circulant matrix whose first column is `c`.
See Also
--------
toeplitz : Toeplitz matrix
hankel : Hankel matrix
solve_circulant : Solve a circulant system.
Notes
-----
.. versionadded:: 0.8.0
Examples
--------
>>> from scipy.linalg import circulant
>>> circulant([1, 2, 3])
array([[1, 3, 2],
[2, 1, 3],
[3, 2, 1]])
"""
c = np.asarray(c).ravel()
# Form an extended array that could be strided to give circulant version
c_ext = np.concatenate((c[::-1], c[:0:-1]))
L = len(c)
n = c_ext.strides[0]
return as_strided(c_ext[L-1:], shape=(L, L), strides=(-n, n)).copy()
def hankel(c, r=None):
"""
Construct a Hankel matrix.
The Hankel matrix has constant anti-diagonals, with `c` as its
first column and `r` as its last row. If `r` is not given, then
`r = zeros_like(c)` is assumed.
Parameters
----------
c : array_like
First column of the matrix. Whatever the actual shape of `c`, it
will be converted to a 1-D array.
r : array_like, optional
Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed.
r[0] is ignored; the last row of the returned matrix is
``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be
converted to a 1-D array.
Returns
-------
A : (len(c), len(r)) ndarray
The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
See Also
--------
toeplitz : Toeplitz matrix
circulant : circulant matrix
Examples
--------
>>> from scipy.linalg import hankel
>>> hankel([1, 17, 99])
array([[ 1, 17, 99],
[17, 99, 0],
[99, 0, 0]])
>>> hankel([1,2,3,4], [4,7,7,8,9])
array([[1, 2, 3, 4, 7],
[2, 3, 4, 7, 7],
[3, 4, 7, 7, 8],
[4, 7, 7, 8, 9]])
"""
c = np.asarray(c).ravel()
if r is None:
r = np.zeros_like(c)
else:
r = np.asarray(r).ravel()
# Form a 1D array of values to be used in the matrix, containing `c`
# followed by r[1:].
vals = np.concatenate((c, r[1:]))
# Stride on concatenated array to get hankel matrix
out_shp = len(c), len(r)
n = vals.strides[0]
return as_strided(vals, shape=out_shp, strides=(n, n)).copy()
def hadamard(n, dtype=int):
"""
Construct a Hadamard matrix.
Constructs an n-by-n Hadamard matrix, using Sylvester's
construction. `n` must be a power of 2.
Parameters
----------
n : int
The order of the matrix. `n` must be a power of 2.
dtype : dtype, optional
The data type of the array to be constructed.
Returns
-------
H : (n, n) ndarray
The Hadamard matrix.
Notes
-----
.. versionadded:: 0.8.0
Examples
--------
>>> from scipy.linalg import hadamard
>>> hadamard(2, dtype=complex)
array([[ 1.+0.j, 1.+0.j],
[ 1.+0.j, -1.-0.j]])
>>> hadamard(4)
array([[ 1, 1, 1, 1],
[ 1, -1, 1, -1],
[ 1, 1, -1, -1],
[ 1, -1, -1, 1]])
"""
# This function is a slightly modified version of the
# function contributed by Ivo in ticket #675.
if n < 1:
lg2 = 0
else:
lg2 = int(math.log(n, 2))
if 2 ** lg2 != n:
        raise ValueError("n must be a positive integer, and n must be "
"a power of 2")
H = np.array([[1]], dtype=dtype)
# Sylvester's construction
for i in range(0, lg2):
H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
return H
def leslie(f, s):
"""
Create a Leslie matrix.
Given the length n array of fecundity coefficients `f` and the length
n-1 array of survival coefficients `s`, return the associated Leslie
matrix.
Parameters
----------
f : (N,) array_like
The "fecundity" coefficients.
s : (N-1,) array_like
The "survival" coefficients, has to be 1-D. The length of `s`
must be one less than the length of `f`, and it must be at least 1.
Returns
-------
L : (N, N) ndarray
The array is zero except for the first row,
which is `f`, and the first sub-diagonal, which is `s`.
The data-type of the array will be the data-type of ``f[0]+s[0]``.
Notes
-----
.. versionadded:: 0.8.0
The Leslie matrix is used to model discrete-time, age-structured
population growth [1]_ [2]_. In a population with `n` age classes, two sets
of parameters define a Leslie matrix: the `n` "fecundity coefficients",
which give the number of offspring per-capita produced by each age
class, and the `n` - 1 "survival coefficients", which give the
per-capita survival rate of each age class.
References
----------
.. [1] P. H. Leslie, On the use of matrices in certain population
mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945)
.. [2] P. H. Leslie, Some further notes on the use of matrices in
population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245
(Dec. 1948)
Examples
--------
>>> from scipy.linalg import leslie
>>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7])
array([[ 0.1, 2. , 1. , 0.1],
[ 0.2, 0. , 0. , 0. ],
[ 0. , 0.8, 0. , 0. ],
[ 0. , 0. , 0.7, 0. ]])
"""
f = np.atleast_1d(f)
s = np.atleast_1d(s)
if f.ndim != 1:
raise ValueError("Incorrect shape for f. f must be one-dimensional")
if s.ndim != 1:
raise ValueError("Incorrect shape for s. s must be one-dimensional")
if f.size != s.size + 1:
raise ValueError("Incorrect lengths for f and s. The length"
" of s must be one less than the length of f.")
if s.size == 0:
raise ValueError("The length of s must be at least 1.")
tmp = f[0] + s[0]
n = f.size
a = np.zeros((n, n), dtype=tmp.dtype)
a[0] = f
a[list(range(1, n)), list(range(0, n - 1))] = s
return a
def kron(a, b):
"""
Kronecker product.
The result is the block matrix::
a[0,0]*b a[0,1]*b ... a[0,-1]*b
a[1,0]*b a[1,1]*b ... a[1,-1]*b
...
a[-1,0]*b a[-1,1]*b ... a[-1,-1]*b
Parameters
----------
a : (M, N) ndarray
Input array
b : (P, Q) ndarray
Input array
Returns
-------
A : (M*P, N*Q) ndarray
Kronecker product of `a` and `b`.
Examples
--------
>>> from numpy import array
>>> from scipy.linalg import kron
>>> kron(array([[1,2],[3,4]]), array([[1,1,1]]))
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
"""
if not a.flags['CONTIGUOUS']:
a = np.reshape(a, a.shape)
if not b.flags['CONTIGUOUS']:
b = np.reshape(b, b.shape)
o = np.outer(a, b)
o = o.reshape(a.shape + b.shape)
return np.concatenate(np.concatenate(o, axis=1), axis=1)
def block_diag(*arrs):
"""
Create a block diagonal matrix from provided arrays.
Given the inputs `A`, `B` and `C`, the output will have these
arrays arranged on the diagonal::
[[A, 0, 0],
[0, B, 0],
[0, 0, C]]
Parameters
----------
A, B, C, ... : array_like, up to 2-D
Input arrays. A 1-D array or array_like sequence of length `n` is
treated as a 2-D array with shape ``(1,n)``.
Returns
-------
D : ndarray
Array with `A`, `B`, `C`, ... on the diagonal. `D` has the
same dtype as `A`.
Notes
-----
If all the input arrays are square, the output is known as a
block diagonal matrix.
Empty sequences (i.e., array-likes of zero size) will not be ignored.
    Notably, both [] and [[]] are treated as matrices with shape ``(1,0)``.
Examples
--------
>>> from scipy.linalg import block_diag
>>> A = [[1, 0],
... [0, 1]]
>>> B = [[3, 4, 5],
... [6, 7, 8]]
>>> C = [[7]]
>>> P = np.zeros((2, 0), dtype='int32')
>>> block_diag(A, B, C)
array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 3, 4, 5, 0],
[0, 0, 6, 7, 8, 0],
[0, 0, 0, 0, 0, 7]])
>>> block_diag(A, P, B, C)
array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 3, 4, 5, 0],
[0, 0, 6, 7, 8, 0],
[0, 0, 0, 0, 0, 7]])
>>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]])
array([[ 1., 0., 0., 0., 0.],
[ 0., 2., 3., 0., 0.],
[ 0., 0., 0., 4., 5.],
[ 0., 0., 0., 6., 7.]])
"""
if arrs == ():
arrs = ([],)
arrs = [np.atleast_2d(a) for a in arrs]
bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2]
if bad_args:
raise ValueError("arguments in the following positions have dimension "
"greater than 2: %s" % bad_args)
shapes = np.array([a.shape for a in arrs])
out_dtype = np.find_common_type([arr.dtype for arr in arrs], [])
out = np.zeros(np.sum(shapes, axis=0), dtype=out_dtype)
r, c = 0, 0
for i, (rr, cc) in enumerate(shapes):
out[r:r + rr, c:c + cc] = arrs[i]
r += rr
c += cc
return out
def companion(a):
"""
Create a companion matrix.
Create the companion matrix [1]_ associated with the polynomial whose
coefficients are given in `a`.
Parameters
----------
a : (N,) array_like
1-D array of polynomial coefficients. The length of `a` must be
at least two, and ``a[0]`` must not be zero.
Returns
-------
c : (N-1, N-1) ndarray
The first row of `c` is ``-a[1:]/a[0]``, and the first
sub-diagonal is all ones. The data-type of the array is the same
as the data-type of ``1.0*a[0]``.
Raises
------
ValueError
If any of the following are true: a) ``a.ndim != 1``;
b) ``a.size < 2``; c) ``a[0] == 0``.
Notes
-----
.. versionadded:: 0.8.0
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> from scipy.linalg import companion
>>> companion([1, -10, 31, -30])
array([[ 10., -31., 30.],
[ 1., 0., 0.],
[ 0., 1., 0.]])
"""
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Incorrect shape for `a`. `a` must be "
"one-dimensional.")
if a.size < 2:
raise ValueError("The length of `a` must be at least 2.")
if a[0] == 0:
raise ValueError("The first coefficient in `a` must not be zero.")
first_row = -a[1:] / (1.0 * a[0])
n = a.size
c = np.zeros((n - 1, n - 1), dtype=first_row.dtype)
c[0] = first_row
c[list(range(1, n - 1)), list(range(0, n - 2))] = 1
return c
def helmert(n, full=False):
"""
Create a Helmert matrix of order `n`.
This has applications in statistics, compositional or simplicial analysis,
and in Aitchison geometry.
Parameters
----------
n : int
The size of the array to create.
full : bool, optional
If True the (n, n) ndarray will be returned.
Otherwise the submatrix that does not include the first
row will be returned.
Default: False.
Returns
-------
M : ndarray
The Helmert matrix.
The shape is (n, n) or (n-1, n) depending on the `full` argument.
Examples
--------
>>> from scipy.linalg import helmert
>>> helmert(5, full=True)
array([[ 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 ],
[ 0.70710678, -0.70710678, 0. , 0. , 0. ],
[ 0.40824829, 0.40824829, -0.81649658, 0. , 0. ],
[ 0.28867513, 0.28867513, 0.28867513, -0.8660254 , 0. ],
[ 0.2236068 , 0.2236068 , 0.2236068 , 0.2236068 , -0.89442719]])
"""
H = np.tril(np.ones((n, n)), -1) - np.diag(np.arange(n))
d = np.arange(n) * np.arange(1, n+1)
H[0] = 1
d[0] = n
H_full = H / np.sqrt(d)[:, np.newaxis]
if full:
return H_full
else:
return H_full[1:]
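# Illustrative sketch (editorial addition, not part of the upstream scipy
# source): the full Helmert matrix is orthogonal, so its rows are orthonormal.
# The helper name below is a hypothetical addition used only for illustration.
def _helmert_orthogonality_demo(n=5):
    H_full = helmert(n, full=True)
    assert np.allclose(H_full @ H_full.T, np.eye(n))  # orthonormal rows
    assert np.allclose(H_full.T @ H_full, np.eye(n))  # hence orthogonal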
def hilbert(n):
"""
Create a Hilbert matrix of order `n`.
Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`.
Parameters
----------
n : int
The size of the array to create.
Returns
-------
h : (n, n) ndarray
The Hilbert matrix.
See Also
--------
invhilbert : Compute the inverse of a Hilbert matrix.
Notes
-----
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.linalg import hilbert
>>> hilbert(3)
array([[ 1. , 0.5 , 0.33333333],
[ 0.5 , 0.33333333, 0.25 ],
[ 0.33333333, 0.25 , 0.2 ]])
"""
values = 1.0 / (1.0 + np.arange(2 * n - 1))
h = hankel(values[:n], r=values[n - 1:])
return h
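# Illustrative sketch (editorial addition, not part of the upstream scipy
# source): the Hankel construction above matches the defining formula
# h[i, j] = 1 / (i + j + 1). The helper name is a hypothetical addition.
def _hilbert_formula_demo(n=4):
    explicit = np.array([[1.0 / (i + j + 1) for j in range(n)]
                         for i in range(n)])
    assert np.allclose(hilbert(n), explicit)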
def invhilbert(n, exact=False):
"""
Compute the inverse of the Hilbert matrix of order `n`.
The entries in the inverse of a Hilbert matrix are integers. When `n`
is greater than 14, some entries in the inverse exceed the upper limit
of 64 bit integers. The `exact` argument provides two options for
dealing with these large integers.
Parameters
----------
n : int
The order of the Hilbert matrix.
exact : bool, optional
If False, the data type of the array that is returned is np.float64,
and the array is an approximation of the inverse.
If True, the array is the exact integer inverse array. To represent
the exact inverse when n > 14, the returned array is an object array
of long integers. For n <= 14, the exact inverse is returned as an
array with data type np.int64.
Returns
-------
invh : (n, n) ndarray
The data type of the array is np.float64 if `exact` is False.
If `exact` is True, the data type is either np.int64 (for n <= 14)
or object (for n > 14). In the latter case, the objects in the
array will be long integers.
See Also
--------
hilbert : Create a Hilbert matrix.
Notes
-----
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.linalg import invhilbert
>>> invhilbert(4)
array([[ 16., -120., 240., -140.],
[ -120., 1200., -2700., 1680.],
[ 240., -2700., 6480., -4200.],
[ -140., 1680., -4200., 2800.]])
>>> invhilbert(4, exact=True)
array([[ 16, -120, 240, -140],
[ -120, 1200, -2700, 1680],
[ 240, -2700, 6480, -4200],
[ -140, 1680, -4200, 2800]], dtype=int64)
>>> invhilbert(16)[7,7]
4.2475099528537506e+19
>>> invhilbert(16, exact=True)[7,7]
    42475099528537378560
"""
from scipy.special import comb
if exact:
if n > 14:
dtype = object
else:
dtype = np.int64
else:
dtype = np.float64
invh = np.empty((n, n), dtype=dtype)
    for i in range(n):
        for j in range(0, i + 1):
s = i + j
invh[i, j] = ((-1) ** s * (s + 1) *
comb(n + i, n - j - 1, exact) *
comb(n + j, n - i - 1, exact) *
comb(s, i, exact) ** 2)
if i != j:
invh[j, i] = invh[i, j]
return invh
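# Illustrative sketch (editorial addition, not part of the upstream scipy
# source): for small n the exact integer inverse multiplies hilbert(n) to the
# identity within round-off. The helper name is a hypothetical addition.
def _invhilbert_demo(n=5):
    H = hilbert(n)
    Hinv = invhilbert(n, exact=True).astype(np.float64)
    assert np.allclose(H @ Hinv, np.eye(n))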
def pascal(n, kind='symmetric', exact=True):
"""
Returns the n x n Pascal matrix.
The Pascal matrix is a matrix containing the binomial coefficients as
its elements.
Parameters
----------
n : int
The size of the matrix to create; that is, the result is an n x n
matrix.
kind : str, optional
Must be one of 'symmetric', 'lower', or 'upper'.
Default is 'symmetric'.
exact : bool, optional
If `exact` is True, the result is either an array of type
        numpy.uint64 (if n < 35) or an object array of Python integers.
If `exact` is False, the coefficients in the matrix are computed using
`scipy.special.comb` with `exact=False`. The result will be a floating
point array, and the values in the array will not be the exact
coefficients, but this version is much faster than `exact=True`.
Returns
-------
p : (n, n) ndarray
The Pascal matrix.
See Also
--------
invpascal
Notes
-----
See https://en.wikipedia.org/wiki/Pascal_matrix for more information
about Pascal matrices.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.linalg import pascal
>>> pascal(4)
array([[ 1, 1, 1, 1],
[ 1, 2, 3, 4],
[ 1, 3, 6, 10],
[ 1, 4, 10, 20]], dtype=uint64)
>>> pascal(4, kind='lower')
array([[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 2, 1, 0],
[1, 3, 3, 1]], dtype=uint64)
>>> pascal(50)[-1, -1]
    25477612258980856902730428600
    >>> from scipy.special import comb
    >>> comb(98, 49, exact=True)
    25477612258980856902730428600
"""
from scipy.special import comb
if kind not in ['symmetric', 'lower', 'upper']:
raise ValueError("kind must be 'symmetric', 'lower', or 'upper'")
if exact:
if n >= 35:
L_n = np.empty((n, n), dtype=object)
L_n.fill(0)
else:
L_n = np.zeros((n, n), dtype=np.uint64)
for i in range(n):
for j in range(i + 1):
L_n[i, j] = comb(i, j, exact=True)
else:
L_n = comb(*np.ogrid[:n, :n])
if kind == 'lower':
p = L_n
elif kind == 'upper':
p = L_n.T
else:
p = np.dot(L_n, L_n.T)
return p
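# Illustrative sketch (editorial addition, not part of the upstream scipy
# source): the symmetric Pascal matrix is the product of its lower- and
# upper-triangular variants, exactly as the 'symmetric' branch above computes
# it. The helper name is a hypothetical addition used only for illustration.
def _pascal_factorization_demo(n=6):
    S = pascal(n)                # symmetric
    L = pascal(n, kind='lower')  # lower-triangular
    U = pascal(n, kind='upper')  # upper-triangular (equals L.T)
    assert np.array_equal(S, L @ U)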
def invpascal(n, kind='symmetric', exact=True):
"""
Returns the inverse of the n x n Pascal matrix.
The Pascal matrix is a matrix containing the binomial coefficients as
its elements.
Parameters
----------
n : int
The size of the matrix to create; that is, the result is an n x n
matrix.
kind : str, optional
Must be one of 'symmetric', 'lower', or 'upper'.
Default is 'symmetric'.
exact : bool, optional
If `exact` is True, the result is either an array of type
        ``numpy.int64`` (if `n` < 35) or an object array of Python integers.
If `exact` is False, the coefficients in the matrix are computed using
`scipy.special.comb` with `exact=False`. The result will be a floating
point array, and for large `n`, the values in the array will not be the
exact coefficients.
Returns
-------
invp : (n, n) ndarray
The inverse of the Pascal matrix.
See Also
--------
pascal
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] "Pascal matrix", https://en.wikipedia.org/wiki/Pascal_matrix
.. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical
Gazette, 59(408), pp. 111-112, 1975.
Examples
--------
>>> from scipy.linalg import invpascal, pascal
>>> invp = invpascal(5)
>>> invp
array([[ 5, -10, 10, -5, 1],
[-10, 30, -35, 19, -4],
[ 10, -35, 46, -27, 6],
[ -5, 19, -27, 17, -4],
[ 1, -4, 6, -4, 1]])
>>> p = pascal(5)
>>> p.dot(invp)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
An example of the use of `kind` and `exact`:
>>> invpascal(5, kind='lower', exact=False)
array([[ 1., -0., 0., -0., 0.],
[-1., 1., -0., 0., -0.],
[ 1., -2., 1., -0., 0.],
[-1., 3., -3., 1., -0.],
[ 1., -4., 6., -4., 1.]])
"""
from scipy.special import comb
if kind not in ['symmetric', 'lower', 'upper']:
raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.")
if kind == 'symmetric':
if exact:
if n > 34:
dt = object
else:
dt = np.int64
else:
dt = np.float64
invp = np.empty((n, n), dtype=dt)
for i in range(n):
for j in range(0, i + 1):
v = 0
for k in range(n - i):
v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j,
exact=exact)
invp[i, j] = (-1)**(i - j) * v
if i != j:
invp[j, i] = invp[i, j]
else:
        # For the 'lower' and 'upper' cases, we compute the inverse by
        # changing the sign of every other diagonal of the Pascal matrix.
invp = pascal(n, kind=kind, exact=exact)
if invp.dtype == np.uint64:
            # This cast from np.uint64 to np.int64 is OK, because if `kind` is not
# "symmetric", the values in invp are all much less than 2**63.
invp = invp.view(np.int64)
# The toeplitz matrix has alternating bands of 1 and -1.
invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype)
return invp
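# Illustrative sketch (editorial addition, not part of the upstream scipy
# source): for every 'kind', the exact result is the integer inverse of the
# corresponding Pascal matrix. The helper name is a hypothetical addition.
def _invpascal_demo(n=5):
    identity = np.eye(n, dtype=np.int64)
    for kind in ('symmetric', 'lower', 'upper'):
        p = pascal(n, kind=kind).astype(np.int64)
        invp = invpascal(n, kind=kind)
        assert np.array_equal(p @ invp, identity)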
def dft(n, scale=None):
"""
Discrete Fourier transform matrix.
Create the matrix that computes the discrete Fourier transform of a
sequence [1]_. The n-th primitive root of unity used to generate the
matrix is exp(-2*pi*i/n), where i = sqrt(-1).
Parameters
----------
n : int
Size the matrix to create.
scale : str, optional
Must be None, 'sqrtn', or 'n'.
If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`.
If `scale` is 'n', the matrix is divided by `n`.
If `scale` is None (the default), the matrix is not normalized, and the
return value is simply the Vandermonde matrix of the roots of unity.
Returns
-------
m : (n, n) ndarray
The DFT matrix.
Notes
-----
When `scale` is None, multiplying a vector by the matrix returned by
`dft` is mathematically equivalent to (but much less efficient than)
the calculation performed by `scipy.fft.fft`.
.. versionadded:: 0.14.0
References
----------
.. [1] "DFT matrix", https://en.wikipedia.org/wiki/DFT_matrix
Examples
--------
>>> from scipy.linalg import dft
>>> np.set_printoptions(precision=2, suppress=True) # for compact output
>>> m = dft(5)
>>> m
array([[ 1. +0.j , 1. +0.j , 1. +0.j , 1. +0.j , 1. +0.j ],
[ 1. +0.j , 0.31-0.95j, -0.81-0.59j, -0.81+0.59j, 0.31+0.95j],
[ 1. +0.j , -0.81-0.59j, 0.31+0.95j, 0.31-0.95j, -0.81+0.59j],
[ 1. +0.j , -0.81+0.59j, 0.31-0.95j, 0.31+0.95j, -0.81-0.59j],
[ 1. +0.j , 0.31+0.95j, -0.81+0.59j, -0.81-0.59j, 0.31-0.95j]])
>>> x = np.array([1, 2, 3, 0, 3])
>>> m @ x # Compute the DFT of x
array([ 9. +0.j , 0.12-0.81j, -2.12+3.44j, -2.12-3.44j, 0.12+0.81j])
Verify that ``m @ x`` is the same as ``fft(x)``.
>>> from scipy.fft import fft
>>> fft(x) # Same result as m @ x
array([ 9. +0.j , 0.12-0.81j, -2.12+3.44j, -2.12-3.44j, 0.12+0.81j])
"""
if scale not in [None, 'sqrtn', 'n']:
raise ValueError("scale must be None, 'sqrtn', or 'n'; "
"%r is not valid." % (scale,))
omegas = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1)
m = omegas ** np.arange(n)
if scale == 'sqrtn':
m /= math.sqrt(n)
elif scale == 'n':
m /= n
return m
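# Illustrative sketch (editorial addition, not part of the upstream scipy
# source): with scale='sqrtn' the DFT matrix is unitary, so its conjugate
# transpose is its inverse. The helper name is a hypothetical addition.
def _dft_unitary_demo(n=8):
    m = dft(n, scale='sqrtn')
    assert np.allclose(m.conj().T @ m, np.eye(n))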
def fiedler(a):
"""Returns a symmetric Fiedler matrix
    Given a sequence of numbers `a`, Fiedler matrices have the structure
    ``F[i, j] = np.abs(a[i] - a[j])``, and hence zero diagonals and nonnegative
    entries. A Fiedler matrix has a dominant positive eigenvalue and all other
    eigenvalues are negative. Although not valid in general, for certain inputs
    the inverse and the determinant can be derived explicitly as given in [1]_.
Parameters
----------
a : (n,) array_like
coefficient array
Returns
-------
F : (n, n) ndarray
See Also
--------
circulant, toeplitz
Notes
-----
.. versionadded:: 1.3.0
References
----------
.. [1] J. Todd, "Basic Numerical Mathematics: Vol.2 : Numerical Algebra",
1977, Birkhauser, :doi:`10.1007/978-3-0348-7286-7`
Examples
--------
>>> from scipy.linalg import det, inv, fiedler
>>> a = [1, 4, 12, 45, 77]
>>> n = len(a)
>>> A = fiedler(a)
>>> A
array([[ 0, 3, 11, 44, 76],
[ 3, 0, 8, 41, 73],
[11, 8, 0, 33, 65],
[44, 41, 33, 0, 32],
[76, 73, 65, 32, 0]])
The explicit formulas for determinant and inverse seem to hold only for
monotonically increasing/decreasing arrays. Note the tridiagonal structure
and the corners.
>>> Ai = inv(A)
>>> Ai[np.abs(Ai) < 1e-12] = 0. # cleanup the numerical noise for display
>>> Ai
array([[-0.16008772, 0.16666667, 0. , 0. , 0.00657895],
[ 0.16666667, -0.22916667, 0.0625 , 0. , 0. ],
[ 0. , 0.0625 , -0.07765152, 0.01515152, 0. ],
[ 0. , 0. , 0.01515152, -0.03077652, 0.015625 ],
[ 0.00657895, 0. , 0. , 0.015625 , -0.00904605]])
>>> det(A)
15409151.999999998
>>> (-1)**(n-1) * 2**(n-2) * np.diff(a).prod() * (a[-1] - a[0])
15409152
"""
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Input 'a' must be a 1D array.")
if a.size == 0:
return np.array([], dtype=float)
elif a.size == 1:
return np.array([[0.]])
else:
return np.abs(a[:, None] - a)
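# Illustrative sketch (editorial addition, not part of the upstream scipy
# source): a numerical check of the spectral property stated in the docstring,
# i.e. one dominant positive eigenvalue and all remaining eigenvalues negative
# (here for a strictly increasing input). The helper name is hypothetical.
def _fiedler_spectrum_demo():
    F = fiedler([1, 4, 12, 45, 77])
    w = np.linalg.eigvalsh(F)  # eigenvalues in ascending order
    assert w[-1] > 0 and np.all(w[:-1] < 0)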
def fiedler_companion(a):
""" Returns a Fiedler companion matrix
Given a polynomial coefficient array ``a``, this function forms a
    pentadiagonal matrix with a special structure whose eigenvalues coincide
with the roots of ``a``.
Parameters
----------
a : (N,) array_like
1-D array of polynomial coefficients in descending order with a nonzero
leading coefficient. For ``N < 2``, an empty array is returned.
Returns
-------
c : (N-1, N-1) ndarray
Resulting companion matrix
Notes
-----
    Similar to `companion`, the leading coefficient should be nonzero. If the
    leading coefficient is not 1, the other coefficients are rescaled before
    the array is generated. To avoid numerical issues, it is best to provide a
    monic polynomial.
.. versionadded:: 1.3.0
See Also
--------
companion
References
----------
.. [1] M. Fiedler, " A note on companion matrices", Linear Algebra and its
Applications, 2003, :doi:`10.1016/S0024-3795(03)00548-2`
Examples
--------
>>> from scipy.linalg import fiedler_companion, eigvals
>>> p = np.poly(np.arange(1, 9, 2)) # [1., -16., 86., -176., 105.]
>>> fc = fiedler_companion(p)
>>> fc
array([[ 16., -86., 1., 0.],
[ 1., 0., 0., 0.],
[ 0., 176., 0., -105.],
[ 0., 1., 0., 0.]])
>>> eigvals(fc)
array([7.+0.j, 5.+0.j, 3.+0.j, 1.+0.j])
"""
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Input 'a' must be a 1D array.")
if a.size <= 2:
if a.size == 2:
return np.array([[-(a/a[0])[-1]]])
return np.array([], dtype=a.dtype)
if a[0] == 0.:
raise ValueError('Leading coefficient is zero.')
a = a/a[0]
n = a.size - 1
c = np.zeros((n, n), dtype=a.dtype)
# subdiagonals
c[range(3, n, 2), range(1, n-2, 2)] = 1.
c[range(2, n, 2), range(1, n-1, 2)] = -a[3::2]
# superdiagonals
c[range(0, n-2, 2), range(2, n, 2)] = 1.
c[range(0, n-1, 2), range(1, n, 2)] = -a[2::2]
c[[0, 1], 0] = [-a[1], 1]
return c
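# Illustrative sketch (editorial addition, not part of the upstream scipy
# source): the Fiedler companion matrix has the same spectrum as the classical
# companion matrix, namely the roots of the polynomial. The helper name is a
# hypothetical addition used only for illustration.
def _fiedler_companion_demo():
    p = np.array([1., -16., 86., -176., 105.])  # roots 1, 3, 5, 7
    w_fiedler = np.sort(np.linalg.eigvals(fiedler_companion(p)))
    w_classic = np.sort(np.linalg.eigvals(companion(p)))
    assert np.allclose(w_fiedler, w_classic)
    assert np.allclose(w_fiedler, np.sort(np.roots(p)))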
|
kriwil/django-filer
|
refs/heads/develop
|
filer/south_migrations/0009_auto__add_field_folderpermission_can_edit_new__add_field_folderpermiss.py
|
49
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FolderPermission.can_edit_new'
db.add_column('filer_folderpermission', 'can_edit_new',
self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True),
keep_default=False)
# Adding field 'FolderPermission.can_read_new'
db.add_column('filer_folderpermission', 'can_read_new',
self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True),
keep_default=False)
# Adding field 'FolderPermission.can_add_children_new'
db.add_column('filer_folderpermission', 'can_add_children_new',
self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FolderPermission.can_edit_new'
db.delete_column('filer_folderpermission', 'can_edit_new')
# Deleting field 'FolderPermission.can_read_new'
db.delete_column('filer_folderpermission', 'can_read_new')
# Deleting field 'FolderPermission.can_add_children_new'
db.delete_column('filer_folderpermission', 'can_add_children_new')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.clipboard': {
'Meta': {'object_name': 'Clipboard'},
'files': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'in_clipboards'", 'symmetrical': 'False', 'through': "orm['filer.ClipboardItem']", 'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filer_clipboards'", 'to': "orm['auth.User']"})
},
'filer.clipboarditem': {
'Meta': {'object_name': 'ClipboardItem'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Clipboard']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folderpermission': {
'Meta': {'object_name': 'FolderPermission'},
'can_add_children': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_add_children_new': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_edit_new': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_read_new': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'everybody': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Folder']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.User']"})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['filer']
|
murphycj/AGFusion
|
refs/heads/master
|
agfusion/__init__.py
|
1
|
from .cli import *
from .model import *
from .utils import *
from .database import *
from .exceptions import *
from .plot import *
from .parsers import *
from ._version import __version__
|
ol-loginov/intellij-community
|
refs/heads/master
|
python/testData/refactoring/introduceVariable/functionOccurrences.after.py
|
83
|
import xml.etree.ElementTree as etree
def entries_to_xml(entries, dict_id, dict_name, closed):
dictionary = etree.Element(u'Dictionary', IDName=dict_id)
a = etree.SubElement
a(dictionary, u'Name').text = dict_name
a(dictionary, u'Closed').text = repr(closed).lower()
a(dictionary, u'Action').text = u'false'
terms = a(dictionary, u'Terms')
for i, entry in enumerate(entries):
term = a(terms, u'Term')
a(term, u'Category')
words = a(term, u'Words')
return dictionary
|
ericpre/hyperspy
|
refs/heads/RELEASE_next_minor
|
hyperspy/utils/samfire.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
"""SAMFire modules
The :mod:`~hyperspy.api.samfire` module contains the following submodules:
fit_tests
Tests to check fit convergence when running SAMFire
global_strategies
Available global strategies to use in SAMFire
local_strategies
    Available local strategies to use in SAMFire
SamfirePool
The parallel pool, customized to run SAMFire.
"""
from hyperspy.samfire_utils import (fit_tests, global_strategies,
local_strategies)
|
tek/amino
|
refs/heads/master
|
amino/boolean.py
|
1
|
from typing import Union, Any, TypeVar, Type, Callable, Tuple
import amino
from amino import maybe
from amino.either import Right, Left, Either
from amino.func import call_by_name
A = TypeVar('A')
B = TypeVar('B')
class Boolean:
def __init__(self, value: Union['Boolean', bool]) -> None:
self.value = bool(value)
@staticmethod
def wrap(value):
return Boolean(value)
@staticmethod
def issubclass(value: Type[A], tpe: Type[B]) -> 'Boolean':
return Boolean(isinstance(value, type) and issubclass(value, tpe))
@staticmethod
def isinstance(value: A, tpe: Union[Type[Any], Tuple[Type[Any], ...]]) -> 'Boolean':
return Boolean(isinstance(value, tpe))
@staticmethod
def is_a(tpe: Union[Type[Any], Tuple[Type[Any], ...]]) -> Callable[[Any], 'Boolean']:
return lambda a: Boolean.isinstance(a, tpe)
@staticmethod
def is_a_class(tpe: Type[A]) -> Callable[[Any], 'Boolean']:
return lambda a: Boolean.issubclass(a, tpe)
def maybe(self, value):
return maybe.Maybe.optional(value) if self else maybe.Empty()
def flat_maybe(self, value: 'Maybe'): # type: ignore
return value if self else maybe.Empty()
def maybe_call(self, f, *a, **kw):
return maybe.Just(f(*a, **kw)) if self else maybe.Empty()
def m(self, v):
return maybe.Maybe.optional(call_by_name(v)) if self else maybe.Empty()
def flat_maybe_call(self, f, *a, **kw):
return f(*a, **kw) if self else maybe.Empty()
def flat_m(self, v):
return call_by_name(v) if self else maybe.Empty()
def either(self, l, r):
return self.either_call(l, lambda: r)
def either_call(self, l, r):
return Right(r()) if self else Left(l)
def flat_either_call(self, l, r):
return r() if self else Left(l)
def e(self, f: A, t: B) -> Either[A, B]:
return Right(call_by_name(t)) if self else Left(call_by_name(f))
def flat_e(self, l, r):
return call_by_name(r) if self else Left(call_by_name(l))
def l(self, v: A) -> 'amino.List[A]':
return self.m(v) / amino.List | amino.Nil
def cata(self, t: A, f: A) -> A:
return t if self.value else f
def cata_call(self, t, f):
return t() if self.value else f()
def c(self, t: Callable[[], A], f: Callable[[], A]) -> A:
return call_by_name(t) if self.value else call_by_name(f)
def __bool__(self):
return self.value
def __str__(self):
return '⊤' if self.value else '⊥'
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.value)
def __eq__(self, other):
return (
self.value == other
if isinstance(other, bool) else
self.value == other.value
if isinstance(other, Boolean) else
False
)
def __and__(self, other: Any) -> 'Boolean':
return Boolean(self and other)
def __or__(self, other: Any) -> 'Boolean':
return Boolean(self or other)
def __invert__(self) -> 'Boolean':
return Boolean(not self.value)
def __xor__(self, other: Any) -> 'Boolean':
return Boolean(bool(self.value ^ bool(other)))
def __rxor__(self, other: Any) -> 'Boolean':
return Boolean(bool(self.value ^ bool(other)))
def __hash__(self) -> int:
return hash(self.value)
@property
def no(self):
return Boolean(not self.value)
@property
def json_repr(self):
return self.value
@property
def to_int(self) -> int:
return 1 if self else 0
true = Boolean(True)
false = Boolean(False)
__all__ = ('Boolean', 'true', 'false')
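# Illustrative sketch (editorial addition, not part of the upstream amino
# source): a few representative uses of Boolean, exercising only methods
# defined above. The helper name is a hypothetical addition.
def _boolean_demo():
    b = Boolean(3 > 1)
    assert bool(b) is True
    assert b.cata('yes', 'no') == 'yes'
    assert (~b).cata('yes', 'no') == 'no'
    assert b.to_int == 1 and false.to_int == 0
    assert (b & true) == true and (b ^ true) == false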
|
bmya/odoo-argentina
|
refs/heads/11.0
|
l10n_ar_base/__init__.py
|
1
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import SUPERUSER_ID, api
from odoo.addons import account
old_auto_install_l10n = account._auto_install_l10n
def ar_auto_install_l10n(cr, registry):
"""
    Override of this function to install our localization module
"""
env = api.Environment(cr, SUPERUSER_ID, {})
country_code = env.user.company_id.country_id.code
if country_code and country_code == 'AR':
env['ir.module.module'].search([
('name', '=', 'l10n_ar_chart'),
('state', '=', 'uninstalled')]).button_install()
else:
return old_auto_install_l10n(cr, registry)
account._auto_install_l10n = ar_auto_install_l10n
|
MarcusTan/yncn-grid
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/utils.py
|
250
|
from __future__ import absolute_import, division, unicode_literals
from types import ModuleType
try:
import xml.etree.cElementTree as default_etree
except ImportError:
import xml.etree.ElementTree as default_etree
class MethodDispatcher(dict):
"""Dict with 2 special properties:
On initiation, keys that are lists, sets or tuples are converted to
multiple keys so accessing any one of the items in the original
list-like object returns the matching value
md = MethodDispatcher({("foo", "bar"):"baz"})
md["foo"] == "baz"
A default value which can be set through the default attribute.
"""
def __init__(self, items=()):
# Using _dictEntries instead of directly assigning to self is about
# twice as fast. Please do careful performance testing before changing
# anything here.
_dictEntries = []
for name, value in items:
if type(name) in (list, tuple, frozenset, set):
for item in name:
_dictEntries.append((item, value))
else:
_dictEntries.append((name, value))
dict.__init__(self, _dictEntries)
self.default = None
def __getitem__(self, key):
return dict.get(self, key, self.default)
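# Illustrative sketch (editorial addition, not part of the upstream html5lib
# source): minimal usage of MethodDispatcher showing the tuple-key expansion
# and the default attribute. The helper name is a hypothetical addition.
def _method_dispatcher_demo():
    md = MethodDispatcher([(("foo", "bar"), "baz"), ("qux", "quux")])
    assert md["foo"] == "baz" and md["bar"] == "baz"
    md.default = "missing"
    assert md["not-there"] == "missing"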
# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds
def isSurrogatePair(data):
return (len(data) == 2 and
ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
def surrogatePairToCodepoint(data):
char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
(ord(data[1]) - 0xDC00))
return char_val
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
moduleCache = {}
def moduleFactory(baseModule, *args, **kwargs):
if isinstance(ModuleType.__name__, type("")):
name = "_%s_factory" % baseModule.__name__
else:
name = b"_%s_factory" % baseModule.__name__
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType(name)
objs = factory(baseModule, *args, **kwargs)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
return moduleFactory
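# Illustrative sketch (editorial addition, not part of the upstream html5lib
# source): moduleFactoryFactory memoizes one generated module per base module.
# The factory and helper below are hypothetical stand-ins for illustration.
def _module_factory_demo():
    def factory(baseModule):
        return {"base_name": baseModule.__name__}
    getTreeModule = moduleFactoryFactory(factory)
    mod1 = getTreeModule(default_etree)
    mod2 = getTreeModule(default_etree)
    assert mod1 is mod2  # cached per base module name
    assert mod1.base_name == default_etree.__name__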
|
aisipos/django
|
refs/heads/master
|
django/test/testcases.py
|
3
|
from __future__ import unicode_literals
import difflib
import errno
import json
import os
import posixpath
import socket
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils import six
from django.utils.decorators import classproperty
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import (
unquote, urljoin, urlparse, urlsplit, urlunsplit,
)
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
query['sql'] for query in self.captured_queries
)
)
)
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _CursorFailure(object):
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
# Tests shouldn't be allowed to query the database since
# this base class doesn't enforce any isolation.
allow_database_queries = False
@classmethod
def setUpClass(cls):
super(SimpleTestCase, cls).setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
@classmethod
def tearDownClass(cls):
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = connection.cursor.wrapped
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super(SimpleTestCase, cls).tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform any post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting and
reverts back to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix='',
fetch_redirect_response=True):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request (use fetch_redirect_response=False to check
such links without fetching them).
"""
if host is not None:
warnings.warn(
"The host argument is deprecated and no longer used by assertRedirects",
RemovedInDjango20Warning, stacklevel=2
)
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
if netloc and netloc != 'testserver':
raise ValueError(
"The Django test client is unable to fetch remote URLs (got %s). "
"Use assertRedirects(..., fetch_redirect_response=False) instead." % url
)
redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
if url != expected_url:
# For temporary backwards compatibility, try to compare with a relative url
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
relative_url = urlunsplit(('', '', e_path, e_query, e_fragment))
if url == relative_url:
warnings.warn(
"assertRedirects had to strip the scheme and domain from the "
"expected URL, as it was always added automatically to URLs "
"before Django 1.9. Please update your expected URLs by "
"removing the scheme and domain.",
RemovedInDjango20Warning, stacklevel=2)
expected_url = relative_url
self.assertEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response.charset)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected), and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected), and that
        ``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors)
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Asserts that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
len(non_field_errors) == 0,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
len(non_form_errors) == 0,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_message_cm(self, expected_exception, expected_message):
with self.assertRaises(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(cm.exception))
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
        Asserts that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
# callable_obj was a documented kwarg in Django 1.8 and older.
callable_obj = kwargs.pop('callable_obj', None)
if callable_obj:
warnings.warn(
'The callable_obj kwarg is deprecated. Pass the callable '
'as a positional argument instead.', RemovedInDjango20Warning
)
elif len(args):
callable_obj = args[0]
args = args[1:]
cm = self._assert_raises_message_cm(expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply as the heavy lifting
is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply as the heavy lifting
is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(
six.text_type(xml1).splitlines(),
six.text_type(xml2).splitlines(),
)
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
# Since tests will be wrapped in a transaction, or serialized if they
# are not available, we allow queries to be run.
allow_database_queries = True
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
registry to these applications, then firing post_migrate -- it must
run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
super(TransactionTestCase, self)._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
@classmethod
def _databases_names(cls, include_mirrors=True):
# If the test case has a multi_db=True flag, act on all databases,
# including mirrors or not. Otherwise, just on the default DB.
if getattr(cls, 'multi_db', False):
return [
alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# If we need to provide replica initial data from migrated apps,
# then do so.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_migrate isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super(TransactionTestCase, self)._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
# For example qs.iterator() could be passed as qs, but it does not
# have 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
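    # Hedged usage sketch (illustrative, not part of Django): the test class and
    # Book model below are hypothetical. assertNumQueries works either as a
    # context manager or by passing a callable plus its arguments.
    #
    #     class BookQueryTests(TransactionTestCase):
    #         def test_list_is_one_query(self):
    #             with self.assertNumQueries(1):
    #                 list(Book.objects.all())
    #             self.assertNumQueries(1, list, Book.objects.all())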
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Helper method to open atomic blocks for multiple databases"""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened through the previous method"""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
if not connections_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{
'verbosity': 0,
'commit': False,
'database': db_name,
})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
@classmethod
def tearDownClass(cls):
if connections_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super(TestCase, cls).tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase"""
pass
def _should_reload_connections(self):
if connections_support_transactions():
return False
return super(TestCase, self)._should_reload_connections()
def _fixture_setup(self):
if not connections_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
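# Hedged usage sketch (illustrative, not part of Django): setUpTestData() runs
# once per class inside the class-wide atomic block opened by setUpClass(), so
# objects created there are shared by every test method and rolled back in
# tearDownClass(). The Author model below is hypothetical.
#
#     class AuthorTests(TestCase):
#         @classmethod
#         def setUpTestData(cls):
#             cls.author = Author.objects.create(name='Ursula')
#
#         def test_name(self):
#             self.assertEqual(self.author.name, 'Ursula')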
class CheckCondition(object):
"""Descriptor class for deferred condition checking"""
def __init__(self, cond_func):
self.cond_func = cond_func
def __get__(self, instance, cls=None):
return self.cond_func()
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
test_item.__unittest_skip__ = CheckCondition(condition)
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(*features):
"""
Skip a test if a database has at least one of the named features.
"""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features)
)
def skipUnlessDBFeature(*features):
"""
Skip a test unless a database has all the named features.
"""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features)
)
def skipUnlessAnyDBFeature(*features):
"""
Skip a test unless a database has any of the named features.
"""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features)
)
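# Hedged usage sketch (illustrative, not part of Django): these decorators accept
# either a test method or a TestCase subclass, and the condition is only checked
# when the test runs (via CheckCondition above). The feature names are existing
# Django feature flags, but the test bodies below are hypothetical.
#
#     @skipUnlessDBFeature('supports_transactions')
#     class TransactionalBehaviourTests(TestCase):
#         @skipIfDBFeature('interprets_empty_strings_as_nulls')
#         def test_empty_string_round_trip(self):
#             ...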
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
Just a regular WSGIRequestHandler except it doesn't log to the standard
output any of the requests received, so as to not clutter the output for
the tests' results.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(FSFilesHandler, self).__init__()
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super(FSFilesHandler, self).get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""
Thread for running a live http server while the tests are running.
"""
def __init__(self, host, possible_ports, static_handler, connections_override=None):
self.host = host
self.port = None
self.possible_ports = possible_ports
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
# Go through the list of possible ports, hoping that we can find
# one that is free to use for the WSGI server.
for index, port in enumerate(self.possible_ports):
try:
self.httpd = self._create_server(port)
except socket.error as e:
if (index + 1 < len(self.possible_ports) and
e.errno == errno.EADDRINUSE):
# This port is already in use, so we go on and try with
# the next one in the list.
continue
else:
# Either none of the given ports are free or the error
# is something else than "Address already in use". So
# we let that error bubble up to the main thread.
raise
else:
# A free port was found.
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
def _create_server(self, port):
return WSGIServer((self.host, port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
class LiveServerTestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase but also launches a live
http server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
Note that it inherits from TransactionTestCase instead of TestCase because
    the threads do not share the same transactions (unless using in-memory
    sqlite) and each thread needs to commit all its transactions so that the
other thread can see the changes.
"""
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (
cls.server_thread.host, cls.server_thread.port)
@classmethod
def setUpClass(cls):
super(LiveServerTestCase, cls).setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
# Launch the live server's thread
specified_address = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081-8179')
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = list(map(int, port_range.split('-')))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
msg = 'Invalid address ("%s") for live server.' % specified_address
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
cls.server_thread = cls._create_server_thread(host, possible_ports, connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, host, possible_ports, connections_override):
return LiveServerThread(
host,
possible_ports,
cls.static_handler,
connections_override=connections_override,
)
@classmethod
def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() for some
        # reason has raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
cls.server_thread.join()
# Restore sqlite in-memory database connections' non-shareability
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
super(LiveServerTestCase, cls).tearDownClass()
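# Hedged usage sketch (illustrative, not part of Django): a LiveServerTestCase
# subclass typically drives a real browser against cls.live_server_url. The
# selenium import and the '/' page below are assumptions for illustration only.
#
#     from selenium.webdriver.firefox.webdriver import WebDriver
#
#     class HomePageSeleniumTests(LiveServerTestCase):
#         @classmethod
#         def setUpClass(cls):
#             super(HomePageSeleniumTests, cls).setUpClass()
#             cls.selenium = WebDriver()
#
#         @classmethod
#         def tearDownClass(cls):
#             cls.selenium.quit()
#             super(HomePageSeleniumTests, cls).tearDownClass()
#
#         def test_home_page_loads(self):
#             self.selenium.get('%s%s' % (self.live_server_url, '/'))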
class SerializeMixin(object):
"""
Mixin to enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass / tearDownClass.
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super(SerializeMixin, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(SerializeMixin, cls).tearDownClass()
cls._lockfile.close()
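# Hedged usage sketch (illustrative, not part of Django): test classes that share
# an external resource put SerializeMixin first in their bases and point
# 'lockfile' at an existing file; __file__ is a convenient choice. The test
# classes below are hypothetical.
#
#     class ImageTestCaseMixin(SerializeMixin):
#         lockfile = __file__
#
#     class RemoveImageTests(ImageTestCaseMixin, TestCase):
#         def test_remove_image(self):
#             ...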
|
bolkedebruin/airflow
|
refs/heads/master
|
tests/providers/celery/sensors/test_celery_queue.py
|
1
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
from airflow.providers.celery.sensors.celery_queue import CeleryQueueSensor
class TestCeleryQueueSensor(unittest.TestCase):
def setUp(self):
class TestCeleryqueueSensor(CeleryQueueSensor):
def _check_task_id(self, context):
return True
self.sensor = TestCeleryqueueSensor
@patch('celery.app.control.Inspect')
def test_poke_success(self, mock_inspect):
mock_inspect_result = mock_inspect.return_value
# test success
mock_inspect_result.reserved.return_value = {
'test_queue': []
}
mock_inspect_result.scheduled.return_value = {
'test_queue': []
}
mock_inspect_result.active.return_value = {
'test_queue': []
}
test_sensor = self.sensor(celery_queue='test_queue',
task_id='test-task')
self.assertTrue(test_sensor.poke(None))
@patch('celery.app.control.Inspect')
def test_poke_fail(self, mock_inspect):
mock_inspect_result = mock_inspect.return_value
        # test failure: a task is still active in the queue
mock_inspect_result.reserved.return_value = {
'test_queue': []
}
mock_inspect_result.scheduled.return_value = {
'test_queue': []
}
mock_inspect_result.active.return_value = {
'test_queue': ['task']
}
test_sensor = self.sensor(celery_queue='test_queue',
task_id='test-task')
self.assertFalse(test_sensor.poke(None))
@patch('celery.app.control.Inspect')
def test_poke_success_with_taskid(self, mock_inspect):
test_sensor = self.sensor(celery_queue='test_queue',
task_id='test-task',
target_task_id='target-task')
self.assertTrue(test_sensor.poke(None))
|
pursuitxh/u-boot-2013.07
|
refs/heads/master
|
tools/buildman/builder.py
|
31
|
# Copyright (c) 2013 The Chromium OS Authors.
#
# Bloat-o-meter code used here Copyright 2004 Matt Mackall <mpm@selenic.com>
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import collections
import errno
from datetime import datetime, timedelta
import glob
import os
import re
import Queue
import shutil
import string
import sys
import threading
import time
import command
import gitutil
import terminal
import toolchain
"""
Theory of Operation
Please see README for user documentation, and you should be familiar with
that before trying to make sense of this.
Buildman works by keeping the machine as busy as possible, building different
commits for different boards on multiple CPUs at once.
The source repo (self.git_dir) contains all the commits to be built. Each
thread works on a single board at a time. It checks out the first commit,
configures it for that board, then builds it. Then it checks out the next
commit and builds it (typically without re-configuring). When it runs out
of commits, it gets another job from the builder and starts again with that
board.
Clearly the builder threads could work either way - they could check out a
commit and then build it for all boards. Using separate directories for each
commit/board pair they could leave their build product around afterwards
also.
The intent behind building a single board for multiple commits is to make
use of incremental builds. Since each commit is built incrementally from
the previous one, builds are faster. Reconfiguring for a different board
removes all intermediate object files.
Many threads can be working at once, but each has its own working directory.
When a thread finishes a build, it puts the output files into a result
directory.
The base directory used by buildman is normally '../<branch>', i.e.
a directory higher than the source repository and named after the branch
being built.
Within the base directory, we have one subdirectory for each commit. Within
that is one subdirectory for each board. Within that is the build output for
that commit/board combination.
Buildman also creates working directories for each thread, in a .bm-work/
subdirectory in the base dir.
As an example, say we are building branch 'us-net' for boards 'sandbox' and
'seaboard', and say that us-net has two commits. We will have directories
like this:
us-net/ base directory
01_of_02_g4ed4ebc_net--Add-tftp-speed-/
sandbox/
u-boot.bin
seaboard/
u-boot.bin
02_of_02_g4ed4ebc_net--Check-tftp-comp/
sandbox/
u-boot.bin
seaboard/
u-boot.bin
.bm-work/
00/ working directory for thread 0 (contains source checkout)
build/ build output
01/ working directory for thread 1
build/ build output
...
u-boot/ source directory
.git/ repository
"""
# Possible build outcomes
OUTCOME_OK, OUTCOME_WARNING, OUTCOME_ERROR, OUTCOME_UNKNOWN = range(4)
# Translate a commit subject into a valid filename
trans_valid_chars = string.maketrans("/: ", "---")
def Mkdir(dirname):
"""Make a directory if it doesn't already exist.
Args:
dirname: Directory to create
"""
try:
os.mkdir(dirname)
except OSError as err:
if err.errno == errno.EEXIST:
pass
else:
raise
class BuilderJob:
"""Holds information about a job to be performed by a thread
Members:
board: Board object to build
commits: List of commit options to build.
"""
def __init__(self):
self.board = None
self.commits = []
class ResultThread(threading.Thread):
"""This thread processes results from builder threads.
It simply passes the results on to the builder. There is only one
result thread, and this helps to serialise the build output.
"""
def __init__(self, builder):
"""Set up a new result thread
Args:
builder: Builder which will be sent each result
"""
threading.Thread.__init__(self)
self.builder = builder
def run(self):
"""Called to start up the result thread.
We collect the next result job and pass it on to the build.
"""
while True:
result = self.builder.out_queue.get()
self.builder.ProcessResult(result)
self.builder.out_queue.task_done()
class BuilderThread(threading.Thread):
"""This thread builds U-Boot for a particular board.
An input queue provides each new job. We run 'make' to build U-Boot
and then pass the results on to the output queue.
Members:
builder: The builder which contains information we might need
thread_num: Our thread number (0-n-1), used to decide on a
temporary directory
"""
def __init__(self, builder, thread_num):
"""Set up a new builder thread"""
threading.Thread.__init__(self)
self.builder = builder
self.thread_num = thread_num
def Make(self, commit, brd, stage, cwd, *args, **kwargs):
"""Run 'make' on a particular commit and board.
The source code will already be checked out, so the 'commit'
argument is only for information.
Args:
commit: Commit object that is being built
brd: Board object that is being built
stage: Stage of the build. Valid stages are:
distclean - can be called to clean source
config - called to configure for a board
build - the main make invocation - it does the build
args: A list of arguments to pass to 'make'
kwargs: A list of keyword arguments to pass to command.RunPipe()
Returns:
CommandResult object
"""
return self.builder.do_make(commit, brd, stage, cwd, *args,
**kwargs)
def RunCommit(self, commit_upto, brd, work_dir, do_config, force_build):
"""Build a particular commit.
If the build is already done, and we are not forcing a build, we skip
the build and just return the previously-saved results.
Args:
commit_upto: Commit number to build (0...n-1)
brd: Board object to build
work_dir: Directory to which the source will be checked out
do_config: True to run a make <board>_config on the source
force_build: Force a build even if one was previously done
Returns:
tuple containing:
- CommandResult object containing the results of the build
- boolean indicating whether 'make config' is still needed
"""
        # Create a default result - it will be overwritten by the call to
# self.Make() below, in the event that we do a build.
result = command.CommandResult()
result.return_code = 0
out_dir = os.path.join(work_dir, 'build')
# Check if the job was already completed last time
done_file = self.builder.GetDoneFile(commit_upto, brd.target)
result.already_done = os.path.exists(done_file)
if result.already_done and not force_build:
# Get the return code from that build and use it
with open(done_file, 'r') as fd:
result.return_code = int(fd.readline())
err_file = self.builder.GetErrFile(commit_upto, brd.target)
if os.path.exists(err_file) and os.stat(err_file).st_size:
result.stderr = 'bad'
else:
# We are going to have to build it. First, get a toolchain
if not self.toolchain:
try:
self.toolchain = self.builder.toolchains.Select(brd.arch)
except ValueError as err:
result.return_code = 10
result.stdout = ''
result.stderr = str(err)
# TODO(sjg@chromium.org): This gets swallowed, but needs
# to be reported.
if self.toolchain:
# Checkout the right commit
if commit_upto is not None:
commit = self.builder.commits[commit_upto]
if self.builder.checkout:
git_dir = os.path.join(work_dir, '.git')
gitutil.Checkout(commit.hash, git_dir, work_dir,
force=True)
else:
commit = self.builder.commit # Ick, fix this for BuildCommits()
# Set up the environment and command line
env = self.toolchain.MakeEnvironment()
Mkdir(out_dir)
args = ['O=build', '-s']
if self.builder.num_jobs is not None:
args.extend(['-j', str(self.builder.num_jobs)])
config_args = ['%s_config' % brd.target]
config_out = ''
# If we need to reconfigure, do that now
if do_config:
result = self.Make(commit, brd, 'distclean', work_dir,
'distclean', *args, env=env)
result = self.Make(commit, brd, 'config', work_dir,
*(args + config_args), env=env)
config_out = result.combined
do_config = False # No need to configure next time
if result.return_code == 0:
result = self.Make(commit, brd, 'build', work_dir, *args,
env=env)
result.stdout = config_out + result.stdout
else:
result.return_code = 1
result.stderr = 'No tool chain for %s\n' % brd.arch
result.already_done = False
result.toolchain = self.toolchain
result.brd = brd
result.commit_upto = commit_upto
result.out_dir = out_dir
return result, do_config
def _WriteResult(self, result, keep_outputs):
"""Write a built result to the output directory.
Args:
result: CommandResult object containing result to write
keep_outputs: True to store the output binaries, False
to delete them
"""
# Fatal error
if result.return_code < 0:
return
# Aborted?
if result.stderr and 'No child processes' in result.stderr:
return
if result.already_done:
return
# Write the output and stderr
output_dir = self.builder._GetOutputDir(result.commit_upto)
Mkdir(output_dir)
build_dir = self.builder.GetBuildDir(result.commit_upto,
result.brd.target)
Mkdir(build_dir)
outfile = os.path.join(build_dir, 'log')
with open(outfile, 'w') as fd:
if result.stdout:
fd.write(result.stdout)
errfile = self.builder.GetErrFile(result.commit_upto,
result.brd.target)
if result.stderr:
with open(errfile, 'w') as fd:
fd.write(result.stderr)
elif os.path.exists(errfile):
os.remove(errfile)
if result.toolchain:
# Write the build result and toolchain information.
done_file = self.builder.GetDoneFile(result.commit_upto,
result.brd.target)
with open(done_file, 'w') as fd:
fd.write('%s' % result.return_code)
with open(os.path.join(build_dir, 'toolchain'), 'w') as fd:
print >>fd, 'gcc', result.toolchain.gcc
print >>fd, 'path', result.toolchain.path
print >>fd, 'cross', result.toolchain.cross
print >>fd, 'arch', result.toolchain.arch
# Write out the image and function size information and an objdump
env = result.toolchain.MakeEnvironment()
lines = []
for fname in ['u-boot', 'spl/u-boot-spl']:
cmd = ['%snm' % self.toolchain.cross, '--size-sort', fname]
nm_result = command.RunPipe([cmd], capture=True,
capture_stderr=True, cwd=result.out_dir,
raise_on_error=False, env=env)
if nm_result.stdout:
nm = self.builder.GetFuncSizesFile(result.commit_upto,
result.brd.target, fname)
with open(nm, 'w') as fd:
print >>fd, nm_result.stdout,
cmd = ['%sobjdump' % self.toolchain.cross, '-h', fname]
dump_result = command.RunPipe([cmd], capture=True,
capture_stderr=True, cwd=result.out_dir,
raise_on_error=False, env=env)
rodata_size = ''
if dump_result.stdout:
objdump = self.builder.GetObjdumpFile(result.commit_upto,
result.brd.target, fname)
with open(objdump, 'w') as fd:
print >>fd, dump_result.stdout,
for line in dump_result.stdout.splitlines():
fields = line.split()
if len(fields) > 5 and fields[1] == '.rodata':
rodata_size = fields[2]
cmd = ['%ssize' % self.toolchain.cross, fname]
size_result = command.RunPipe([cmd], capture=True,
capture_stderr=True, cwd=result.out_dir,
raise_on_error=False, env=env)
if size_result.stdout:
lines.append(size_result.stdout.splitlines()[1] + ' ' +
rodata_size)
# Write out the image sizes file. This is similar to the output
            # of binutils' 'size' utility, but it omits the header line and
# adds an additional hex value at the end of each line for the
# rodata size
if len(lines):
sizes = self.builder.GetSizesFile(result.commit_upto,
result.brd.target)
with open(sizes, 'w') as fd:
print >>fd, '\n'.join(lines)
# Now write the actual build output
if keep_outputs:
patterns = ['u-boot', '*.bin', 'u-boot.dtb', '*.map',
'include/autoconf.mk', 'spl/u-boot-spl',
'spl/u-boot-spl.bin']
for pattern in patterns:
file_list = glob.glob(os.path.join(result.out_dir, pattern))
for fname in file_list:
shutil.copy(fname, build_dir)
def RunJob(self, job):
"""Run a single job
A job consists of a building a list of commits for a particular board.
Args:
job: Job to build
"""
brd = job.board
work_dir = self.builder.GetThreadDir(self.thread_num)
self.toolchain = None
if job.commits:
# Run 'make board_config' on the first commit
do_config = True
commit_upto = 0
force_build = False
for commit_upto in range(0, len(job.commits), job.step):
result, request_config = self.RunCommit(commit_upto, brd,
work_dir, do_config,
force_build or self.builder.force_build)
failed = result.return_code or result.stderr
if failed and not do_config:
# If our incremental build failed, try building again
# with a reconfig.
if self.builder.force_config_on_failure:
result, request_config = self.RunCommit(commit_upto,
brd, work_dir, True, True)
do_config = request_config
# If we built that commit, then config is done. But if we got
            # a warning, reconfig next time to force it to build the same
# files that created warnings this time. Otherwise an
# incremental build may not build the same file, and we will
# think that the warning has gone away.
# We could avoid this by using -Werror everywhere...
# For errors, the problem doesn't happen, since presumably
# the build stopped and didn't generate output, so will retry
# that file next time. So we could detect warnings and deal
# with them specially here. For now, we just reconfigure if
            # anything goes wrong.
# Of course this is substantially slower if there are build
# errors/warnings (e.g. 2-3x slower even if only 10% of builds
# have problems).
if (failed and not result.already_done and not do_config and
self.builder.force_config_on_failure):
# If this build failed, try the next one with a
# reconfigure.
# Sometimes if the board_config.h file changes it can mess
# with dependencies, and we get:
# make: *** No rule to make target `include/autoconf.mk',
# needed by `depend'.
do_config = True
force_build = True
else:
force_build = False
if self.builder.force_config_on_failure:
if failed:
do_config = True
result.commit_upto = commit_upto
if result.return_code < 0:
raise ValueError('Interrupt')
# We have the build results, so output the result
self._WriteResult(result, job.keep_outputs)
self.builder.out_queue.put(result)
else:
# Just build the currently checked-out build
            result, request_config = self.RunCommit(None, brd, work_dir, True,
                                                    self.builder.force_build)
result.commit_upto = self.builder.upto
self.builder.out_queue.put(result)
def run(self):
"""Our thread's run function
This thread picks a job from the queue, runs it, and then goes to the
next job.
"""
alive = True
while True:
job = self.builder.queue.get()
try:
if self.builder.active and alive:
self.RunJob(job)
except Exception as err:
alive = False
print err
self.builder.queue.task_done()
class Builder:
"""Class for building U-Boot for a particular commit.
Public members: (many should ->private)
active: True if the builder is active and has not been stopped
already_done: Number of builds already completed
base_dir: Base directory to use for builder
checkout: True to check out source, False to skip that step.
This is used for testing.
col: terminal.Color() object
count: Number of commits to build
do_make: Method to call to invoke Make
fail: Number of builds that failed due to error
force_build: Force building even if a build already exists
force_config_on_failure: If a commit fails for a board, disable
incremental building for the next commit we build for that
board, so that we will see all warnings/errors again.
git_dir: Git directory containing source repository
last_line_len: Length of the last line we printed (used for erasing
it with new progress information)
num_jobs: Number of jobs to run at once (passed to make as -j)
num_threads: Number of builder threads to run
out_queue: Queue of results to process
re_make_err: Compiled regular expression for ignore_lines
queue: Queue of jobs to run
threads: List of active threads
toolchains: Toolchains object to use for building
        upto: Current commit number we are building (0..count-1)
warned: Number of builds that produced at least one warning
Private members:
_base_board_dict: Last-summarised Dict of boards
_base_err_lines: Last-summarised list of errors
_build_period_us: Time taken for a single build (float object).
_complete_delay: Expected delay until completion (timedelta)
_next_delay_update: Next time we plan to display a progress update
            (datetime)
_show_unknown: Show unknown boards (those not built) in summary
        _timestamps: List of timestamps for the completion of the last
            _timestamp_count builds. Each is a datetime object.
_timestamp_count: Number of timestamps to keep in our list.
_working_dir: Base working directory containing all threads
"""
class Outcome:
"""Records a build outcome for a single make invocation
Public Members:
rc: Outcome value (OUTCOME_...)
err_lines: List of error lines or [] if none
sizes: Dictionary of image size information, keyed by filename
- Each value is itself a dictionary containing
values for 'text', 'data' and 'bss', being the integer
size in bytes of each section.
func_sizes: Dictionary keyed by filename - e.g. 'u-boot'. Each
value is itself a dictionary:
key: function name
value: Size of function in bytes
"""
def __init__(self, rc, err_lines, sizes, func_sizes):
self.rc = rc
self.err_lines = err_lines
self.sizes = sizes
self.func_sizes = func_sizes
def __init__(self, toolchains, base_dir, git_dir, num_threads, num_jobs,
checkout=True, show_unknown=True, step=1):
"""Create a new Builder object
Args:
toolchains: Toolchains object to use for building
base_dir: Base directory to use for builder
git_dir: Git directory containing source repository
num_threads: Number of builder threads to run
num_jobs: Number of jobs to run at once (passed to make as -j)
checkout: True to check out source, False to skip that step.
This is used for testing.
show_unknown: Show unknown boards (those not built) in summary
step: 1 to process every commit, n to process every nth commit
"""
self.toolchains = toolchains
self.base_dir = base_dir
self._working_dir = os.path.join(base_dir, '.bm-work')
self.threads = []
self.active = True
self.do_make = self.Make
self.checkout = checkout
self.num_threads = num_threads
self.num_jobs = num_jobs
self.already_done = 0
self.force_build = False
self.git_dir = git_dir
self._show_unknown = show_unknown
self._timestamp_count = 10
self._build_period_us = None
self._complete_delay = None
self._next_delay_update = datetime.now()
self.force_config_on_failure = True
self._step = step
self.col = terminal.Color()
self.queue = Queue.Queue()
self.out_queue = Queue.Queue()
for i in range(self.num_threads):
t = BuilderThread(self, i)
t.setDaemon(True)
t.start()
self.threads.append(t)
self.last_line_len = 0
t = ResultThread(self)
t.setDaemon(True)
t.start()
self.threads.append(t)
ignore_lines = ['(make.*Waiting for unfinished)', '(Segmentation fault)']
self.re_make_err = re.compile('|'.join(ignore_lines))
def __del__(self):
"""Get rid of all threads created by the builder"""
for t in self.threads:
del t
def _AddTimestamp(self):
"""Add a new timestamp to the list and record the build period.
The build period is the length of time taken to perform a single
build (one board, one commit).
"""
now = datetime.now()
self._timestamps.append(now)
count = len(self._timestamps)
delta = self._timestamps[-1] - self._timestamps[0]
seconds = delta.total_seconds()
# If we have enough data, estimate build period (time taken for a
# single build) and therefore completion time.
if count > 1 and self._next_delay_update < now:
self._next_delay_update = now + timedelta(seconds=2)
if seconds > 0:
self._build_period = float(seconds) / count
todo = self.count - self.upto
self._complete_delay = timedelta(microseconds=
self._build_period * todo * 1000000)
# Round it
self._complete_delay -= timedelta(
microseconds=self._complete_delay.microseconds)
if seconds > 60:
self._timestamps.popleft()
count -= 1
def ClearLine(self, length):
"""Clear any characters on the current line
Make way for a new line of length 'length', by outputting enough
spaces to clear out the old line. Then remember the new length for
next time.
Args:
length: Length of new line, in characters
"""
if length < self.last_line_len:
print ' ' * (self.last_line_len - length),
print '\r',
self.last_line_len = length
sys.stdout.flush()
def SelectCommit(self, commit, checkout=True):
"""Checkout the selected commit for this build
"""
self.commit = commit
if checkout and self.checkout:
gitutil.Checkout(commit.hash)
def Make(self, commit, brd, stage, cwd, *args, **kwargs):
"""Run make
Args:
commit: Commit object that is being built
brd: Board object that is being built
stage: Stage that we are at (distclean, config, build)
cwd: Directory where make should be run
args: Arguments to pass to make
kwargs: Arguments to pass to command.RunPipe()
"""
cmd = ['make'] + list(args)
result = command.RunPipe([cmd], capture=True, capture_stderr=True,
cwd=cwd, raise_on_error=False, **kwargs)
return result
def ProcessResult(self, result):
"""Process the result of a build, showing progress information
Args:
result: A CommandResult object
"""
col = terminal.Color()
if result:
target = result.brd.target
if result.return_code < 0:
self.active = False
command.StopAll()
return
self.upto += 1
if result.return_code != 0:
self.fail += 1
elif result.stderr:
self.warned += 1
if result.already_done:
self.already_done += 1
else:
target = '(starting)'
# Display separate counts for ok, warned and fail
ok = self.upto - self.warned - self.fail
line = '\r' + self.col.Color(self.col.GREEN, '%5d' % ok)
line += self.col.Color(self.col.YELLOW, '%5d' % self.warned)
line += self.col.Color(self.col.RED, '%5d' % self.fail)
name = ' /%-5d ' % self.count
# Add our current completion time estimate
self._AddTimestamp()
if self._complete_delay:
name += '%s : ' % self._complete_delay
# When building all boards for a commit, we can print a commit
# progress message.
if result and result.commit_upto is None:
name += 'commit %2d/%-3d' % (self.commit_upto + 1,
self.commit_count)
name += target
print line + name,
length = 13 + len(name)
self.ClearLine(length)
def _GetOutputDir(self, commit_upto):
"""Get the name of the output directory for a commit number
The output directory is typically .../<branch>/<commit>.
Args:
commit_upto: Commit number to use (0..self.count-1)
"""
commit = self.commits[commit_upto]
subject = commit.subject.translate(trans_valid_chars)
commit_dir = ('%02d_of_%02d_g%s_%s' % (commit_upto + 1,
self.commit_count, commit.hash, subject[:20]))
output_dir = os.path.join(self.base_dir, commit_dir)
return output_dir
def GetBuildDir(self, commit_upto, target):
"""Get the name of the build directory for a commit number
The build directory is typically .../<branch>/<commit>/<target>.
Args:
commit_upto: Commit number to use (0..self.count-1)
target: Target name
"""
output_dir = self._GetOutputDir(commit_upto)
return os.path.join(output_dir, target)
def GetDoneFile(self, commit_upto, target):
"""Get the name of the done file for a commit number
Args:
commit_upto: Commit number to use (0..self.count-1)
target: Target name
"""
return os.path.join(self.GetBuildDir(commit_upto, target), 'done')
def GetSizesFile(self, commit_upto, target):
"""Get the name of the sizes file for a commit number
Args:
commit_upto: Commit number to use (0..self.count-1)
target: Target name
"""
return os.path.join(self.GetBuildDir(commit_upto, target), 'sizes')
def GetFuncSizesFile(self, commit_upto, target, elf_fname):
"""Get the name of the funcsizes file for a commit number and ELF file
Args:
commit_upto: Commit number to use (0..self.count-1)
target: Target name
elf_fname: Filename of elf image
"""
return os.path.join(self.GetBuildDir(commit_upto, target),
'%s.sizes' % elf_fname.replace('/', '-'))
def GetObjdumpFile(self, commit_upto, target, elf_fname):
"""Get the name of the objdump file for a commit number and ELF file
Args:
commit_upto: Commit number to use (0..self.count-1)
target: Target name
elf_fname: Filename of elf image
"""
return os.path.join(self.GetBuildDir(commit_upto, target),
'%s.objdump' % elf_fname.replace('/', '-'))
def GetErrFile(self, commit_upto, target):
"""Get the name of the err file for a commit number
Args:
commit_upto: Commit number to use (0..self.count-1)
target: Target name
"""
output_dir = self.GetBuildDir(commit_upto, target)
return os.path.join(output_dir, 'err')
def FilterErrors(self, lines):
"""Filter out errors in which we have no interest
We should probably use map().
Args:
lines: List of error lines, each a string
Returns:
New list with only interesting lines included
"""
out_lines = []
for line in lines:
if not self.re_make_err.search(line):
out_lines.append(line)
return out_lines
def ReadFuncSizes(self, fname, fd):
"""Read function sizes from the output of 'nm'
Args:
fd: File containing data to read
fname: Filename we are reading from (just for errors)
Returns:
Dictionary containing size of each function in bytes, indexed by
function name.
"""
sym = {}
for line in fd.readlines():
try:
size, type, name = line[:-1].split()
except:
print "Invalid line in file '%s': '%s'" % (fname, line[:-1])
continue
if type in 'tTdDbB':
# function names begin with '.' on 64-bit powerpc
if '.' in name[1:]:
name = 'static.' + name.split('.')[0]
sym[name] = sym.get(name, 0) + int(size, 16)
return sym
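    # Illustrative note (assumption, not in the original source): with
    # 'nm --size-sort' each input line looks like "00000018 T do_bootm", which
    # would add sym['do_bootm'] = 0x18 above. Sizes accumulate when a name
    # repeats, and names with an internal '.' (compiler-suffixed statics such
    # as 'foo.1234') collapse onto a single 'static.foo' key.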
def GetBuildOutcome(self, commit_upto, target, read_func_sizes):
"""Work out the outcome of a build.
Args:
commit_upto: Commit number to check (0..n-1)
target: Target board to check
read_func_sizes: True to read function size information
Returns:
Outcome object
"""
done_file = self.GetDoneFile(commit_upto, target)
sizes_file = self.GetSizesFile(commit_upto, target)
sizes = {}
func_sizes = {}
if os.path.exists(done_file):
with open(done_file, 'r') as fd:
return_code = int(fd.readline())
err_lines = []
err_file = self.GetErrFile(commit_upto, target)
if os.path.exists(err_file):
with open(err_file, 'r') as fd:
err_lines = self.FilterErrors(fd.readlines())
# Decide whether the build was ok, failed or created warnings
if return_code:
rc = OUTCOME_ERROR
elif len(err_lines):
rc = OUTCOME_WARNING
else:
rc = OUTCOME_OK
# Convert size information to our simple format
if os.path.exists(sizes_file):
with open(sizes_file, 'r') as fd:
for line in fd.readlines():
values = line.split()
rodata = 0
if len(values) > 6:
rodata = int(values[6], 16)
size_dict = {
'all' : int(values[0]) + int(values[1]) +
int(values[2]),
'text' : int(values[0]) - rodata,
'data' : int(values[1]),
'bss' : int(values[2]),
'rodata' : rodata,
}
sizes[values[5]] = size_dict
if read_func_sizes:
pattern = self.GetFuncSizesFile(commit_upto, target, '*')
for fname in glob.glob(pattern):
with open(fname, 'r') as fd:
dict_name = os.path.basename(fname).replace('.sizes',
'')
func_sizes[dict_name] = self.ReadFuncSizes(fname, fd)
return Builder.Outcome(rc, err_lines, sizes, func_sizes)
return Builder.Outcome(OUTCOME_UNKNOWN, [], {}, {})
def GetResultSummary(self, boards_selected, commit_upto, read_func_sizes):
"""Calculate a summary of the results of building a commit.
Args:
            boards_selected: Dict containing boards to summarise
commit_upto: Commit number to summarize (0..self.count-1)
read_func_sizes: True to read function size information
Returns:
Tuple:
Dict containing boards which passed building this commit.
keyed by board.target
List containing a summary of error/warning lines
"""
board_dict = {}
err_lines_summary = []
for board in boards_selected.itervalues():
outcome = self.GetBuildOutcome(commit_upto, board.target,
read_func_sizes)
board_dict[board.target] = outcome
for err in outcome.err_lines:
if err and not err.rstrip() in err_lines_summary:
err_lines_summary.append(err.rstrip())
return board_dict, err_lines_summary
def AddOutcome(self, board_dict, arch_list, changes, char, color):
"""Add an output to our list of outcomes for each architecture
This simple function adds failing boards (changes) to the
relevant architecture string, so we can print the results out
sorted by architecture.
Args:
board_dict: Dict containing all boards
arch_list: Dict keyed by arch name. Value is a string containing
a list of board names which failed for that arch.
            changes: List of boards to add to arch_list
            char: Character to prefix the board list for each architecture
                (e.g. '+', '*', '?')
            color: terminal.Colour object
"""
done_arch = {}
for target in changes:
if target in board_dict:
arch = board_dict[target].arch
else:
arch = 'unknown'
str = self.col.Color(color, ' ' + target)
if not arch in done_arch:
str = self.col.Color(color, char) + ' ' + str
done_arch[arch] = True
if not arch in arch_list:
arch_list[arch] = str
else:
arch_list[arch] += str
def ColourNum(self, num):
color = self.col.RED if num > 0 else self.col.GREEN
if num == 0:
return '0'
return self.col.Color(color, str(num))
def ResetResultSummary(self, board_selected):
"""Reset the results summary ready for use.
Set up the base board list to be all those selected, and set the
error lines to empty.
Following this, calls to PrintResultSummary() will use this
information to work out what has changed.
Args:
board_selected: Dict containing boards to summarise, keyed by
board.target
"""
self._base_board_dict = {}
for board in board_selected:
self._base_board_dict[board] = Builder.Outcome(0, [], [], {})
self._base_err_lines = []
def PrintFuncSizeDetail(self, fname, old, new):
grow, shrink, add, remove, up, down = 0, 0, 0, 0, 0, 0
delta, common = [], {}
for a in old:
if a in new:
common[a] = 1
for name in old:
if name not in common:
remove += 1
down += old[name]
delta.append([-old[name], name])
for name in new:
if name not in common:
add += 1
up += new[name]
delta.append([new[name], name])
for name in common:
diff = new.get(name, 0) - old.get(name, 0)
if diff > 0:
grow, up = grow + 1, up + diff
elif diff < 0:
shrink, down = shrink + 1, down - diff
delta.append([diff, name])
delta.sort()
delta.reverse()
args = [add, -remove, grow, -shrink, up, -down, up - down]
if max(args) == 0:
return
args = [self.ColourNum(x) for x in args]
indent = ' ' * 15
print ('%s%s: add: %s/%s, grow: %s/%s bytes: %s/%s (%s)' %
tuple([indent, self.col.Color(self.col.YELLOW, fname)] + args))
print '%s %-38s %7s %7s %+7s' % (indent, 'function', 'old', 'new',
'delta')
for diff, name in delta:
if diff:
color = self.col.RED if diff > 0 else self.col.GREEN
msg = '%s %-38s %7s %7s %+7d' % (indent, name,
old.get(name, '-'), new.get(name,'-'), diff)
print self.col.Color(color, msg)
def PrintSizeDetail(self, target_list, show_bloat):
"""Show details size information for each board
Args:
target_list: List of targets, each a dict containing:
'target': Target name
'total_diff': Total difference in bytes across all areas
<part_name>: Difference for that part
show_bloat: Show detail for each function
"""
targets_by_diff = sorted(target_list, reverse=True,
key=lambda x: x['_total_diff'])
for result in targets_by_diff:
printed_target = False
for name in sorted(result):
diff = result[name]
if name.startswith('_'):
continue
if diff != 0:
color = self.col.RED if diff > 0 else self.col.GREEN
msg = ' %s %+d' % (name, diff)
if not printed_target:
print '%10s %-15s:' % ('', result['_target']),
printed_target = True
print self.col.Color(color, msg),
if printed_target:
print
if show_bloat:
target = result['_target']
outcome = result['_outcome']
base_outcome = self._base_board_dict[target]
for fname in outcome.func_sizes:
self.PrintFuncSizeDetail(fname,
base_outcome.func_sizes[fname],
outcome.func_sizes[fname])
def PrintSizeSummary(self, board_selected, board_dict, show_detail,
show_bloat):
"""Print a summary of image sizes broken down by section.
The summary takes the form of one line per architecture. The
line contains deltas for each of the sections (+ means the section
        got bigger, - means smaller). The numbers are the average number
of bytes that a board in this section increased by.
For example:
powerpc: (622 boards) text -0.0
arm: (285 boards) text -0.0
nds32: (3 boards) text -8.0
Args:
board_selected: Dict containing boards to summarise, keyed by
board.target
board_dict: Dict containing boards for which we built this
commit, keyed by board.target. The value is an Outcome object.
show_detail: Show detail for each board
show_bloat: Show detail for each function
"""
arch_list = {}
arch_count = {}
# Calculate changes in size for different image parts
# The previous sizes are in Board.sizes, for each board
for target in board_dict:
if target not in board_selected:
continue
base_sizes = self._base_board_dict[target].sizes
outcome = board_dict[target]
sizes = outcome.sizes
# Loop through the list of images, creating a dict of size
# changes for each image/part. We end up with something like
            # {'_target' : 'snapper9g45', 'data' : 5, 'u-boot-spl:text' : -4}
# which means that U-Boot data increased by 5 bytes and SPL
# text decreased by 4.
err = {'_target' : target}
for image in sizes:
if image in base_sizes:
base_image = base_sizes[image]
# Loop through the text, data, bss parts
for part in sorted(sizes[image]):
diff = sizes[image][part] - base_image[part]
col = None
if diff:
if image == 'u-boot':
name = part
else:
name = image + ':' + part
err[name] = diff
arch = board_selected[target].arch
if not arch in arch_count:
arch_count[arch] = 1
else:
arch_count[arch] += 1
if not sizes:
pass # Only add to our list when we have some stats
elif not arch in arch_list:
arch_list[arch] = [err]
else:
arch_list[arch].append(err)
# We now have a list of image size changes sorted by arch
# Print out a summary of these
for arch, target_list in arch_list.iteritems():
# Get total difference for each type
totals = {}
for result in target_list:
total = 0
for name, diff in result.iteritems():
if name.startswith('_'):
continue
total += diff
if name in totals:
totals[name] += diff
else:
totals[name] = diff
result['_total_diff'] = total
result['_outcome'] = board_dict[result['_target']]
count = len(target_list)
printed_arch = False
for name in sorted(totals):
diff = totals[name]
if diff:
# Display the average difference in this name for this
# architecture
avg_diff = float(diff) / count
color = self.col.RED if avg_diff > 0 else self.col.GREEN
msg = ' %s %+1.1f' % (name, avg_diff)
if not printed_arch:
print '%10s: (for %d/%d boards)' % (arch, count,
arch_count[arch]),
printed_arch = True
print self.col.Color(color, msg),
if printed_arch:
print
if show_detail:
self.PrintSizeDetail(target_list, show_bloat)
def PrintResultSummary(self, board_selected, board_dict, err_lines,
show_sizes, show_detail, show_bloat):
"""Compare results with the base results and display delta.
Only boards mentioned in board_selected will be considered. This
function is intended to be called repeatedly with the results of
each commit. It therefore shows a 'diff' between what it saw in
the last call and what it sees now.
Args:
board_selected: Dict containing boards to summarise, keyed by
board.target
board_dict: Dict containing boards for which we built this
commit, keyed by board.target. The value is an Outcome object.
err_lines: A list of errors for this commit, or [] if there is
none, or we don't want to print errors
show_sizes: Show image size deltas
show_detail: Show detail for each board
show_bloat: Show detail for each function
"""
better = [] # List of boards fixed since last commit
worse = [] # List of new broken boards since last commit
new = [] # List of boards that didn't exist last time
unknown = [] # List of boards that were not built
for target in board_dict:
if target not in board_selected:
continue
# If the board was built last time, add its outcome to a list
if target in self._base_board_dict:
base_outcome = self._base_board_dict[target].rc
outcome = board_dict[target]
if outcome.rc == OUTCOME_UNKNOWN:
unknown.append(target)
elif outcome.rc < base_outcome:
better.append(target)
elif outcome.rc > base_outcome:
worse.append(target)
else:
new.append(target)
# Get a list of errors that have appeared, and disappeared
better_err = []
worse_err = []
for line in err_lines:
if line not in self._base_err_lines:
worse_err.append('+' + line)
for line in self._base_err_lines:
if line not in err_lines:
better_err.append('-' + line)
# Display results by arch
if better or worse or unknown or new or worse_err or better_err:
arch_list = {}
self.AddOutcome(board_selected, arch_list, better, '',
self.col.GREEN)
self.AddOutcome(board_selected, arch_list, worse, '+',
self.col.RED)
self.AddOutcome(board_selected, arch_list, new, '*', self.col.BLUE)
if self._show_unknown:
self.AddOutcome(board_selected, arch_list, unknown, '?',
self.col.MAGENTA)
for arch, target_list in arch_list.iteritems():
print '%10s: %s' % (arch, target_list)
if better_err:
print self.col.Color(self.col.GREEN, '\n'.join(better_err))
if worse_err:
print self.col.Color(self.col.RED, '\n'.join(worse_err))
if show_sizes:
self.PrintSizeSummary(board_selected, board_dict, show_detail,
show_bloat)
# Save our updated information for the next call to this function
self._base_board_dict = board_dict
self._base_err_lines = err_lines
# Get a list of boards that did not get built, if needed
not_built = []
for board in board_selected:
if not board in board_dict:
not_built.append(board)
if not_built:
print "Boards not built (%d): %s" % (len(not_built),
', '.join(not_built))
def ShowSummary(self, commits, board_selected, show_errors, show_sizes,
show_detail, show_bloat):
"""Show a build summary for U-Boot for a given board list.
Reset the result summary, then repeatedly call GetResultSummary on
each commit's results, then display the differences we see.
Args:
            commits: Commit objects to summarise
board_selected: Dict containing boards to summarise
            show_errors: Show errors that occurred
show_sizes: Show size deltas
show_detail: Show detail for each board
show_bloat: Show detail for each function
"""
self.commit_count = len(commits)
self.commits = commits
self.ResetResultSummary(board_selected)
for commit_upto in range(0, self.commit_count, self._step):
board_dict, err_lines = self.GetResultSummary(board_selected,
commit_upto, read_func_sizes=show_bloat)
msg = '%02d: %s' % (commit_upto + 1, commits[commit_upto].subject)
print self.col.Color(self.col.BLUE, msg)
self.PrintResultSummary(board_selected, board_dict,
err_lines if show_errors else [], show_sizes, show_detail,
show_bloat)
def SetupBuild(self, board_selected, commits):
"""Set up ready to start a build.
Args:
board_selected: Selected boards to build
commits: Selected commits to build
"""
# First work out how many commits we will build
count = (len(commits) + self._step - 1) / self._step
self.count = len(board_selected) * count
self.upto = self.warned = self.fail = 0
self._timestamps = collections.deque()
def BuildBoardsForCommit(self, board_selected, keep_outputs):
"""Build all boards for a single commit"""
self.SetupBuild(board_selected)
self.count = len(board_selected)
for brd in board_selected.itervalues():
job = BuilderJob()
job.board = brd
job.commits = None
job.keep_outputs = keep_outputs
            self.queue.put(job)
self.queue.join()
self.out_queue.join()
print
self.ClearLine(0)
def BuildCommits(self, commits, board_selected, show_errors, keep_outputs):
"""Build all boards for all commits (non-incremental)"""
self.commit_count = len(commits)
self.ResetResultSummary(board_selected)
for self.commit_upto in range(self.commit_count):
self.SelectCommit(commits[self.commit_upto])
self.SelectOutputDir()
Mkdir(self.output_dir)
self.BuildBoardsForCommit(board_selected, keep_outputs)
board_dict, err_lines = self.GetResultSummary()
self.PrintResultSummary(board_selected, board_dict,
err_lines if show_errors else [])
if self.already_done:
print '%d builds already done' % self.already_done
def GetThreadDir(self, thread_num):
"""Get the directory path to the working dir for a thread.
Args:
thread_num: Number of thread to check.
"""
return os.path.join(self._working_dir, '%02d' % thread_num)
def _PrepareThread(self, thread_num):
"""Prepare the working directory for a thread.
This clones or fetches the repo into the thread's work directory.
Args:
thread_num: Thread number (0, 1, ...)
"""
thread_dir = self.GetThreadDir(thread_num)
Mkdir(thread_dir)
git_dir = os.path.join(thread_dir, '.git')
# Clone the repo if it doesn't already exist
# TODO(sjg@chromium): Perhaps some git hackery to symlink instead, so
        # we have a private index but use the origin repo's contents?
if self.git_dir:
src_dir = os.path.abspath(self.git_dir)
if os.path.exists(git_dir):
gitutil.Fetch(git_dir, thread_dir)
else:
print 'Cloning repo for thread %d' % thread_num
gitutil.Clone(src_dir, thread_dir)
def _PrepareWorkingSpace(self, max_threads):
"""Prepare the working directory for use.
Set up the git repo for each thread.
Args:
max_threads: Maximum number of threads we expect to need.
"""
Mkdir(self._working_dir)
for thread in range(max_threads):
self._PrepareThread(thread)
def _PrepareOutputSpace(self):
"""Get the output directories ready to receive files.
We delete any output directories which look like ones we need to
create. Having left over directories is confusing when the user wants
to check the output manually.
"""
dir_list = []
for commit_upto in range(self.commit_count):
dir_list.append(self._GetOutputDir(commit_upto))
for dirname in glob.glob(os.path.join(self.base_dir, '*')):
if dirname not in dir_list:
shutil.rmtree(dirname)
def BuildBoards(self, commits, board_selected, show_errors, keep_outputs):
"""Build all commits for a list of boards
Args:
commits: List of commits to be build, each a Commit object
boards_selected: Dict of selected boards, key is target name,
value is Board object
show_errors: True to show summarised error/warning info
keep_outputs: True to save build output files
"""
self.commit_count = len(commits)
self.commits = commits
self.ResetResultSummary(board_selected)
Mkdir(self.base_dir)
self._PrepareWorkingSpace(min(self.num_threads, len(board_selected)))
self._PrepareOutputSpace()
self.SetupBuild(board_selected, commits)
self.ProcessResult(None)
# Create jobs to build all commits for each board
for brd in board_selected.itervalues():
job = BuilderJob()
job.board = brd
job.commits = commits
job.keep_outputs = keep_outputs
job.step = self._step
self.queue.put(job)
# Wait until all jobs are started
self.queue.join()
# Wait until we have processed all output
self.out_queue.join()
print
self.ClearLine(0)
|
nschloe/quadpy
|
refs/heads/main
|
tools/xiao_gimbutas/import_xiao_gimbutas_tri.py
|
1
|
"""
Parse Fortran code to extract points and weights of the Xiao-Gimbutas schemes.
"""
import numpy as np
def _parsed_strings_to_array(strings):
return np.array(
[
val.replace(",", "").replace("&", "").replace("/", "").replace("D", "e")
for val in strings
],
dtype=float,
)
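# Quick self-check of the cleaning performed above (editorial addition; the
# sample literal is the first weight quoted in the comment block of _parse
# below): stripping ',', '&', '/' and mapping the Fortran exponent marker 'D'
# to 'e' lets float() parse the constant.
assert _parsed_strings_to_array(
    ["0.21934566882541541013653648363283D+00/"]
)[0] == 0.21934566882541541013653648363283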
def _parse():
# The Fortran file contains multiple sections like
# ```
# data xs / &
# 0.00000000000000000000000000000000D+00/
# [...]
# data ys / &
# 0.00000000000000000000000000000000D+00/
# [...]
# data ws / &
# 0.21934566882541541013653648363283D+00/
# [...]
# ```
# Find those and extract the data.
data = []
with open("symq.txt") as f:
while True:
line = f.readline()
if not line:
# EOF
break
line = line.strip()
# skip if not at the start of a data block
if line[:7] != "data xs":
continue
# start of a data block
xstr = []
while line[-1] == "&":
line = f.readline().strip()
xstr.append(line)
line = f.readline().strip()
assert line[:7] == "data ys"
ystr = []
while line[-1] == "&":
line = f.readline().strip()
ystr.append(line)
line = f.readline().strip()
assert line[:7] == "data ws"
wstr = []
while line[-1] == "&":
line = f.readline().strip()
wstr.append(line)
points = np.column_stack(
[_parsed_strings_to_array(xstr), _parsed_strings_to_array(ystr)]
)
weights = _parsed_strings_to_array(wstr)
data.append((points, weights))
return data
def _extract_bary_data(data):
# The points are given in terms of coordinates of a reference triangle. Convert to
# barycentric coordinates, and check their symmetry there.
t0 = [-1, -1 / np.sqrt(3)]
t1 = [+1, -1 / np.sqrt(3)]
t2 = [0, 2 / np.sqrt(3)]
T = np.array([[t1[0] - t0[0], t2[0] - t0[0]], [t1[1] - t0[1], t2[1] - t0[1]]])
tol = 1.0e-10
all_dicts = []
ref_weight = 0.21934566882541541013653648363283
for k, item in enumerate(data):
points, weights = item
b = (points - t0).T
sol = np.linalg.solve(T, b)
bary = np.column_stack([sol[0], sol[1], 1.0 - sol[0] - sol[1]])
d = {"s1": [], "s2": [], "s3": [], "degree": k + 1}
for w, b in zip(weights, bary):
if np.all(np.abs(b - 1.0 / 3.0) < tol):
weight = w / ref_weight
d["s3"].append([weight])
elif abs(b[0] - b[1]) < tol:
weight = w / ref_weight / 3
d["s2"].append([weight, b[0]])
elif abs(b[1] - b[2]) < tol:
weight = w / ref_weight / 3
d["s2"].append([weight, b[1]])
elif abs(b[2] - b[0]) < tol:
weight = w / ref_weight / 3
d["s2"].append([weight, b[0]])
else:
srt = np.sort(b)
weight = w / ref_weight / 6
d["s1"].append([weight, srt[0], srt[1]])
for key in ["s1", "s2", "s3"]:
if len(d[key]) == 0:
d.pop(key)
all_dicts.append(d)
return all_dicts
def _main():
data = _parse()
all_dicts = _extract_bary_data(data)
# Write the json files.
    # Getting floats in scientific notation in Python's json is almost impossible, so do
# some work here. Compare with <https://stackoverflow.com/a/1733105/353337>.
class PrettyFloat(float):
def __repr__(self):
return f"{self:.16e}"
def pretty_floats(obj):
if isinstance(obj, float):
return PrettyFloat(obj)
elif isinstance(obj, dict):
return {k: pretty_floats(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return list(map(pretty_floats, obj))
return obj
for d in all_dicts:
degree = d["degree"]
with open(f"xg{degree:02d}.json", "w") as f:
string = (
pretty_floats(d)
.__repr__()
.replace("'", '"')
.replace("{", "{\n ")
.replace("[[", "[\n [")
.replace("], [", "],\n [")
.replace(']], "', ']\n ],\n "')
.replace("}", "\n}")
)
f.write(string)
return
if __name__ == "__main__":
_main()
|
jinser/automate_pydatastream
|
refs/heads/master
|
getcustom.py
|
1
|
from pydatastream import Datastream
import json
import datetime
import sys
import os.path
#hardcoded directories
dir_input = "input/"
dir_output = "output/"
#check that the login credentials and input file location are being passed in
numOfArgs = len(sys.argv) - 1
if numOfArgs != 3:
print "Please run this python script with username,password and input file location in that order respectively."
exit()
#Setup login credentials and input file location
username = str(sys.argv[1])
pw = str(sys.argv[2])
input_file_loc = dir_input + str(sys.argv[3])
#Ensure that the input file location exists
if ( not os.path.isfile(str(input_file_loc)) ):
print "The file " + str(input_file_loc) + " does not exist."
exit()
#login credentials to datastream
DWE = Datastream(username=username,password=pw)
#other info from datastream
info = DWE.system_info()
subscribed_sources = DWE.sources()
#replace missing data with NaNs
DWE.raise_on_error = False
#get all codes, groups, start dates from input file
with open(input_file_loc,'r') as input_file:
symbol_ref = json.load(input_file)
#download timestamp
download_date = {'Custom_Download_Date' : datetime.datetime.now().isoformat()}
#calculate time taken for entire process
time_taken = datetime.datetime.now()
time_taken = time_taken - time_taken
for desc,desc_value in symbol_ref.iteritems():
for group,group_value in desc_value.iteritems():
#create list for custom fields
custom_fields = list()
for code_key,code_value in group_value.iteritems():
for key,value in code_value.iteritems():
if(key == 'code'):
search_code = value
search_symbol = {'Custom_Ticker' : value}
if(key == 'start_date'):
start_date = value
if(key == 'custom_field'):
custom_fields[:] = []
custom_fields.append(value)
startTime = datetime.datetime.now()
#send request to retrieve the data from Datastream
req = DWE.fetch(str(search_code),custom_fields,date_from=str(start_date),only_data=False)
time_taken = time_taken + datetime.datetime.now() - startTime
#format date and convert to json
raw_json = req[0].to_json(date_format='iso')
raw_metadata = req[1].to_json()
#Data cleaning and processing
#remove the time component including the '.' char from the key values of datetime in the data
raw_json = raw_json.replace("T00:00:00.000Z","")
#replace the metadata's keys from "0" to "default_ws_key"
raw_metadata = raw_metadata.replace("\"0\"","\"Custom_WS_Key\"")
#combine the data and the metadata about the code
allData_str = json.loads(raw_json)
metadata_str = json.loads(raw_metadata)
datastream_combined = {key : value for (key,value) in (allData_str.items() + metadata_str.items())}
#create symbol json string and append to data
data_with_symbol = {key : value for (key,value) in (search_symbol.items() + datastream_combined.items())}
#append group
group_code = {'Custom_Group' : group}
data_with_group = {key : value for (key,value) in (group_code.items() + data_with_symbol.items())}
#append category
category = {'Custom_Description' : desc}
data_with_category = {key : value for (key,value) in (category.items() + data_with_group.items())}
#append download timestamp
final_data = {key : value for (key,value) in (download_date.items() + data_with_category.items())}
final_data_json = json.dumps(final_data)
#decode to the right format for saving to disk
json_file = json.JSONDecoder().decode((final_data_json))
#save to json file on server
if(len(group_value) > 1):
filename = dir_output + desc + '_' + group + '_' + code_key + '.json'
else:
filename = dir_output + desc + '_' + group + '.json'
with open(filename,'w') as outfile:
json.dump(json_file,outfile,sort_keys=True)
print "time taken for " + str(sys.argv[3]) + " to be retrieved: " + str(time_taken)
|
Vassy/odoo
|
refs/heads/master
|
addons/l10n_ma/l10n_ma.py
|
39
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class l10n_ma_report(osv.osv):
_name = 'l10n.ma.report'
_description = 'Report for l10n_ma_kzc'
_columns = {
'code': fields.char('Code', size=64),
'name': fields.char('Name', size=128),
'line_ids': fields.one2many('l10n.ma.line', 'report_id', 'Lines'),
}
_sql_constraints = [
('code_uniq', 'unique (code)','The code report must be unique !')
]
class l10n_ma_line(osv.osv):
_name = 'l10n.ma.line'
_description = 'Report Lines for l10n_ma'
_columns = {
'code': fields.char('Variable Name', size=64),
'definition': fields.char('Definition', size=512),
'name': fields.char('Name', size=256),
'report_id': fields.many2one('l10n.ma.report', 'Report'),
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The variable name must be unique !')
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mkhuthir/learnPython
|
refs/heads/master
|
Book_learning-python-r1.1/ch3/for.no.else.py
|
1
|
class DriverException(Exception):
pass
people = [('James', 17), ('Kirk', 9), ('Lars', 13), ('Robert', 8)]
driver = None
for person, age in people:
if age >= 18:
driver = (person, age)
break
if driver is None:
raise DriverException('Driver not found.')
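# For comparison (editorial addition, not part of the original exercise): the
# same search written with Python's for/else, where the else clause only runs
# if the loop finished without hitting `break`.
def find_driver_with_for_else(candidates):
    for person, age in candidates:
        if age >= 18:
            driver = (person, age)
            break
    else:
        raise DriverException('Driver not found.')
    return driver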
|
bdang2012/taiga-back-casting
|
refs/heads/branch_casting
|
taiga/projects/history/templatetags/functions.py
|
1
|
# Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext_lazy as _
from django_jinja import library
EXTRA_FIELD_VERBOSE_NAMES = {
"description_diff": _("description"),
"content_diff": _("content"),
"blocked_note_diff": _("blocked note"),
"milestone": _("sprint"),
}
@library.global_function
def verbose_name(obj_class, field_name):
if field_name in EXTRA_FIELD_VERBOSE_NAMES:
return EXTRA_FIELD_VERBOSE_NAMES[field_name]
try:
return obj_class._meta.get_field(field_name).verbose_name
except Exception:
return field_name
@library.global_function
def lists_diff(list1, list2):
"""
Get the difference of two list and remove None values.
>>> list1 = ["a", None, "b", "c"]
>>> list2 = [None, "b", "d", "e"]
>>> list(filter(None.__ne__, set(list1) - set(list2)))
['c', 'a']
"""
return list(filter(None.__ne__, set(list1) - set(list2)))
|
MediffRobotics/DeepRobotics
|
refs/heads/master
|
DeepLearnMaterials/tutorials/tensorflowTUT/tf18_CNN3/for_you_to_practice.py
|
2
|
# View more python tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# number 1 to 10 data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def compute_accuracy(v_xs, v_ys):
global prediction
y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
return result
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
# stride [1, x_movement, y_movement, 1]
# Must have strides[0] = strides[3] = 1
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
# stride [1, x_movement, y_movement, 1]
return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784]) # 28x28
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
## conv1 layer ##
## conv2 layer ##
## func1 layer ##
## func2 layer ##
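# One possible completion of the four placeholder sections above (an editorial
# sketch of a standard LeNet-style network; the original tutorial's solution
# may differ in details -- remove this block if you want to fill the layers in
# yourself). Output shapes are noted per step.
x_image = tf.reshape(xs, [-1, 28, 28, 1])                   # [n_samples, 28, 28, 1]
W_conv1 = weight_variable([5, 5, 1, 32])                    # patch 5x5, in 1, out 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)    # 28x28x32
h_pool1 = max_pool_2x2(h_conv1)                             # 14x14x32
W_conv2 = weight_variable([5, 5, 32, 64])                   # patch 5x5, in 32, out 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)    # 14x14x64
h_pool2 = max_pool_2x2(h_conv2)                             # 7x7x64
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])        # flatten to [n_samples, 3136]
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)                # dropout against overfitting
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)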
# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
reduction_indices=[1])) # loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.Session()
# important step
# tf.initialize_all_variables() no longer valid from
# 2017-03-02 if using tensorflow >= 0.12
sess.run(tf.global_variables_initializer())
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
if i % 50 == 0:
print(compute_accuracy(
mnist.test.images, mnist.test.labels))
|
ghandiosm/Test
|
refs/heads/master
|
addons/website_event_track/__init__.py
|
1023
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import controllers
import models
|
marinho/geraldo
|
refs/heads/master
|
site/newsite/django_1_0/django/middleware/cache.py
|
14
|
from django.conf import settings
from django.core.cache import cache
from django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers, get_max_age
class CacheMiddleware(object):
"""
Cache middleware. If this is enabled, each Django-powered page will be
cached (based on URLs).
Only parameter-less GET or HEAD-requests with status code 200 are cached.
The number of seconds each page is stored for is set by the
"max-age" section of the response's "Cache-Control" header, falling back to
the CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests
(i.e., those not made by a logged-in user) will be cached. This is a
simple and effective way of avoiding the caching of the Django admin (and
any other user-specific content).
This middleware expects that a HEAD request is answered with a response
exactly like the corresponding GET request.
When a hit occurs, a shallow copy of the original response object is
returned from process_request.
Pages will be cached based on the contents of the request headers
listed in the response's "Vary" header. This means that pages shouldn't
change their "Vary" header.
This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
def __init__(self, cache_timeout=None, key_prefix=None, cache_anonymous_only=None):
self.cache_timeout = cache_timeout
if cache_timeout is None:
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = key_prefix
if key_prefix is None:
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_anonymous_only is None:
self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
else:
self.cache_anonymous_only = cache_anonymous_only
def process_request(self, request):
"Checks whether the page is already cached and returns the cached version if available."
if self.cache_anonymous_only:
assert hasattr(request, 'user'), "The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware."
if not request.method in ('GET', 'HEAD') or request.GET:
request._cache_update_cache = False
return None # Don't bother checking the cache.
if self.cache_anonymous_only and request.user.is_authenticated():
request._cache_update_cache = False
return None # Don't cache requests from authenticated users.
cache_key = get_cache_key(request, self.key_prefix)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = cache.get(cache_key, None)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
request._cache_update_cache = False
return response
def process_response(self, request, response):
"Sets the cache, if needed."
if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
# We don't need to update the cache, just return.
return response
if request.method != 'GET':
# This is a stronger requirement than above. It is needed
# because of interactions between this middleware and the
# HTTPMiddleware, which throws the body of a HEAD-request
# away before this middleware gets a chance to cache it.
return response
if not response.status_code == 200:
return response
# Try to get the timeout from the "max-age" section of the "Cache-
# Control" header before reverting to using the default cache_timeout
# length.
timeout = get_max_age(response)
if timeout == None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't bother caching.
return response
patch_response_headers(response, timeout)
cache_key = learn_cache_key(request, response, timeout, self.key_prefix)
cache.set(cache_key, response, timeout)
return response
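# Editorial note: a minimal sketch of the settings this middleware reads (the
# CACHE_MIDDLEWARE_* names appear in the code above; CACHE_BACKEND and all of
# the values here are illustrative):
#
#   CACHE_BACKEND = 'locmem:///'
#   CACHE_MIDDLEWARE_SECONDS = 600
#   CACHE_MIDDLEWARE_KEY_PREFIX = 'mysite'
#   CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
#   MIDDLEWARE_CLASSES = (
#       'django.middleware.cache.CacheMiddleware',
#       # ... other middleware ...
#   )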
|
PopCap/GameIdea
|
refs/heads/master
|
Engine/Source/ThirdParty/HTML5/emsdk/emscripten/1.30.0/tools/split.py
|
2
|
import sys
import os
from sets import Set
def split_javascript_file(input_filename, output_filename_prefix, max_part_size_in_bytes):
try:
# Javascript main file. On execution, this file needs to be loaded at last (!)
output_main_filename = output_filename_prefix + ".js"
output_main_file = open(output_main_filename,'w')
# File with HTML script tags to load the Javascript files in HTML later on
output_html_include_file = open(output_filename_prefix + ".include.html",'w')
# Variable will contain the source of a Javascript function if we find one during parsing
js_function = None
    # Dictionary with lower case source file as key and a tuple of case sensitive source file name (first encountered case wins)
# and an array of functions associated to that source file as value
function_buckets = {};
output_part_file = None
# Locate names of all the source files (.c/.cpp) that produced output to the .js file.
source_files = Set()
for line in open(input_filename,'r'):
if line.startswith("//FUNCTION_END_MARKER_OF_SOURCE_FILE_"):
associated_source_file_base = line[len("//FUNCTION_END_MARKER_OF_SOURCE_FILE_"):len(line)-1]
if not associated_source_file_base == "NO_SOURCE":
source_files.add(os.path.dirname(os.path.abspath(os.path.realpath(associated_source_file_base))))
common_source_file_prefix = os.path.commonprefix(list(source_files))
# Iterate over Javascript source; write main file; parse function declarations.
input_file = open(input_filename,'r')
for line in input_file:
if line == "//FUNCTION_BEGIN_MARKER\n":
js_function = "//Func\n"
elif line.startswith("//FUNCTION_END_MARKER_OF_SOURCE_FILE_"):
# At the end of the function marker we get the source file that is associated to that function.
associated_source_file_base = line[len("//FUNCTION_END_MARKER_OF_SOURCE_FILE_"):len(line)-1]
if associated_source_file_base == "NO_SOURCE":
# Functions without associated source file are stored in a file in the base directory
associated_source_file_base = output_filename_prefix + "_functions";
else:
# Functions with a known associated source file are stored in a file in the directory `output_filename_prefix`
associated_source_file_base = os.path.join(output_filename_prefix, os.path.relpath(os.path.abspath(os.path.realpath(associated_source_file_base)), common_source_file_prefix))
associated_source_file_base_lower = associated_source_file_base.lower()
# Add the function to its respective file
if associated_source_file_base_lower not in function_buckets:
function_buckets[associated_source_file_base_lower] = [associated_source_file_base, []]
function_buckets[associated_source_file_base_lower][1] += [js_function]
# Clear the function read cache
js_function = None
else:
if js_function is None:
output_main_file.write(line)
else:
js_function += line
# Iterate over all function buckets and write their functions to the associated files
# An associated file is split into chunks of `max_part_size_in_bytes`
for associated_source_file_base in function_buckets:
      # At first we try to name the Javascript source file to match the associated source file + `.js`
js_source_file = function_buckets[associated_source_file_base][0] + ".js"
# Check if the directory of the Javascript source file exists
js_source_dir = os.path.dirname(js_source_file)
if len(js_source_dir) > 0 and not os.path.exists(js_source_dir):
os.makedirs(js_source_dir)
output_part_file_counter = 0
output_part_file = None
for js_function in function_buckets[associated_source_file_base][1]:
if output_part_file is None:
output_html_include_file.write("<script type=\"text/javascript\" src=\"" + js_source_file.replace('\\', '/') + "\"></script>")
output_part_file = open(js_source_file,'w')
output_part_file.write(js_function)
if output_part_file is not None and output_part_file.tell() > max_part_size_in_bytes:
output_part_file.close()
output_part_file = None
output_part_file_counter += 1
js_source_file = function_buckets[associated_source_file_base][0] + ".part" + str(output_part_file_counter) + ".js"
if output_part_file is not None:
output_part_file.close()
output_part_file = None
# Write the main Javascript file at last to the HTML includes because this file contains the code to start
# the execution of the generated Emscripten application and requires all the extracted functions.
output_html_include_file.write("<script type=\"text/javascript\" src=\"" + output_main_filename.replace('\\', '/') + "\"></script>")
except Exception, e:
print >> sys.stderr, 'error: Splitting of Emscripten generated Javascript failed: %s' % str(e)
finally:
if input_file is not None: input_file.close()
if output_main_file is not None: output_main_file.close()
if output_part_file is not None: output_part_file.close()
if output_html_include_file is not None: output_html_include_file.close()
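# Illustrative invocation (editorial note; the paths and size limit are made
# up): calling
#   split_javascript_file('build/app.js', 'build/app_split', 256 * 1024)
# writes build/app_split.js (the main file, to be loaded last), per-source .js
# chunks of at most 256 KiB each, and build/app_split.include.html containing
# the <script> tags for all of them.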
|
fintech-circle/edx-platform
|
refs/heads/master
|
lms/djangoapps/course_api/blocks/transformers/tests/test_student_view.py
|
7
|
"""
Tests for StudentViewTransformer.
"""
# pylint: disable=protected-access
from openedx.core.djangoapps.content.block_structure.factory import BlockStructureFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ToyCourseFactory
from ..student_view import StudentViewTransformer
class TestStudentViewTransformer(ModuleStoreTestCase):
"""
Test proper behavior for StudentViewTransformer
"""
def setUp(self):
super(TestStudentViewTransformer, self).setUp()
self.course_key = ToyCourseFactory.create().id
self.course_usage_key = self.store.make_course_usage_key(self.course_key)
self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)
def test_transform(self):
# collect phase
StudentViewTransformer.collect(self.block_structure)
self.block_structure._collect_requested_xblock_fields()
# transform phase
StudentViewTransformer('video').transform(usage_info=None, block_structure=self.block_structure)
# verify video data
video_block_key = self.course_key.make_usage_key('video', 'sample_video')
self.assertIsNotNone(
self.block_structure.get_transformer_block_field(
video_block_key, StudentViewTransformer, StudentViewTransformer.STUDENT_VIEW_DATA,
)
)
self.assertFalse(
self.block_structure.get_transformer_block_field(
video_block_key, StudentViewTransformer, StudentViewTransformer.STUDENT_VIEW_MULTI_DEVICE,
)
)
# verify html data
html_block_key = self.course_key.make_usage_key('html', 'toyhtml')
self.assertIsNone(
self.block_structure.get_transformer_block_field(
html_block_key, StudentViewTransformer, StudentViewTransformer.STUDENT_VIEW_DATA,
)
)
self.assertTrue(
self.block_structure.get_transformer_block_field(
html_block_key, StudentViewTransformer, StudentViewTransformer.STUDENT_VIEW_MULTI_DEVICE,
)
)
|
adsznzhang/learntosolveit
|
refs/heads/version1
|
languages/python/design_stack.py
|
7
|
"""
Implementation of stack data structure in Python.
"""
class Stack:
def __init__(self,*vargs):
self.stack = list(vargs)
def __repr__(self):
return str(self.stack)
def top(self):
return self.stack[0]
def push(self,elem):
self.stack.insert(0,elem)
def pop(self):
return self.stack.pop(0)
if __name__ == '__main__':
stk = Stack(1,2,3,4)
print stk
print stk.top()
stk.push(10)
print stk
print stk.pop()
print stk
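# Design note (editorial): keeping the top of the stack at index 0 makes
# push/pop call list.insert(0, ...) and list.pop(0), which are O(n) because
# every element shifts. A sketch of an O(1) alternative with the same
# "top at the left" convention:
#
#   from collections import deque
#   stack = deque()
#   stack.appendleft(10)    # push
#   top = stack[0]          # top
#   stack.popleft()         # pop -> 10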
|
Markcial/python-tutor
|
refs/heads/master
|
package/module.py
|
1
|
variable = 'foo'
def function():
return 'hello world!'
class Class:
pass
|
clebergnu/autotest
|
refs/heads/master
|
frontend/migrations/050_more_test_planner_additions.py
|
18
|
UP_SQL = """
ALTER TABLE `planner_test_runs` ADD CONSTRAINT `test_runs_unique` UNIQUE KEY (`plan_id`, `test_job_id`, `tko_test_id`, `host_id`);
ALTER TABLE `planner_tests` ADD COLUMN `is_server` tinyint(1) DEFAULT 1;
ALTER TABLE `planner_hosts` ADD COLUMN `added_by_label` tinyint(1) DEFAULT 0;
"""
DOWN_SQL = """
ALTER TABLE `planner_hosts` DROP COLUMN `added_by_label`;
ALTER TABLE `planner_tests` DROP COLUMN `is_server`;
ALTER TABLE `planner_test_runs` DROP KEY `test_runs_unique`;
"""
|
cs-shadow/phabricator-tools
|
refs/heads/master
|
py/phl/phlmail_mocksender__t.py
|
4
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import phlmail_mocksender
class Test(unittest.TestCase):
def test_empty(self):
mailsender = phlmail_mocksender.MailSender()
self.assertEqual(len(mailsender.mailboxes), 0)
self.assertEqual(len(mailsender.mails), 0)
def test_oneTo(self):
mailsender = phlmail_mocksender.MailSender()
subject = "subject"
message = "message"
to = "someone@server.test"
mailsender.send(subject, message, [to])
self.assertEqual(len(mailsender.mailboxes), 1)
self.assertEqual(len(mailsender.mails), 1)
self.assertEqual(mailsender.mailboxes[to][0], mailsender.mails[0])
mail = mailsender.mails[0]
self.assertEqual(mail.subject, subject)
self.assertEqual(mail.message, message)
self.assertEqual(mail.to_addresses, [to])
self.assertIsNone(mail.cc_addresses)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
mega-force/osmc
|
refs/heads/master
|
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x060.py
|
250
|
data = (
'Huai ', # 0x00
'Tai ', # 0x01
'Song ', # 0x02
'Wu ', # 0x03
'Ou ', # 0x04
'Chang ', # 0x05
'Chuang ', # 0x06
'Ju ', # 0x07
'Yi ', # 0x08
'Bao ', # 0x09
'Chao ', # 0x0a
'Min ', # 0x0b
'Pei ', # 0x0c
'Zuo ', # 0x0d
'Zen ', # 0x0e
'Yang ', # 0x0f
'Kou ', # 0x10
'Ban ', # 0x11
'Nu ', # 0x12
'Nao ', # 0x13
'Zheng ', # 0x14
'Pa ', # 0x15
'Bu ', # 0x16
'Tie ', # 0x17
'Gu ', # 0x18
'Hu ', # 0x19
'Ju ', # 0x1a
'Da ', # 0x1b
'Lian ', # 0x1c
'Si ', # 0x1d
'Chou ', # 0x1e
'Di ', # 0x1f
'Dai ', # 0x20
'Yi ', # 0x21
'Tu ', # 0x22
'You ', # 0x23
'Fu ', # 0x24
'Ji ', # 0x25
'Peng ', # 0x26
'Xing ', # 0x27
'Yuan ', # 0x28
'Ni ', # 0x29
'Guai ', # 0x2a
'Fu ', # 0x2b
'Xi ', # 0x2c
'Bi ', # 0x2d
'You ', # 0x2e
'Qie ', # 0x2f
'Xuan ', # 0x30
'Cong ', # 0x31
'Bing ', # 0x32
'Huang ', # 0x33
'Xu ', # 0x34
'Chu ', # 0x35
'Pi ', # 0x36
'Xi ', # 0x37
'Xi ', # 0x38
'Tan ', # 0x39
'Koraeru ', # 0x3a
'Zong ', # 0x3b
'Dui ', # 0x3c
'[?] ', # 0x3d
'Ki ', # 0x3e
'Yi ', # 0x3f
'Chi ', # 0x40
'Ren ', # 0x41
'Xun ', # 0x42
'Shi ', # 0x43
'Xi ', # 0x44
'Lao ', # 0x45
'Heng ', # 0x46
'Kuang ', # 0x47
'Mu ', # 0x48
'Zhi ', # 0x49
'Xie ', # 0x4a
'Lian ', # 0x4b
'Tiao ', # 0x4c
'Huang ', # 0x4d
'Die ', # 0x4e
'Hao ', # 0x4f
'Kong ', # 0x50
'Gui ', # 0x51
'Heng ', # 0x52
'Xi ', # 0x53
'Xiao ', # 0x54
'Shu ', # 0x55
'S ', # 0x56
'Kua ', # 0x57
'Qiu ', # 0x58
'Yang ', # 0x59
'Hui ', # 0x5a
'Hui ', # 0x5b
'Chi ', # 0x5c
'Jia ', # 0x5d
'Yi ', # 0x5e
'Xiong ', # 0x5f
'Guai ', # 0x60
'Lin ', # 0x61
'Hui ', # 0x62
'Zi ', # 0x63
'Xu ', # 0x64
'Chi ', # 0x65
'Xiang ', # 0x66
'Nu ', # 0x67
'Hen ', # 0x68
'En ', # 0x69
'Ke ', # 0x6a
'Tong ', # 0x6b
'Tian ', # 0x6c
'Gong ', # 0x6d
'Quan ', # 0x6e
'Xi ', # 0x6f
'Qia ', # 0x70
'Yue ', # 0x71
'Peng ', # 0x72
'Ken ', # 0x73
'De ', # 0x74
'Hui ', # 0x75
'E ', # 0x76
'Kyuu ', # 0x77
'Tong ', # 0x78
'Yan ', # 0x79
'Kai ', # 0x7a
'Ce ', # 0x7b
'Nao ', # 0x7c
'Yun ', # 0x7d
'Mang ', # 0x7e
'Yong ', # 0x7f
'Yong ', # 0x80
'Yuan ', # 0x81
'Pi ', # 0x82
'Kun ', # 0x83
'Qiao ', # 0x84
'Yue ', # 0x85
'Yu ', # 0x86
'Yu ', # 0x87
'Jie ', # 0x88
'Xi ', # 0x89
'Zhe ', # 0x8a
'Lin ', # 0x8b
'Ti ', # 0x8c
'Han ', # 0x8d
'Hao ', # 0x8e
'Qie ', # 0x8f
'Ti ', # 0x90
'Bu ', # 0x91
'Yi ', # 0x92
'Qian ', # 0x93
'Hui ', # 0x94
'Xi ', # 0x95
'Bei ', # 0x96
'Man ', # 0x97
'Yi ', # 0x98
'Heng ', # 0x99
'Song ', # 0x9a
'Quan ', # 0x9b
'Cheng ', # 0x9c
'Hui ', # 0x9d
'Wu ', # 0x9e
'Wu ', # 0x9f
'You ', # 0xa0
'Li ', # 0xa1
'Liang ', # 0xa2
'Huan ', # 0xa3
'Cong ', # 0xa4
'Yi ', # 0xa5
'Yue ', # 0xa6
'Li ', # 0xa7
'Nin ', # 0xa8
'Nao ', # 0xa9
'E ', # 0xaa
'Que ', # 0xab
'Xuan ', # 0xac
'Qian ', # 0xad
'Wu ', # 0xae
'Min ', # 0xaf
'Cong ', # 0xb0
'Fei ', # 0xb1
'Bei ', # 0xb2
'Duo ', # 0xb3
'Cui ', # 0xb4
'Chang ', # 0xb5
'Men ', # 0xb6
'Li ', # 0xb7
'Ji ', # 0xb8
'Guan ', # 0xb9
'Guan ', # 0xba
'Xing ', # 0xbb
'Dao ', # 0xbc
'Qi ', # 0xbd
'Kong ', # 0xbe
'Tian ', # 0xbf
'Lun ', # 0xc0
'Xi ', # 0xc1
'Kan ', # 0xc2
'Kun ', # 0xc3
'Ni ', # 0xc4
'Qing ', # 0xc5
'Chou ', # 0xc6
'Dun ', # 0xc7
'Guo ', # 0xc8
'Chan ', # 0xc9
'Liang ', # 0xca
'Wan ', # 0xcb
'Yuan ', # 0xcc
'Jin ', # 0xcd
'Ji ', # 0xce
'Lin ', # 0xcf
'Yu ', # 0xd0
'Huo ', # 0xd1
'He ', # 0xd2
'Quan ', # 0xd3
'Tan ', # 0xd4
'Ti ', # 0xd5
'Ti ', # 0xd6
'Nie ', # 0xd7
'Wang ', # 0xd8
'Chuo ', # 0xd9
'Bu ', # 0xda
'Hun ', # 0xdb
'Xi ', # 0xdc
'Tang ', # 0xdd
'Xin ', # 0xde
'Wei ', # 0xdf
'Hui ', # 0xe0
'E ', # 0xe1
'Rui ', # 0xe2
'Zong ', # 0xe3
'Jian ', # 0xe4
'Yong ', # 0xe5
'Dian ', # 0xe6
'Ju ', # 0xe7
'Can ', # 0xe8
'Cheng ', # 0xe9
'De ', # 0xea
'Bei ', # 0xeb
'Qie ', # 0xec
'Can ', # 0xed
'Dan ', # 0xee
'Guan ', # 0xef
'Duo ', # 0xf0
'Nao ', # 0xf1
'Yun ', # 0xf2
'Xiang ', # 0xf3
'Zhui ', # 0xf4
'Die ', # 0xf5
'Huang ', # 0xf6
'Chun ', # 0xf7
'Qiong ', # 0xf8
'Re ', # 0xf9
'Xing ', # 0xfa
'Ce ', # 0xfb
'Bian ', # 0xfc
'Hun ', # 0xfd
'Zong ', # 0xfe
'Ti ', # 0xff
)
|
Sorsly/subtle
|
refs/heads/master
|
google-cloud-sdk/lib/googlecloudsdk/third_party/apis/dns/v1/resources.py
|
6
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://www.googleapis.com/dns/v1/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
CHANGES = (
'changes',
'projects/{project}/managedZones/{managedZone}/changes/{changeId}',
{},
[u'project', u'managedZone', u'changeId']
)
MANAGEDZONES = (
'managedZones',
'projects/{project}/managedZones/{managedZone}',
{},
[u'project', u'managedZone']
)
PROJECTS = (
'projects',
'projects/{project}',
{},
[u'project']
)
def __init__(self, collection_name, path, flat_paths, params):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
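# Example (editorial note; the project, zone and change id are made up): once
# its path parameters are substituted, Collections.CHANGES.path expands to
#   projects/my-project/managedZones/my-zone/changes/42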
|
ol-loginov/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyProtectedMemberInspection/trueNegative.py
|
83
|
__author__ = 'ktisha'
class A:
def __init__(self):
self._a = 1
def foo(self):
self.b= 1
class B(A):
def __init__(self):
A.__init__(self)
self.b = self._a
|
piyushroshan/xen-4.3
|
refs/heads/master
|
tools/python/xen/util/xsm/acm/acm.py
|
27
|
#===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 International Business Machines Corp.
# Author: Reiner Sailer
# Author: Bryan D. Payne <bdpayne@us.ibm.com>
# Author: Stefan Berger <stefanb@us.ibm.com>
#============================================================================
import commands
import logging
import os, string, re
import threading
import struct
import stat
import base64
from xen.xend import sxp
from xen.xend import XendConstants
from xen.xend import XendOptions
from xen.xend.XendLogging import log
from xen.xend.XendError import VmError
from xen.util import dictio, xsconstants, auxbin, xpopen
from xen.xend.XendConstants import *
#global directories and tools for security management
install_policy_dir_prefix = auxbin.xen_configdir() + "/acm-security/policies"
security_dir_prefix = XendOptions.instance().get_xend_security_path()
policy_dir_prefix = security_dir_prefix + "/policies"
res_label_filename = policy_dir_prefix + "/resource_labels"
boot_filename = "/boot/grub/menu.lst"
altboot_filename = "/boot/grub/grub.conf"
xensec_tool = "/usr/sbin/xensec_tool"
#global patterns for map file
#police_reference_tagname = "POLICYREFERENCENAME"
primary_entry_re = re.compile("\s*PRIMARY\s+.*", re.IGNORECASE)
secondary_entry_re = re.compile("\s*SECONDARY\s+.*", re.IGNORECASE)
label_template_re = re.compile(".*security_label_template.xml", re.IGNORECASE)
mapping_filename_re = re.compile(".*\.map", re.IGNORECASE)
policy_reference_entry_re = re.compile("\s*POLICYREFERENCENAME\s+.*", re.IGNORECASE)
vm_label_re = re.compile("\s*LABEL->SSID\s.+[VM|ANY]\s+.*", re.IGNORECASE)
res_label_re = re.compile("\s*LABEL->SSID\s+RES\s+.*", re.IGNORECASE)
all_label_re = re.compile("\s*LABEL->SSID\s+.*", re.IGNORECASE)
access_control_re = re.compile("\s*access_control\s*=", re.IGNORECASE)
#global patterns for boot configuration file
xen_title_re = re.compile("\s*title\s+XEN", re.IGNORECASE)
any_title_re = re.compile("\s*title\s", re.IGNORECASE)
xen_kernel_re = re.compile("\s*kernel.*xen.*\.gz", re.IGNORECASE)
kernel_ver_re = re.compile("\s*module.*vmlinuz", re.IGNORECASE)
any_module_re = re.compile("\s*module\s", re.IGNORECASE)
empty_line_re = re.compile("^\s*$")
binary_name_re = re.compile(".*[chwall|ste|chwall_ste].*\.bin", re.IGNORECASE)
policy_name_re = re.compile(".*[chwall|ste|chwall_ste].*", re.IGNORECASE)
#decision hooks known to the hypervisor
ACMHOOK_sharing = 1
ACMHOOK_authorization = 2
ACMHOOK_conflictset = 3
#other global variables
NULL_SSIDREF = 0
#general Rlock for map files; only one lock for all mapfiles
__mapfile_lock = threading.RLock()
__resfile_lock = threading.RLock()
log = logging.getLogger("xend.util.security")
#Functions exported through XML-RPC
xmlrpc_exports = [
'on',
'set_resource_label',
'get_resource_label',
'list_labels',
'get_labeled_resources',
'set_policy',
'reset_policy',
'get_policy',
'activate_policy',
'rm_bootpolicy',
'get_xstype',
'get_domain_label',
'set_domain_label'
]
# Our own exception definition. It is masked (pass) if raised and
# whoever raises this exception must provide error information.
class XSMError(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
def err(msg):
"""Raise ACM exception.
"""
raise XSMError(msg)
active_policy = None
def mapfile_lock():
__mapfile_lock.acquire()
def mapfile_unlock():
__mapfile_lock.release()
def resfile_lock():
__resfile_lock.acquire()
def resfile_unlock():
__resfile_lock.release()
def refresh_security_policy():
"""
retrieves security policy
"""
global active_policy
active_policy = 'INACCESSIBLE'
if os.access("/proc/xen/privcmd", os.R_OK|os.W_OK):
active_policy = "INACTIVE"
def get_active_policy_name():
refresh_security_policy()
return active_policy
# now set active_policy
refresh_security_policy()
def on():
"""
returns none if security policy is off (not compiled),
any string otherwise, use it: if not security.on() ...
"""
if get_active_policy_name() not in ['INACTIVE', 'NULL', '']:
return xsconstants.XS_POLICY_ACM
return 0
def calc_dom_ssidref_from_info(info):
"""
Calculate a domain's ssidref from the security_label in its
info.
This function is called before the domain is started and
makes sure that:
- the type of the policy is the same as indicated in the label
- the name of the policy is the same as indicated in the label
- calculates an up-to-date ssidref for the domain
The latter is necessary since the domain's ssidref could have
changed due to changes to the policy.
"""
import xen.xend.XendConfig
if isinstance(info, xen.xend.XendConfig.XendConfig):
if info.has_key('security_label'):
seclab = info['security_label']
tmp = seclab.split(":")
if len(tmp) != 3:
raise VmError("VM label '%s' in wrong format." % seclab)
typ, policyname, vmlabel = seclab.split(":")
if typ != xsconstants.ACM_POLICY_ID:
raise VmError("Policy type '%s' must be changed." % typ)
if get_active_policy_name() != policyname:
raise VmError("Active policy '%s' different than "
"what in VM's label ('%s')." %
(get_active_policy_name(), policyname))
ssidref = label2ssidref(vmlabel, policyname, "dom")
return ssidref
else:
return 0x0
raise VmError("security.calc_dom_ssidref_from_info: info of type '%s'"
"not supported." % type(info))
def getmapfile(policyname):
"""
in: if policyname is None then the currently
active hypervisor policy is used
out: 1. primary policy, 2. secondary policy,
3. open file descriptor for mapping file, and
4. True if policy file is available, False otherwise
"""
if not policyname:
policyname = get_active_policy_name()
map_file_ok = False
primary = None
secondary = None
#strip last part of policy as file name part
policy_dir_list = string.split(policyname, ".")
policy_file = policy_dir_list.pop()
if len(policy_dir_list) > 0:
policy_dir = string.join(policy_dir_list, "/") + "/"
else:
policy_dir = ""
map_filename = policy_dir_prefix + "/" + policy_dir + policy_file + ".map"
# check if it is there, if not check if policy file is there
if not os.path.isfile(map_filename):
policy_filename = policy_dir_prefix + "/" + policy_dir + policy_file + "-security_policy.xml"
if not os.path.isfile(policy_filename):
err("Policy file \'" + policy_filename + "\' not found.")
else:
err("Mapping file \'" + map_filename + "\' not found.")
f = open(map_filename)
for line in f:
if policy_reference_entry_re.match(line):
l = line.split()
if (len(l) == 2) and (l[1] == policyname):
map_file_ok = True
elif primary_entry_re.match(line):
l = line.split()
if len(l) == 2:
primary = l[1]
elif secondary_entry_re.match(line):
l = line.split()
if len(l) == 2:
secondary = l[1]
f.close()
f = open(map_filename)
if map_file_ok and primary and secondary:
return (primary, secondary, f, True)
else:
err("Mapping file inconsistencies found.")
def ssidref2label(ssidref_var):
"""
returns labelname corresponding to ssidref;
maps current policy to default directory
to find mapping file
"""
#1. translated permitted input formats
if isinstance(ssidref_var, str):
ssidref_var.strip()
if ssidref_var[0:2] == "0x":
ssidref = int(ssidref_var[2:], 16)
else:
ssidref = int(ssidref_var)
elif isinstance(ssidref_var, int):
ssidref = ssidref_var
else:
err("Instance type of ssidref not supported (must be of type 'str' or 'int')")
if ssidref == 0:
from xen.util.acmpolicy import ACM_LABEL_UNLABELED
return ACM_LABEL_UNLABELED
try:
mapfile_lock()
(primary, secondary, f, pol_exists) = getmapfile(None)
if not f:
if (pol_exists):
err("Mapping file for policy not found.")
else:
err("Policy file for \'" + get_active_policy_name() +
"\' not found.")
#2. get labelnames for both ssidref parts
pri_ssid = ssidref & 0xffff
sec_ssid = ssidref >> 16
pri_null_ssid = NULL_SSIDREF & 0xffff
sec_null_ssid = NULL_SSIDREF >> 16
pri_labels = []
sec_labels = []
labels = []
for line in f:
l = line.split()
if (len(l) < 5) or (l[0] != "LABEL->SSID"):
continue
if primary and (l[2] == primary) and (int(l[4], 16) == pri_ssid):
pri_labels.append(l[3])
if secondary and (l[2] == secondary) and (int(l[4], 16) == sec_ssid):
sec_labels.append(l[3])
f.close()
finally:
mapfile_unlock()
#3. get the label that is in both lists (combination must be a single label)
if (primary == "CHWALL") and (pri_ssid == pri_null_ssid) and (sec_ssid != sec_null_ssid):
labels = sec_labels
elif (secondary == "CHWALL") and (pri_ssid != pri_null_ssid) and (sec_ssid == sec_null_ssid):
labels = pri_labels
elif secondary == "NULL":
labels = pri_labels
else:
for i in pri_labels:
for j in sec_labels:
if (i==j):
labels.append(i)
if len(labels) != 1:
err("Label for ssidref \'" + str(ssidref) +
"\' unknown or not unique in policy \'" + active_policy + "\'")
return labels[0]
def label2ssidref(labelname, policyname, typ):
"""
returns ssidref corresponding to labelname;
maps current policy to default directory
to find mapping file """
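    # The map-file lines consumed below have the shape implied by the checks
    # on l[0]..l[4], e.g. (the label name and ssid value here are made up):
    #   LABEL->SSID VM CHWALL SomeVmLabel 0x0001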
if policyname in ['NULL', 'INACTIVE', 'INACCESSIBLE' ]:
err("Cannot translate labels for \'" + policyname + "\' policy.")
allowed_types = ['ANY']
if typ == 'dom':
allowed_types.append('VM')
elif typ == 'res':
allowed_types.append('RES')
else:
err("Invalid type. Must specify 'dom' or 'res'.")
try:
mapfile_lock()
(primary, secondary, f, pol_exists) = getmapfile(policyname)
#2. get labelnames for ssidref parts and find a common label
pri_ssid = []
sec_ssid = []
for line in f:
l = line.split()
if (len(l) < 5) or (l[0] != "LABEL->SSID"):
continue
if primary and (l[1] in allowed_types) and \
(l[2] == primary) and \
(l[3] == labelname):
pri_ssid.append(int(l[4], 16))
if secondary and (l[1] in allowed_types) and \
(l[2] == secondary) and \
(l[3] == labelname):
sec_ssid.append(int(l[4], 16))
f.close()
if (typ == 'res') and (primary == "CHWALL") and (len(pri_ssid) == 0):
pri_ssid.append(NULL_SSIDREF)
elif (typ == 'res') and (secondary == "CHWALL") and \
(len(sec_ssid) == 0):
sec_ssid.append(NULL_SSIDREF)
#3. sanity check and composition of ssidref
if (len(pri_ssid) == 0) or ((len(sec_ssid) == 0) and \
(secondary != "NULL")):
err("Label \'" + labelname + "\' not found.")
elif (len(pri_ssid) > 1) or (len(sec_ssid) > 1):
err("Label \'" + labelname + "\' not unique in policy (policy error)")
if secondary == "NULL":
return pri_ssid[0]
else:
return (sec_ssid[0] << 16) | pri_ssid[0]
finally:
mapfile_unlock()
def refresh_ssidref(config):
"""
looks up ssidref from security field
and refreshes the value if label exists
"""
#called by dom0, policy could have changed after xen.utils.security was initialized
refresh_security_policy()
security = None
if isinstance(config, dict):
security = config['security']
elif isinstance(config, list):
security = sxp.child_value(config, 'security')
else:
err("Instance type of config parameter not supported.")
if not security:
#nothing to do (no security label attached)
return config
policyname = None
labelname = None
# compose new security field
for idx in range(0, len(security)):
if security[idx][0] == 'ssidref':
security.pop(idx)
break
elif security[idx][0] == 'access_control':
for jdx in [1, 2]:
if security[idx][jdx][0] == 'label':
labelname = security[idx][jdx][1]
elif security[idx][jdx][0] == 'policy':
policyname = security[idx][jdx][1]
else:
err("Illegal field in access_control")
#verify policy is correct
if active_policy != policyname:
err("Policy \'" + str(policyname) +
"\' in label does not match active policy \'"
+ str(active_policy) +"\'!")
new_ssidref = label2ssidref(labelname, policyname, 'dom')
if not new_ssidref:
err("SSIDREF refresh failed!")
security.append([ 'ssidref',str(new_ssidref)])
security = ['security', security ]
for idx in range(0,len(config)):
if config[idx][0] == 'security':
config.pop(idx)
break
config.append(security)
def get_ssid(domain):
"""
enables domains to retrieve the label / ssidref of a running domain
"""
err("No policy active.")
def get_decision(arg1, arg2):
"""
enables domains to retrieve access control decisions from
the hypervisor Access Control Module.
IN: args format = ['domid', id] or ['ssidref', ssidref]
or ['access_control', ['policy', policy], ['label', label], ['type', type]]
"""
err("No policy active.")
def has_authorization(ssidref):
""" Check if the domain with the given ssidref has authorization to
        run on this system. To have authorization dom0's STE types must
be a superset of that of the domain's given through its ssidref.
"""
return True
def hv_chg_policy(bin_pol, del_array, chg_array):
"""
Change the binary policy in the hypervisor
The 'del_array' and 'chg_array' give hints about deleted ssidrefs
and changed ssidrefs which can be due to deleted VM labels
or reordered VM labels
"""
err("No policy active.")
def hv_get_policy():
"""
    Get the binary policy enforced in the hypervisor
"""
err("No policy active.")
def is_in_conflict(ssidref):
""" Check whether the given ssidref is in conflict with any running
domain.
"""
return False
def set_policy(xs_type, xml, flags, overwrite):
"""
Xend exports this function via XML-RPC
"""
from xen.xend import XendXSPolicyAdmin
xspoladmin = XendXSPolicyAdmin.XSPolicyAdminInstance()
try:
acmpol, rc, errors = \
xspoladmin.add_acmpolicy_to_system(xml,
int(flags),
True)
return rc, base64.b64encode(errors)
except Exception, e:
err(str(e))
def reset_policy():
"""
Xend exports this function via XML-RPC
"""
from xen.xend import XendXSPolicyAdmin
xspoladmin = XendXSPolicyAdmin.XSPolicyAdminInstance()
try:
acmpol, rc, errors = \
xspoladmin.reset_acmpolicy()
return rc, base64.b64encode(errors)
except Exception, e:
err(str(e))
def get_policy():
"""
Xend exports this function via XML-RPC
"""
from xen.xend import XendXSPolicyAdmin
poladmin = XendXSPolicyAdmin.XSPolicyAdminInstance()
try:
policy = poladmin.get_loaded_policy()
if policy != None:
return policy.toxml(), poladmin.get_policy_flags(policy)
except Exception, e:
err(str(e))
return "", 0
def activate_policy(flags):
"""
Xend exports this function via XML-RPC
"""
from xen.xend import XendXSPolicyAdmin
poladmin = XendXSPolicyAdmin.XSPolicyAdminInstance()
try:
policies = poladmin.get_policies()
if len(policies) > 0:
flags = int(flags)
irc = poladmin.activate_xspolicy(policies[0], flags)
return irc
except Exception, e:
err("Error while activating the policy: " % str(e))
return 0
def rm_bootpolicy():
"""
Xend exports this function via XML-RPC
"""
from xen.xend import XendXSPolicyAdmin
rc = XendXSPolicyAdmin.XSPolicyAdminInstance().rm_bootpolicy()
if rc != xsconstants.XSERR_SUCCESS:
err("Error while removing boot policy: %s" % \
str(xsconstants.xserr2string(-rc)))
return rc
def get_xstype():
"""
Xend exports this function via XML-RPC
"""
from xen.xend import XendXSPolicyAdmin
return XendXSPolicyAdmin.XSPolicyAdminInstance().isXSEnabled()
def get_domain_label(domain):
"""
Xend exports this function via XML-RPC
"""
from xen.xend import XendDomain
dom = XendDomain.instance().domain_lookup_nr(domain)
if dom:
seclab = dom.get_security_label()
return seclab
else:
err("Domain not found.")
def set_domain_label(domain, seclab, old_seclab):
"""
Xend exports this function via XML-RPC
"""
from xen.xend import XendDomain
dom = XendDomain.instance().domain_lookup_nr(domain)
if dom:
results = dom.set_security_label(seclab, old_seclab)
rc, errors, old_label, new_ssidref = results
return rc, new_ssidref
else:
err("Domain not found.")
def dump_policy():
if active_policy in ['NULL', 'INACTIVE', 'INACCESSIBLE' ]:
err("\'" + active_policy + "\' policy. Nothing to dump.")
(ret, output) = commands.getstatusoutput(xensec_tool + " getpolicy")
if ret:
err("Dumping hypervisor policy failed:\n" + output)
print output
def dump_policy_file(filename, ssidref=None):
ssid = ""
if ssidref:
ssid = " " + str(ssidref)
(ret, output) = commands.getstatusoutput(xensec_tool + " dumppolicy " +
filename + ssid)
if ret:
err("Dumping policy failed:\n" + output)
print output
def list_labels(policy_name, ltype):
"""
Xend exports this function via XML-RPC
    List the VM, resource or any other kind of labels contained in the
    given policy. If no policy name is given, the currently
    active policy's labels will be returned if they exist.
"""
if not policy_name:
if active_policy in [ 'NULL', 'INACTIVE', "" ]:
err("Current policy \'" + active_policy + "\' "
"has no labels defined.\n")
if not ltype or ltype == 'dom':
condition = vm_label_re
elif ltype == 'res':
condition = res_label_re
elif ltype == 'any':
condition = all_label_re
else:
err("Unknown label type \'" + ltype + "\'")
try:
mapfile_lock()
(primary, secondary, f, pol_exists) = getmapfile(policy_name)
if not f:
if pol_exists:
err("Cannot find mapfile for policy \'" + policy_name + "\'.\n")
else:
err("Unknown policy \'" + policy_name + "\'")
labels = []
for line in f:
if condition.match(line):
label = line.split()[3]
if label not in labels:
labels.append(label)
finally:
mapfile_unlock()
if '__NULL_LABEL__' in labels:
labels.remove('__NULL_LABEL__')
return labels
def get_res_label(resource):
"""Returns resource label information (policytype, label, policy) if
it exists. Otherwise returns null label and policy.
"""
def default_res_label():
ssidref = NULL_SSIDREF
if on():
label = ssidref2label(ssidref)
else:
label = None
return (xsconstants.ACM_POLICY_ID, 'NULL', label)
tmp = get_resource_label(resource)
if len(tmp) == 2:
policytype = xsconstants.ACM_POLICY_ID
policy, label = tmp
elif len(tmp) == 3:
policytype, policy, label = tmp
else:
policytype, policy, label = default_res_label()
return (policytype, label, policy)
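# A minimal sketch of the reordering done above; the resource path, policy
# name and label are hypothetical, and xsconstants.ACM_POLICY_ID is assumed
# to be the string 'ACM':
#   get_resource_label("phy:/dev/hda1") might return ['xm-test', 'blue'],
#   in which case get_res_label("phy:/dev/hda1") returns
#   ('ACM', 'blue', 'xm-test'), i.e. (policytype, label, policy).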
def get_res_security_details(resource):
"""Returns the (label, ssidref, policy) associated with a given
resource from the global resource label file.
"""
def default_security_details():
ssidref = NULL_SSIDREF
if on():
label = ssidref2label(ssidref)
else:
label = None
policy = active_policy
return (label, ssidref, policy)
# find the entry associated with this resource
(policytype, label, policy) = get_res_label(resource)
if policy == 'NULL':
log.info("Resource label for "+resource+" not in file, using DEFAULT.")
return default_security_details()
if policytype != xsconstants.ACM_POLICY_ID:
raise VmError("Unknown policy type '%s in label for resource '%s'" %
(policytype, resource))
# is this resource label for the running policy?
if policy == active_policy:
ssidref = label2ssidref(label, policy, 'res')
elif label == xsconstants.XS_INACCESSIBLE_LABEL:
ssidref = NULL_SSIDREF
else:
log.info("Resource label not for active policy, using DEFAULT.")
return default_security_details()
return (label, ssidref, policy)
def security_label_to_details(seclab):
""" Convert a Xen-API type of security label into details """
def default_security_details():
ssidref = NULL_SSIDREF
if on():
label = ssidref2label(ssidref)
else:
label = None
policy = active_policy
return (label, ssidref, policy)
(policytype, policy, label) = seclab.split(":")
# is this resource label for the running policy?
if policy == active_policy:
ssidref = label2ssidref(label, policy, 'res')
else:
log.info("Resource label not for active policy, using DEFAULT.")
return default_security_details()
return (label, ssidref, policy)
def unify_resname(resource, mustexist=True):
"""Makes all resource locations absolute. In case of physical
resources, '/dev/' is added to local file names"""
if not resource:
return resource
# sanity check on resource name
try:
(typ, resfile) = resource.split(":", 1)
except:
err("Resource spec '%s' contains no ':' delimiter" % resource)
if typ == "tap":
try:
(subtype, resfile) = resfile.split(":")
except:
err("Resource spec '%s' contains no tap subtype" % resource)
if typ in ["phy"]:
if not resfile.startswith("/"):
resfile = "/dev/" + resfile
if mustexist:
resfile = os.path.realpath(resfile)
try:
stats = os.lstat(resfile)
if not (stat.S_ISBLK(stats[stat.ST_MODE])):
err("Invalid resource")
except:
err("Invalid resource")
if typ in [ "file", "tap" ]:
resfile = os.path.realpath(resfile)
if mustexist and not os.path.isfile(resfile):
err("Invalid resource")
if typ == "vlan":
try:
vlan = int(resfile)
if vlan < 1 or vlan > 4095:
err("VLAN ID %d out of range." % vlan)
except Exception, e:
err("Invalid VLAN : %s" % resfile)
#file: resources must be specified with absolute path
#vlan resources don't start with '/'
if typ != "vlan":
if (not resfile.startswith("/")) or \
(mustexist and not os.path.exists(resfile)):
err("Invalid resource.")
# from here on absolute file names with resources
if typ == "tap":
typ = typ + ":" + subtype
resource = typ + ":" + resfile
return resource
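# A few illustrative canonicalisations performed by unify_resname(); the
# device and file names are hypothetical:
#   unify_resname("phy:hda1", mustexist=False)  -> "phy:/dev/hda1"
#   unify_resname("tap:qcow:disk.img", mustexist=False)
#                      -> "tap:qcow:" + os.path.realpath("disk.img")
#   unify_resname("vlan:123")                   -> "vlan:123"
# A resource string without a ':' delimiter is rejected via err().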
def res_security_check(resource, domain_label):
"""Checks if the given resource can be used by the given domain
label. Returns 1 if the resource can be used, otherwise 0.
"""
rtnval = 1
# if security is on, ask the hypervisor for a decision
if on():
#build canonical resource name
resource = unify_resname(resource)
(label, ssidref, policy) = get_res_security_details(resource)
domac = ['access_control']
domac.append(['policy', active_policy])
domac.append(['label', domain_label])
domac.append(['type', 'dom'])
decision = get_decision(domac, ['ssidref', str(ssidref)])
# provide descriptive error messages
if decision == 'DENIED':
if label == ssidref2label(NULL_SSIDREF):
raise XSMError("Resource '"+resource+"' is not labeled")
rtnval = 0
else:
raise XSMError("Permission denied for resource '"+resource+"' because label '"+label+"' is not allowed")
rtnval = 0
# security is off, make sure resource isn't labeled
else:
# Note, we can't canonicalise the resource here, because people using
# xm without ACM are free to use relative paths.
(policytype, label, policy) = get_res_label(resource)
if policy != 'NULL':
raise XSMError("Security is off, but '"+resource+"' is labeled")
rtnval = 0
return rtnval
def res_security_check_xapi(rlabel, rssidref, rpolicy, xapi_dom_label):
"""Checks if the given resource can be used by the given domain
label. Returns 1 if the resource can be used, otherwise 0.
"""
rtnval = 1
# if security is on, ask the hypervisor for a decision
if on():
if rlabel == xsconstants.XS_INACCESSIBLE_LABEL:
return 0
typ, dpolicy, domain_label = xapi_dom_label.split(":")
if not dpolicy or not domain_label:
raise VmError("VM security label in wrong format.")
if active_policy != rpolicy:
raise VmError("Resource's policy '%s' != active policy '%s'" %
(rpolicy, active_policy))
domac = ['access_control']
domac.append(['policy', active_policy])
domac.append(['label', domain_label])
domac.append(['type', 'dom'])
decision = get_decision(domac, ['ssidref', str(rssidref)])
log.info("Access Control Decision : %s" % decision)
# provide descriptive error messages
if decision == 'DENIED':
if rlabel == ssidref2label(NULL_SSIDREF):
#raise XSMError("Resource is not labeled")
rtnval = 0
else:
#raise XSMError("Permission denied for resource because label '"+rlabel+"' is not allowed")
rtnval = 0
# security is off, make sure resource isn't labeled
else:
# Note, we can't canonicalise the resource here, because people using
# xm without ACM are free to use relative paths.
if rpolicy != 'NULL':
#raise XSMError("Security is off, but resource is labeled")
rtnval = 0
return rtnval
def validate_label_xapi(xapi_label, dom_or_res):
"""
Make sure that this label is part of the currently enforced policy
and that it references the current policy.
    dom_or_res defines whether this is a VM ('dom') or resource label
('res')
"""
tmp = xapi_label.split(":")
if len(tmp) != 3:
return -xsconstants.XSERR_BAD_LABEL_FORMAT
policytyp, policyref, label = tmp
return validate_label(policytyp, policyref, label, dom_or_res)
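# Example of the Xen-API label format expected above; the policy and label
# names are purely illustrative:
#   validate_label_xapi("ACM:xm-test:blue", 'res')
# splits into ('ACM', 'xm-test', 'blue') and is then checked against the
# currently loaded policy by validate_label() below.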
def validate_label(policytype, policyref, label, dom_or_res):
"""
Make sure that this label is part of the currently enforced policy
    and that it references the current policy.
"""
if policytype != xsconstants.ACM_POLICY_ID:
return -xsconstants.XSERR_WRONG_POLICY_TYPE
if not policytype or not label:
return -xsconstants.XSERR_BAD_LABEL_FORMAT
rc = xsconstants.XSERR_SUCCESS
if label == xsconstants.XS_INACCESSIBLE_LABEL:
return rc
from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
curpol = XSPolicyAdminInstance().get_loaded_policy()
if not curpol or curpol.get_name() != policyref:
rc = -xsconstants.XSERR_BAD_LABEL
else:
try:
label2ssidref(label, curpol.get_name() , dom_or_res)
except:
rc = -xsconstants.XSERR_BAD_LABEL
return rc
def set_resource_label_xapi(resource, reslabel_xapi, oldlabel_xapi):
"""Assign a resource label to a resource
@param resource: The name of a resource, i.e., "phy:/dev/hda", or
"tap:qcow:/path/to/file.qcow"
    @param reslabel_xapi: A resource label formatted as in all other parts of
           the Xen-API, i.e., "ACM:xm-test:blue"
@rtype: int
@return Success (0) or failure value (< 0)
"""
olabel = ""
if reslabel_xapi == "":
return rm_resource_label(resource, oldlabel_xapi)
rc = validate_label_xapi(reslabel_xapi, 'res')
if rc != xsconstants.XSERR_SUCCESS:
return rc
if oldlabel_xapi not in [ "" ]:
tmp = oldlabel_xapi.split(":")
if len(tmp) != 3:
return -xsconstants.XSERR_BAD_LABEL_FORMAT
otyp, opolicyref, olabel = tmp
# Only ACM is supported
if otyp != xsconstants.ACM_POLICY_ID and \
otyp != xsconstants.INVALID_POLICY_PREFIX + \
xsconstants.ACM_POLICY_ID:
return -xsconstants.XSERR_WRONG_POLICY_TYPE
typ, policyref, label = reslabel_xapi.split(":")
return set_resource_label(resource, typ, policyref, label, olabel)
def is_resource_in_use(resource):
"""
Domain-0 'owns' resources of type 'VLAN', the rest are owned by
the guests.
"""
from xen.xend import XendDomain
lst = []
if resource.startswith('vlan'):
from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
curpol = XSPolicyAdminInstance().get_loaded_policy()
policytype, label, policy = get_res_label(resource)
if curpol and \
policytype == xsconstants.ACM_POLICY_ID and \
policy == curpol.get_name() and \
label in curpol.policy_get_resourcelabel_names():
# VLAN is in use.
lst.append(XendDomain.instance().
get_vm_by_uuid(XendDomain.DOM0_UUID))
else:
dominfos = XendDomain.instance().list('all')
for dominfo in dominfos:
if is_resource_in_use_by_dom(dominfo, resource):
lst.append(dominfo)
return lst
def devices_equal(res1, res2, mustexist=True):
""" Determine whether two devices are equal """
return (unify_resname(res1, mustexist) ==
unify_resname(res2, mustexist))
def is_resource_in_use_by_dom(dominfo, resource):
""" Determine whether a resources is in use by a given domain
@return True or False
"""
if not dominfo.domid:
return False
if dominfo._stateGet() not in [ DOM_STATE_RUNNING ]:
return False
devs = dominfo.info['devices']
uuids = devs.keys()
for uuid in uuids:
dev = devs[uuid]
if len(dev) >= 2 and dev[1].has_key('uname'):
# dev[0] is type, i.e. 'vbd'
if devices_equal(dev[1]['uname'], resource, mustexist=False):
log.info("RESOURCE IN USE: Domain %d uses %s." %
(dominfo.domid, resource))
return True
return False
def get_domain_resources(dominfo):
""" Collect all resources of a domain in a map where each entry of
the map is a list.
    Entries are stored in the following formats:
tap:qcow:/path/xyz.qcow
"""
resources = { 'vbd' : [], 'tap' : [], 'vif' : []}
devs = dominfo.info['devices']
uuids = devs.keys()
for uuid in uuids:
dev = devs[uuid]
typ = dev[0]
if typ in [ 'vbd', 'tap' ]:
resources[typ].append(dev[1]['uname'])
if typ in [ 'vif' ]:
sec_lab = dev[1].get('security_label')
if sec_lab:
resources[typ].append(sec_lab)
else:
# !!! This should really get the label of the domain
# or at least a resource label that has the same STE type
# as the domain has
from xen.util.acmpolicy import ACM_LABEL_UNLABELED
resources[typ].append("%s:%s:%s" %
(xsconstants.ACM_POLICY_ID,
active_policy,
ACM_LABEL_UNLABELED))
return resources
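# Hypothetical shape of the map returned by get_domain_resources():
#   {'vbd': ['phy:/dev/hda1'],
#    'tap': ['tap:qcow:/path/xyz.qcow'],
#    'vif': ['ACM:xm-test:red']}
# where the 'vif' entries are security label strings rather than device names.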
def resources_compatible_with_vmlabel(xspol, dominfo, vmlabel):
"""
Check whether the resources' labels are compatible with the
given VM label. This is a function to be used when for example
a running domain is to get the new label 'vmlabel'
"""
if not xspol:
return False
try:
resfile_lock()
try:
access_control = dictio.dict_read("resources",
res_label_filename)
except:
# No labeled resources -> must be compatible
return True
return __resources_compatible_with_vmlabel(xspol, dominfo, vmlabel,
access_control)
finally:
resfile_unlock()
return False
def __resources_compatible_with_vmlabel(xspol, dominfo, vmlabel,
access_control,
is_policy_update=False):
"""
Check whether the resources' labels are compatible with the
given VM label. The access_control parameter provides a
dictionary of the resource name to resource label mappings
under which the evaluation should be done.
Call this only for a paused or running domain.
"""
def collect_labels(reslabels, s_label, polname):
if len(s_label) != 3 or polname != s_label[1]:
return False
label = s_label[2]
if not label in reslabels:
reslabels.append(label)
return True
resources = get_domain_resources(dominfo)
reslabels = [] # all resource labels
polname = xspol.get_name()
for key, value in resources.items():
if key in [ 'vbd', 'tap' ]:
for res in resources[key]:
if not res in access_control:
label = [xsconstants.ACM_POLICY_ID,
xspol.get_name(),
ACM_LABEL_UNLABELED]
else:
label = access_control[res]
if not collect_labels(reslabels, label, polname):
return False
elif key in [ 'vif' ]:
for xapi_label in value:
label = xapi_label.split(":")
from xen.util.acmpolicy import ACM_LABEL_UNLABELED
if not (is_policy_update and \
label[2] == ACM_LABEL_UNLABELED):
if not collect_labels(reslabels, label, polname):
return False
else:
log.error("Unhandled device type: %s" % key)
return False
    # Check that all resource labels have a common STE type with the
# vmlabel
if len(reslabels) > 0:
rc = xspol.policy_check_vmlabel_against_reslabels(vmlabel, reslabels)
else:
rc = True
log.info("vmlabel=%s, reslabels=%s, rc=%s" %
(vmlabel, reslabels, str(rc)))
    return rc
def set_resource_label(resource, policytype, policyref, reslabel, \
oreslabel = None):
"""
Xend exports this function via XML-RPC.
Assign a label to a resource
If the old label (oreslabel) is given, then the resource must have
that old label.
A resource label may be changed if
- the resource is not in use
@param resource : The name of a resource, i.e., "phy:/dev/hda"
@param policyref : The name of the policy
@param reslabel : the resource label within the policy
@param oreslabel : optional current resource label
@rtype: int
@return Success (0) or failure value (< 0)
"""
try:
resource = unify_resname(resource, mustexist=False)
except Exception:
return -xsconstants.XSERR_BAD_RESOURCE_FORMAT
try:
resfile_lock()
mapfile_lock()
if reslabel not in [ '', xsconstants.XS_INACCESSIBLE_LABEL ]:
ssidref = label2ssidref(reslabel, policyref, 'res')
domains = is_resource_in_use(resource)
if len(domains) > 0:
return -xsconstants.XSERR_RESOURCE_IN_USE
access_control = {}
try:
access_control = dictio.dict_read("resources", res_label_filename)
except:
pass
if oreslabel:
if not access_control.has_key(resource):
return -xsconstants.XSERR_BAD_LABEL
tmp = access_control[resource]
if len(tmp) != 3:
return -xsconstants.XSERR_BAD_LABEL
if tmp[2] != oreslabel:
return -xsconstants.XSERR_BAD_LABEL
if resource.startswith('vlan:'):
for key, value in access_control.items():
if value == tuple([policytype, policyref, reslabel]) and \
key.startswith('vlan:'):
return -xsconstants.XSERR_BAD_LABEL
if reslabel == xsconstants.XS_INACCESSIBLE_LABEL:
policytype = xsconstants.ACM_POLICY_ID
policyref = '*'
if reslabel != "":
new_entry = { resource : tuple([policytype, policyref, reslabel])}
access_control.update(new_entry)
command = "add"
reslbl = ":".join([policytype, policyref, reslabel])
else:
if access_control.has_key(resource):
del access_control[resource]
command = "remove"
reslbl = ""
run_resource_label_change_script(resource, reslbl, command)
dictio.dict_write(access_control, "resources", res_label_filename)
finally:
resfile_unlock()
mapfile_unlock()
return xsconstants.XSERR_SUCCESS
def rm_resource_label(resource, oldlabel_xapi):
"""Remove a resource label from a physical resource
@param resource: The name of a resource, i.e., "phy:/dev/hda"
@rtype: int
@return Success (0) or failure value (< 0)
"""
tmp = oldlabel_xapi.split(":")
if len(tmp) != 3:
return -xsconstants.XSERR_BAD_LABEL_FORMAT
otyp, opolicyref, olabel = tmp
# Only ACM is supported
if otyp != xsconstants.ACM_POLICY_ID and \
otyp != xsconstants.INVALID_POLICY_PREFIX + xsconstants.ACM_POLICY_ID:
return -xsconstants.XSERR_WRONG_POLICY_TYPE
return set_resource_label(resource, "", "", "", olabel)
def get_resource_label_xapi(resource):
"""Get the assigned resource label of a physical resource
    in the format used by the Xen-API, i.e., "ACM:xm-test:blue"
@rtype: string
@return the string representing policy type, policy name and label of
the resource
"""
res = get_resource_label(resource)
return format_resource_label(res)
def format_resource_label(res):
if res:
if len(res) == 2:
return xsconstants.ACM_POLICY_ID + ":" + res[0] + ":" + res[1]
if len(res) == 3:
return ":".join(res)
return ""
def get_resource_label(resource):
"""
Xend exports this function via XML-RPC.
Get the assigned resource label of a given resource
@param resource: The name of a resource, i.e., "phy:/dev/hda"
@rtype: list
@return tuple of (policy name, resource label), i.e., (xm-test, blue)
"""
try:
resource = unify_resname(resource, mustexist=False)
except Exception:
return []
reslabel_map = get_labeled_resources()
if reslabel_map.has_key(resource):
return list(reslabel_map[resource])
else:
#Try to resolve each label entry
for key, value in reslabel_map.items():
try:
if resource == unify_resname(key):
return list(value)
except:
pass
return []
def get_labeled_resources_xapi():
""" Get a map of all labeled resource with the labels formatted in the
xen-api resource label format.
"""
reslabel_map = get_labeled_resources()
for key, labeldata in reslabel_map.items():
reslabel_map[key] = format_resource_label(labeldata)
return reslabel_map
def get_labeled_resources():
"""
Xend exports this function via XML-RPC
Get a map of all labeled resources.
    @rtype: dict
    @return map of labeled resources to their label tuples
"""
try:
resfile_lock()
try:
access_control = dictio.dict_read("resources", res_label_filename)
except:
return {}
finally:
resfile_unlock()
return access_control
def relabel_domains(relabel_list):
"""
Relabel the given domains to have a new ssidref.
@param relabel_list: a list containing tuples of domid, ssidref
example: [ [0, 0x00020002] ]
"""
rc = -xsconstants.XSERR_GENERAL_FAILURE
errors = ""
return rc, errors
def __update_label_policy_change(sec_lab,
cur_poltype,
cur_polname,
new_poltype,
new_polname,
polnew_labels,
label_map):
"""
Determine a new resource label given the new policy's type
and name and the new policy's (resource/VM) labels and the
(resource/VM) label map that indicates renaming rules for
labels.
"""
is_deleted = False
policytype, policy, label = sec_lab
if cur_poltype != policytype or \
cur_polname != policy:
return sec_lab, is_deleted
if policytype != xsconstants.ACM_POLICY_ID:
return sec_lab, is_deleted
elif label_map.has_key(label) and policy == cur_polname:
# renaming of an active label; policy may have been renamed
label = label_map[label]
polname = new_polname
elif label not in polnew_labels:
        # label has been removed
policytype = xsconstants.INVALID_POLICY_PREFIX + policytype
polname = policy
is_deleted = True
else:
# no change to label
policytype = xsconstants.ACM_POLICY_ID
polname = new_polname
return tuple( [ policytype, polname, label ] ), is_deleted
def change_acm_policy(bin_pol, del_array, chg_array,
vmlabel_map, reslabel_map, cur_acmpol, new_acmpol,
is_reset):
"""
Change the ACM policy of the system by relabeling
domains and resources first and doing some access checks.
Then update the policy in the hypervisor. If this is all successful,
    relabel the domains permanently and commit the relabeled resources.
Need to do / check the following:
- relabel all resources where there is a 'from' field in
the policy. [ NOT DOING THIS: and mark those as unlabeled where the label
does not appear in the new policy anymore (deletion) ]
- relabel all VMs where there is a 'from' field in the
policy and mark those as unlabeled where the label
does not appear in the new policy anymore; no running
or paused VM may be unlabeled through this
- check that under the new labeling conditions the VMs
still have access to their resources as before. Unlabeled
resources are inaccessible. If this check fails, the
update failed.
- Attempt changes in the hypervisor; if this step fails,
roll back the relabeling of resources and VMs
- Make the relabeling of resources and VMs permanent
This function should be called with the lock to the domains
held (XendDomain.instance().domains_lock)
"""
from xen.util.acmpolicy import ACM_LABEL_UNLABELED
rc = xsconstants.XSERR_SUCCESS
domain_label_map = {}
new_policyname = new_acmpol.get_name()
new_policytype = new_acmpol.get_type_name()
cur_policyname = cur_acmpol.get_name()
cur_policytype = cur_acmpol.get_type_name()
polnew_reslabels = new_acmpol.policy_get_resourcelabel_names()
errors=""
try:
resfile_lock()
mapfile_lock()
# Get all domains' dominfo.
from xen.xend import XendDomain
dominfos = XendDomain.instance().list('all')
log.info("----------------------------------------------")
label_changes = []
# relabel resources
access_control = {}
try:
access_control = dictio.dict_read("resources", res_label_filename)
except:
pass
for key, labeldata in access_control.items():
if len(labeldata) == 2:
policy, label = labeldata
policytype = xsconstants.ACM_POLICY_ID
elif len(labeldata) == 3:
policytype, policy, label = labeldata
else:
return -xsconstants.XSERR_BAD_LABEL_FORMAT, ""
new_sec_lab, is_deleted = \
__update_label_policy_change( tuple([policytype,
policy,
label]),
cur_policytype,
cur_policyname,
new_policytype,
new_policyname,
polnew_reslabels,
reslabel_map)
if is_deleted:
label_changes.append(key)
# Update entry
access_control[key] = new_sec_lab
# All resources have new labels in the access_control map
# There may still be labels in there that are invalid now.
# Do this in memory without writing to disk:
# - Relabel all domains independent of whether they are running
# or not
# - later write back to config files
polnew_vmlabels = new_acmpol.policy_get_virtualmachinelabel_names()
for dominfo in dominfos:
sec_lab = dominfo.get_security_label()
if not sec_lab:
continue
policytype, policy, vmlabel = sec_lab.split(":")
name = dominfo.getName()
if policytype != cur_policytype or \
policy != cur_policyname:
continue
new_vmlabel = vmlabel
if vmlabel_map.has_key(vmlabel) and \
(not is_reset or name == "Domain-0") :
# renaming of the label; this is only allowed if it's
# not a reset of the policy or if it is a reset, then
# only for Domain-0
new_vmlabel = vmlabel_map[vmlabel]
polname = new_policyname
elif new_vmlabel not in polnew_vmlabels and \
vmlabel != ACM_LABEL_UNLABELED:
# removal of VM label and not the 'unlabeled' label
policytype = xsconstants.INVALID_POLICY_PREFIX + policytype
polname = policy
else:
polname = new_policyname
new_seclab = "%s:%s:%s" % \
(policytype, polname, new_vmlabel)
domain_label_map[dominfo] = [ sec_lab, new_seclab ]
if dominfo._stateGet() in (DOM_STATE_PAUSED, DOM_STATE_RUNNING):
compatible = __resources_compatible_with_vmlabel(new_acmpol,
dominfo,
new_vmlabel,
access_control,
is_policy_update=True)
log.info("Domain %s with new label '%s' can access its "
"resources? : %s" %
(name, new_vmlabel, str(compatible)))
log.info("VM labels in new policy: %s" %
new_acmpol.policy_get_virtualmachinelabel_names())
if not compatible:
return (-xsconstants.XSERR_RESOURCE_ACCESS, "")
for dominfo in dominfos:
# relabel the VIF interfaces
changed = False
for vif_uuid in dominfo.get_vifs():
sec_lab = dominfo.info['devices'][vif_uuid][1]\
.get('security_label')
if sec_lab:
result, _ = \
__update_label_policy_change(tuple(sec_lab.split(':')),
cur_policytype,
cur_policyname,
new_policytype,
new_policyname,
polnew_reslabels,
reslabel_map)
new_sec_lab = ':'.join(list(result))
if new_sec_lab != sec_lab:
changed = True
dominfo.info['devices'][vif_uuid][1]\
['security_label'] = new_sec_lab
if changed:
XendDomain.instance().managed_config_save(dominfo)
rc, errors = hv_chg_policy(bin_pol, del_array, chg_array)
if rc == 0:
for key in label_changes:
run_resource_label_change_script(key, "", "remove")
# Write the relabeled resources back into the file
dictio.dict_write(access_control, "resources", res_label_filename)
# Properly update all VMs to their new labels
for dominfo, labels in domain_label_map.items():
sec_lab, new_seclab = labels
if sec_lab != new_seclab:
log.info("Updating domain %s to new label '%s'." % \
(dominfo.getName(), new_seclab))
# This better be working!
res = dominfo.set_security_label(new_seclab,
sec_lab,
new_acmpol,
cur_acmpol)
if res[0] != xsconstants.XSERR_SUCCESS:
log.info("ERROR: Could not chg label on domain %s: %s" %
(dominfo.getName(),
xsconstants.xserr2string(-int(res[0]))))
finally:
log.info("----------------------------------------------")
mapfile_unlock()
resfile_unlock()
return rc, errors
def parse_security_label(security_label):
tmp = security_label.split(":")
if len(tmp) != 3:
return ""
else:
return security_label
def set_security_label(policy, label):
if label and policy and label != "" and policy != "":
return "%s:%s:%s" % (xsconstants.ACM_POLICY_ID, policy, label)
else:
return ""
def ssidref2security_label(ssidref):
from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
return XSPolicyAdminInstance().ssidref_to_vmlabel(ssidref)
def get_security_label(self, xspol=None):
"""
Get the security label of a domain
@param xspol The policy to use when converting the ssid into
a label; only to be passed during the updating
of the policy
"""
domid = self.getDomid()
if not xspol:
from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
xspol = XSPolicyAdminInstance().get_loaded_policy()
label = ""
if xspol:
label = xspol.policy_get_domain_label_formatted(domid)
if domid != 0:
label = self.info.get('security_label', label)
return label
def check_can_run(sec_label):
""" Check whether a VM could run, given its vm label. A VM can run if
- it is authorized
- is not in conflict with any running domain
"""
try:
mapfile_lock()
if sec_label == None or sec_label == "":
vm_label = ACM_LABEL_UNLABELED
else:
poltype, policy, vm_label = sec_label.split(':')
if policy != get_active_policy_name():
return -xsconstants.XSERR_BAD_POLICY_NAME
ssidref = label2ssidref(vm_label, policy, 'dom')
if ssidref != xsconstants.INVALID_SSIDREF:
if not has_authorization(ssidref):
return -xsconstants.XSERR_VM_NOT_AUTHORIZED
if is_in_conflict(ssidref):
return -xsconstants.XSERR_VM_IN_CONFLICT
return -xsconstants.XSERR_SUCCESS
else:
return -xsconstants.XSERR_BAD_LABEL
finally:
mapfile_unlock()
__cond = threading.Condition()
__script_runner = None
__orders = []
def run_resource_label_change_script(resource, label, command):
global __cond, __orders, __script_runner
def __run_resource_label_change_script():
global __cond, __orders
script = XendOptions.instance().get_resource_label_change_script()
if script:
parms = {}
while True:
__cond.acquire()
if len(__orders) == 0:
__cond.wait()
parms['label'], \
parms['command'], \
parms['resource'] = __orders[0]
__orders = __orders[1:]
__cond.release()
log.info("Running resource label change script %s: %s" %
(script, parms))
parms.update(os.environ)
xpopen.call(" ".join(script, params))
else:
log.info("No script given for relabeling of resources.")
if not __script_runner:
__script_runner = \
threading.Thread(target=__run_resource_label_change_script,
args=())
__script_runner.start()
__cond.acquire()
__orders.append((label,command,resource))
__cond.notify()
__cond.release()
|
imruahmed/microblog
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/operations/freeze.py
|
284
|
from __future__ import absolute_import
import logging
import re
import pip
from pip.compat import stdlib_pkgs
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
logger = logging.getLogger(__name__)
# packages to exclude from freeze output
freeze_excludes = stdlib_pkgs + ['setuptools', 'pip', 'distribute']
def freeze(
requirement=None,
find_links=None, local_only=None, user_only=None, skip_regex=None,
find_tags=False,
default_vcs=None,
isolated=False,
wheel_cache=None):
find_links = find_links or []
skip_match = None
if skip_regex:
skip_match = re.compile(skip_regex)
dependency_links = []
for dist in pkg_resources.working_set:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt')
)
for link in find_links:
if '#egg=' in link:
dependency_links.append(link)
for link in find_links:
yield '-f %s' % link
installations = {}
for dist in get_installed_distributions(local_only=local_only,
skip=freeze_excludes,
user_only=user_only):
req = pip.FrozenRequirement.from_dist(
dist,
dependency_links,
find_tags=find_tags,
)
installations[req.name] = req
if requirement:
with open(requirement) as req_file:
for line in req_file:
if (not line.strip() or
line.strip().startswith('#') or
(skip_match and skip_match.search(line)) or
line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--extra-index-url'))):
yield line.rstrip()
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = InstallRequirement.from_editable(
line,
default_vcs=default_vcs,
isolated=isolated,
wheel_cache=wheel_cache,
)
else:
line_req = InstallRequirement.from_line(
line,
isolated=isolated,
wheel_cache=wheel_cache,
)
if not line_req.name:
logger.info(
"Skipping line because it's not clear what it "
"would install: %s",
line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
elif line_req.name not in installations:
logger.warning(
"Requirement file contains %s, but that package is"
" not installed",
line.strip(),
)
else:
yield str(installations[line_req.name]).rstrip()
del installations[line_req.name]
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
yield str(installation).rstrip()
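# Minimal usage sketch: freeze() is a generator of requirement lines whose
# keyword options mirror the `pip freeze` command-line flags.
#
#     for line in freeze(local_only=True):
#         print(line)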
|
nhlfr/virtlet
|
refs/heads/master
|
image_skel/cleanup.py
|
2
|
#!/usr/bin/env python
# Copyright 2016 Mirantis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import traceback
import libvirt
def destroy_domain(domain):
if domain.state() != libvirt.VIR_DOMAIN_RUNNING:
return
try:
domain.destroy()
except libvirt.libvirtError:
sys.stderr.write("Failed to destroy VM %s\n" % domain.name())
traceback.print_exc(file=sys.stderr)
sys.exit(1)
def undefine_domain(domain):
try:
domain.undefine()
except libvirt.libvirtError:
sys.stderr.write("Failed to undefine VM %s\n" % domain.name())
traceback.print_exc(file=sys.stderr)
sys.exit(1)
def cleanup_volumes(conn):
try:
pool = conn.storagePoolLookupByName("default")
except libvirt.libvirtError:
return
volumes = pool.listAllVolumes()
print("Cleaning up volumes")
for volume in volumes:
volume_name = volume.name()
print("Deleting volume", volume_name)
if volume.delete() < 0:
sys.stderr.write("Failed to remove volume %s\n" % volume_name)
sys.exit(1)
print("All volumes cleaned")
def main():
conn = libvirt.open("qemu:///system")
domains = conn.listAllDomains()
print("Cleaning up VMs")
for domain in domains:
print("Destroying VM", domain.name())
destroy_domain(domain)
print("Undefining VM", domain.name())
undefine_domain(domain)
print("All VMs cleaned")
cleanup_volumes(conn)
conn.close()
if __name__ == "__main__":
main()
|
ABaldwinHunter/django-clone-classic
|
refs/heads/master
|
tests/migrations/test_migrations/0001_initial.py
|
266
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
),
migrations.AlterUniqueTogether(
name='author',
unique_together=set([('name', 'slug')]),
),
]
|
bliz937/kivy
|
refs/heads/master
|
kivy/uix/effectwidget.py
|
20
|
'''
EffectWidget
============
.. versionadded:: 1.9.0
This code is still experimental, and its API is subject to change in a
future version.
The :class:`EffectWidget` is able to apply a variety of fancy
graphical effects to
its children. It works by rendering to a series of
:class:`~kivy.graphics.Fbo` instances with custom opengl fragment shaders.
As such, effects can freely do almost anything, from inverting the
colors of the widget, to anti-aliasing, to emulating the appearance of a
CRT monitor!
The basic usage is as follows::
w = EffectWidget()
    w.add_widget(Button(text='Hello!'))
w.effects = [InvertEffect(), HorizontalBlurEffect(size=2.0)]
The equivalent in kv would be::
#: import ew kivy.uix.effectwidget
EffectWidget:
effects: ew.InvertEffect(), ew.HorizontalBlurEffect(size=2.0)
Button:
text: 'Hello!'
The effects can be a list of effects of any length, and they will be
applied sequentially.
The module comes with a range of prebuilt effects, but the interface
is designed to make it easy to create your own. Instead of writing a
full glsl shader, you provide a single function that takes
some inputs based on the screen (current pixel color, current widget
texture etc.). See the sections below for more information.
Usage Guidelines
----------------
It is not efficient to resize an :class:`EffectWidget`, as
the :class:`~kivy.graphics.Fbo` is recreated on each resize event.
If you need to resize frequently, consider doing things a different
way.
Although some effects have adjustable parameters, it is
*not* efficient to animate these, as the entire
shader is reconstructed every time. You should use glsl
uniform variables instead. The :class:`AdvancedEffectBase`
may make this easier.
.. note:: The :class:`EffectWidget` *cannot* draw outside its own
widget area (pos -> pos + size). Any child widgets
overlapping the boundary will be cut off at this point.
Provided Effects
----------------
The module comes with several pre-written effects. Some have
adjustable properties (e.g. blur radius). Please see the individual
effect documentation for more details.
- :class:`MonochromeEffect` - makes the widget grayscale.
- :class:`InvertEffect` - inverts the widget colors.
- :class:`ChannelMixEffect` - swaps color channels.
- :class:`ScanlinesEffect` - displays flickering scanlines.
- :class:`PixelateEffect` - pixelates the image.
- :class:`HorizontalBlurEffect` - Gaussian blurs horizontally.
- :class:`VerticalBlurEffect` - Gaussian blurs vertically.
- :class:`FXAAEffect` - applies a very basic anti-aliasing.
Creating Effects
----------------
Effects are designed to make it easy to create and use your own
transformations. You do this by creating and using an instance of
:class:`EffectBase` with your own custom :attr:`EffectBase.glsl`
property.
The glsl property is a string representing part of a glsl fragment
shader. You can include as many functions as you like (the string
is simply spliced into the whole shader), but it
must implement a function :code:`effect` as below::
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
// ... your code here
return something; // must be a vec4 representing the new color
}
The full shader will calculate the normal pixel color at each point,
then call your :code:`effect` function to transform it. The
parameters are:
- **color**: The normal color of the current pixel (i.e. texture
sampled at tex_coords).
- **texture**: The texture containing the widget's normal background.
- **tex_coords**: The normal texture_coords used to access texture.
- **coords**: The pixel indices of the current pixel.
The shader code also has access to two useful uniform variables,
:code:`time` containing the time (in seconds) since the program start,
and :code:`resolution` containing the shape (x pixels, y pixels) of
the widget.
For instance, the following simple string (taken from the `InvertEffect`)
would invert the input color but set alpha to 1.0::
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
return vec4(1.0 - color.xyz, 1.0);
}
You can also set the glsl by automatically loading the string from a
file: simply set the :attr:`EffectBase.source` property of an effect.
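For instance, a minimal sketch of an effect exposing one adjustable uniform
variable (the uniform name ``tint`` and the class name ``TintEffect`` are
purely illustrative) could look like::
    tint_glsl = """
    uniform float tint;
    vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
    {
        return vec4(color.x, color.y * tint, color.z, color.w);
    }
    """
    class TintEffect(AdvancedEffectBase):
        def __init__(self, *args, **kwargs):
            super(TintEffect, self).__init__(*args, **kwargs)
            self.glsl = tint_glsl
            self.uniforms = {'tint': 0.5}
Changing a value in :attr:`AdvancedEffectBase.uniforms` then uploads just
the new uniform value instead of rebuilding the whole shader.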
'''
from kivy.clock import Clock
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import (StringProperty, ObjectProperty, ListProperty,
NumericProperty, DictProperty)
from kivy.graphics import (RenderContext, Fbo, Color, Rectangle,
Translate, PushMatrix, PopMatrix, ClearColor,
ClearBuffers)
from kivy.event import EventDispatcher
from kivy.base import EventLoop
from kivy.resources import resource_find
from kivy.logger import Logger
__all__ = ('EffectWidget', 'EffectBase', 'AdvancedEffectBase',
'MonochromeEffect', 'InvertEffect', 'ChannelMixEffect',
'ScanlinesEffect', 'PixelateEffect',
'HorizontalBlurEffect', 'VerticalBlurEffect',
'FXAAEffect')
shader_header = '''
#ifdef GL_ES
precision highp float;
#endif
/* Outputs from the vertex shader */
varying vec4 frag_color;
varying vec2 tex_coord0;
/* uniform texture samplers */
uniform sampler2D texture0;
'''
shader_uniforms = '''
uniform vec2 resolution;
uniform float time;
'''
shader_footer_trivial = '''
void main (void){
gl_FragColor = frag_color * texture2D(texture0, tex_coord0);
}
'''
shader_footer_effect = '''
void main (void){
vec4 normal_color = frag_color * texture2D(texture0, tex_coord0);
vec4 effect_color = effect(normal_color, texture0, tex_coord0,
gl_FragCoord.xy);
gl_FragColor = effect_color;
}
'''
effect_trivial = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
return color;
}
'''
effect_monochrome = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
float mag = 1.0/3.0 * (color.x + color.y + color.z);
return vec4(mag, mag, mag, color.w);
}
'''
effect_invert = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
return vec4(1.0 - color.xyz, color.w);
}
'''
effect_mix = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{{
return vec4(color.{}, color.{}, color.{}, color.w);
}}
'''
effect_blur_h = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{{
float dt = ({} / 4.0) * 1.0 / resolution.x;
vec4 sum = vec4(0.0);
sum += texture2D(texture, vec2(tex_coords.x - 4.0*dt, tex_coords.y))
* 0.05;
sum += texture2D(texture, vec2(tex_coords.x - 3.0*dt, tex_coords.y))
* 0.09;
sum += texture2D(texture, vec2(tex_coords.x - 2.0*dt, tex_coords.y))
* 0.12;
sum += texture2D(texture, vec2(tex_coords.x - dt, tex_coords.y))
* 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y))
* 0.16;
sum += texture2D(texture, vec2(tex_coords.x + dt, tex_coords.y))
* 0.15;
sum += texture2D(texture, vec2(tex_coords.x + 2.0*dt, tex_coords.y))
* 0.12;
sum += texture2D(texture, vec2(tex_coords.x + 3.0*dt, tex_coords.y))
* 0.09;
sum += texture2D(texture, vec2(tex_coords.x + 4.0*dt, tex_coords.y))
* 0.05;
return vec4(sum.xyz, color.w);
}}
'''
effect_blur_v = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{{
float dt = ({} / 4.0)
                     * 1.0 / resolution.y;
vec4 sum = vec4(0.0);
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 4.0*dt))
* 0.05;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 3.0*dt))
* 0.09;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - 2.0*dt))
* 0.12;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y - dt))
* 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y))
* 0.16;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + dt))
* 0.15;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 2.0*dt))
* 0.12;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 3.0*dt))
* 0.09;
sum += texture2D(texture, vec2(tex_coords.x, tex_coords.y + 4.0*dt))
* 0.05;
return vec4(sum.xyz, color.w);
}}
'''
effect_postprocessing = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
vec2 q = tex_coords * vec2(1, -1);
vec2 uv = 0.5 + (q-0.5);//*(0.9);// + 0.1*sin(0.2*time));
vec3 oricol = texture2D(texture,vec2(q.x,1.0-q.y)).xyz;
vec3 col;
col.r = texture2D(texture,vec2(uv.x+0.003,-uv.y)).x;
col.g = texture2D(texture,vec2(uv.x+0.000,-uv.y)).y;
col.b = texture2D(texture,vec2(uv.x-0.003,-uv.y)).z;
col = clamp(col*0.5+0.5*col*col*1.2,0.0,1.0);
//col *= 0.5 + 0.5*16.0*uv.x*uv.y*(1.0-uv.x)*(1.0-uv.y);
col *= vec3(0.8,1.0,0.7);
col *= 0.9+0.1*sin(10.0*time+uv.y*1000.0);
col *= 0.97+0.03*sin(110.0*time);
float comp = smoothstep( 0.2, 0.7, sin(time) );
//col = mix( col, oricol, clamp(-2.0+2.0*q.x+3.0*comp,0.0,1.0) );
return vec4(col, color.w);
}
'''
effect_pixelate = '''
vec4 effect(vec4 vcolor, sampler2D texture, vec2 texcoord, vec2 pixel_coords)
{{
vec2 pixelSize = {} / resolution;
vec2 xy = floor(texcoord/pixelSize)*pixelSize + pixelSize/2.0;
return texture2D(texture, xy);
}}
'''
effect_fxaa = '''
vec4 effect( vec4 color, sampler2D buf0, vec2 texCoords, vec2 coords)
{
vec2 frameBufSize = resolution;
float FXAA_SPAN_MAX = 8.0;
float FXAA_REDUCE_MUL = 1.0/8.0;
float FXAA_REDUCE_MIN = 1.0/128.0;
vec3 rgbNW=texture2D(buf0,texCoords+(vec2(-1.0,-1.0)/frameBufSize)).xyz;
vec3 rgbNE=texture2D(buf0,texCoords+(vec2(1.0,-1.0)/frameBufSize)).xyz;
vec3 rgbSW=texture2D(buf0,texCoords+(vec2(-1.0,1.0)/frameBufSize)).xyz;
vec3 rgbSE=texture2D(buf0,texCoords+(vec2(1.0,1.0)/frameBufSize)).xyz;
vec3 rgbM=texture2D(buf0,texCoords).xyz;
vec3 luma=vec3(0.299, 0.587, 0.114);
float lumaNW = dot(rgbNW, luma);
float lumaNE = dot(rgbNE, luma);
float lumaSW = dot(rgbSW, luma);
float lumaSE = dot(rgbSE, luma);
float lumaM = dot(rgbM, luma);
float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));
float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));
vec2 dir;
dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));
dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));
float dirReduce = max(
(lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL),
FXAA_REDUCE_MIN);
float rcpDirMin = 1.0/(min(abs(dir.x), abs(dir.y)) + dirReduce);
dir = min(vec2(FXAA_SPAN_MAX, FXAA_SPAN_MAX),
max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX),
dir * rcpDirMin)) / frameBufSize;
vec3 rgbA = (1.0/2.0) * (
texture2D(buf0, texCoords.xy + dir * (1.0/3.0 - 0.5)).xyz +
texture2D(buf0, texCoords.xy + dir * (2.0/3.0 - 0.5)).xyz);
vec3 rgbB = rgbA * (1.0/2.0) + (1.0/4.0) * (
texture2D(buf0, texCoords.xy + dir * (0.0/3.0 - 0.5)).xyz +
texture2D(buf0, texCoords.xy + dir * (3.0/3.0 - 0.5)).xyz);
float lumaB = dot(rgbB, luma);
vec4 return_color;
if((lumaB < lumaMin) || (lumaB > lumaMax)){
return_color = vec4(rgbA, color.w);
}else{
return_color = vec4(rgbB, color.w);
}
return return_color;
}
'''
class EffectBase(EventDispatcher):
'''The base class for GLSL effects. It simply returns its input.
See the module documentation for more details.
'''
glsl = StringProperty(effect_trivial)
'''The glsl string defining your effect function. See the
module documentation for more details.
:attr:`glsl` is a :class:`~kivy.properties.StringProperty` and
defaults to
a trivial effect that returns its input.
'''
source = StringProperty('')
'''The (optional) filename from which to load the :attr:`glsl`
string.
:attr:`source` is a :class:`~kivy.properties.StringProperty` and
defaults to ''.
'''
fbo = ObjectProperty(None, allownone=True)
'''The fbo currently using this effect. The :class:`EffectBase`
automatically handles this.
:attr:`fbo` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def __init__(self, *args, **kwargs):
super(EffectBase, self).__init__(*args, **kwargs)
fbind = self.fbind
fbo_shader = self.set_fbo_shader
fbind('fbo', fbo_shader)
fbind('glsl', fbo_shader)
fbind('source', self._load_from_source)
def set_fbo_shader(self, *args):
'''Sets the :class:`~kivy.graphics.Fbo`'s shader by splicing
the :attr:`glsl` string into a full fragment shader.
The full shader is made up of :code:`shader_header +
shader_uniforms + self.glsl + shader_footer_effect`.
'''
if self.fbo is None:
return
self.fbo.set_fs(shader_header + shader_uniforms + self.glsl +
shader_footer_effect)
def _load_from_source(self, *args):
'''(internal) Loads the glsl string from a source file.'''
source = self.source
if not source:
return
filename = resource_find(source)
if filename is None:
return Logger.error('Error reading file {filename}'.
format(filename=source))
with open(filename) as fileh:
self.glsl = fileh.read()
class AdvancedEffectBase(EffectBase):
'''An :class:`EffectBase` with additional behavior to easily
set and update uniform variables in your shader.
This class is provided for convenience when implementing your own
effects: it is not used by any of those provided with Kivy.
In addition to your base glsl string that must be provided as
normal, the :class:`AdvancedEffectBase` has an extra property
:attr:`uniforms`, a dictionary of name-value pairs. Whenever
a value is changed, the new value for the uniform variable is
uploaded to the shader.
You must still manually declare your uniform variables at the top
of your glsl string.
'''
uniforms = DictProperty({})
'''A dictionary of uniform variable names and their values. These
are automatically uploaded to the :attr:`fbo` shader if appropriate.
uniforms is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
def __init__(self, *args, **kwargs):
super(AdvancedEffectBase, self).__init__(*args, **kwargs)
self.fbind('uniforms', self._update_uniforms)
def _update_uniforms(self, *args):
if self.fbo is None:
return
for key, value in self.uniforms.items():
self.fbo[key] = value
def set_fbo_shader(self, *args):
super(AdvancedEffectBase, self).set_fbo_shader(*args)
self._update_uniforms()
class MonochromeEffect(EffectBase):
'''Returns its input colors in monochrome.'''
def __init__(self, *args, **kwargs):
super(MonochromeEffect, self).__init__(*args, **kwargs)
self.glsl = effect_monochrome
class InvertEffect(EffectBase):
'''Inverts the colors in the input.'''
def __init__(self, *args, **kwargs):
super(InvertEffect, self).__init__(*args, **kwargs)
self.glsl = effect_invert
class ScanlinesEffect(EffectBase):
'''Adds scanlines to the input.'''
def __init__(self, *args, **kwargs):
super(ScanlinesEffect, self).__init__(*args, **kwargs)
self.glsl = effect_postprocessing
class ChannelMixEffect(EffectBase):
'''Mixes the color channels of the input according to the order
property. Channels may be arbitrarily rearranged or repeated.'''
order = ListProperty([1, 2, 0])
'''The new sorted order of the rgb channels.
order is a :class:`~kivy.properties.ListProperty` and defaults to
[1, 2, 0], corresponding to (g, b, r).
'''
def __init__(self, *args, **kwargs):
super(ChannelMixEffect, self).__init__(*args, **kwargs)
self.do_glsl()
def on_order(self, *args):
self.do_glsl()
def do_glsl(self):
letters = [{0: 'x', 1: 'y', 2: 'z'}[i] for i in self.order]
self.glsl = effect_mix.format(*letters)
class PixelateEffect(EffectBase):
'''Pixelates the input according to its
:attr:`~PixelateEffect.pixel_size`'''
pixel_size = NumericProperty(10)
'''
Sets the size of a new 'pixel' in the effect, in terms of number of
'real' pixels.
pixel_size is a :class:`~kivy.properties.NumericProperty` and
defaults to 10.
'''
def __init__(self, *args, **kwargs):
super(PixelateEffect, self).__init__(*args, **kwargs)
self.do_glsl()
def on_pixel_size(self, *args):
self.do_glsl()
def do_glsl(self):
self.glsl = effect_pixelate.format(float(self.pixel_size))
class HorizontalBlurEffect(EffectBase):
'''Blurs the input horizontally, with the width given by
:attr:`~HorizontalBlurEffect.size`.'''
size = NumericProperty(4.0)
'''The blur width in pixels.
size is a :class:`~kivy.properties.NumericProperty` and defaults to
4.0.
'''
def __init__(self, *args, **kwargs):
super(HorizontalBlurEffect, self).__init__(*args, **kwargs)
self.do_glsl()
def on_size(self, *args):
self.do_glsl()
def do_glsl(self):
self.glsl = effect_blur_h.format(float(self.size))
class VerticalBlurEffect(EffectBase):
'''Blurs the input vertically, with the width given by
:attr:`~VerticalBlurEffect.size`.'''
size = NumericProperty(4.0)
'''The blur width in pixels.
size is a :class:`~kivy.properties.NumericProperty` and defaults to
4.0.
'''
def __init__(self, *args, **kwargs):
super(VerticalBlurEffect, self).__init__(*args, **kwargs)
self.do_glsl()
def on_size(self, *args):
self.do_glsl()
def do_glsl(self):
self.glsl = effect_blur_v.format(float(self.size))
class FXAAEffect(EffectBase):
'''Applies very simple anti-aliasing via fxaa.'''
def __init__(self, *args, **kwargs):
super(FXAAEffect, self).__init__(*args, **kwargs)
self.glsl = effect_fxaa
class EffectFbo(Fbo):
'''An :class:`~kivy.graphics.Fbo` with extra functionality that allows
attempts to set a new shader. See :meth:`set_fs`.
'''
def __init__(self, *args, **kwargs):
super(EffectFbo, self).__init__(*args, **kwargs)
self.texture_rectangle = None
def set_fs(self, value):
'''Attempt to set the fragment shader to the given value.
If setting the shader fails, the existing one is preserved and an
exception is raised.
'''
shader = self.shader
old_value = shader.fs
shader.fs = value
if not shader.success:
shader.fs = old_value
raise Exception('Setting new shader failed.')
class EffectWidget(RelativeLayout):
'''
Widget with the ability to apply a series of graphical effects to
its children. See the module documentation for more information on
setting effects and creating your own.
'''
background_color = ListProperty((0, 0, 0, 0))
'''This defines the background color to be used for the fbo in the
EffectWidget.
    :attr:`background_color` is a :class:`ListProperty` and defaults to
(0, 0, 0, 0)
'''
texture = ObjectProperty(None)
'''The output texture of the final :class:`~kivy.graphics.Fbo` after
all effects have been applied.
texture is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
effects = ListProperty([])
'''List of all the effects to be applied. These should all be
instances or subclasses of :class:`EffectBase`.
effects is a :class:`ListProperty` and defaults to [].
'''
fbo_list = ListProperty([])
'''(internal) List of all the fbos that are being used to apply
the effects.
fbo_list is a :class:`ListProperty` and defaults to [].
'''
_bound_effects = ListProperty([])
'''(internal) List of effect classes that have been given an fbo to
manage. This is necessary so that the fbo can be removed if the
effect is no longer in use.
_bound_effects is a :class:`ListProperty` and defaults to [].
'''
def __init__(self, **kwargs):
# Make sure opengl context exists
EventLoop.ensure_window()
self.canvas = RenderContext(use_parent_projection=True,
use_parent_modelview=True)
with self.canvas:
self.fbo = Fbo(size=self.size)
with self.fbo.before:
PushMatrix()
with self.fbo:
ClearColor(0, 0, 0, 0)
ClearBuffers()
self._background_color = Color(*self.background_color)
self.fbo_rectangle = Rectangle(size=self.size)
with self.fbo.after:
PopMatrix()
super(EffectWidget, self).__init__(**kwargs)
Clock.schedule_interval(self._update_glsl, 0)
fbind = self.fbind
fbo_setup = self.refresh_fbo_setup
fbind('size', fbo_setup)
fbind('effects', fbo_setup)
fbind('background_color', self._refresh_background_color)
self.refresh_fbo_setup()
        self._refresh_background_color()  # In case this was changed in kwargs
def _refresh_background_color(self, *args):
self._background_color.rgba = self.background_color
def _update_glsl(self, *largs):
'''(internal) Passes new time and resolution uniform
variables to the shader.
'''
time = Clock.get_boottime()
resolution = [float(size) for size in self.size]
self.canvas['time'] = time
self.canvas['resolution'] = resolution
for fbo in self.fbo_list:
fbo['time'] = time
fbo['resolution'] = resolution
def refresh_fbo_setup(self, *args):
'''(internal) Creates and assigns one :class:`~kivy.graphics.Fbo`
per effect, and makes sure all sizes etc. are correct and
consistent.
'''
# Add/remove fbos until there is one per effect
while len(self.fbo_list) < len(self.effects):
with self.canvas:
new_fbo = EffectFbo(size=self.size)
with new_fbo:
ClearColor(0, 0, 0, 0)
ClearBuffers()
Color(1, 1, 1, 1)
new_fbo.texture_rectangle = Rectangle(size=self.size)
new_fbo.texture_rectangle.size = self.size
self.fbo_list.append(new_fbo)
while len(self.fbo_list) > len(self.effects):
old_fbo = self.fbo_list.pop()
self.canvas.remove(old_fbo)
# Remove fbos from unused effects
for effect in self._bound_effects:
if effect not in self.effects:
effect.fbo = None
self._bound_effects = self.effects
# Do resizing etc.
self.fbo.size = self.size
self.fbo_rectangle.size = self.size
for i in range(len(self.fbo_list)):
self.fbo_list[i].size = self.size
self.fbo_list[i].texture_rectangle.size = self.size
# If there are no effects, just draw our main fbo
if len(self.fbo_list) == 0:
self.texture = self.fbo.texture
return
for i in range(1, len(self.fbo_list)):
fbo = self.fbo_list[i]
fbo.texture_rectangle.texture = self.fbo_list[i - 1].texture
# Build effect shaders
for effect, fbo in zip(self.effects, self.fbo_list):
effect.fbo = fbo
self.fbo_list[0].texture_rectangle.texture = self.fbo.texture
self.texture = self.fbo_list[-1].texture
for fbo in self.fbo_list:
fbo.draw()
self.fbo.draw()
def add_widget(self, widget):
# Add the widget to our Fbo instead of the normal canvas
c = self.canvas
self.canvas = self.fbo
super(EffectWidget, self).add_widget(widget)
self.canvas = c
def remove_widget(self, widget):
# Remove the widget from our Fbo instead of the normal canvas
c = self.canvas
self.canvas = self.fbo
super(EffectWidget, self).remove_widget(widget)
self.canvas = c
def clear_widgets(self, children=None):
# Clear widgets from our Fbo instead of the normal canvas
c = self.canvas
self.canvas = self.fbo
super(EffectWidget, self).clear_widgets(children)
self.canvas = c
|
ask-compu/python-twitter
|
refs/heads/master
|
simplejson/encoder.py
|
138
|
"""Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation
    (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif _skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
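

if __name__ == '__main__':
    # Editor's note: a small, hedged usage sketch appended for illustration
    # only -- it is not part of the original simplejson module. It exercises
    # the JSONEncoder defined above: subclassing ``default()`` as described in
    # the class docstring, plus the ``sort_keys`` and ``separators`` options.
    class SetEncoder(JSONEncoder):
        def default(self, o):
            # Serialize sets as sorted lists; defer everything else upward.
            if isinstance(o, set):
                return sorted(o)
            return JSONEncoder.default(self, o)

    assert JSONEncoder(sort_keys=True).encode({'b': 1, 'a': [True, None]}) == '{"a": [true, null], "b": 1}'
    assert SetEncoder().encode({'tags': set(['json', 'encoder'])}) == '{"tags": ["encoder", "json"]}'
    # Compact separators drop the whitespace, as documented in __init__.
    assert JSONEncoder(separators=(',', ':')).encode({'a': 1}) == '{"a":1}'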
|
Antiun/odoo
|
refs/heads/8.0
|
openerp/tests/addons/test_translation_import/__openerp__.py
|
352
|
# -*- coding: utf-8 -*-
{
'name': 'test-translation-import',
'version': '0.1',
'category': 'Tests',
'description': """A module to test translation import.""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['view.xml'],
'test': ['tests.yml'],
'installable': True,
'auto_install': False,
}
|
ytlei/808X-mid-proj
|
refs/heads/master
|
vendor/googletest/googletest/scripts/upload_gtest.py
|
1963
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
|
Kaik541/kernel_lge_gee
|
refs/heads/jb-devel
|
tools/perf/python/twatch.py
|
7370
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
ccowmu/whatistheplan.com
|
refs/heads/master
|
tests/test_routes.py
|
1
|
from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
class RoutesTest(TestCase):
def setUp(self):
self.client = Client()
self.logged_in_client = Client()
self.user = User.objects.create_user("testuser", "test@email.com", "test_password")
self.logged_in_client.login(username="testuser", password="test_password")
def test_home_route(self):
"""Home returns 200"""
response = self.client.get(reverse('Home'))
self.assertEqual(response.status_code, 200)
def test_events_route(self):
"""Events returns 200"""
response = self.client.get(reverse('Events'))
self.assertEqual(response.status_code, 200)
def test_about_route(self):
"""About returns 200"""
response = self.client.get(reverse('About'))
self.assertEqual(response.status_code, 200)
    def test_twitch_route(self):
        """Twitch returns 200"""
        response = self.client.get(reverse('Twitch'))
        self.assertEqual(response.status_code, 200)
def test_sign_up_route(self):
"""Sign Up returns 200"""
response = self.client.get(reverse('Sign Up'))
self.assertEqual(response.status_code, 200)
def test_log_in_route(self):
"""Log in returns 200"""
response = self.client.get(reverse('Log In'))
self.assertEqual(response.status_code, 200)
def test_log_out_route_for_logged_in_user(self):
"""Log Out redirects home for a logged in user"""
response = self.logged_in_client.get(reverse('Log Out'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://testserver/')
def tearDown(self):
self.user.delete()
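

# Editor's note: an illustrative, hedged sketch -- not part of the original
# test module and not this project's real URLconf. It only shows how named
# routes like the ones exercised with reverse() above are typically declared;
# the placeholder view and patterns below are hypothetical and assume a
# Django 1.8+ style list-based urlpatterns.
from django.conf.urls import url
from django.http import HttpResponse


def _placeholder_view(request):
    return HttpResponse("ok")


_example_urlpatterns = [
    url(r'^$', _placeholder_view, name='Home'),
    url(r'^events/$', _placeholder_view, name='Events'),
    url(r'^about/$', _placeholder_view, name='About'),
]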
|
elba7r/lite-system
|
refs/heads/master
|
erpnext/patches/v5_4/notify_system_managers_regarding_wrong_tax_calculation.py
|
45
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.email import sendmail_to_system_managers
from frappe.utils import get_link_to_form
def execute():
wrong_records = []
for dt in ("Quotation", "Sales Order", "Delivery Note", "Sales Invoice",
"Purchase Order", "Purchase Receipt", "Purchase Invoice"):
records = frappe.db.sql_list("""select name from `tab{0}`
where apply_discount_on = 'Net Total' and ifnull(discount_amount, 0) != 0
and modified >= '2015-02-17' and docstatus=1""".format(dt))
if records:
records = [get_link_to_form(dt, d) for d in records]
wrong_records.append([dt, records])
if wrong_records:
content = """Dear System Manager,
Due to an error related to Discount Amount on Net Total, tax calculation might be wrong in the following records. We did not fix the tax amount automatically because it can corrupt the entries, so we request you to check these records and amend if you found the calculation wrong.
Please check following Entries:
%s
Regards,
Administrator""" % "\n".join([(d[0] + ": " + ", ".join(d[1])) for d in wrong_records])
try:
sendmail_to_system_managers("[Important] [ERPNext] Tax calculation might be wrong, please check.", content)
except:
pass
print "="*50
print content
print "="*50
|
mammique/django
|
refs/heads/tp_alpha
|
tests/regressiontests/null_fk/tests.py
|
118
|
from __future__ import absolute_import, unicode_literals
from django.db.models import Q
from django.test import TestCase
from .models import (SystemDetails, Item, PropertyValue, SystemInfo, Forum,
Post, Comment)
class NullFkTests(TestCase):
def test_null_fk(self):
d = SystemDetails.objects.create(details='First details')
s = SystemInfo.objects.create(system_name='First forum', system_details=d)
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
c1 = Comment.objects.create(post=p, comment_text='My first comment')
c2 = Comment.objects.create(comment_text='My second comment')
# Starting from comment, make sure that a .select_related(...) with a specified
# set of fields will properly LEFT JOIN multiple levels of NULLs (and the things
# that come after the NULLs, or else data that should exist won't). Regression
# test for #7369.
c = Comment.objects.select_related().get(id=c1.id)
self.assertEqual(c.post, p)
self.assertEqual(Comment.objects.select_related().get(id=c2.id).post, None)
self.assertQuerysetEqual(
Comment.objects.select_related('post__forum__system_info').all(),
[
(c1.id, 'My first comment', '<Post: First Post>'),
(c2.id, 'My second comment', 'None')
],
transform = lambda c: (c.id, c.comment_text, repr(c.post))
)
# Regression test for #7530, #7716.
self.assertTrue(Comment.objects.select_related('post').filter(post__isnull=True)[0].post is None)
self.assertQuerysetEqual(
Comment.objects.select_related('post__forum__system_info__system_details'),
[
(c1.id, 'My first comment', '<Post: First Post>'),
(c2.id, 'My second comment', 'None')
],
transform = lambda c: (c.id, c.comment_text, repr(c.post))
)
def test_combine_isnull(self):
item = Item.objects.create(title='Some Item')
pv = PropertyValue.objects.create(label='Some Value')
item.props.create(key='a', value=pv)
item.props.create(key='b') # value=NULL
q1 = Q(props__key='a', props__value=pv)
q2 = Q(props__key='b', props__value__isnull=True)
# Each of these individually should return the item.
self.assertEqual(Item.objects.get(q1), item)
self.assertEqual(Item.objects.get(q2), item)
# Logically, qs1 and qs2, and qs3 and qs4 should be the same.
qs1 = Item.objects.filter(q1) & Item.objects.filter(q2)
qs2 = Item.objects.filter(q2) & Item.objects.filter(q1)
qs3 = Item.objects.filter(q1) | Item.objects.filter(q2)
qs4 = Item.objects.filter(q2) | Item.objects.filter(q1)
# Regression test for #15823.
self.assertEqual(list(qs1), list(qs2))
self.assertEqual(list(qs3), list(qs4))
|
NMGRL/pychron
|
refs/heads/develop
|
pychron/github.py
|
2
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
from __future__ import absolute_import
from __future__ import print_function
import base64
import requests
# ============= local library imports ==========================
from pychron import json
from pychron.core.helpers.datetime_tools import format_iso_datetime
GITHUB_API_URL = 'https://api.github.com'
def make_request(r):
return '{}{}'.format(GITHUB_API_URL, r)
def get_list(cmd, attr='name', headers=None):
if headers is None:
headers = {}
cmd = make_request(cmd)
with requests.Session() as s:
def _rget(ci):
r = s.get(ci, headers=headers)
result = r.json()
if attr:
result = [di[attr] for di in result]
            try:
                dd = _rget(r.links['next']['url'])
            except KeyError:
                return result
            # _rget has already applied the ``attr`` projection to ``dd``,
            # so extend the accumulated result directly instead of
            # projecting it a second time.
            result.extend(dd)
            return result
return _rget(cmd)
# return [item[attr] for item in json.loads(doc.text)]
def get_branches(name):
cmd = '/repos/{}/branches'.format(name)
return get_list(cmd)
def get_tags(name):
cmd = '/repos/{}/tags'.format(name)
return get_list(cmd)
def get_organization_repositiories(name, attr='name'):
cmd = '/orgs/{}/repos'.format(name)
return get_list(cmd, attr=attr)
def create_organization_repository(org, name, usr, pwd, **kw):
cmd = '/orgs/{}/repos'.format(org)
cmd = make_request(cmd)
payload = {'name': name}
payload.update(**kw)
auth = base64.encodestring('{}:{}'.format(usr, pwd)).replace('\n', '')
headers = {"Authorization": "Basic {}".format(auth)}
r = requests.post(cmd, data=json.dumps(payload), headers=headers)
print(cmd, payload, usr, pwd)
print(r)
return r
class GithubObject(object):
def __init__(self, usr='', pwd='', oauth_token=None):
self._pwd = pwd
self._usr = usr
self._oauth_token = oauth_token
def _make_headers(self, auth=True):
headers = {}
if auth:
if self._oauth_token:
auth = 'token {}'.format(self._oauth_token)
else:
auth = base64.encodestring('{}:{}'.format(self._usr, self._pwd)).replace('\n', '')
auth = 'Basic {}'.format(auth)
headers['Authorization'] = auth
print(headers)
return headers
def _process_post(self, po):
pass
class RepositoryRecord:
pass
class Organization(GithubObject):
def __init__(self, name, *args, **kw):
self._name = name
super(Organization, self).__init__(*args, **kw)
@property
def base_cmd(self):
return '/orgs/{}'.format(self._name)
@property
def repo_names(self):
return sorted([repo['name'] for repo in self.get_repos()])
@property
def info(self):
cmd = make_request(self.base_cmd)
doc = requests.get(cmd)
return json.loads(doc.text)
def repos(self, attributes):
return [self._repo_factory(ri, attributes) for ri in self.get_repos()]
def get_repos(self):
cmd = '/orgs/{}/repos'.format(self._name)
r = get_list(cmd, attr=None, headers=self._make_headers())
# r = get_organization_repositiories(self._name, attr=None)
if not isinstance(r, list):
r = []
return r
def has_repo(self, name):
return name in self.repo_names
def create_repo(self, name, usr, pwd, **payload):
return create_organization_repository(self._name, name, usr, pwd, **payload)
# cmd = make_request(self.base_cmd)
# payload['name'] = name
#
# headers = self._make_headers(auth=True)
# r = requests.post(cmd, data=json.dumps(payload), headers=headers)
# self._process_post(r)
def _repo_factory(self, ri, attributes):
repo = RepositoryRecord()
date_attrs = ('pushed_at', 'created_at')
for ai in attributes:
v = ri[ai]
            if ai in date_attrs:
                # format the attribute's value, not its name
                v = format_iso_datetime(v)
setattr(repo, ai, v)
return repo
if __name__ == '__main__':
with open('/Users/ross/Programming/githubauth.txt') as rfile:
usr = rfile.readline().strip()
pwd = rfile.readline().strip()
# print get_organization_repositiories('NMGRL')
org = Organization('NMGRLData', usr, pwd)
print(org.repo_names, len(org.repo_names))
# print org.create_repo('test2', auto_init=True)
# print org.repos, len(org.repos)
# ============= EOF =============================================
|
tangfeixiong/nova
|
refs/heads/stable/juno
|
nova/tests/unit/virt/test_configdrive.py
|
46
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from nova import test
from nova.virt import configdrive
class ConfigDriveTestCase(test.NoDBTestCase):
def test_valid_string_values(self):
for value in (strutils.TRUE_STRINGS + ('always',)):
self.flags(force_config_drive=value)
self.assertTrue(configdrive.required_by({}))
def test_invalid_string_values(self):
for value in (strutils.FALSE_STRINGS + ('foo',)):
self.flags(force_config_drive=value)
self.assertFalse(configdrive.required_by({}))
|
carlomt/dicom_tools
|
refs/heads/master
|
dicom_tools/pyqtgraph/opengl/glInfo.py
|
50
|
from ..Qt import QtCore, QtGui, QtOpenGL
from OpenGL.GL import *
app = QtGui.QApplication([])
class GLTest(QtOpenGL.QGLWidget):
def __init__(self):
QtOpenGL.QGLWidget.__init__(self)
self.makeCurrent()
print("GL version:" + glGetString(GL_VERSION))
print("MAX_TEXTURE_SIZE: %d" % glGetIntegerv(GL_MAX_TEXTURE_SIZE))
print("MAX_3D_TEXTURE_SIZE: %d" % glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE))
print("Extensions: " + glGetString(GL_EXTENSIONS))
GLTest()
|
vadimkantorov/wigwam
|
refs/heads/master
|
wigs/gdb.py
|
1
|
class gdb(Wig):
tarball_uri = 'http://ftp.gnu.org/gnu/gdb/gdb-{RELEASE_VERSION}.tar.gz'
last_release_version = '7.12'
dependencies = ['texinfo']
|
pgmillon/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_eip.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_eip
short_description: manages EC2 elastic IP (EIP) addresses.
description:
- This module can allocate or release an EIP.
- This module can associate/disassociate an EIP with instances or network interfaces.
version_added: "1.4"
options:
device_id:
description:
- The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id.
required: false
aliases: [ instance_id ]
version_added: "2.0"
public_ip:
description:
- The IP address of a previously allocated EIP.
- If present and device is specified, the EIP is associated with the device.
- If absent and device is specified, the EIP is disassociated from the device.
aliases: [ ip ]
state:
description:
- If present, allocate an EIP or associate an existing EIP with a device.
- If absent, disassociate the EIP from the device and optionally release it.
choices: ['present', 'absent']
default: present
in_vpc:
description:
- Allocate an EIP inside a VPC or not. Required if specifying an ENI.
default: 'no'
type: bool
version_added: "1.4"
reuse_existing_ip_allowed:
description:
- Reuse an EIP that is not associated to a device (when available), instead of allocating a new one.
default: 'no'
type: bool
version_added: "1.6"
release_on_disassociation:
description:
- whether or not to automatically release the EIP when it is disassociated
default: 'no'
type: bool
version_added: "2.0"
private_ip_address:
description:
- The primary or secondary private IP address to associate with the Elastic IP address.
version_added: "2.3"
allow_reassociation:
description:
- Specify this option to allow an Elastic IP address that is already associated with another
network interface or instance to be re-associated with the specified instance or interface.
default: 'no'
type: bool
version_added: "2.5"
extends_documentation_fragment:
- aws
- ec2
author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
notes:
- There may be a delay between the time the EIP is assigned and when
the cloud instance is reachable via the new address. Use wait_for and
pause to delay further playbook execution until the instance is reachable,
if necessary.
- This module returns multiple changed statuses on disassociation or release.
It returns an overall status based on any changes occurring. It also returns
individual changed statuses for disassociation and release.
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: associate an elastic IP with an instance
ec2_eip:
device_id: i-1212f003
ip: 93.184.216.119
- name: associate an elastic IP with a device
ec2_eip:
device_id: eni-c8ad70f3
ip: 93.184.216.119
- name: associate an elastic IP with a device and allow reassociation
ec2_eip:
device_id: eni-c8ad70f3
public_ip: 93.184.216.119
allow_reassociation: yes
- name: disassociate an elastic IP from an instance
ec2_eip:
device_id: i-1212f003
ip: 93.184.216.119
state: absent
- name: disassociate an elastic IP with a device
ec2_eip:
device_id: eni-c8ad70f3
ip: 93.184.216.119
state: absent
- name: allocate a new elastic IP and associate it with an instance
ec2_eip:
device_id: i-1212f003
- name: allocate a new elastic IP without associating it to anything
ec2_eip:
state: present
register: eip
- name: output the IP
debug:
msg: "Allocated IP is {{ eip.public_ip }}"
- name: provision new instances with ec2
ec2:
keypair: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
count: 3
register: ec2
- name: associate new elastic IPs with each of the instances
ec2_eip:
device_id: "{{ item }}"
loop: "{{ ec2.instance_ids }}"
- name: allocate a new elastic IP inside a VPC in us-west-2
ec2_eip:
region: us-west-2
in_vpc: yes
register: eip
- name: output the IP
debug:
msg: "Allocated IP inside a VPC is {{ eip.public_ip }}"
'''
RETURN = '''
allocation_id:
description: allocation_id of the elastic ip
returned: on success
type: str
sample: eipalloc-51aa3a6c
public_ip:
description: an elastic ip address
returned: on success
type: str
sample: 52.88.159.209
'''
try:
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
class EIPException(Exception):
pass
def associate_ip_and_device(ec2, address, private_ip_address, device_id, allow_reassociation, check_mode, isinstance=True):
if address_is_associated_with_device(ec2, address, device_id, isinstance):
return {'changed': False}
# If we're in check mode, nothing else to do
if not check_mode:
if isinstance:
if address.domain == "vpc":
res = ec2.associate_address(device_id,
allocation_id=address.allocation_id,
private_ip_address=private_ip_address,
allow_reassociation=allow_reassociation)
else:
res = ec2.associate_address(device_id,
public_ip=address.public_ip,
private_ip_address=private_ip_address,
allow_reassociation=allow_reassociation)
else:
res = ec2.associate_address(network_interface_id=device_id,
allocation_id=address.allocation_id,
private_ip_address=private_ip_address,
allow_reassociation=allow_reassociation)
if not res:
raise EIPException('association failed')
return {'changed': True}
def disassociate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True):
if not address_is_associated_with_device(ec2, address, device_id, isinstance):
return {'changed': False}
# If we're in check mode, nothing else to do
if not check_mode:
if address.domain == 'vpc':
res = ec2.disassociate_address(
association_id=address.association_id)
else:
res = ec2.disassociate_address(public_ip=address.public_ip)
if not res:
raise EIPException('disassociation failed')
return {'changed': True}
def _find_address_by_ip(ec2, public_ip):
try:
return ec2.get_all_addresses([public_ip])[0]
except boto.exception.EC2ResponseError as e:
if "Address '{0}' not found.".format(public_ip) not in e.message:
raise
def _find_address_by_device_id(ec2, device_id, isinstance=True):
if isinstance:
addresses = ec2.get_all_addresses(None, {'instance-id': device_id})
else:
addresses = ec2.get_all_addresses(None, {'network-interface-id': device_id})
if addresses:
return addresses[0]
def find_address(ec2, public_ip, device_id, isinstance=True):
""" Find an existing Elastic IP address """
if public_ip:
return _find_address_by_ip(ec2, public_ip)
elif device_id and isinstance:
return _find_address_by_device_id(ec2, device_id)
elif device_id:
return _find_address_by_device_id(ec2, device_id, isinstance=False)
def address_is_associated_with_device(ec2, address, device_id, isinstance=True):
""" Check if the elastic IP is currently associated with the device """
address = ec2.get_all_addresses(address.public_ip)
if address:
if isinstance:
return address and address[0].instance_id == device_id
else:
return address and address[0].network_interface_id == device_id
return False
def allocate_address(ec2, domain, reuse_existing_ip_allowed):
""" Allocate a new elastic IP address (when needed) and return it """
if reuse_existing_ip_allowed:
domain_filter = {'domain': domain or 'standard'}
all_addresses = ec2.get_all_addresses(filters=domain_filter)
if domain == 'vpc':
unassociated_addresses = [a for a in all_addresses
if not a.association_id]
else:
unassociated_addresses = [a for a in all_addresses
if not a.instance_id]
if unassociated_addresses:
return unassociated_addresses[0], False
return ec2.allocate_address(domain=domain), True
def release_address(ec2, address, check_mode):
""" Release a previously allocated elastic IP address """
# If we're in check mode, nothing else to do
if not check_mode:
if not address.release():
raise EIPException('release failed')
return {'changed': True}
def find_device(ec2, module, device_id, isinstance=True):
""" Attempt to find the EC2 instance and return it """
if isinstance:
try:
reservations = ec2.get_all_reservations(instance_ids=[device_id])
except boto.exception.EC2ResponseError as e:
module.fail_json(msg=str(e))
if len(reservations) == 1:
instances = reservations[0].instances
if len(instances) == 1:
return instances[0]
else:
try:
interfaces = ec2.get_all_network_interfaces(network_interface_ids=[device_id])
except boto.exception.EC2ResponseError as e:
module.fail_json(msg=str(e))
if len(interfaces) == 1:
return interfaces[0]
    raise EIPException("could not find instance " + device_id)
def ensure_present(ec2, module, domain, address, private_ip_address, device_id,
reuse_existing_ip_allowed, allow_reassociation, check_mode, isinstance=True):
changed = False
# Return the EIP object since we've been given a public IP
if not address:
if check_mode:
return {'changed': True}
address, changed = allocate_address(ec2, domain, reuse_existing_ip_allowed)
if device_id:
# Allocate an IP for instance since no public_ip was provided
if isinstance:
instance = find_device(ec2, module, device_id)
if reuse_existing_ip_allowed:
if instance.vpc_id and len(instance.vpc_id) > 0 and domain is None:
raise EIPException("You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc")
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_device(ec2, address, private_ip_address, device_id, allow_reassociation,
check_mode)
else:
instance = find_device(ec2, module, device_id, isinstance=False)
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_device(ec2, address, private_ip_address, device_id, allow_reassociation,
check_mode, isinstance=False)
if instance.vpc_id:
domain = 'vpc'
changed = changed or assoc_result['changed']
return {'changed': changed, 'public_ip': address.public_ip, 'allocation_id': address.allocation_id}
def ensure_absent(ec2, domain, address, device_id, check_mode, isinstance=True):
if not address:
return {'changed': False}
# disassociating address from instance
if device_id:
if isinstance:
return disassociate_ip_and_device(ec2, address, device_id,
check_mode)
else:
return disassociate_ip_and_device(ec2, address, device_id,
check_mode, isinstance=False)
# releasing address
else:
return release_address(ec2, address, check_mode)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
device_id=dict(required=False, aliases=['instance_id']),
public_ip=dict(required=False, aliases=['ip']),
state=dict(required=False, default='present',
choices=['present', 'absent']),
in_vpc=dict(required=False, type='bool', default=False),
reuse_existing_ip_allowed=dict(required=False, type='bool',
default=False),
release_on_disassociation=dict(required=False, type='bool', default=False),
allow_reassociation=dict(type='bool', default=False),
wait_timeout=dict(default=300, type='int'),
private_ip_address=dict(required=False, default=None, type='str')
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_by={
'private_ip_address': ['device_id'],
},
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
ec2 = ec2_connect(module)
device_id = module.params.get('device_id')
instance_id = module.params.get('instance_id')
public_ip = module.params.get('public_ip')
private_ip_address = module.params.get('private_ip_address')
state = module.params.get('state')
in_vpc = module.params.get('in_vpc')
domain = 'vpc' if in_vpc else None
reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
release_on_disassociation = module.params.get('release_on_disassociation')
allow_reassociation = module.params.get('allow_reassociation')
if instance_id:
warnings = ["instance_id is no longer used, please use device_id going forward"]
is_instance = True
device_id = instance_id
else:
if device_id and device_id.startswith('i-'):
is_instance = True
elif device_id:
if device_id.startswith('eni-') and not in_vpc:
module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
is_instance = False
try:
if device_id:
address = find_address(ec2, public_ip, device_id, isinstance=is_instance)
else:
address = find_address(ec2, public_ip, None)
if state == 'present':
if device_id:
result = ensure_present(ec2, module, domain, address, private_ip_address, device_id,
reuse_existing_ip_allowed, allow_reassociation,
module.check_mode, isinstance=is_instance)
else:
if address:
changed = False
else:
address, changed = allocate_address(ec2, domain, reuse_existing_ip_allowed)
result = {'changed': changed, 'public_ip': address.public_ip, 'allocation_id': address.allocation_id}
else:
if device_id:
disassociated = ensure_absent(ec2, domain, address, device_id, module.check_mode, isinstance=is_instance)
if release_on_disassociation and disassociated['changed']:
released = release_address(ec2, address, module.check_mode)
result = {'changed': True, 'disassociated': disassociated, 'released': released}
else:
result = {'changed': disassociated['changed'], 'disassociated': disassociated, 'released': {'changed': False}}
else:
released = release_address(ec2, address, module.check_mode)
result = {'changed': released['changed'], 'disassociated': {'changed': False}, 'released': released}
except (boto.exception.EC2ResponseError, EIPException) as e:
module.fail_json(msg=str(e))
if instance_id:
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
|
itabulous/mysql-connector-python
|
refs/heads/master
|
lib/mysql/connector/errorcode.py
|
14
|
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This file was auto-generated.
_GENERATED_ON = '2015-07-02'
_MYSQL_VERSION = (5, 7, 7)
"""This module contains the MySQL Server and Client error codes"""
# Start MySQL Errors
ER_HASHCHK = 1000
ER_NISAMCHK = 1001
ER_NO = 1002
ER_YES = 1003
ER_CANT_CREATE_FILE = 1004
ER_CANT_CREATE_TABLE = 1005
ER_CANT_CREATE_DB = 1006
ER_DB_CREATE_EXISTS = 1007
ER_DB_DROP_EXISTS = 1008
ER_DB_DROP_DELETE = 1009
ER_DB_DROP_RMDIR = 1010
ER_CANT_DELETE_FILE = 1011
ER_CANT_FIND_SYSTEM_REC = 1012
ER_CANT_GET_STAT = 1013
ER_CANT_GET_WD = 1014
ER_CANT_LOCK = 1015
ER_CANT_OPEN_FILE = 1016
ER_FILE_NOT_FOUND = 1017
ER_CANT_READ_DIR = 1018
ER_CANT_SET_WD = 1019
ER_CHECKREAD = 1020
ER_DISK_FULL = 1021
ER_DUP_KEY = 1022
ER_ERROR_ON_CLOSE = 1023
ER_ERROR_ON_READ = 1024
ER_ERROR_ON_RENAME = 1025
ER_ERROR_ON_WRITE = 1026
ER_FILE_USED = 1027
ER_FILSORT_ABORT = 1028
ER_FORM_NOT_FOUND = 1029
ER_GET_ERRNO = 1030
ER_ILLEGAL_HA = 1031
ER_KEY_NOT_FOUND = 1032
ER_NOT_FORM_FILE = 1033
ER_NOT_KEYFILE = 1034
ER_OLD_KEYFILE = 1035
ER_OPEN_AS_READONLY = 1036
ER_OUTOFMEMORY = 1037
ER_OUT_OF_SORTMEMORY = 1038
ER_UNEXPECTED_EOF = 1039
ER_CON_COUNT_ERROR = 1040
ER_OUT_OF_RESOURCES = 1041
ER_BAD_HOST_ERROR = 1042
ER_HANDSHAKE_ERROR = 1043
ER_DBACCESS_DENIED_ERROR = 1044
ER_ACCESS_DENIED_ERROR = 1045
ER_NO_DB_ERROR = 1046
ER_UNKNOWN_COM_ERROR = 1047
ER_BAD_NULL_ERROR = 1048
ER_BAD_DB_ERROR = 1049
ER_TABLE_EXISTS_ERROR = 1050
ER_BAD_TABLE_ERROR = 1051
ER_NON_UNIQ_ERROR = 1052
ER_SERVER_SHUTDOWN = 1053
ER_BAD_FIELD_ERROR = 1054
ER_WRONG_FIELD_WITH_GROUP = 1055
ER_WRONG_GROUP_FIELD = 1056
ER_WRONG_SUM_SELECT = 1057
ER_WRONG_VALUE_COUNT = 1058
ER_TOO_LONG_IDENT = 1059
ER_DUP_FIELDNAME = 1060
ER_DUP_KEYNAME = 1061
ER_DUP_ENTRY = 1062
ER_WRONG_FIELD_SPEC = 1063
ER_PARSE_ERROR = 1064
ER_EMPTY_QUERY = 1065
ER_NONUNIQ_TABLE = 1066
ER_INVALID_DEFAULT = 1067
ER_MULTIPLE_PRI_KEY = 1068
ER_TOO_MANY_KEYS = 1069
ER_TOO_MANY_KEY_PARTS = 1070
ER_TOO_LONG_KEY = 1071
ER_KEY_COLUMN_DOES_NOT_EXITS = 1072
ER_BLOB_USED_AS_KEY = 1073
ER_TOO_BIG_FIELDLENGTH = 1074
ER_WRONG_AUTO_KEY = 1075
ER_READY = 1076
ER_NORMAL_SHUTDOWN = 1077
ER_GOT_SIGNAL = 1078
ER_SHUTDOWN_COMPLETE = 1079
ER_FORCING_CLOSE = 1080
ER_IPSOCK_ERROR = 1081
ER_NO_SUCH_INDEX = 1082
ER_WRONG_FIELD_TERMINATORS = 1083
ER_BLOBS_AND_NO_TERMINATED = 1084
ER_TEXTFILE_NOT_READABLE = 1085
ER_FILE_EXISTS_ERROR = 1086
ER_LOAD_INFO = 1087
ER_ALTER_INFO = 1088
ER_WRONG_SUB_KEY = 1089
ER_CANT_REMOVE_ALL_FIELDS = 1090
ER_CANT_DROP_FIELD_OR_KEY = 1091
ER_INSERT_INFO = 1092
ER_UPDATE_TABLE_USED = 1093
ER_NO_SUCH_THREAD = 1094
ER_KILL_DENIED_ERROR = 1095
ER_NO_TABLES_USED = 1096
ER_TOO_BIG_SET = 1097
ER_NO_UNIQUE_LOGFILE = 1098
ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099
ER_TABLE_NOT_LOCKED = 1100
ER_BLOB_CANT_HAVE_DEFAULT = 1101
ER_WRONG_DB_NAME = 1102
ER_WRONG_TABLE_NAME = 1103
ER_TOO_BIG_SELECT = 1104
ER_UNKNOWN_ERROR = 1105
ER_UNKNOWN_PROCEDURE = 1106
ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108
ER_UNKNOWN_TABLE = 1109
ER_FIELD_SPECIFIED_TWICE = 1110
ER_INVALID_GROUP_FUNC_USE = 1111
ER_UNSUPPORTED_EXTENSION = 1112
ER_TABLE_MUST_HAVE_COLUMNS = 1113
ER_RECORD_FILE_FULL = 1114
ER_UNKNOWN_CHARACTER_SET = 1115
ER_TOO_MANY_TABLES = 1116
ER_TOO_MANY_FIELDS = 1117
ER_TOO_BIG_ROWSIZE = 1118
ER_STACK_OVERRUN = 1119
ER_WRONG_OUTER_JOIN = 1120
ER_NULL_COLUMN_IN_INDEX = 1121
ER_CANT_FIND_UDF = 1122
ER_CANT_INITIALIZE_UDF = 1123
ER_UDF_NO_PATHS = 1124
ER_UDF_EXISTS = 1125
ER_CANT_OPEN_LIBRARY = 1126
ER_CANT_FIND_DL_ENTRY = 1127
ER_FUNCTION_NOT_DEFINED = 1128
ER_HOST_IS_BLOCKED = 1129
ER_HOST_NOT_PRIVILEGED = 1130
ER_PASSWORD_ANONYMOUS_USER = 1131
ER_PASSWORD_NOT_ALLOWED = 1132
ER_PASSWORD_NO_MATCH = 1133
ER_UPDATE_INFO = 1134
ER_CANT_CREATE_THREAD = 1135
ER_WRONG_VALUE_COUNT_ON_ROW = 1136
ER_CANT_REOPEN_TABLE = 1137
ER_INVALID_USE_OF_NULL = 1138
ER_REGEXP_ERROR = 1139
ER_MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
ER_NONEXISTING_GRANT = 1141
ER_TABLEACCESS_DENIED_ERROR = 1142
ER_COLUMNACCESS_DENIED_ERROR = 1143
ER_ILLEGAL_GRANT_FOR_TABLE = 1144
ER_GRANT_WRONG_HOST_OR_USER = 1145
ER_NO_SUCH_TABLE = 1146
ER_NONEXISTING_TABLE_GRANT = 1147
ER_NOT_ALLOWED_COMMAND = 1148
ER_SYNTAX_ERROR = 1149
ER_UNUSED1 = 1150
ER_UNUSED2 = 1151
ER_ABORTING_CONNECTION = 1152
ER_NET_PACKET_TOO_LARGE = 1153
ER_NET_READ_ERROR_FROM_PIPE = 1154
ER_NET_FCNTL_ERROR = 1155
ER_NET_PACKETS_OUT_OF_ORDER = 1156
ER_NET_UNCOMPRESS_ERROR = 1157
ER_NET_READ_ERROR = 1158
ER_NET_READ_INTERRUPTED = 1159
ER_NET_ERROR_ON_WRITE = 1160
ER_NET_WRITE_INTERRUPTED = 1161
ER_TOO_LONG_STRING = 1162
ER_TABLE_CANT_HANDLE_BLOB = 1163
ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
ER_UNUSED3 = 1165
ER_WRONG_COLUMN_NAME = 1166
ER_WRONG_KEY_COLUMN = 1167
ER_WRONG_MRG_TABLE = 1168
ER_DUP_UNIQUE = 1169
ER_BLOB_KEY_WITHOUT_LENGTH = 1170
ER_PRIMARY_CANT_HAVE_NULL = 1171
ER_TOO_MANY_ROWS = 1172
ER_REQUIRES_PRIMARY_KEY = 1173
ER_NO_RAID_COMPILED = 1174
ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
ER_KEY_DOES_NOT_EXITS = 1176
ER_CHECK_NO_SUCH_TABLE = 1177
ER_CHECK_NOT_IMPLEMENTED = 1178
ER_CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
ER_ERROR_DURING_COMMIT = 1180
ER_ERROR_DURING_ROLLBACK = 1181
ER_ERROR_DURING_FLUSH_LOGS = 1182
ER_ERROR_DURING_CHECKPOINT = 1183
ER_NEW_ABORTING_CONNECTION = 1184
ER_DUMP_NOT_IMPLEMENTED = 1185
ER_FLUSH_MASTER_BINLOG_CLOSED = 1186
ER_INDEX_REBUILD = 1187
ER_MASTER = 1188
ER_MASTER_NET_READ = 1189
ER_MASTER_NET_WRITE = 1190
ER_FT_MATCHING_KEY_NOT_FOUND = 1191
ER_LOCK_OR_ACTIVE_TRANSACTION = 1192
ER_UNKNOWN_SYSTEM_VARIABLE = 1193
ER_CRASHED_ON_USAGE = 1194
ER_CRASHED_ON_REPAIR = 1195
ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196
ER_TRANS_CACHE_FULL = 1197
ER_SLAVE_MUST_STOP = 1198
ER_SLAVE_NOT_RUNNING = 1199
ER_BAD_SLAVE = 1200
ER_MASTER_INFO = 1201
ER_SLAVE_THREAD = 1202
ER_TOO_MANY_USER_CONNECTIONS = 1203
ER_SET_CONSTANTS_ONLY = 1204
ER_LOCK_WAIT_TIMEOUT = 1205
ER_LOCK_TABLE_FULL = 1206
ER_READ_ONLY_TRANSACTION = 1207
ER_DROP_DB_WITH_READ_LOCK = 1208
ER_CREATE_DB_WITH_READ_LOCK = 1209
ER_WRONG_ARGUMENTS = 1210
ER_NO_PERMISSION_TO_CREATE_USER = 1211
ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212
ER_LOCK_DEADLOCK = 1213
ER_TABLE_CANT_HANDLE_FT = 1214
ER_CANNOT_ADD_FOREIGN = 1215
ER_NO_REFERENCED_ROW = 1216
ER_ROW_IS_REFERENCED = 1217
ER_CONNECT_TO_MASTER = 1218
ER_QUERY_ON_MASTER = 1219
ER_ERROR_WHEN_EXECUTING_COMMAND = 1220
ER_WRONG_USAGE = 1221
ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
ER_CANT_UPDATE_WITH_READLOCK = 1223
ER_MIXING_NOT_ALLOWED = 1224
ER_DUP_ARGUMENT = 1225
ER_USER_LIMIT_REACHED = 1226
ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227
ER_LOCAL_VARIABLE = 1228
ER_GLOBAL_VARIABLE = 1229
ER_NO_DEFAULT = 1230
ER_WRONG_VALUE_FOR_VAR = 1231
ER_WRONG_TYPE_FOR_VAR = 1232
ER_VAR_CANT_BE_READ = 1233
ER_CANT_USE_OPTION_HERE = 1234
ER_NOT_SUPPORTED_YET = 1235
ER_MASTER_FATAL_ERROR_READING_BINLOG = 1236
ER_SLAVE_IGNORED_TABLE = 1237
ER_INCORRECT_GLOBAL_LOCAL_VAR = 1238
ER_WRONG_FK_DEF = 1239
ER_KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
ER_OPERAND_COLUMNS = 1241
ER_SUBQUERY_NO_1_ROW = 1242
ER_UNKNOWN_STMT_HANDLER = 1243
ER_CORRUPT_HELP_DB = 1244
ER_CYCLIC_REFERENCE = 1245
ER_AUTO_CONVERT = 1246
ER_ILLEGAL_REFERENCE = 1247
ER_DERIVED_MUST_HAVE_ALIAS = 1248
ER_SELECT_REDUCED = 1249
ER_TABLENAME_NOT_ALLOWED_HERE = 1250
ER_NOT_SUPPORTED_AUTH_MODE = 1251
ER_SPATIAL_CANT_HAVE_NULL = 1252
ER_COLLATION_CHARSET_MISMATCH = 1253
ER_SLAVE_WAS_RUNNING = 1254
ER_SLAVE_WAS_NOT_RUNNING = 1255
ER_TOO_BIG_FOR_UNCOMPRESS = 1256
ER_ZLIB_Z_MEM_ERROR = 1257
ER_ZLIB_Z_BUF_ERROR = 1258
ER_ZLIB_Z_DATA_ERROR = 1259
ER_CUT_VALUE_GROUP_CONCAT = 1260
ER_WARN_TOO_FEW_RECORDS = 1261
ER_WARN_TOO_MANY_RECORDS = 1262
ER_WARN_NULL_TO_NOTNULL = 1263
ER_WARN_DATA_OUT_OF_RANGE = 1264
WARN_DATA_TRUNCATED = 1265
ER_WARN_USING_OTHER_HANDLER = 1266
ER_CANT_AGGREGATE_2COLLATIONS = 1267
ER_DROP_USER = 1268
ER_REVOKE_GRANTS = 1269
ER_CANT_AGGREGATE_3COLLATIONS = 1270
ER_CANT_AGGREGATE_NCOLLATIONS = 1271
ER_VARIABLE_IS_NOT_STRUCT = 1272
ER_UNKNOWN_COLLATION = 1273
ER_SLAVE_IGNORED_SSL_PARAMS = 1274
ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275
ER_WARN_FIELD_RESOLVED = 1276
ER_BAD_SLAVE_UNTIL_COND = 1277
ER_MISSING_SKIP_SLAVE = 1278
ER_UNTIL_COND_IGNORED = 1279
ER_WRONG_NAME_FOR_INDEX = 1280
ER_WRONG_NAME_FOR_CATALOG = 1281
ER_WARN_QC_RESIZE = 1282
ER_BAD_FT_COLUMN = 1283
ER_UNKNOWN_KEY_CACHE = 1284
ER_WARN_HOSTNAME_WONT_WORK = 1285
ER_UNKNOWN_STORAGE_ENGINE = 1286
ER_WARN_DEPRECATED_SYNTAX = 1287
ER_NON_UPDATABLE_TABLE = 1288
ER_FEATURE_DISABLED = 1289
ER_OPTION_PREVENTS_STATEMENT = 1290
ER_DUPLICATED_VALUE_IN_TYPE = 1291
ER_TRUNCATED_WRONG_VALUE = 1292
ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
ER_INVALID_ON_UPDATE = 1294
ER_UNSUPPORTED_PS = 1295
ER_GET_ERRMSG = 1296
ER_GET_TEMPORARY_ERRMSG = 1297
ER_UNKNOWN_TIME_ZONE = 1298
ER_WARN_INVALID_TIMESTAMP = 1299
ER_INVALID_CHARACTER_STRING = 1300
ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301
ER_CONFLICTING_DECLARATIONS = 1302
ER_SP_NO_RECURSIVE_CREATE = 1303
ER_SP_ALREADY_EXISTS = 1304
ER_SP_DOES_NOT_EXIST = 1305
ER_SP_DROP_FAILED = 1306
ER_SP_STORE_FAILED = 1307
ER_SP_LILABEL_MISMATCH = 1308
ER_SP_LABEL_REDEFINE = 1309
ER_SP_LABEL_MISMATCH = 1310
ER_SP_UNINIT_VAR = 1311
ER_SP_BADSELECT = 1312
ER_SP_BADRETURN = 1313
ER_SP_BADSTATEMENT = 1314
ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315
ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
ER_QUERY_INTERRUPTED = 1317
ER_SP_WRONG_NO_OF_ARGS = 1318
ER_SP_COND_MISMATCH = 1319
ER_SP_NORETURN = 1320
ER_SP_NORETURNEND = 1321
ER_SP_BAD_CURSOR_QUERY = 1322
ER_SP_BAD_CURSOR_SELECT = 1323
ER_SP_CURSOR_MISMATCH = 1324
ER_SP_CURSOR_ALREADY_OPEN = 1325
ER_SP_CURSOR_NOT_OPEN = 1326
ER_SP_UNDECLARED_VAR = 1327
ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328
ER_SP_FETCH_NO_DATA = 1329
ER_SP_DUP_PARAM = 1330
ER_SP_DUP_VAR = 1331
ER_SP_DUP_COND = 1332
ER_SP_DUP_CURS = 1333
ER_SP_CANT_ALTER = 1334
ER_SP_SUBSELECT_NYI = 1335
ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
ER_SP_VARCOND_AFTER_CURSHNDLR = 1337
ER_SP_CURSOR_AFTER_HANDLER = 1338
ER_SP_CASE_NOT_FOUND = 1339
ER_FPARSER_TOO_BIG_FILE = 1340
ER_FPARSER_BAD_HEADER = 1341
ER_FPARSER_EOF_IN_COMMENT = 1342
ER_FPARSER_ERROR_IN_PARAMETER = 1343
ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
ER_VIEW_NO_EXPLAIN = 1345
ER_FRM_UNKNOWN_TYPE = 1346
ER_WRONG_OBJECT = 1347
ER_NONUPDATEABLE_COLUMN = 1348
ER_VIEW_SELECT_DERIVED_UNUSED = 1349
ER_VIEW_SELECT_CLAUSE = 1350
ER_VIEW_SELECT_VARIABLE = 1351
ER_VIEW_SELECT_TMPTABLE = 1352
ER_VIEW_WRONG_LIST = 1353
ER_WARN_VIEW_MERGE = 1354
ER_WARN_VIEW_WITHOUT_KEY = 1355
ER_VIEW_INVALID = 1356
ER_SP_NO_DROP_SP = 1357
ER_SP_GOTO_IN_HNDLR = 1358
ER_TRG_ALREADY_EXISTS = 1359
ER_TRG_DOES_NOT_EXIST = 1360
ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361
ER_TRG_CANT_CHANGE_ROW = 1362
ER_TRG_NO_SUCH_ROW_IN_TRG = 1363
ER_NO_DEFAULT_FOR_FIELD = 1364
ER_DIVISION_BY_ZERO = 1365
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
ER_ILLEGAL_VALUE_FOR_TYPE = 1367
ER_VIEW_NONUPD_CHECK = 1368
ER_VIEW_CHECK_FAILED = 1369
ER_PROCACCESS_DENIED_ERROR = 1370
ER_RELAY_LOG_FAIL = 1371
ER_PASSWD_LENGTH = 1372
ER_UNKNOWN_TARGET_BINLOG = 1373
ER_IO_ERR_LOG_INDEX_READ = 1374
ER_BINLOG_PURGE_PROHIBITED = 1375
ER_FSEEK_FAIL = 1376
ER_BINLOG_PURGE_FATAL_ERR = 1377
ER_LOG_IN_USE = 1378
ER_LOG_PURGE_UNKNOWN_ERR = 1379
ER_RELAY_LOG_INIT = 1380
ER_NO_BINARY_LOGGING = 1381
ER_RESERVED_SYNTAX = 1382
ER_WSAS_FAILED = 1383
ER_DIFF_GROUPS_PROC = 1384
ER_NO_GROUP_FOR_PROC = 1385
ER_ORDER_WITH_PROC = 1386
ER_LOGGING_PROHIBIT_CHANGING_OF = 1387
ER_NO_FILE_MAPPING = 1388
ER_WRONG_MAGIC = 1389
ER_PS_MANY_PARAM = 1390
ER_KEY_PART_0 = 1391
ER_VIEW_CHECKSUM = 1392
ER_VIEW_MULTIUPDATE = 1393
ER_VIEW_NO_INSERT_FIELD_LIST = 1394
ER_VIEW_DELETE_MERGE_VIEW = 1395
ER_CANNOT_USER = 1396
ER_XAER_NOTA = 1397
ER_XAER_INVAL = 1398
ER_XAER_RMFAIL = 1399
ER_XAER_OUTSIDE = 1400
ER_XAER_RMERR = 1401
ER_XA_RBROLLBACK = 1402
ER_NONEXISTING_PROC_GRANT = 1403
ER_PROC_AUTO_GRANT_FAIL = 1404
ER_PROC_AUTO_REVOKE_FAIL = 1405
ER_DATA_TOO_LONG = 1406
ER_SP_BAD_SQLSTATE = 1407
ER_STARTUP = 1408
ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
ER_CANT_CREATE_USER_WITH_GRANT = 1410
ER_WRONG_VALUE_FOR_TYPE = 1411
ER_TABLE_DEF_CHANGED = 1412
ER_SP_DUP_HANDLER = 1413
ER_SP_NOT_VAR_ARG = 1414
ER_SP_NO_RETSET = 1415
ER_CANT_CREATE_GEOMETRY_OBJECT = 1416
ER_FAILED_ROUTINE_BREAK_BINLOG = 1417
ER_BINLOG_UNSAFE_ROUTINE = 1418
ER_BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
ER_EXEC_STMT_WITH_OPEN_CURSOR = 1420
ER_STMT_HAS_NO_OPEN_CURSOR = 1421
ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
ER_NO_DEFAULT_FOR_VIEW_FIELD = 1423
ER_SP_NO_RECURSION = 1424
ER_TOO_BIG_SCALE = 1425
ER_TOO_BIG_PRECISION = 1426
ER_M_BIGGER_THAN_D = 1427
ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428
ER_CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430
ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
ER_FOREIGN_DATA_STRING_INVALID = 1433
ER_CANT_CREATE_FEDERATED_TABLE = 1434
ER_TRG_IN_WRONG_SCHEMA = 1435
ER_STACK_OVERRUN_NEED_MORE = 1436
ER_TOO_LONG_BODY = 1437
ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
ER_TOO_BIG_DISPLAYWIDTH = 1439
ER_XAER_DUPID = 1440
ER_DATETIME_FUNCTION_OVERFLOW = 1441
ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
ER_VIEW_PREVENT_UPDATE = 1443
ER_PS_NO_RECURSION = 1444
ER_SP_CANT_SET_AUTOCOMMIT = 1445
ER_MALFORMED_DEFINER = 1446
ER_VIEW_FRM_NO_USER = 1447
ER_VIEW_OTHER_USER = 1448
ER_NO_SUCH_USER = 1449
ER_FORBID_SCHEMA_CHANGE = 1450
ER_ROW_IS_REFERENCED_2 = 1451
ER_NO_REFERENCED_ROW_2 = 1452
ER_SP_BAD_VAR_SHADOW = 1453
ER_TRG_NO_DEFINER = 1454
ER_OLD_FILE_FORMAT = 1455
ER_SP_RECURSION_LIMIT = 1456
ER_SP_PROC_TABLE_CORRUPT = 1457
ER_SP_WRONG_NAME = 1458
ER_TABLE_NEEDS_UPGRADE = 1459
ER_SP_NO_AGGREGATE = 1460
ER_MAX_PREPARED_STMT_COUNT_REACHED = 1461
ER_VIEW_RECURSIVE = 1462
ER_NON_GROUPING_FIELD_USED = 1463
ER_TABLE_CANT_HANDLE_SPKEYS = 1464
ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
ER_REMOVED_SPACES = 1466
ER_AUTOINC_READ_FAILED = 1467
ER_USERNAME = 1468
ER_HOSTNAME = 1469
ER_WRONG_STRING_LENGTH = 1470
ER_NON_INSERTABLE_TABLE = 1471
ER_ADMIN_WRONG_MRG_TABLE = 1472
ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473
ER_NAME_BECOMES_EMPTY = 1474
ER_AMBIGUOUS_FIELD_TERM = 1475
ER_FOREIGN_SERVER_EXISTS = 1476
ER_FOREIGN_SERVER_DOESNT_EXIST = 1477
ER_ILLEGAL_HA_CREATE_OPTION = 1478
ER_PARTITION_REQUIRES_VALUES_ERROR = 1479
ER_PARTITION_WRONG_VALUES_ERROR = 1480
ER_PARTITION_MAXVALUE_ERROR = 1481
ER_PARTITION_SUBPARTITION_ERROR = 1482
ER_PARTITION_SUBPART_MIX_ERROR = 1483
ER_PARTITION_WRONG_NO_PART_ERROR = 1484
ER_PARTITION_WRONG_NO_SUBPART_ERROR = 1485
ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486
ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR = 1487
ER_FIELD_NOT_FOUND_PART_ERROR = 1488
ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR = 1489
ER_INCONSISTENT_PARTITION_INFO_ERROR = 1490
ER_PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491
ER_PARTITIONS_MUST_BE_DEFINED_ERROR = 1492
ER_RANGE_NOT_INCREASING_ERROR = 1493
ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR = 1494
ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR = 1495
ER_PARTITION_ENTRY_ERROR = 1496
ER_MIX_HANDLER_ERROR = 1497
ER_PARTITION_NOT_DEFINED_ERROR = 1498
ER_TOO_MANY_PARTITIONS_ERROR = 1499
ER_SUBPARTITION_ERROR = 1500
ER_CANT_CREATE_HANDLER_FILE = 1501
ER_BLOB_FIELD_IN_PART_FUNC_ERROR = 1502
ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503
ER_NO_PARTS_ERROR = 1504
ER_PARTITION_MGMT_ON_NONPARTITIONED = 1505
ER_FOREIGN_KEY_ON_PARTITIONED = 1506
ER_DROP_PARTITION_NON_EXISTENT = 1507
ER_DROP_LAST_PARTITION = 1508
ER_COALESCE_ONLY_ON_HASH_PARTITION = 1509
ER_REORG_HASH_ONLY_ON_SAME_NO = 1510
ER_REORG_NO_PARAM_ERROR = 1511
ER_ONLY_ON_RANGE_LIST_PARTITION = 1512
ER_ADD_PARTITION_SUBPART_ERROR = 1513
ER_ADD_PARTITION_NO_NEW_PARTITION = 1514
ER_COALESCE_PARTITION_NO_PARTITION = 1515
ER_REORG_PARTITION_NOT_EXIST = 1516
ER_SAME_NAME_PARTITION = 1517
ER_NO_BINLOG_ERROR = 1518
ER_CONSECUTIVE_REORG_PARTITIONS = 1519
ER_REORG_OUTSIDE_RANGE = 1520
ER_PARTITION_FUNCTION_FAILURE = 1521
ER_PART_STATE_ERROR = 1522
ER_LIMITED_PART_RANGE = 1523
ER_PLUGIN_IS_NOT_LOADED = 1524
ER_WRONG_VALUE = 1525
ER_NO_PARTITION_FOR_GIVEN_VALUE = 1526
ER_FILEGROUP_OPTION_ONLY_ONCE = 1527
ER_CREATE_FILEGROUP_FAILED = 1528
ER_DROP_FILEGROUP_FAILED = 1529
ER_TABLESPACE_AUTO_EXTEND_ERROR = 1530
ER_WRONG_SIZE_NUMBER = 1531
ER_SIZE_OVERFLOW_ERROR = 1532
ER_ALTER_FILEGROUP_FAILED = 1533
ER_BINLOG_ROW_LOGGING_FAILED = 1534
ER_BINLOG_ROW_WRONG_TABLE_DEF = 1535
ER_BINLOG_ROW_RBR_TO_SBR = 1536
ER_EVENT_ALREADY_EXISTS = 1537
ER_EVENT_STORE_FAILED = 1538
ER_EVENT_DOES_NOT_EXIST = 1539
ER_EVENT_CANT_ALTER = 1540
ER_EVENT_DROP_FAILED = 1541
ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG = 1542
ER_EVENT_ENDS_BEFORE_STARTS = 1543
ER_EVENT_EXEC_TIME_IN_THE_PAST = 1544
ER_EVENT_OPEN_TABLE_FAILED = 1545
ER_EVENT_NEITHER_M_EXPR_NOR_M_AT = 1546
ER_OBSOLETE_COL_COUNT_DOESNT_MATCH_CORRUPTED = 1547
ER_OBSOLETE_CANNOT_LOAD_FROM_TABLE = 1548
ER_EVENT_CANNOT_DELETE = 1549
ER_EVENT_COMPILE_ERROR = 1550
ER_EVENT_SAME_NAME = 1551
ER_EVENT_DATA_TOO_LONG = 1552
ER_DROP_INDEX_FK = 1553
ER_WARN_DEPRECATED_SYNTAX_WITH_VER = 1554
ER_CANT_WRITE_LOCK_LOG_TABLE = 1555
ER_CANT_LOCK_LOG_TABLE = 1556
ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED = 1557
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE = 1558
ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559
ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560
ER_NDB_CANT_SWITCH_BINLOG_FORMAT = 1561
ER_PARTITION_NO_TEMPORARY = 1562
ER_PARTITION_CONST_DOMAIN_ERROR = 1563
ER_PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564
ER_DDL_LOG_ERROR = 1565
ER_NULL_IN_VALUES_LESS_THAN = 1566
ER_WRONG_PARTITION_NAME = 1567
ER_CANT_CHANGE_TX_CHARACTERISTICS = 1568
ER_DUP_ENTRY_AUTOINCREMENT_CASE = 1569
ER_EVENT_MODIFY_QUEUE_ERROR = 1570
ER_EVENT_SET_VAR_ERROR = 1571
ER_PARTITION_MERGE_ERROR = 1572
ER_CANT_ACTIVATE_LOG = 1573
ER_RBR_NOT_AVAILABLE = 1574
ER_BASE64_DECODE_ERROR = 1575
ER_EVENT_RECURSION_FORBIDDEN = 1576
ER_EVENTS_DB_ERROR = 1577
ER_ONLY_INTEGERS_ALLOWED = 1578
ER_UNSUPORTED_LOG_ENGINE = 1579
ER_BAD_LOG_STATEMENT = 1580
ER_CANT_RENAME_LOG_TABLE = 1581
ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582
ER_WRONG_PARAMETERS_TO_NATIVE_FCT = 1583
ER_WRONG_PARAMETERS_TO_STORED_FCT = 1584
ER_NATIVE_FCT_NAME_COLLISION = 1585
ER_DUP_ENTRY_WITH_KEY_NAME = 1586
ER_BINLOG_PURGE_EMFILE = 1587
ER_EVENT_CANNOT_CREATE_IN_THE_PAST = 1588
ER_EVENT_CANNOT_ALTER_IN_THE_PAST = 1589
ER_SLAVE_INCIDENT = 1590
ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT = 1591
ER_BINLOG_UNSAFE_STATEMENT = 1592
ER_SLAVE_FATAL_ERROR = 1593
ER_SLAVE_RELAY_LOG_READ_FAILURE = 1594
ER_SLAVE_RELAY_LOG_WRITE_FAILURE = 1595
ER_SLAVE_CREATE_EVENT_FAILURE = 1596
ER_SLAVE_MASTER_COM_FAILURE = 1597
ER_BINLOG_LOGGING_IMPOSSIBLE = 1598
ER_VIEW_NO_CREATION_CTX = 1599
ER_VIEW_INVALID_CREATION_CTX = 1600
ER_SR_INVALID_CREATION_CTX = 1601
ER_TRG_CORRUPTED_FILE = 1602
ER_TRG_NO_CREATION_CTX = 1603
ER_TRG_INVALID_CREATION_CTX = 1604
ER_EVENT_INVALID_CREATION_CTX = 1605
ER_TRG_CANT_OPEN_TABLE = 1606
ER_CANT_CREATE_SROUTINE = 1607
ER_NEVER_USED = 1608
ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT = 1609
ER_SLAVE_CORRUPT_EVENT = 1610
ER_LOAD_DATA_INVALID_COLUMN = 1611
ER_LOG_PURGE_NO_FILE = 1612
ER_XA_RBTIMEOUT = 1613
ER_XA_RBDEADLOCK = 1614
ER_NEED_REPREPARE = 1615
ER_DELAYED_NOT_SUPPORTED = 1616
WARN_NO_MASTER_INFO = 1617
WARN_OPTION_IGNORED = 1618
ER_PLUGIN_DELETE_BUILTIN = 1619
WARN_PLUGIN_BUSY = 1620
ER_VARIABLE_IS_READONLY = 1621
ER_WARN_ENGINE_TRANSACTION_ROLLBACK = 1622
ER_SLAVE_HEARTBEAT_FAILURE = 1623
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624
ER_NDB_REPLICATION_SCHEMA_ERROR = 1625
ER_CONFLICT_FN_PARSE_ERROR = 1626
ER_EXCEPTIONS_WRITE_ERROR = 1627
ER_TOO_LONG_TABLE_COMMENT = 1628
ER_TOO_LONG_FIELD_COMMENT = 1629
ER_FUNC_INEXISTENT_NAME_COLLISION = 1630
ER_DATABASE_NAME = 1631
ER_TABLE_NAME = 1632
ER_PARTITION_NAME = 1633
ER_SUBPARTITION_NAME = 1634
ER_TEMPORARY_NAME = 1635
ER_RENAMED_NAME = 1636
ER_TOO_MANY_CONCURRENT_TRXS = 1637
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638
ER_DEBUG_SYNC_TIMEOUT = 1639
ER_DEBUG_SYNC_HIT_LIMIT = 1640
ER_DUP_SIGNAL_SET = 1641
ER_SIGNAL_WARN = 1642
ER_SIGNAL_NOT_FOUND = 1643
ER_SIGNAL_EXCEPTION = 1644
ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645
ER_SIGNAL_BAD_CONDITION_TYPE = 1646
WARN_COND_ITEM_TRUNCATED = 1647
ER_COND_ITEM_TOO_LONG = 1648
ER_UNKNOWN_LOCALE = 1649
ER_SLAVE_IGNORE_SERVER_IDS = 1650
ER_QUERY_CACHE_DISABLED = 1651
ER_SAME_NAME_PARTITION_FIELD = 1652
ER_PARTITION_COLUMN_LIST_ERROR = 1653
ER_WRONG_TYPE_COLUMN_VALUE_ERROR = 1654
ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655
ER_MAXVALUE_IN_VALUES_IN = 1656
ER_TOO_MANY_VALUES_ERROR = 1657
ER_ROW_SINGLE_PARTITION_FIELD_ERROR = 1658
ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD = 1659
ER_PARTITION_FIELDS_TOO_LONG = 1660
ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE = 1661
ER_BINLOG_ROW_MODE_AND_STMT_ENGINE = 1662
ER_BINLOG_UNSAFE_AND_STMT_ENGINE = 1663
ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE = 1664
ER_BINLOG_STMT_MODE_AND_ROW_ENGINE = 1665
ER_BINLOG_ROW_INJECTION_AND_STMT_MODE = 1666
ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1667
ER_BINLOG_UNSAFE_LIMIT = 1668
ER_UNUSED4 = 1669
ER_BINLOG_UNSAFE_SYSTEM_TABLE = 1670
ER_BINLOG_UNSAFE_AUTOINC_COLUMNS = 1671
ER_BINLOG_UNSAFE_UDF = 1672
ER_BINLOG_UNSAFE_SYSTEM_VARIABLE = 1673
ER_BINLOG_UNSAFE_SYSTEM_FUNCTION = 1674
ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS = 1675
ER_MESSAGE_AND_STATEMENT = 1676
ER_SLAVE_CONVERSION_FAILED = 1677
ER_SLAVE_CANT_CREATE_CONVERSION = 1678
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1679
ER_PATH_LENGTH = 1680
ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681
ER_WRONG_NATIVE_TABLE_STRUCTURE = 1682
ER_WRONG_PERFSCHEMA_USAGE = 1683
ER_WARN_I_S_SKIPPED_TABLE = 1684
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1685
ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686
ER_SPATIAL_MUST_HAVE_GEOM_COL = 1687
ER_TOO_LONG_INDEX_COMMENT = 1688
ER_LOCK_ABORTED = 1689
ER_DATA_OUT_OF_RANGE = 1690
ER_WRONG_SPVAR_TYPE_IN_LIMIT = 1691
ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1692
ER_BINLOG_UNSAFE_MIXED_STATEMENT = 1693
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1694
ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695
ER_FAILED_READ_FROM_PAR_FILE = 1696
ER_VALUES_IS_NOT_INT_TYPE_ERROR = 1697
ER_ACCESS_DENIED_NO_PASSWORD_ERROR = 1698
ER_SET_PASSWORD_AUTH_PLUGIN = 1699
ER_GRANT_PLUGIN_USER_EXISTS = 1700
ER_TRUNCATE_ILLEGAL_FK = 1701
ER_PLUGIN_IS_PERMANENT = 1702
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704
ER_STMT_CACHE_FULL = 1705
ER_MULTI_UPDATE_KEY_CONFLICT = 1706
ER_TABLE_NEEDS_REBUILD = 1707
WARN_OPTION_BELOW_LIMIT = 1708
ER_INDEX_COLUMN_TOO_LONG = 1709
ER_ERROR_IN_TRIGGER_BODY = 1710
ER_ERROR_IN_UNKNOWN_TRIGGER_BODY = 1711
ER_INDEX_CORRUPT = 1712
ER_UNDO_RECORD_TOO_BIG = 1713
ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT = 1714
ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE = 1715
ER_BINLOG_UNSAFE_REPLACE_SELECT = 1716
ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT = 1717
ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT = 1718
ER_BINLOG_UNSAFE_UPDATE_IGNORE = 1719
ER_PLUGIN_NO_UNINSTALL = 1720
ER_PLUGIN_NO_INSTALL = 1721
ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT = 1722
ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC = 1723
ER_BINLOG_UNSAFE_INSERT_TWO_KEYS = 1724
ER_TABLE_IN_FK_CHECK = 1725
ER_UNSUPPORTED_ENGINE = 1726
ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST = 1727
ER_CANNOT_LOAD_FROM_TABLE_V2 = 1728
ER_MASTER_DELAY_VALUE_OUT_OF_RANGE = 1729
ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT = 1730
ER_PARTITION_EXCHANGE_DIFFERENT_OPTION = 1731
ER_PARTITION_EXCHANGE_PART_TABLE = 1732
ER_PARTITION_EXCHANGE_TEMP_TABLE = 1733
ER_PARTITION_INSTEAD_OF_SUBPARTITION = 1734
ER_UNKNOWN_PARTITION = 1735
ER_TABLES_DIFFERENT_METADATA = 1736
ER_ROW_DOES_NOT_MATCH_PARTITION = 1737
ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX = 1738
ER_WARN_INDEX_NOT_APPLICABLE = 1739
ER_PARTITION_EXCHANGE_FOREIGN_KEY = 1740
ER_NO_SUCH_KEY_VALUE = 1741
ER_RPL_INFO_DATA_TOO_LONG = 1742
ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE = 1743
ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE = 1744
ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX = 1745
ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT = 1746
ER_PARTITION_CLAUSE_ON_NONPARTITIONED = 1747
ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET = 1748
ER_NO_SUCH_PARTITION__UNUSED = 1749
ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE = 1750
ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE = 1751
ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE = 1752
ER_MTS_FEATURE_IS_NOT_SUPPORTED = 1753
ER_MTS_UPDATED_DBS_GREATER_MAX = 1754
ER_MTS_CANT_PARALLEL = 1755
ER_MTS_INCONSISTENT_DATA = 1756
ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING = 1757
ER_DA_INVALID_CONDITION_NUMBER = 1758
ER_INSECURE_PLAIN_TEXT = 1759
ER_INSECURE_CHANGE_MASTER = 1760
ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO = 1761
ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO = 1762
ER_SQLTHREAD_WITH_SECURE_SLAVE = 1763
ER_TABLE_HAS_NO_FT = 1764
ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER = 1765
ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION = 1766
ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST = 1767
ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION = 1768
ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION = 1769
ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL = 1770
ER_SKIPPING_LOGGED_TRANSACTION = 1771
ER_MALFORMED_GTID_SET_SPECIFICATION = 1772
ER_MALFORMED_GTID_SET_ENCODING = 1773
ER_MALFORMED_GTID_SPECIFICATION = 1774
ER_GNO_EXHAUSTED = 1775
ER_BAD_SLAVE_AUTO_POSITION = 1776
ER_AUTO_POSITION_REQUIRES_GTID_MODE_NOT_OFF = 1777
ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET = 1778
ER_GTID_MODE_ON_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON = 1779
ER_GTID_MODE_REQUIRES_BINLOG = 1780
ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF = 1781
ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON = 1782
ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF = 1783
ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF__UNUSED = 1784
ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE = 1785
ER_GTID_UNSAFE_CREATE_SELECT = 1786
ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION = 1787
ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME = 1788
ER_MASTER_HAS_PURGED_REQUIRED_GTIDS = 1789
ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID = 1790
ER_UNKNOWN_EXPLAIN_FORMAT = 1791
ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION = 1792
ER_TOO_LONG_TABLE_PARTITION_COMMENT = 1793
ER_SLAVE_CONFIGURATION = 1794
ER_INNODB_FT_LIMIT = 1795
ER_INNODB_NO_FT_TEMP_TABLE = 1796
ER_INNODB_FT_WRONG_DOCID_COLUMN = 1797
ER_INNODB_FT_WRONG_DOCID_INDEX = 1798
ER_INNODB_ONLINE_LOG_TOO_BIG = 1799
ER_UNKNOWN_ALTER_ALGORITHM = 1800
ER_UNKNOWN_ALTER_LOCK = 1801
ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS = 1802
ER_MTS_RECOVERY_FAILURE = 1803
ER_MTS_RESET_WORKERS = 1804
ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 = 1805
ER_SLAVE_SILENT_RETRY_TRANSACTION = 1806
ER_DISCARD_FK_CHECKS_RUNNING = 1807
ER_TABLE_SCHEMA_MISMATCH = 1808
ER_TABLE_IN_SYSTEM_TABLESPACE = 1809
ER_IO_READ_ERROR = 1810
ER_IO_WRITE_ERROR = 1811
ER_TABLESPACE_MISSING = 1812
ER_TABLESPACE_EXISTS = 1813
ER_TABLESPACE_DISCARDED = 1814
ER_INTERNAL_ERROR = 1815
ER_INNODB_IMPORT_ERROR = 1816
ER_INNODB_INDEX_CORRUPT = 1817
ER_INVALID_YEAR_COLUMN_LENGTH = 1818
ER_NOT_VALID_PASSWORD = 1819
ER_MUST_CHANGE_PASSWORD = 1820
ER_FK_NO_INDEX_CHILD = 1821
ER_FK_NO_INDEX_PARENT = 1822
ER_FK_FAIL_ADD_SYSTEM = 1823
ER_FK_CANNOT_OPEN_PARENT = 1824
ER_FK_INCORRECT_OPTION = 1825
ER_FK_DUP_NAME = 1826
ER_PASSWORD_FORMAT = 1827
ER_FK_COLUMN_CANNOT_DROP = 1828
ER_FK_COLUMN_CANNOT_DROP_CHILD = 1829
ER_FK_COLUMN_NOT_NULL = 1830
ER_DUP_INDEX = 1831
ER_FK_COLUMN_CANNOT_CHANGE = 1832
ER_FK_COLUMN_CANNOT_CHANGE_CHILD = 1833
ER_UNUSED5 = 1834
ER_MALFORMED_PACKET = 1835
ER_READ_ONLY_MODE = 1836
ER_GTID_NEXT_TYPE_UNDEFINED_GROUP = 1837
ER_VARIABLE_NOT_SETTABLE_IN_SP = 1838
ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF = 1839
ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY = 1840
ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY = 1841
ER_GTID_PURGED_WAS_CHANGED = 1842
ER_GTID_EXECUTED_WAS_CHANGED = 1843
ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES = 1844
ER_ALTER_OPERATION_NOT_SUPPORTED = 1845
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON = 1846
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY = 1847
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION = 1848
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME = 1849
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE = 1850
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK = 1851
ER_UNUSED6 = 1852
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK = 1853
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC = 1854
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS = 1855
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS = 1856
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS = 1857
ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE = 1858
ER_DUP_UNKNOWN_IN_INDEX = 1859
ER_IDENT_CAUSES_TOO_LONG_PATH = 1860
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL = 1861
ER_MUST_CHANGE_PASSWORD_LOGIN = 1862
ER_ROW_IN_WRONG_PARTITION = 1863
ER_MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX = 1864
ER_INNODB_NO_FT_USES_PARSER = 1865
ER_BINLOG_LOGICAL_CORRUPTION = 1866
ER_WARN_PURGE_LOG_IN_USE = 1867
ER_WARN_PURGE_LOG_IS_ACTIVE = 1868
ER_AUTO_INCREMENT_CONFLICT = 1869
WARN_ON_BLOCKHOLE_IN_RBR = 1870
ER_SLAVE_MI_INIT_REPOSITORY = 1871
ER_SLAVE_RLI_INIT_REPOSITORY = 1872
ER_ACCESS_DENIED_CHANGE_USER_ERROR = 1873
ER_INNODB_READ_ONLY = 1874
ER_STOP_SLAVE_SQL_THREAD_TIMEOUT = 1875
ER_STOP_SLAVE_IO_THREAD_TIMEOUT = 1876
ER_TABLE_CORRUPT = 1877
ER_TEMP_FILE_WRITE_FAILURE = 1878
ER_INNODB_FT_AUX_NOT_HEX_ID = 1879
ER_OLD_TEMPORALS_UPGRADED = 1880
ER_INNODB_FORCED_RECOVERY = 1881
ER_AES_INVALID_IV = 1882
ER_PLUGIN_CANNOT_BE_UNINSTALLED = 1883
ER_GTID_UNSAFE_BINLOG_SPLITTABLE_STATEMENT_AND_GTID_GROUP = 1884
ER_SLAVE_HAS_MORE_GTIDS_THAN_MASTER = 1885
ER_FILE_CORRUPT = 3000
ER_ERROR_ON_MASTER = 3001
ER_INCONSISTENT_ERROR = 3002
ER_STORAGE_ENGINE_NOT_LOADED = 3003
ER_GET_STACKED_DA_WITHOUT_ACTIVE_HANDLER = 3004
ER_WARN_LEGACY_SYNTAX_CONVERTED = 3005
ER_BINLOG_UNSAFE_FULLTEXT_PLUGIN = 3006
ER_CANNOT_DISCARD_TEMPORARY_TABLE = 3007
ER_FK_DEPTH_EXCEEDED = 3008
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE_V2 = 3009
ER_WARN_TRIGGER_DOESNT_HAVE_CREATED = 3010
ER_REFERENCED_TRG_DOES_NOT_EXIST = 3011
ER_EXPLAIN_NOT_SUPPORTED = 3012
ER_INVALID_FIELD_SIZE = 3013
ER_MISSING_HA_CREATE_OPTION = 3014
ER_ENGINE_OUT_OF_MEMORY = 3015
ER_PASSWORD_EXPIRE_ANONYMOUS_USER = 3016
ER_SLAVE_SQL_THREAD_MUST_STOP = 3017
ER_NO_FT_MATERIALIZED_SUBQUERY = 3018
ER_INNODB_UNDO_LOG_FULL = 3019
ER_INVALID_ARGUMENT_FOR_LOGARITHM = 3020
ER_SLAVE_CHANNEL_IO_THREAD_MUST_STOP = 3021
ER_WARN_OPEN_TEMP_TABLES_MUST_BE_ZERO = 3022
ER_WARN_ONLY_MASTER_LOG_FILE_NO_POS = 3023
ER_QUERY_TIMEOUT = 3024
ER_NON_RO_SELECT_DISABLE_TIMER = 3025
ER_DUP_LIST_ENTRY = 3026
ER_SQL_MODE_NO_EFFECT = 3027
ER_AGGREGATE_ORDER_FOR_UNION = 3028
ER_AGGREGATE_ORDER_NON_AGG_QUERY = 3029
ER_SLAVE_WORKER_STOPPED_PREVIOUS_THD_ERROR = 3030
ER_DONT_SUPPORT_SLAVE_PRESERVE_COMMIT_ORDER = 3031
ER_SERVER_OFFLINE_MODE = 3032
ER_GIS_DIFFERENT_SRIDS = 3033
ER_GIS_UNSUPPORTED_ARGUMENT = 3034
ER_GIS_UNKNOWN_ERROR = 3035
ER_GIS_UNKNOWN_EXCEPTION = 3036
ER_GIS_INVALID_DATA = 3037
ER_BOOST_GEOMETRY_EMPTY_INPUT_EXCEPTION = 3038
ER_BOOST_GEOMETRY_CENTROID_EXCEPTION = 3039
ER_BOOST_GEOMETRY_OVERLAY_INVALID_INPUT_EXCEPTION = 3040
ER_BOOST_GEOMETRY_TURN_INFO_EXCEPTION = 3041
ER_BOOST_GEOMETRY_SELF_INTERSECTION_POINT_EXCEPTION = 3042
ER_BOOST_GEOMETRY_UNKNOWN_EXCEPTION = 3043
ER_STD_BAD_ALLOC_ERROR = 3044
ER_STD_DOMAIN_ERROR = 3045
ER_STD_LENGTH_ERROR = 3046
ER_STD_INVALID_ARGUMENT = 3047
ER_STD_OUT_OF_RANGE_ERROR = 3048
ER_STD_OVERFLOW_ERROR = 3049
ER_STD_RANGE_ERROR = 3050
ER_STD_UNDERFLOW_ERROR = 3051
ER_STD_LOGIC_ERROR = 3052
ER_STD_RUNTIME_ERROR = 3053
ER_STD_UNKNOWN_EXCEPTION = 3054
ER_GIS_DATA_WRONG_ENDIANESS = 3055
ER_CHANGE_MASTER_PASSWORD_LENGTH = 3056
ER_USER_LOCK_WRONG_NAME = 3057
ER_USER_LOCK_DEADLOCK = 3058
ER_REPLACE_INACCESSIBLE_ROWS = 3059
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS = 3060
ER_ILLEGAL_USER_VAR = 3061
ER_GTID_MODE_OFF = 3062
ER_UNSUPPORTED_BY_REPLICATION_THREAD = 3063
ER_INCORRECT_TYPE = 3064
ER_FIELD_IN_ORDER_NOT_SELECT = 3065
ER_AGGREGATE_IN_ORDER_NOT_SELECT = 3066
ER_INVALID_RPL_WILD_TABLE_FILTER_PATTERN = 3067
ER_NET_OK_PACKET_TOO_LARGE = 3068
ER_INVALID_JSON_DATA = 3069
ER_INVALID_GEOJSON_MISSING_MEMBER = 3070
ER_INVALID_GEOJSON_WRONG_TYPE = 3071
ER_INVALID_GEOJSON_UNSPECIFIED = 3072
ER_DIMENSION_UNSUPPORTED = 3073
ER_SLAVE_CHANNEL_DOES_NOT_EXIST = 3074
ER_SLAVE_MULTIPLE_CHANNELS_HOST_PORT = 3075
ER_SLAVE_CHANNEL_NAME_INVALID_OR_TOO_LONG = 3076
ER_SLAVE_NEW_CHANNEL_WRONG_REPOSITORY = 3077
ER_SLAVE_CHANNEL_DELETE = 3078
ER_SLAVE_MULTIPLE_CHANNELS_CMD = 3079
ER_SLAVE_MAX_CHANNELS_EXCEEDED = 3080
ER_SLAVE_CHANNEL_MUST_STOP = 3081
ER_SLAVE_CHANNEL_NOT_RUNNING = 3082
ER_SLAVE_CHANNEL_WAS_RUNNING = 3083
ER_SLAVE_CHANNEL_WAS_NOT_RUNNING = 3084
ER_SLAVE_CHANNEL_SQL_THREAD_MUST_STOP = 3085
ER_SLAVE_CHANNEL_SQL_SKIP_COUNTER = 3086
ER_WRONG_FIELD_WITH_GROUP_V2 = 3087
ER_MIX_OF_GROUP_FUNC_AND_FIELDS_V2 = 3088
ER_WARN_DEPRECATED_SYSVAR_UPDATE = 3089
ER_WARN_DEPRECATED_SQLMODE = 3090
ER_CANNOT_LOG_PARTIAL_DROP_DATABASE_WITH_GTID = 3091
ER_GROUP_REPLICATION_CONFIGURATION = 3092
ER_GROUP_REPLICATION_RUNNING = 3093
ER_GROUP_REPLICATION_APPLIER_INIT_ERROR = 3094
ER_GROUP_REPLICATION_STOP_APPLIER_THREAD_TIMEOUT = 3095
ER_GROUP_REPLICATION_COMMUNICATION_LAYER_SESSION_ERROR = 3096
ER_GROUP_REPLICATION_COMMUNICATION_LAYER_JOIN_ERROR = 3097
ER_BEFORE_DML_VALIDATION_ERROR = 3098
ER_PREVENTS_VARIABLE_WITHOUT_RBR = 3099
ER_RUN_HOOK_ERROR = 3100
ER_TRANSACTION_ROLLBACK_DURING_COMMIT = 3101
ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED = 3102
ER_KEY_BASED_ON_GENERATED_COLUMN = 3103
ER_WRONG_FK_OPTION_FOR_GENERATED_COLUMN = 3104
ER_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN = 3105
ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN = 3106
ER_GENERATED_COLUMN_NON_PRIOR = 3107
ER_DEPENDENT_BY_GENERATED_COLUMN = 3108
ER_GENERATED_COLUMN_REF_AUTO_INC = 3109
ER_FEATURE_NOT_AVAILABLE = 3110
ER_CANT_SET_GTID_MODE = 3111
ER_CANT_USE_AUTO_POSITION_WITH_GTID_MODE_OFF = 3112
ER_CANT_REPLICATE_ANONYMOUS_WITH_AUTO_POSITION = 3113
ER_CANT_REPLICATE_ANONYMOUS_WITH_GTID_MODE_ON = 3114
ER_CANT_REPLICATE_GTID_WITH_GTID_MODE_OFF = 3115
ER_CANT_SET_ENFORCE_GTID_CONSISTENCY_ON_WITH_ONGOING_GTID_VIOLATING_TRANSACTIONS = 3116
ER_SET_ENFORCE_GTID_CONSISTENCY_WARN_WITH_ONGOING_GTID_VIOLATING_TRANSACTIONS = 3117
ER_ACCOUNT_HAS_BEEN_LOCKED = 3118
ER_WRONG_TABLESPACE_NAME = 3119
ER_TABLESPACE_IS_NOT_EMPTY = 3120
ER_WRONG_FILE_NAME = 3121
ER_BOOST_GEOMETRY_INCONSISTENT_TURNS_EXCEPTION = 3122
ER_WARN_OPTIMIZER_HINT_SYNTAX_ERROR = 3123
ER_WARN_BAD_MAX_EXECUTION_TIME = 3124
ER_WARN_UNSUPPORTED_MAX_EXECUTION_TIME = 3125
ER_WARN_CONFLICTING_HINT = 3126
ER_WARN_UNKNOWN_QB_NAME = 3127
ER_UNRESOLVED_HINT_NAME = 3128
ER_WARN_DEPRECATED_SQLMODE_UNSET = 3129
CR_UNKNOWN_ERROR = 2000
CR_SOCKET_CREATE_ERROR = 2001
CR_CONNECTION_ERROR = 2002
CR_CONN_HOST_ERROR = 2003
CR_IPSOCK_ERROR = 2004
CR_UNKNOWN_HOST = 2005
CR_SERVER_GONE_ERROR = 2006
CR_VERSION_ERROR = 2007
CR_OUT_OF_MEMORY = 2008
CR_WRONG_HOST_INFO = 2009
CR_LOCALHOST_CONNECTION = 2010
CR_TCP_CONNECTION = 2011
CR_SERVER_HANDSHAKE_ERR = 2012
CR_SERVER_LOST = 2013
CR_COMMANDS_OUT_OF_SYNC = 2014
CR_NAMEDPIPE_CONNECTION = 2015
CR_NAMEDPIPEWAIT_ERROR = 2016
CR_NAMEDPIPEOPEN_ERROR = 2017
CR_NAMEDPIPESETSTATE_ERROR = 2018
CR_CANT_READ_CHARSET = 2019
CR_NET_PACKET_TOO_LARGE = 2020
CR_EMBEDDED_CONNECTION = 2021
CR_PROBE_SLAVE_STATUS = 2022
CR_PROBE_SLAVE_HOSTS = 2023
CR_PROBE_SLAVE_CONNECT = 2024
CR_PROBE_MASTER_CONNECT = 2025
CR_SSL_CONNECTION_ERROR = 2026
CR_MALFORMED_PACKET = 2027
CR_WRONG_LICENSE = 2028
CR_NULL_POINTER = 2029
CR_NO_PREPARE_STMT = 2030
CR_PARAMS_NOT_BOUND = 2031
CR_DATA_TRUNCATED = 2032
CR_NO_PARAMETERS_EXISTS = 2033
CR_INVALID_PARAMETER_NO = 2034
CR_INVALID_BUFFER_USE = 2035
CR_UNSUPPORTED_PARAM_TYPE = 2036
CR_SHARED_MEMORY_CONNECTION = 2037
CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038
CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039
CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040
CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041
CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042
CR_SHARED_MEMORY_MAP_ERROR = 2043
CR_SHARED_MEMORY_EVENT_ERROR = 2044
CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045
CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046
CR_CONN_UNKNOW_PROTOCOL = 2047
CR_INVALID_CONN_HANDLE = 2048
CR_UNUSED_1 = 2049
CR_FETCH_CANCELED = 2050
CR_NO_DATA = 2051
CR_NO_STMT_METADATA = 2052
CR_NO_RESULT_SET = 2053
CR_NOT_IMPLEMENTED = 2054
CR_SERVER_LOST_EXTENDED = 2055
CR_STMT_CLOSED = 2056
CR_NEW_STMT_METADATA = 2057
CR_ALREADY_CONNECTED = 2058
CR_AUTH_PLUGIN_CANNOT_LOAD = 2059
CR_DUPLICATE_CONNECTION_ATTR = 2060
CR_AUTH_PLUGIN_ERR = 2061
CR_INSECURE_API_ERR = 2062
# End MySQL Errors
|
anupkdas-nus/global_synapses
|
refs/heads/master
|
pyNN-dispackgaes/nineml/synapses.py
|
1
|
"""
:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
from utility import catalog_url
class StaticSynapticConnection(object):
definition_url = "%s/connectiontypes/static_connection.xml" % catalog_url
|
SysCompass/compass-core
|
refs/heads/master
|
compass/hdsdiscovery/hdmanager.py
|
2
|
"""Manage hdsdiscovery functionalities"""
import os
import re
import logging
from compass.hdsdiscovery import utils
class HDManager:
"""Process a request."""
def __init__(self):
base_dir = os.path.dirname(os.path.realpath(__file__))
self.vendors_dir = os.path.join(base_dir, 'vendors')
self.vendor_plugins_dir = os.path.join(self.vendors_dir, '?/plugins')
def learn(self, host, credential, vendor, req_obj, oper="SCAN", **kwargs):
"""Insert/update record of switch_info. Get expected results from
switch according to sepcific operation.
:param req_obj: the object of a machine
:param host: switch IP address
:param credientials: credientials to access switch
:param oper: operations of the plugin (SCAN, GETONE, SET)
:param kwargs(optional): key-value pairs
"""
plugin_dir = self.vendor_plugins_dir.replace('?', vendor)
if not os.path.exists(plugin_dir):
logging.error('No such directory: %s', plugin_dir)
return None
plugin = utils.load_module(req_obj, plugin_dir, host, credential)
if not plugin:
# No plugin found!
            #TODO: add more code to catch exceptions or unexpected states
logging.error('no plugin %s to load from %s', req_obj, plugin_dir)
return None
return plugin.process_data(oper)
def is_valid_vendor(self, host, credential, vendor):
""" Check if vendor is associated with this host and credential
:param host: switch ip
:param credential: credential to access switch
:param vendor: the vendor of switch
"""
vendor_dir = os.path.join(self.vendors_dir, vendor)
if not os.path.exists(vendor_dir):
logging.error('no such directory: %s', vendor_dir)
return False
vendor_instance = utils.load_module(vendor, vendor_dir)
        #TODO: add more code to catch exceptions or unexpected states
if not vendor_instance:
            # Cannot find the vendor in the directory!
logging.error('no vendor instance %s load from %s',
vendor, vendor_dir)
return False
return vendor_instance.is_this_vendor(host, credential)
def get_vendor(self, host, credential):
""" Check and get vendor of the switch.
:param host: switch ip:
:param credential: credential to access switch
"""
        # List all vendor subdirectories under the vendors directory,
        # skipping hidden entries.
all_vendors = sorted(o for o in os.listdir(self.vendors_dir)
if os.path.isdir(os.path.join(self.vendors_dir, o))
and re.match(r'^[^\.]', o))
logging.debug("[get_vendor]: %s ", all_vendors)
for vname in all_vendors:
vpath = os.path.join(self.vendors_dir, vname)
instance = utils.load_module(vname, vpath)
            #TODO: add more code to catch exceptions or unexpected states
if not instance:
logging.error('no instance %s load from %s', vname, vpath)
continue
if instance.is_this_vendor(host, credential):
return vname
return None
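# Minimal usage sketch (non-authoritative illustration; the switch IP, the
# credential dict layout and the 'mac' plugin name below are assumptions, not
# part of this module):
#
#   manager = HDManager()
#   credential = {'version': '2c', 'community': 'public'}
#   vendor = manager.get_vendor('192.168.1.1', credential)
#   if vendor and manager.is_valid_vendor('192.168.1.1', credential, vendor):
#       data = manager.learn('192.168.1.1', credential, vendor, 'mac', oper='SCAN')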
|
chanceraine/nupic
|
refs/heads/master
|
tests/integration/nupic/engine/vector_file_sensor_test.py
|
34
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file tests VectorFileSensor exhaustively using the sessions interface.
Need to add tests for parameters:
loading and appending CSV files
test for recentFile
"""
import os
import pkg_resources
import unittest2 as unittest
from nupic.engine import Array, Dimensions, Network
g_filename = pkg_resources.resource_filename(__name__, "data/vectorfile.nta")
g_dataFile = pkg_resources.resource_filename(__name__,
"data/vectortestdata.txt")
g_dataFile2 = pkg_resources.resource_filename(__name__,
"data/vectortestdata2.txt")
g_dataFileCSV = pkg_resources.resource_filename(__name__,
"data/vectortestdata.csv")
g_dataFileCSV2 = pkg_resources.resource_filename(__name__,
"data/vectortestdata2.csv")
g_dataFileCSV3 = pkg_resources.resource_filename(__name__,
"data/vectortestdata3.csv")
g_dataFileLF4 = pkg_resources.resource_filename(__name__,
"data/vectortestdata.lf4")
g_dataFileBF4 = pkg_resources.resource_filename(__name__,
"data/vectortestdata.bf4")
g_dataFileIDX = pkg_resources.resource_filename(__name__,
"data/vectortestdata.idx")
class VectorFileSensorTest(unittest.TestCase):
"""Class for testing the VectorFileSensor plugin by loading a known network
with a single VectorFileSensor node and a known data file."""
def setUp(self):
self.filename = g_filename
self.nodeName = "TestSensor"
self.sensorName = "VectorFileSensor"
self.dataFile = g_dataFile
self.dataFile2 = g_dataFile2
self.dataFile3a = g_dataFileCSV
self.dataFile3b = g_dataFileCSV2
self.dataFile3c = g_dataFileCSV3
self.dataFile4 = g_dataFileLF4
self.dataFile5 = g_dataFileBF4
self.dataFile6 = g_dataFileIDX
self.numTests = 333
self.testsPassed = 0
self.testFailures = []
self.sensor = None
def testAll(self):
"""Run all the tests in our suite, catching any exceptions that might be
thrown.
"""
print 'VectorFileSensorTest parameters:'
print 'PYTHONPATH: %s' % os.environ.get('PYTHONPATH', 'NOT SET')
print 'filename: %s' % self.filename
self._testRunWithoutFile()
self._testNetLoad()
self._testFakeLoadFile()
self._testRepeatCount()
self._testUnknownCommand()
# Test maxOutput and activeOutputCount
self._testOutputCounts(0)
self._testLoadFile(self.dataFile, '0', '0')
self._testOutputCounts(5)
# Test a sequence of loads, runs, appends, etc.
self._testLoadFile(self.dataFile, '0', '0')
self._testRun()
self._testLoadFile(self.dataFile2, '', '0')
self._testRun()
self._testLoadFile(self.dataFile2, '2', '0')
self._testRun()
self._testLoadFile(self.dataFile3a, '3', '0')
self._testRun()
self._testLoadFile(self.dataFile4, '4', '0')
self._testRun()
self._testLoadFile(self.dataFile5, '5', '0')
self._testRun()
self._testLoadFile(self.dataFile6, '6', '0')
self._testRun()
self._testPosition()
self._testAppendFile(self.dataFile2, '2', '1', 10)
self._testAppendFile(self.dataFile, '0', '1', 15)
self._testRun()
self._testScaling(self.dataFile3b, '3')
# Test optional categoryOut and resetOut
self.sensor.setParameter('hasCategoryOut', 1)
self.sensor.setParameter('hasResetOut', 1)
self._testLoadFile(self.dataFile3c, '3', '0')
self._testOptionalOutputs()
self.sensor.setParameter('hasCategoryOut', 0)
self.sensor.setParameter('hasResetOut', 0)
def _testNetLoad(self):
"""Test loading a network with this sensor in it."""
n = Network()
r = n.addRegion(self.nodeName, self.sensorName, '{ activeOutputCount: 11}')
r.dimensions = Dimensions([1])
n.save(self.filename)
n = Network(self.filename)
n.initialize()
self.testsPassed += 1
# Check that vectorCount parameter is zero
r = n.regions[self.nodeName]
res = r.getParameter('vectorCount')
self.assertEqual(
res, 0, "getting vectorCount:\n Expected '0', got back '%d'\n" % res)
self.sensor = r
def _testFakeLoadFile(self):
"""Test reading in a fake file."""
# Loading a fake file should throw an exception
with self.assertRaises(RuntimeError):
self.sensor.executeCommand(['loadFile', 'ExistenceIsAnIllusion.txt', '0'])
def _testRunWithoutFile(self):
"""Test running the network without a file loaded. This should be run
before any file has been loaded in!"""
with self.assertRaises(AttributeError):
self.sensor.compute()
def _testRepeatCount(self):
"""Test setting and getting repeat count using parameters."""
# Check default repeat count
n = Network(self.filename)
sensor = n.regions[self.nodeName]
res = sensor.executeCommand(['dump'])
expected = self.sensorName + \
' isLabeled = 0 repeatCount = 1 vectorCount = 0 iterations = 0\n'
self.assertEqual(
res, expected,
"repeat count test:\n expected '%s'\n got '%s'\n" %
(expected, res))
# Set to 42, check it and return it back to 1
sensor.setParameter('repeatCount', 42)
res = sensor.getParameter('repeatCount')
self.assertEqual(
res, 42, "set repeatCount to 42:\n got back '%d'\n" % res)
res = sensor.executeCommand(['dump'])
expected = (self.sensorName +
' isLabeled = 0 repeatCount = 42 vectorCount = 0 '
'iterations = 0\n')
self.assertEqual(
res, expected,
"set to 42 test:\n expected '%s'\n got '%s'\n" %
(expected, res))
sensor.setParameter('repeatCount', 1)
def _testLoadFile(self, dataFile, fileFormat= '', iterations=''):
"""Test reading our sample vector file. The sample file
has 5 vectors of the correct length, plus one with incorrect length.
The sensor should ignore the last line."""
# Now load a real file
if fileFormat != '':
res = self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
else:
res = self.sensor.executeCommand(['loadFile', dataFile])
self.assertTrue(res == '' or
res.startswith('VectorFileSensor read in file'),
'loading a real file: %s' % str(res))
# Check recent file
res = self.sensor.getParameter('recentFile')
self.assertEqual(res, dataFile, 'recent file, got: %s' % (res))
# Check summary of file contents
res = self.sensor.executeCommand(['dump'])
expected = (self.sensorName +
' isLabeled = 0 repeatCount = 1 vectorCount = 5 iterations = ' +
iterations + '\n')
self.assertEqual(res, expected,
'file summary:\n expected "%s"\n got "%s"\n' %
(expected, res))
def _testAppendFile(self, dataFile, fileFormat= '', iterations='',
numVecs=''):
"""Test appending our sample vector file. The sample file
has 5 vectors of the correct length, plus one with incorrect length.
The sensor should ignore the last line."""
# Now load a real file
if fileFormat != '':
res = self.sensor.executeCommand(['appendFile', dataFile, fileFormat])
else:
res = self.sensor.executeCommand(['appendFile', dataFile])
self.assertTrue(res == '' or
res.startswith('VectorFileSensor read in file'),
'loading a real file: %s' % str(res))
# Check recent file
res = self.sensor.getParameter('recentFile')
self.assertEqual(res, dataFile, 'recent file, got: %s' % res)
# Check summary of file contents
res = self.sensor.executeCommand(['dump'])
expected = self.sensorName + ' isLabeled = 0 repeatCount = 1' + \
' vectorCount = '+str(numVecs)+' iterations = ' + iterations + '\n'
self.assertEqual(res, expected,
'file summary:\n expected "%s"\n got "%s"\n' %
(expected, res))
# Check vectorCount parameter
res = self.sensor.getParameter('vectorCount')
self.assertEqual(res, numVecs,
'getting position:\n Expected ' + str(numVecs) +
', got back "%s"\n' % res)
def _testRun(self):
"""This is the basic workhorse test routine. It runs the net several times
to ensure the sensor is outputting the correct values. The routine tests
looping, tests each vector, and tests repeat count. """
# Set repeat count to 3
self.sensor.setParameter('repeatCount', 3)
self.sensor.setParameter('position', 0)
# Run the sensor several times to ensure it is outputting the correct
# values.
for _epoch in [1, 2]: # test looping
for vec in [0, 1, 2, 3, 4]: # test each vector
for _rc in [1, 2, 3]: # test repeatCount
# Run and get outputs
self.sensor.compute()
outputs = self.sensor.getOutputData('dataOut')
# Check outputs
#sum = reduce(lambda x,y:int(x)+int(y),outputs)
self.assertEqual(outputs[vec], vec+1, 'output = %s' % str(outputs))
self.assertEqual(sum(outputs), vec+1, 'output = %s' % str(outputs))
# Set repeat count back to 1
self.sensor.setParameter('repeatCount', 1)
def _testOutputCounts(self, vectorCount):
"""Test maxOutputVectorCount with different repeat counts."""
# Test maxOutput with different repeat counts.
res = self.sensor.getParameter('maxOutputVectorCount')
self.assertEqual(res, vectorCount,
"getting maxOutputVectorCount:\n Expected '" +
str(vectorCount) + "', got back '%d'\n" % (res))
self.sensor.setParameter('repeatCount', 3)
res = self.sensor.getParameter('maxOutputVectorCount')
self.assertEqual(res, 3 * vectorCount,
'getting maxOutputVectorCount:\n Expected ' +
str(3*vectorCount)+', got back "%d"\n' % res)
self.sensor.setParameter('repeatCount', 1)
# Test activeOutputCount
res = self.sensor.getParameter('activeOutputCount')
self.assertEqual(
res, 11,
'getting activeOutputCount :\n Expected 11, got back "%d"\n' % res)
def _testPosition(self):
"""Test setting and getting position parameter. Run compute once to verify
it went to the right position."""
self.sensor.setParameter('position', 2)
self.sensor.compute()
outputs = self.sensor.getOutputData('dataOut')
self.assertEqual(outputs[2], 3, 'output = %s' % str(outputs))
self.assertEqual(sum(outputs), 3, 'output = %s' % str(outputs))
# Now it should have incremented the position
res = self.sensor.getParameter('position')
self.assertEqual(res, 3,
'getting position:\n Expected "3", got back "%d"\n' %
res)
def _testScaling(self, dataFile, fileFormat= ''):
"""Specific tests for setScaleVector, setOffsetVector, and scalingMode"""
# Retrieve scalingMode after a netLoad. Should be 'none'
res = self.sensor.getParameter('scalingMode')
self.assertEqual(res, 'none',
'Getting scalingMode:\n Expected "none", got back "%s"\n' %
res)
# Retrieve scaling and offset after netLoad - should be 1 and zero
# respectively.
a = Array('Real32', 11)
self.sensor.getParameterArray('scaleVector', a)
self.assertEqual(str(a), '[ 1 1 1 1 1 1 1 1 1 1 1 ]',
'Error getting ones scaleVector:\n Got back "%s"\n' %
str(res))
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(str(a), '[ 0 0 0 0 0 0 0 0 0 0 0 ]',
'Error getting zero offsetVector:\n Got back "%s"\n' %
str(res))
# load data file, set scaling and offset to standardForm and check
self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
self.sensor.setParameter('scalingMode', 'standardForm')
self.sensor.getParameterArray('scaleVector', a)
s = ('[ 2.23607 1.11803 0.745356 0.559017 0.447214 2.23607 1.11803 '
'0.745356 0.559017 0.447214 2.23607 ]')
self.assertEqual(
str(a), s,
'Error getting standardForm scaleVector:\n Got back "%s"\n' % res)
o = '[ -0.2 -0.4 -0.6 -0.8 -1 -0.2 -0.4 -0.6 -0.8 -1 -0.2 ]'
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(
str(a), o,
'Error getting standardForm offsetVector:\n Got back "%s"\n' % res)
# set to custom value and check
scaleVector = Array('Real32', 11)
for i, x in enumerate((1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1)):
scaleVector[i] = x
self.sensor.setParameterArray('scaleVector', scaleVector)
self.sensor.getParameterArray('scaleVector', a)
self.assertEqual(str(a), str(scaleVector),
'Error getting modified scaleVector:\n Got back "%s"\n' %
str(res))
offsetVector = Array('Real32', 11)
for i, x in enumerate((1, 2, 3, 4, 1, 1, 1, 1, 1, 2, 1)):
offsetVector[i] = x
self.sensor.setParameterArray('offsetVector', offsetVector)
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(str(a), str(offsetVector),
'Error getting modified offsetVector:\n Got back "%s"\n' %
str(res))
# scalingMode should now be custom
mode = self.sensor.getParameter('scalingMode')
self.assertEqual(
mode, 'custom',
'Getting scalingMode:\n Expected "custom", got back "%s"\n' % res)
# At this point we test loading a data file using loadFile. The scaling
# params should still be active and applied to the new vectors.
res = self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(
str(a), str(offsetVector),
'Error getting modified offsetVector after loadFile:\n Got back '
'"%s"\n' % res)
self.sensor.getParameterArray('scaleVector', a)
self.assertEqual(str(a), str(scaleVector),
'Error getting modified scaleVector after loadFile:\n '
'Got back "%s"\n' % res)
# Set scaling mode back to none and retrieve scaling and offset - should
# be 1 and zero respectively.
self.sensor.setParameter('scalingMode', 'none')
self.sensor.getParameterArray('scaleVector', a)
noScaling = Array('Real32', 11)
for i in range(11):
noScaling[i] = 1
self.assertEqual(str(a), str(noScaling),
'Error getting ones scaleVector:\n Got back "%s"\n' % res)
noOffset = Array('Real32', 11)
for i in range(11):
noOffset[i] = 0
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(str(a), str(noOffset),
'Error getting zero offsetVector:\n Got back "%s"\n' % res)
def _testUnknownCommand(self):
"""Test that exception is thrown when unknown execute command sent."""
with self.assertRaises(RuntimeError):
self.sensor.executeCommand(['nonExistentCommand'])
def _testOptionalOutputs(self):
"""This is the basic workhorse test routine. It runs the net several times
to ensure the sensor is outputting the correct values. The routine tests
looping, tests each vector, and tests repeat count. """
# Set repeat count to 3
self.sensor.setParameter('repeatCount', 3)
self.sensor.setParameter('position', 0)
# Run the sensor several times to ensure it is outputting the correct
# values.
categories = []
resetOuts = []
for _epoch in [1, 2]: # test looping
for vec in [0, 1, 2, 3, 4]: # test each vector
for _rc in [1, 2, 3]: # test repeatCount
# Run and get outputs
self.sensor.compute()
outputs = self.sensor.getOutputData('dataOut')
a = self.sensor.getOutputData('categoryOut')
categories.append(a[0])
a = self.sensor.getOutputData('resetOut')
resetOuts.append(a[0])
# Check outputs
self.assertEqual(outputs[vec], vec+1, 'output = %s' % str(outputs))
self.assertEqual(sum(outputs), vec+1, 'output = %s' % str(outputs))
self.assertEqual(categories, 2 * ([6] * 12 + [8] * 3))
self.assertEqual(resetOuts,
2 * [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
# Set repeat count back to 1
self.sensor.setParameter('repeatCount', 1)
if __name__=='__main__':
unittest.main()
|
tapple/nsize-web
|
refs/heads/master
|
nsize/config/__init__.py
|
4
|
from __future__ import absolute_import
from .local import Local # noqa
from .production import Production # noqa
|
draugiskisprendimai/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/About.py
|
293
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
from com.sun.star.task import XJobExecutor
if __name__ != 'package':
from lib.gui import *
class About(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
self.win = DBModalDialog(60, 50, 175, 115, "About Odoo Report Designer")
fdBigFont = createUnoStruct("com.sun.star.awt.FontDescriptor")
fdBigFont.Width = 20
fdBigFont.Height = 25
fdBigFont.Weight = 120
fdBigFont.Family= 3
oLabelTitle1 = self.win.addFixedText("lblTitle1", 1, 1, 35, 30)
oLabelTitle1.Model.TextColor = 16056320
oLabelTitle1.Model.FontDescriptor = fdBigFont
oLabelTitle1.Model.FontRelief = 1
oLabelTitle1.Text = "Open"
oLabelTitle2 = self.win.addFixedText("lblTitle2", 35, 1, 30, 30)
oLabelTitle2.Model.TextColor = 1
oLabelTitle2.Model.FontDescriptor = fdBigFont
oLabelTitle2.Model.FontRelief = 1
oLabelTitle2.Text = "ERP"
oLabelProdDesc = self.win.addFixedText("lblProdDesc", 1, 30, 173, 75)
oLabelProdDesc.Model.TextColor = 1
fdBigFont.Width = 10
fdBigFont.Height = 11
fdBigFont.Weight = 76
oLabelProdDesc.Model.FontDescriptor = fdBigFont
oLabelProdDesc.Model.Align = 1
oLabelProdDesc.Model.FontRelief = 1
oLabelProdDesc.Model.MultiLine = True
oLabelProdDesc.Text = "This package helps you to create or modify\nreports in Odoo. Once connected to the\nserver, you can design your template of reports\nusing fields and expressions and browsing the\ncomplete structure of Odoo object database."
oLabelFooter = self.win.addFixedText("lblFooter", -1, -1, 173, 25)
oLabelFooter.Model.TextColor = 255
#oLabelFooter.Model.BackgroundColor = 1
oLabelFooter.Model.Border = 2
oLabelFooter.Model.BorderColor = 255
fdBigFont.Width = 8
fdBigFont.Height = 9
fdBigFont.Weight = 100
oLabelFooter.Model.FontDescriptor = fdBigFont
oLabelFooter.Model.Align = 1
oLabelFooter.Model.FontRelief = 1
oLabelFooter.Model.MultiLine = True
sMessage = "Odoo Report Designer v1.0 \nCopyright 2007-TODAY Tiny sprl \nThis product is free software, under the GNU Affero General Public License."
oLabelFooter.Text = sMessage
self.win.doModalDialog("",None)
if __name__<>"package" and __name__=="__main__":
About(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( About, "org.openoffice.openerp.report.about", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vignanl/Plinth
|
refs/heads/master
|
plinth/modules/radicale/forms.py
|
9
|
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Forms for radicale module.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from plinth.forms import ServiceForm
CHOICES = [
('owner_only', _('Only the owner of a calendar/addressbook can view or '
'make changes.')),
('owner_write', _('Any user can view any calendar/addressbook, but only '
'the owner can make changes.')),
('authenticated', _('Any user can view or make changes to any '
'calendar/addressbook.')),
]
class RadicaleForm(ServiceForm):
"""Specialized configuration form for radicale service."""
access_rights = forms.ChoiceField(choices=CHOICES, required=True,
widget=forms.RadioSelect())
|
BenjamenMeyer/eom
|
refs/heads/master
|
eom/utils/redis_pool.py
|
3
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
import redis
_CONF = cfg.CONF
REDIS_GROUP_NAME = 'eom:redis'
OPTIONS = [
cfg.StrOpt('host'),
cfg.StrOpt('port'),
]
_CONF.register_opts(OPTIONS, group=REDIS_GROUP_NAME)
def get_client():
group = _CONF[REDIS_GROUP_NAME]
pool = redis.ConnectionPool(host=group['host'], port=group['port'], db=0)
return redis.Redis(connection_pool=pool)
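# Minimal usage sketch (assumes an oslo.config file with an [eom:redis]
# section that sets host and port has already been parsed into cfg.CONF):
#   client = get_client()
#   client.set('eom:example-key', 'value')
#   print(client.get('eom:example-key'))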
|
whausen/part
|
refs/heads/master
|
src/adhocracy/lib/auth/tag.py
|
2
|
from pylons import tmpl_context as c
from authorization import has
from adhocracy.lib.auth.authorization import NOT_LOGGED_IN
def index(check):
check.perm('tag.show')
def show(check, t):
check.perm('tag.show')
def create(check):
check.valid_email()
check.perm('tag.create')
def edit(check, t):
check.valid_email()
check.perm('tag.edit')
show(check, t)
def delete(check, t):
check.valid_email()
if has('instance.admin'):
return
check.perm('tag.delete')
show(check, t)
check.other(NOT_LOGGED_IN, not c.user)
check.other('tag_creator_is_not_user', t.creator != c.user)
|
BennettRand/Solar-Circuit
|
refs/heads/master
|
solar_circuit/libs/pyModbusTCP/utils.py
|
2
|
# -*- coding: utf-8 -*-
# Python module: Some functions for modbus data mangling
import struct
###############
# bits function
###############
def get_bits_from_int(val_int, val_size=16):
"""Get the list of bits of val_int integer (default size is 16 bits)
Return bits list, least significant bit first. Use list.reverse() if
    needed.
:param val_int: integer value
:type val_int: int
:param val_size: bit size of integer (word = 16, long = 32) (optional)
:type val_size: int
:returns: list of boolean "bits" (least significant first)
:rtype: list
"""
    # allocate a val_size long list
bits = [None] * val_size
# fill bits list with bit items
for i, item in enumerate(bits):
bits[i] = bool((val_int>>i)&0x01)
# return bits list
return bits
#########################
# floating-point function
#########################
def decode_ieee(val_int):
"""Decode Python int (32 bits integer) as an IEEE single precision format
    Supports NaN.
:param val_int: a 32 bit integer as an int Python value
:type val_int: int
:returns: float result
:rtype: float
"""
return struct.unpack("f",struct.pack("I", val_int))[0]
def encode_ieee(val_float):
"""Encode Python float to int (32 bits integer) as an IEEE single precision
    Supports NaN.
:param val_float: float value to convert
:type val_float: float
:returns: IEEE 32 bits (single precision) as Python int
:rtype: int
"""
return struct.unpack("I",struct.pack("f", val_float))[0]
################################
# long format (32 bits) function
################################
def word_list_to_long(val_list, big_endian=True):
"""Word list (16 bits int) to long list (32 bits int)
    By default word_list_to_long() uses big endian order. To use little endian,
    set the big_endian param to False.
:param val_list: list of 16 bits int value
:type val_list: list
:param big_endian: True for big endian/False for little (optional)
:type big_endian: bool
    :returns: list of 32 bits int values
:rtype: list
"""
# allocate list for long int
long_list = [None] * int(len(val_list)/2)
    # fill long_list with 32-bit values built from pairs of 16-bit words
for i, item in enumerate(long_list):
if big_endian:
long_list[i] = (val_list[i*2]<<16) + val_list[(i*2)+1]
else:
long_list[i] = (val_list[(i*2)+1]<<16) + val_list[i*2]
# return long list
return long_list
#########################################################
# 2's complement of int value (scalar and list) functions
#########################################################
def get_2comp(val_int, val_size=16):
"""Get the 2's complement of Python int val_int
:param val_int: int value to apply 2's complement
:type val_int: int
:param val_size: bit size of int value (word = 16, long = 32) (optional)
:type val_size: int
:returns: 2's complement result
:rtype: int
"""
# test MSBit (1 for negative)
if (val_int&(1<<(val_size-1))):
# do complement
val_int = val_int - (1<<val_size)
return val_int
def get_list_2comp(val_list, val_size=16):
"""Get the 2's complement of Python list val_list
:param val_list: list of int value to apply 2's complement
:type val_list: list
:param val_size: bit size of int value (word = 16, long = 32) (optional)
:type val_size: int
:returns: 2's complement result
:rtype: list
"""
return [get_2comp(val, val_size) for val in val_list]
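# Minimal usage sketch (illustrative values, worked out from the functions
# above rather than taken from the project's docs):
#   >>> get_bits_from_int(0x05, val_size=4)
#   [True, False, True, False]
#   >>> word_list_to_long([0x0001, 0x0002])   # big endian -> 0x00010002
#   [65538]
#   >>> get_2comp(0xFFFF)                     # 16-bit word holding -1
#   -1
#   >>> decode_ieee(encode_ieee(1.0))
#   1.0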
|
mwoodson1/youtube-8m-competition
|
refs/heads/master
|
losses.py
|
1
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides definitions for non-regularized training or test losses."""
import tensorflow as tf
class BaseLoss(object):
"""Inherit from this class when implementing new losses."""
def calculate_loss(self, unused_predictions, unused_labels, **unused_params):
"""Calculates the average loss of the examples in a mini-batch.
Args:
unused_predictions: a 2-d tensor storing the prediction scores, in which
each row represents a sample in the mini-batch and each column
represents a class.
unused_labels: a 2-d tensor storing the labels, which has the same shape
as the unused_predictions. The labels must be in the range of 0 and 1.
unused_params: loss specific parameters.
Returns:
A scalar loss tensor.
"""
raise NotImplementedError()
class CrossEntropyLoss(BaseLoss):
"""Calculate the cross entropy loss between the predictions and labels.
"""
def calculate_loss(self, predictions, labels, **unused_params):
with tf.name_scope("loss_xent"):
predictions = tf.where(tf.is_nan(predictions), tf.zeros_like(predictions), predictions)
epsilon = 10e-6
float_labels = tf.cast(labels, tf.float32)
cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
1 - float_labels) * tf.log(1 - predictions + epsilon)
cross_entropy_loss = tf.negative(cross_entropy_loss)
return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
class HingeLoss(BaseLoss):
"""Calculate the hinge loss between the predictions and labels.
Note the subgradient is used in the backpropagation, and thus the optimization
may converge slower. The predictions trained by the hinge loss are between -1
and +1.
"""
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
with tf.name_scope("loss_hinge"):
float_labels = tf.cast(labels, tf.float32)
all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
hinge_loss = tf.maximum(
all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
class SoftmaxLoss(BaseLoss):
"""Calculate the softmax loss between the predictions and labels.
The function calculates the loss in the following way: first we feed the
predictions to the softmax activation function and then we calculate
  the negative dot product between the log of the softmax activations and the
  normalized ground truth labels.
It is an extension to the one-hot label. It allows for more than one positive
labels for each sample.
"""
def calculate_loss(self, predictions, labels, **unused_params):
with tf.name_scope("loss_softmax"):
epsilon = 10e-8
float_labels = tf.cast(labels, tf.float32)
# l1 normalization (labels are no less than 0)
label_rowsum = tf.maximum(
tf.reduce_sum(float_labels, 1, keep_dims=True),
epsilon)
norm_float_labels = tf.div(float_labels, label_rowsum)
softmax_outputs = tf.nn.softmax(predictions)
softmax_loss = tf.negative(tf.reduce_sum(
tf.multiply(norm_float_labels, tf.log(softmax_outputs)), 1))
return tf.reduce_mean(softmax_loss)
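# Minimal usage sketch (TF 1.x graph mode, as assumed throughout this file;
# the logits/labels placeholders and num_classes are invented for illustration):
#   logits = tf.placeholder(tf.float32, [None, num_classes])
#   labels = tf.placeholder(tf.bool, [None, num_classes])
#   predictions = tf.nn.sigmoid(logits)
#   loss = CrossEntropyLoss().calculate_loss(predictions, labels)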
|
liutang123/spark
|
refs/heads/master
|
python/pyspark/sql/streaming.py
|
7
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import json
if sys.version >= '3':
intlike = int
basestring = unicode = str
else:
intlike = (int, long)
from abc import ABCMeta, abstractmethod
from pyspark import since, keyword_only
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import _to_seq
from pyspark.sql.readwriter import OptionUtils, to_str
from pyspark.sql.types import *
from pyspark.sql.utils import StreamingQueryException
__all__ = ["StreamingQuery", "StreamingQueryManager", "DataStreamReader", "DataStreamWriter"]
class StreamingQuery(object):
"""
A handle to a query that is executing continuously in the background as new data arrives.
All these methods are thread-safe.
.. note:: Evolving
.. versionadded:: 2.0
"""
def __init__(self, jsq):
self._jsq = jsq
@property
@since(2.0)
def id(self):
"""Returns the unique id of this query that persists across restarts from checkpoint data.
That is, this id is generated when a query is started for the first time, and
will be the same every time it is restarted from checkpoint data.
There can only be one query with the same id active in a Spark cluster.
Also see, `runId`.
"""
return self._jsq.id().toString()
@property
@since(2.1)
def runId(self):
"""Returns the unique id of this query that does not persist across restarts. That is, every
query that is started (or restarted from checkpoint) will have a different runId.
"""
return self._jsq.runId().toString()
@property
@since(2.0)
def name(self):
"""Returns the user-specified name of the query, or null if not specified.
This name can be specified in the `org.apache.spark.sql.streaming.DataStreamWriter`
as `dataframe.writeStream.queryName("query").start()`.
This name, if set, must be unique across all active queries.
"""
return self._jsq.name()
@property
@since(2.0)
def isActive(self):
"""Whether this streaming query is currently active or not.
"""
return self._jsq.isActive()
@since(2.0)
def awaitTermination(self, timeout=None):
"""Waits for the termination of `this` query, either by :func:`query.stop()` or by an
exception. If the query has terminated with an exception, then the exception will be thrown.
If `timeout` is set, it returns whether the query has terminated or not within the
`timeout` seconds.
If the query has terminated, then all subsequent calls to this method will either return
immediately (if the query was terminated by :func:`stop()`), or throw the exception
immediately (if the query has terminated with exception).
throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
"""
if timeout is not None:
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError("timeout must be a positive integer or float. Got %s" % timeout)
return self._jsq.awaitTermination(int(timeout * 1000))
else:
return self._jsq.awaitTermination()
@property
@since(2.1)
def status(self):
"""
Returns the current status of the query.
"""
return json.loads(self._jsq.status().json())
@property
@since(2.1)
def recentProgress(self):
"""Returns an array of the most recent [[StreamingQueryProgress]] updates for this query.
The number of progress updates retained for each stream is configured by Spark session
configuration `spark.sql.streaming.numRecentProgressUpdates`.
"""
return [json.loads(p.json()) for p in self._jsq.recentProgress()]
@property
@since(2.1)
def lastProgress(self):
"""
Returns the most recent :class:`StreamingQueryProgress` update of this streaming query or
None if there were no progress updates
:return: a map
"""
lastProgress = self._jsq.lastProgress()
if lastProgress:
return json.loads(lastProgress.json())
else:
return None
@since(2.0)
def processAllAvailable(self):
"""Blocks until all available data in the source has been processed and committed to the
sink. This method is intended for testing.
.. note:: In the case of continually arriving data, this method may block forever.
            Additionally, this method is only guaranteed to block until data that has been
            synchronously appended to a stream source prior to invocation has been processed
            (i.e. `getOffset` must immediately reflect the addition).
"""
return self._jsq.processAllAvailable()
@since(2.0)
def stop(self):
"""Stop this streaming query.
"""
self._jsq.stop()
@since(2.1)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> sq = sdf.writeStream.format('memory').queryName('query_explain').start()
>>> sq.processAllAvailable() # Wait a bit to generate the runtime plans.
>>> sq.explain()
== Physical Plan ==
...
>>> sq.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> sq.stop()
"""
# Cannot call `_jsq.explain(...)` because it will print in the JVM process.
# We should print it in the Python process.
print(self._jsq.explainInternal(extended))
@since(2.1)
def exception(self):
"""
:return: the StreamingQueryException if the query was terminated by an exception, or None.
"""
if self._jsq.exception().isDefined():
je = self._jsq.exception().get()
msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info
stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace()))
return StreamingQueryException(msg, stackTrace)
else:
return None
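# Illustrative usage sketch, not part of the upstream module: one way to poll a
# StreamingQuery handle from a driver program. The sink choice, query name and
# the decision to stop once no data is pending are assumptions.
#
#     query = sdf.writeStream.format("memory").queryName("my_query").start()
#     while query.isActive:
#         progress = query.lastProgress              # dict, or None at startup
#         if progress is not None:
#             print(progress["batchId"], progress["numInputRows"])
#         if not query.status["isDataAvailable"]:
#             query.stop()                           # stop once the source drains
#     if query.exception() is not None:
#         raise query.exception()                    # surface a terminal error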
class StreamingQueryManager(object):
"""A class to manage all the :class:`StreamingQuery` StreamingQueries active.
.. note:: Evolving
.. versionadded:: 2.0
"""
def __init__(self, jsqm):
self._jsqm = jsqm
@property
@ignore_unicode_prefix
@since(2.0)
def active(self):
"""Returns a list of active queries associated with this SQLContext
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sqm = spark.streams
>>> # get the list of active streaming queries
>>> [q.name for q in sqm.active]
[u'this_query']
>>> sq.stop()
"""
return [StreamingQuery(jsq) for jsq in self._jsqm.active()]
@ignore_unicode_prefix
@since(2.0)
def get(self, id):
"""Returns an active query from this SQLContext or throws exception if an active query
        with this id doesn't exist.
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sq.name
u'this_query'
>>> sq = spark.streams.get(sq.id)
>>> sq.isActive
True
>>> sq = sqlContext.streams.get(sq.id)
>>> sq.isActive
True
>>> sq.stop()
"""
return StreamingQuery(self._jsqm.get(id))
@since(2.0)
def awaitAnyTermination(self, timeout=None):
"""Wait until any of the queries on the associated SQLContext has terminated since the
creation of the context, or since :func:`resetTerminated()` was called. If any query was
terminated with an exception, then the exception will be thrown.
If `timeout` is set, it returns whether the query has terminated or not within the
`timeout` seconds.
If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will
either return immediately (if the query was terminated by :func:`query.stop()`),
or throw the exception immediately (if the query was terminated with exception). Use
:func:`resetTerminated()` to clear past terminations and wait for new terminations.
In the case where multiple queries have terminated since :func:`resetTermination()`
was called, if any query has terminated with exception, then :func:`awaitAnyTermination()`
will throw any of the exception. For correctly documenting exceptions across multiple
queries, users need to stop all of them after any of them terminates with exception, and
then check the `query.exception()` for each query.
throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
"""
if timeout is not None:
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError("timeout must be a positive integer or float. Got %s" % timeout)
return self._jsqm.awaitAnyTermination(int(timeout * 1000))
else:
return self._jsqm.awaitAnyTermination()
@since(2.0)
def resetTerminated(self):
"""Forget about past terminated queries so that :func:`awaitAnyTermination()` can be used
again to wait for new terminations.
>>> spark.streams.resetTerminated()
"""
self._jsqm.resetTerminated()
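# Illustrative usage sketch, not part of the upstream module: waiting on several
# concurrent queries through the manager. The query names and the 30-second
# timeout are assumptions.
#
#     spark.streams.resetTerminated()                # forget past terminations
#     q1 = sdf.writeStream.format("memory").queryName("q1").start()
#     q2 = sdf.writeStream.format("memory").queryName("q2").start()
#     finished = spark.streams.awaitAnyTermination(timeout=30)
#     if finished:
#         # awaitAnyTermination() only reports the first termination, so check
#         # each remaining query explicitly
#         for q in spark.streams.active:
#             print(q.name, q.isActive)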
class DataStreamReader(OptionUtils):
"""
Interface used to load a streaming :class:`DataFrame` from external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`spark.readStream`
to access this.
.. note:: Evolving.
.. versionadded:: 2.0
"""
def __init__(self, spark):
self._jreader = spark._ssql_ctx.readStream()
self._spark = spark
def _df(self, jdf):
from pyspark.sql.dataframe import DataFrame
return DataFrame(jdf, self._spark)
@since(2.0)
def format(self, source):
"""Specifies the input data source format.
.. note:: Evolving.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> s = spark.readStream.format("text")
"""
self._jreader = self._jreader.format(source)
return self
@since(2.0)
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
.. note:: Evolving.
:param schema: a :class:`pyspark.sql.types.StructType` object
>>> s = spark.readStream.schema(sdf_schema)
"""
from pyspark.sql import SparkSession
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
spark = SparkSession.builder.getOrCreate()
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
return self
@since(2.0)
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
.. note:: Evolving.
>>> s = spark.readStream.option("x", 1)
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
@since(2.0)
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
.. note:: Evolving.
>>> s = spark.readStream.options(x="1", y=2)
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
@since(2.0)
def load(self, path=None, format=None, schema=None, **options):
"""Loads a data stream from a data source and returns it as a :class`DataFrame`.
.. note:: Evolving.
:param path: optional string for file-system backed data sources.
:param format: optional string for format of the data source. Default to 'parquet'.
:param schema: optional :class:`pyspark.sql.types.StructType` for the input schema.
:param options: all other string options
>>> json_sdf = spark.readStream.format("json") \\
... .schema(sdf_schema) \\
... .load(tempfile.mkdtemp())
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
True
"""
if format is not None:
self.format(format)
if schema is not None:
self.schema(schema)
self.options(**options)
if path is not None:
if type(path) != str or len(path.strip()) == 0:
raise ValueError("If the path is provided for stream, it needs to be a " +
"non-empty string. List of paths are not supported.")
return self._df(self._jreader.load(path))
else:
return self._df(self._jreader.load())
@since(2.0)
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
multiLine=None):
"""
Loads a JSON file stream and returns the results as a :class:`DataFrame`.
`JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
If the ``schema`` parameter is not specified, this function goes
through the input once to determine the input schema.
.. note:: Evolving.
:param path: string represents path to the JSON dataset,
or RDD of Strings storing JSON objects.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema.
:param primitivesAsString: infers all primitive values as a string type. If None is set,
it uses the default value, ``false``.
:param prefersDecimal: infers all floating-point values as a decimal type. If the values
do not fit in decimal, then it infers them as doubles. If None is
set, it uses the default value, ``false``.
:param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
it uses the default value, ``false``.
:param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
it uses the default value, ``false``.
:param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
set, it uses the default value, ``true``.
:param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
set, it uses the default value, ``false``.
:param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
using backslash quoting mechanism. If None is
set, it uses the default value, ``false``.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
record, and puts the malformed string into a field configured by \
``columnNameOfCorruptRecord``. To keep corrupt records, an user can set \
a string type field named ``columnNameOfCorruptRecord`` in an user-defined \
schema. If a schema does not have the field, it drops corrupt records during \
parsing. When inferring a schema, it implicitly adds a \
``columnNameOfCorruptRecord`` field in an output schema.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param multiLine: parse one record, which may span multiple lines, per file. If None is
set, it uses the default value, ``false``.
>>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
True
"""
self._set_opts(
schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
timestampFormat=timestampFormat, multiLine=multiLine)
if isinstance(path, basestring):
return self._df(self._jreader.json(path))
else:
raise TypeError("path can be only a single string")
@since(2.0)
def parquet(self, path):
"""Loads a Parquet file stream, returning the result as a :class:`DataFrame`.
You can set the following Parquet-specific option(s) for reading Parquet files:
* ``mergeSchema``: sets whether we should merge schemas collected from all \
Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \
The default value is specified in ``spark.sql.parquet.mergeSchema``.
.. note:: Evolving.
>>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp())
>>> parquet_sdf.isStreaming
True
>>> parquet_sdf.schema == sdf_schema
True
"""
if isinstance(path, basestring):
return self._df(self._jreader.parquet(path))
else:
raise TypeError("path can be only a single string")
@ignore_unicode_prefix
@since(2.0)
def text(self, path):
"""
Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a
string column named "value", and followed by partitioned columns if there
are any.
Each line in the text file is a new row in the resulting DataFrame.
.. note:: Evolving.
        :param path: string for the input path.
>>> text_sdf = spark.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
>>> "value" in str(text_sdf.schema)
True
"""
if isinstance(path, basestring):
return self._df(self._jreader.text(path))
else:
raise TypeError("path can be only a single string")
@since(2.0)
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
columnNameOfCorruptRecord=None, multiLine=None):
"""Loads a CSV file stream and returns the result as a :class:`DataFrame`.
This function will go through the input once to determine the input schema if
``inferSchema`` is enabled. To avoid going through the entire data once, disable
``inferSchema`` option or specify the schema explicitly using ``schema``.
.. note:: Evolving.
:param path: string, or list of strings, for input path(s).
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema.
:param sep: sets the single character as a separator for each field and value.
If None is set, it uses the default value, ``,``.
:param encoding: decodes the CSV files by the given encoding type. If None is set,
it uses the default value, ``UTF-8``.
:param quote: sets the single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If you would like to turn off quotations, you need to set an
empty string.
:param escape: sets the single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``.
:param comment: sets the single character used for skipping lines beginning with this
character. By default (None), it is disabled.
:param header: uses the first line as names of columns. If None is set, it uses the
default value, ``false``.
:param inferSchema: infers the input schema automatically from data. It requires one extra
pass over the data. If None is set, it uses the default value, ``false``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string. Since 2.0.1, this ``nullValue`` param
applies to all supported types including the string type.
:param nanValue: sets the string representation of a non-number value. If None is set, it
uses the default value, ``NaN``.
:param positiveInf: sets the string representation of a positive infinity value. If None
is set, it uses the default value, ``Inf``.
:param negativeInf: sets the string representation of a negative infinity value. If None
is set, it uses the default value, ``Inf``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param maxColumns: defines a hard limit of how many columns a record can have. If None is
set, it uses the default value, ``20480``.
:param maxCharsPerColumn: defines the maximum number of characters allowed for any given
value being read. If None is set, it uses the default value,
``-1`` meaning unlimited length.
:param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
If specified, it is ignored.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
record, and puts the malformed string into a field configured by \
``columnNameOfCorruptRecord``. To keep corrupt records, an user can set \
a string type field named ``columnNameOfCorruptRecord`` in an \
user-defined schema. If a schema does not have the field, it drops corrupt \
records during parsing. When a length of parsed CSV tokens is shorter than \
an expected length of a schema, it sets `null` for extra fields.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param multiLine: parse one record, which may span multiple lines. If None is
set, it uses the default value, ``false``.
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema)
>>> csv_sdf.isStreaming
True
>>> csv_sdf.schema == sdf_schema
True
"""
self._set_opts(
schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment,
header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
maxCharsPerColumn=maxCharsPerColumn,
maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine)
if isinstance(path, basestring):
return self._df(self._jreader.csv(path))
else:
raise TypeError("path can be only a single string")
class DataStreamWriter(object):
"""
Interface used to write a streaming :class:`DataFrame` to external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`DataFrame.writeStream`
to access this.
.. note:: Evolving.
.. versionadded:: 2.0
"""
def __init__(self, df):
self._df = df
self._spark = df.sql_ctx
self._jwrite = df._jdf.writeStream()
def _sq(self, jsq):
from pyspark.sql.streaming import StreamingQuery
return StreamingQuery(jsq)
@since(2.0)
def outputMode(self, outputMode):
"""Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
Options include:
* `append`:Only the new rows in the streaming DataFrame/Dataset will be written to
the sink
* `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink
           every time there are some updates
* `update`:only the rows that were updated in the streaming DataFrame/Dataset will be
written to the sink every time there are some updates. If the query doesn't contain
aggregations, it will be equivalent to `append` mode.
.. note:: Evolving.
>>> writer = sdf.writeStream.outputMode('append')
"""
if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0:
raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode)
self._jwrite = self._jwrite.outputMode(outputMode)
return self
@since(2.0)
def format(self, source):
"""Specifies the underlying output data source.
.. note:: Evolving.
:param source: string, name of the data source, which for now can be 'parquet'.
>>> writer = sdf.writeStream.format('json')
"""
self._jwrite = self._jwrite.format(source)
return self
@since(2.0)
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
.. note:: Evolving.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
@since(2.0)
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
.. note:: Evolving.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
@since(2.0)
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
.. note:: Evolving.
:param cols: name of columns
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
@since(2.0)
def queryName(self, queryName):
"""Specifies the name of the :class:`StreamingQuery` that can be started with
:func:`start`. This name must be unique among all the currently active queries
in the associated SparkSession.
.. note:: Evolving.
:param queryName: unique name for the query
>>> writer = sdf.writeStream.queryName('streaming_query')
"""
if not queryName or type(queryName) != str or len(queryName.strip()) == 0:
raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName)
self._jwrite = self._jwrite.queryName(queryName)
return self
@keyword_only
@since(2.0)
def trigger(self, processingTime=None, once=None):
"""Set the trigger for the stream query. If this is not set it will run the query as fast
as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.
.. note:: Evolving.
:param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'.
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
>>> # trigger the query for just once batch of data
>>> writer = sdf.writeStream.trigger(once=True)
"""
jTrigger = None
if processingTime is not None:
if once is not None:
raise ValueError('Multiple triggers not allowed.')
if type(processingTime) != str or len(processingTime.strip()) == 0:
raise ValueError('Value for processingTime must be a non empty string. Got: %s' %
processingTime)
interval = processingTime.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime(
interval)
elif once is not None:
if once is not True:
raise ValueError('Value for once must be True. Got: %s' % once)
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once()
else:
raise ValueError('No trigger provided')
self._jwrite = self._jwrite.trigger(jTrigger)
return self
@ignore_unicode_prefix
@since(2.0)
def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None,
**options):
"""Streams the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
.. note:: Evolving.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param outputMode: specifies how data of a streaming DataFrame/Dataset is written to a
streaming sink.
* `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the
sink
* `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink
           every time there are some updates
* `update`:only the rows that were updated in the streaming DataFrame/Dataset will be
written to the sink every time there are some updates. If the query doesn't contain
aggregations, it will be equivalent to `append` mode.
:param partitionBy: names of partitioning columns
:param queryName: unique name for the query
:param options: All other string options. You may want to provide a `checkpointLocation`
for most streams, however it is not required for a `memory` stream.
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sq.isActive
True
>>> sq.name
u'this_query'
>>> sq.stop()
>>> sq.isActive
False
>>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start(
... queryName='that_query', outputMode="append", format='memory')
>>> sq.name
u'that_query'
>>> sq.isActive
True
>>> sq.stop()
"""
self.options(**options)
if outputMode is not None:
self.outputMode(outputMode)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if queryName is not None:
self.queryName(queryName)
if path is None:
return self._sq(self._jwrite.start())
else:
return self._sq(self._jwrite.start(path))
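# Illustrative usage sketch, not part of the upstream module: a minimal
# end-to-end pipeline wiring a reader to a writer. The paths, trigger interval
# and query name are assumptions; `checkpointLocation` is the option most
# fault-tolerant sinks require, though the memory sink works without it.
#
#     counts = (spark.readStream.schema(schema).json("/data/in")
#                    .groupBy("user").count())
#     query = (counts.writeStream
#                    .outputMode("complete")
#                    .format("memory")
#                    .queryName("user_counts")
#                    .trigger(processingTime="10 seconds")
#                    .option("checkpointLocation", "/tmp/chk")
#                    .start())
#     query.awaitTermination()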
def _test():
import doctest
import os
import tempfile
from pyspark.sql import Row, SparkSession, SQLContext
    import py4j.protocol
    import pyspark.sql.streaming
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.streaming.__dict__.copy()
try:
spark = SparkSession.builder.getOrCreate()
except py4j.protocol.Py4JError:
spark = SparkSession(sc)
globs['tempfile'] = tempfile
globs['os'] = os
globs['spark'] = spark
globs['sqlContext'] = SQLContext.getOrCreate(spark.sparkContext)
globs['sdf'] = \
spark.readStream.format('text').load('python/test_support/sql/streaming')
globs['sdf_schema'] = StructType([StructField("data", StringType(), False)])
globs['df'] = \
globs['spark'].readStream.format('text').load('python/test_support/sql/streaming')
(failure_count, test_count) = doctest.testmod(
pyspark.sql.streaming, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['spark'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
tianweizhang/nova
|
refs/heads/v0
|
nova/virt/hardware.py
|
3
|
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
from oslo.config import cfg
import six
from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
virt_cpu_opts = [
cfg.StrOpt('vcpu_pin_set',
help='Defines which pcpus that instance vcpus can use. '
'For example, "4-12,^8,15"'),
]
CONF = cfg.CONF
CONF.register_opts(virt_cpu_opts)
LOG = logging.getLogger(__name__)
def get_vcpu_pin_set():
"""Parsing vcpu_pin_set config.
Returns a set of pcpu ids can be used by instances.
"""
if not CONF.vcpu_pin_set:
return None
cpuset_ids = parse_cpu_spec(CONF.vcpu_pin_set)
if not cpuset_ids:
raise exception.Invalid(_("No CPUs available after parsing %r") %
CONF.vcpu_pin_set)
return cpuset_ids
def parse_cpu_spec(spec):
"""Parse a CPU set specification.
:param spec: cpu set string eg "1-4,^3,6"
Each element in the list is either a single
CPU number, a range of CPU numbers, or a
caret followed by a CPU number to be excluded
from a previous range.
:returns: a set of CPU indexes
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in spec.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available CPU ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single CPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
return cpuset_ids
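# Illustrative examples, not part of the upstream module: how parse_cpu_spec()
# treats ranges and exclusions. Exclusions ("^n") are collected separately and
# subtracted at the end, so their position in the string does not matter.
#
#     parse_cpu_spec("1-4,^3,6")    # -> set([1, 2, 4, 6])
#     parse_cpu_spec("4-12,^8,15")  # -> set([4, 5, 6, 7, 9, 10, 11, 12, 15])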
def format_cpu_spec(cpuset, allow_ranges=True):
"""Format a libvirt CPU range specification.
:param cpuset: set (or list) of CPU indexes
Format a set/list of CPU indexes as a libvirt CPU
    range specification. If allow_ranges is true, it
will try to detect continuous ranges of CPUs,
otherwise it will just list each CPU index explicitly.
:returns: a formatted CPU range string
"""
# We attempt to detect ranges, but don't bother with
# trying to do range negations to minimize the overall
# spec string length
if allow_ranges:
ranges = []
previndex = None
for cpuindex in sorted(cpuset):
if previndex is None or previndex != (cpuindex - 1):
ranges.append([])
ranges[-1].append(cpuindex)
previndex = cpuindex
parts = []
for entry in ranges:
if len(entry) == 1:
parts.append(str(entry[0]))
else:
parts.append("%d-%d" % (entry[0], entry[len(entry) - 1]))
return ",".join(parts)
else:
return ",".join(str(id) for id in sorted(cpuset))
def get_number_of_serial_ports(flavor, image_meta):
"""Get the number of serial consoles from the flavor or image
:param flavor: Flavor object to read extra specs from
:param image_meta: Image object to read image metadata from
If flavor extra specs is not set, then any image meta value is permitted.
    If flavor extra specs *is* set, then this provides the default serial
    port count. The image meta is permitted to override the extra specs, but
    *only* with a lower value, i.e.
- flavor hw:serial_port_count=4
VM gets 4 serial ports
- flavor hw:serial_port_count=4 and image hw_serial_port_count=2
VM gets 2 serial ports
- image hw_serial_port_count=6
VM gets 6 serial ports
- flavor hw:serial_port_count=4 and image hw_serial_port_count=6
Abort guest boot - forbidden to exceed flavor value
:returns: number of serial ports
"""
def get_number(obj, property):
num_ports = obj.get(property)
if num_ports is not None:
try:
num_ports = int(num_ports)
except ValueError:
raise exception.ImageSerialPortNumberInvalid(
num_ports=num_ports, property=property)
return num_ports
image_meta_prop = (image_meta or {}).get('properties', {})
flavor_num_ports = get_number(flavor.extra_specs, "hw:serial_port_count")
image_num_ports = get_number(image_meta_prop, "hw_serial_port_count")
    if flavor_num_ports is not None and image_num_ports is not None:
if image_num_ports > flavor_num_ports:
raise exception.ImageSerialPortNumberExceedFlavorValue()
return image_num_ports
return flavor_num_ports or image_num_ports or 1
class VirtCPUTopology(object):
def __init__(self, sockets, cores, threads):
"""Create a new CPU topology object
:param sockets: number of sockets, at least 1
:param cores: number of cores, at least 1
:param threads: number of threads, at least 1
Create a new CPU topology object representing the
number of sockets, cores and threads to use for
the virtual instance.
"""
self.sockets = sockets
self.cores = cores
self.threads = threads
def score(self, wanttopology):
"""Calculate score for the topology against a desired configuration
:param wanttopology: VirtCPUTopology instance for preferred topology
Calculate a score indicating how well this topology
matches against a preferred topology. A score of 3
indicates an exact match for sockets, cores and threads.
A score of 2 indicates a match of sockets & cores or
sockets & threads or cores and threads. A score of 1
indicates a match of sockets or cores or threads. A
score of 0 indicates no match
:returns: score in range 0 (worst) to 3 (best)
"""
score = 0
if (wanttopology.sockets != -1 and
self.sockets == wanttopology.sockets):
score = score + 1
if (wanttopology.cores != -1 and
self.cores == wanttopology.cores):
score = score + 1
if (wanttopology.threads != -1 and
self.threads == wanttopology.threads):
score = score + 1
return score
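    # Illustrative examples, not part of the upstream module: score() counts
    # how many of sockets/cores/threads match the preferred topology; a -1 in
    # the preferred topology means "don't care" and is simply skipped.
    #
    #     VirtCPUTopology(2, 4, 1).score(VirtCPUTopology(2, 4, 1))    # -> 3
    #     VirtCPUTopology(2, 4, 1).score(VirtCPUTopology(2, -1, 1))   # -> 2
    #     VirtCPUTopology(2, 4, 1).score(VirtCPUTopology(1, 8, 2))    # -> 0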
@staticmethod
def get_topology_constraints(flavor, image_meta):
"""Get the topology constraints declared in flavor or image
:param flavor: Flavor object to read extra specs from
:param image_meta: Image object to read image metadata from
Gets the topology constraints from the configuration defined
in the flavor extra specs or the image metadata. In the flavor
this will look for
hw:cpu_sockets - preferred socket count
hw:cpu_cores - preferred core count
hw:cpu_threads - preferred thread count
hw:cpu_maxsockets - maximum socket count
hw:cpu_maxcores - maximum core count
hw:cpu_maxthreads - maximum thread count
In the image metadata this will look at
hw_cpu_sockets - preferred socket count
hw_cpu_cores - preferred core count
hw_cpu_threads - preferred thread count
hw_cpu_maxsockets - maximum socket count
hw_cpu_maxcores - maximum core count
hw_cpu_maxthreads - maximum thread count
The image metadata must be strictly lower than any values
set in the flavor. All values are, however, optional.
This will return a pair of VirtCPUTopology instances,
the first giving the preferred socket/core/thread counts,
and the second giving the upper limits on socket/core/
thread counts.
exception.ImageVCPULimitsRangeExceeded will be raised
if the maximum counts set against the image exceed
the maximum counts set against the flavor
exception.ImageVCPUTopologyRangeExceeded will be raised
if the preferred counts set against the image exceed
the maximum counts set against the image or flavor
:returns: (preferred topology, maximum topology)
"""
# Obtain the absolute limits from the flavor
flvmaxsockets = int(flavor.extra_specs.get(
"hw:cpu_max_sockets", 65536))
flvmaxcores = int(flavor.extra_specs.get(
"hw:cpu_max_cores", 65536))
flvmaxthreads = int(flavor.extra_specs.get(
"hw:cpu_max_threads", 65536))
LOG.debug("Flavor limits %(sockets)d:%(cores)d:%(threads)d",
{"sockets": flvmaxsockets,
"cores": flvmaxcores,
"threads": flvmaxthreads})
# Get any customized limits from the image
maxsockets = int(image_meta.get("properties", {})
.get("hw_cpu_max_sockets", flvmaxsockets))
maxcores = int(image_meta.get("properties", {})
.get("hw_cpu_max_cores", flvmaxcores))
maxthreads = int(image_meta.get("properties", {})
.get("hw_cpu_max_threads", flvmaxthreads))
LOG.debug("Image limits %(sockets)d:%(cores)d:%(threads)d",
{"sockets": maxsockets,
"cores": maxcores,
"threads": maxthreads})
# Image limits are not permitted to exceed the flavor
# limits. ie they can only lower what the flavor defines
if ((maxsockets > flvmaxsockets) or
(maxcores > flvmaxcores) or
(maxthreads > flvmaxthreads)):
raise exception.ImageVCPULimitsRangeExceeded(
sockets=maxsockets,
cores=maxcores,
threads=maxthreads,
maxsockets=flvmaxsockets,
maxcores=flvmaxcores,
maxthreads=flvmaxthreads)
# Get any default preferred topology from the flavor
flvsockets = int(flavor.extra_specs.get("hw:cpu_sockets", -1))
flvcores = int(flavor.extra_specs.get("hw:cpu_cores", -1))
flvthreads = int(flavor.extra_specs.get("hw:cpu_threads", -1))
LOG.debug("Flavor pref %(sockets)d:%(cores)d:%(threads)d",
{"sockets": flvsockets,
"cores": flvcores,
"threads": flvthreads})
# If the image limits have reduced the flavor limits
# we might need to discard the preferred topology
# from the flavor
if ((flvsockets > maxsockets) or
(flvcores > maxcores) or
(flvthreads > maxthreads)):
flvsockets = flvcores = flvthreads = -1
# Finally see if the image has provided a preferred
# topology to use
sockets = int(image_meta.get("properties", {})
.get("hw_cpu_sockets", -1))
cores = int(image_meta.get("properties", {})
.get("hw_cpu_cores", -1))
threads = int(image_meta.get("properties", {})
.get("hw_cpu_threads", -1))
LOG.debug("Image pref %(sockets)d:%(cores)d:%(threads)d",
{"sockets": sockets,
"cores": cores,
"threads": threads})
# Image topology is not permitted to exceed image/flavor
# limits
if ((sockets > maxsockets) or
(cores > maxcores) or
(threads > maxthreads)):
raise exception.ImageVCPUTopologyRangeExceeded(
sockets=sockets,
cores=cores,
threads=threads,
maxsockets=maxsockets,
maxcores=maxcores,
maxthreads=maxthreads)
# If no preferred topology was set against the image
# then use the preferred topology from the flavor
# We use 'and' not 'or', since if any value is set
# against the image this invalidates the entire set
# of values from the flavor
if sockets == -1 and cores == -1 and threads == -1:
sockets = flvsockets
cores = flvcores
threads = flvthreads
LOG.debug("Chosen %(sockets)d:%(cores)d:%(threads)d limits "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d",
{"sockets": sockets, "cores": cores,
"threads": threads, "maxsockets": maxsockets,
"maxcores": maxcores, "maxthreads": maxthreads})
return (VirtCPUTopology(sockets, cores, threads),
VirtCPUTopology(maxsockets, maxcores, maxthreads))
@staticmethod
def get_possible_topologies(vcpus, maxtopology, allow_threads):
"""Get a list of possible topologies for a vCPU count
:param vcpus: total number of CPUs for guest instance
:param maxtopology: VirtCPUTopology for upper limits
:param allow_threads: if the hypervisor supports CPU threads
Given a total desired vCPU count and constraints on the
maximum number of sockets, cores and threads, return a
list of VirtCPUTopology instances that represent every
possible topology that satisfies the constraints.
exception.ImageVCPULimitsRangeImpossible is raised if
it is impossible to achieve the total vcpu count given
the maximum limits on sockets, cores & threads.
:returns: list of VirtCPUTopology instances
"""
# Clamp limits to number of vcpus to prevent
# iterating over insanely large list
maxsockets = min(vcpus, maxtopology.sockets)
maxcores = min(vcpus, maxtopology.cores)
maxthreads = min(vcpus, maxtopology.threads)
if not allow_threads:
maxthreads = 1
LOG.debug("Build topologies for %(vcpus)d vcpu(s) "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d",
{"vcpus": vcpus, "maxsockets": maxsockets,
"maxcores": maxcores, "maxthreads": maxthreads})
# Figure out all possible topologies that match
# the required vcpus count and satisfy the declared
# limits. If the total vCPU count were very high
# it might be more efficient to factorize the vcpu
# count and then only iterate over its factors, but
# that's overkill right now
possible = []
for s in range(1, maxsockets + 1):
for c in range(1, maxcores + 1):
for t in range(1, maxthreads + 1):
if t * c * s == vcpus:
possible.append(VirtCPUTopology(s, c, t))
# We want to
# - Minimize threads (ie larger sockets * cores is best)
# - Prefer sockets over cores
possible = sorted(possible, reverse=True,
key=lambda x: (x.sockets * x.cores,
x.sockets,
x.threads))
LOG.debug("Got %d possible topologies", len(possible))
if len(possible) == 0:
raise exception.ImageVCPULimitsRangeImpossible(vcpus=vcpus,
sockets=maxsockets,
cores=maxcores,
threads=maxthreads)
return possible
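    # Illustrative example, not part of the upstream module: for 8 vcpus with
    # limits of 8 sockets, 8 cores and 2 threads, the enumeration yields every
    # sockets*cores*threads product equal to 8, ordered to prefer a larger
    # sockets*cores value and then more sockets:
    #
    #     8:1:1, 4:2:1, 2:4:1, 1:8:1, 4:1:2, 2:2:2, 1:4:2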
@staticmethod
def sort_possible_topologies(possible, wanttopology):
"""Sort the topologies in order of preference
:param possible: list of VirtCPUTopology instances
:param wanttopology: VirtCPUTopology for preferred topology
This takes the list of possible topologies and resorts
it such that those configurations which most closely
match the preferred topology are first.
:returns: sorted list of VirtCPUTopology instances
"""
# Look at possible topologies and score them according
# to how well they match the preferred topologies
# We don't use python's sort(), since we want to
# preserve the sorting done when populating the
# 'possible' list originally
scores = collections.defaultdict(list)
for topology in possible:
score = topology.score(wanttopology)
scores[score].append(topology)
# Build list of all possible topologies sorted
# by the match score, best match first
desired = []
desired.extend(scores[3])
desired.extend(scores[2])
desired.extend(scores[1])
desired.extend(scores[0])
return desired
@staticmethod
def get_desirable_configs(flavor, image_meta, allow_threads=True):
"""Get desired CPU topologies according to settings
:param flavor: Flavor object to query extra specs from
:param image_meta: ImageMeta object to query properties from
:param allow_threads: if the hypervisor supports CPU threads
Look at the properties set in the flavor extra specs and
the image metadata and build up a list of all possible
valid CPU topologies that can be used in the guest. Then
return this list sorted in order of preference.
:returns: sorted list of VirtCPUTopology instances
"""
LOG.debug("Getting desirable topologies for flavor %(flavor)s "
"and image_meta %(image_meta)s",
{"flavor": flavor, "image_meta": image_meta})
preferred, maximum = (
VirtCPUTopology.get_topology_constraints(flavor,
image_meta))
possible = VirtCPUTopology.get_possible_topologies(
flavor.vcpus, maximum, allow_threads)
desired = VirtCPUTopology.sort_possible_topologies(
possible, preferred)
return desired
@staticmethod
def get_best_config(flavor, image_meta, allow_threads=True):
"""Get bst CPU topology according to settings
:param flavor: Flavor object to query extra specs from
:param image_meta: ImageMeta object to query properties from
:param allow_threads: if the hypervisor supports CPU threads
Look at the properties set in the flavor extra specs and
the image metadata and build up a list of all possible
valid CPU topologies that can be used in the guest. Then
return the best topology to use
:returns: a VirtCPUTopology instance for best topology
"""
return VirtCPUTopology.get_desirable_configs(flavor,
image_meta,
allow_threads)[0]
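# Illustrative walk-through, not part of the upstream module: the overall flow
# for picking a guest CPU topology. The flavor extra specs and vcpu count are
# assumptions.
#
#     flavor: vcpus=8, extra_specs={"hw:cpu_max_threads": "1",
#                                   "hw:cpu_sockets": "2"}
#     image_meta: {"properties": {}}
#
#     get_topology_constraints()  -> preferred 2:-1:-1, maximum 65536:65536:1
#     get_possible_topologies()   -> 8:1:1, 4:2:1, 2:4:1, 1:8:1
#     sort_possible_topologies()  -> 2:4:1 first (it matches the preferred sockets)
#     get_best_config()           -> VirtCPUTopology(2, 4, 1)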
class VirtNUMATopologyCell(object):
"""Class for reporting NUMA resources in a cell
The VirtNUMATopologyCell class represents the
hardware resources present in a NUMA cell.
"""
def __init__(self, id, cpuset, memory):
"""Create a new NUMA Cell
:param id: integer identifier of cell
:param cpuset: set containing list of CPU indexes
:param memory: RAM measured in KiB
Creates a new NUMA cell object to record the hardware
resources.
:returns: a new NUMA cell object
"""
super(VirtNUMATopologyCell, self).__init__()
self.id = id
self.cpuset = cpuset
self.memory = memory
def _to_dict(self):
return {'cpus': format_cpu_spec(self.cpuset, allow_ranges=False),
'mem': {'total': self.memory},
'id': self.id}
@classmethod
def _from_dict(cls, data_dict):
cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
return cls(cell_id, cpuset, memory)
class VirtNUMATopologyCellLimit(VirtNUMATopologyCell):
def __init__(self, id, cpuset, memory, cpu_limit, memory_limit):
"""Create a new NUMA Cell with usage
:param id: integer identifier of cell
:param cpuset: set containing list of CPU indexes
:param memory: RAM measured in KiB
:param cpu_limit: maximum number of CPUs allocated
        :param memory_limit: maximum RAM allocated in KiB
        Creates a new NUMA cell object to represent the maximum hardware
        resources and limits. The number of CPUs specified
        by the @cpu_limit parameter may be larger than the number
        of bits set in @cpuset if CPU overcommit is used. Likewise
        the amount of RAM specified by the @memory_limit parameter
may be larger than the available RAM in @memory if RAM
overcommit is used.
:returns: a new NUMA cell object
"""
super(VirtNUMATopologyCellLimit, self).__init__(
id, cpuset, memory)
self.cpu_limit = cpu_limit
self.memory_limit = memory_limit
def _to_dict(self):
data_dict = super(VirtNUMATopologyCellLimit, self)._to_dict()
data_dict['mem']['limit'] = self.memory_limit
data_dict['cpu_limit'] = self.cpu_limit
return data_dict
@classmethod
def _from_dict(cls, data_dict):
cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cpu_limit = data_dict.get('cpu_limit', len(cpuset))
memory_limit = data_dict.get('mem', {}).get('limit', memory)
cell_id = data_dict.get('id')
return cls(cell_id, cpuset, memory, cpu_limit, memory_limit)
class VirtNUMATopologyCellUsage(VirtNUMATopologyCell):
"""Class for reporting NUMA resources and usage in a cell
The VirtNUMATopologyCellUsage class specializes
VirtNUMATopologyCell to include information about the
utilization of hardware resources in a NUMA cell.
"""
def __init__(self, id, cpuset, memory, cpu_usage=0, memory_usage=0):
"""Create a new NUMA Cell with usage
:param id: integer identifier of cell
:param cpuset: set containing list of CPU indexes
:param memory: RAM measured in KiB
:param cpu_usage: number of CPUs allocated
:param memory_usage: RAM allocated in KiB
Creates a new NUMA cell object to record the hardware
resources and utilization. The number of CPUs specified
by the @cpu_usage parameter may be larger than the number
of bits set in @cpuset if CPU overcommit is used. Likewise
the amount of RAM specified by the @memory_usage parameter
may be larger than the available RAM in @memory if RAM
overcommit is used.
:returns: a new NUMA cell object
"""
super(VirtNUMATopologyCellUsage, self).__init__(
id, cpuset, memory)
self.cpu_usage = cpu_usage
self.memory_usage = memory_usage
@classmethod
def fit_instance_cell(cls, host_cell, instance_cell, limit_cell=None):
"""Check if a instance cell can fit and set it's cell id
:param host_cell: host cell to fit the instance cell onto
:param instance_cell: instance cell we want to fit
:param limit_cell: cell with limits of the host_cell if any
Make sure we can fit the instance cell onto a host cell and if so,
return a new VirtNUMATopologyCell with the id set to that of
the host, or None if the cell exceeds the limits of the host
:returns: a new instance cell or None
"""
# NOTE (ndipanov): do not allow an instance to overcommit against
# itself on any NUMA cell
if (instance_cell.memory > host_cell.memory or
len(instance_cell.cpuset) > len(host_cell.cpuset)):
return None
if limit_cell:
memory_usage = host_cell.memory_usage + instance_cell.memory
cpu_usage = host_cell.cpu_usage + len(instance_cell.cpuset)
if (memory_usage > limit_cell.memory_limit or
cpu_usage > limit_cell.cpu_limit):
return None
return VirtNUMATopologyCell(
host_cell.id, instance_cell.cpuset, instance_cell.memory)
def _to_dict(self):
data_dict = super(VirtNUMATopologyCellUsage, self)._to_dict()
data_dict['mem']['used'] = self.memory_usage
data_dict['cpu_usage'] = self.cpu_usage
return data_dict
@classmethod
def _from_dict(cls, data_dict):
cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
cpu_usage = data_dict.get('cpu_usage', 0)
memory = data_dict.get('mem', {}).get('total', 0)
memory_usage = data_dict.get('mem', {}).get('used', 0)
cell_id = data_dict.get('id')
return cls(cell_id, cpuset, memory, cpu_usage, memory_usage)
class VirtNUMATopology(object):
"""Base class for tracking NUMA topology information
The VirtNUMATopology class represents the NUMA hardware
topology for memory and CPUs in any machine. It is
later specialized for handling either guest instance
or compute host NUMA topology.
"""
def __init__(self, cells=None):
"""Create a new NUMA topology object
:param cells: list of VirtNUMATopologyCell instances
"""
super(VirtNUMATopology, self).__init__()
self.cells = cells or []
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self._to_dict()))
def _to_dict(self):
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
return cls(cells=[cls.cell_class._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
def to_json(self):
return jsonutils.dumps(self._to_dict())
@classmethod
def from_json(cls, json_string):
return cls._from_dict(jsonutils.loads(json_string))
class VirtNUMAInstanceTopology(VirtNUMATopology):
"""Class to represent the topology configured for a guest
instance. It provides helper APIs to determine configuration
    from the metadata specified against the flavor and/or
    disk image.
"""
cell_class = VirtNUMATopologyCell
@staticmethod
def _get_flavor_or_image_prop(flavor, image_meta, propname):
flavor_val = flavor.get('extra_specs', {}).get("hw:" + propname)
image_val = image_meta.get("hw_" + propname)
if flavor_val is not None:
if image_val is not None:
raise exception.ImageNUMATopologyForbidden(
name='hw_' + propname)
return flavor_val
else:
return image_val
@classmethod
def _get_constraints_manual(cls, nodes, flavor, image_meta):
cells = []
totalmem = 0
availcpus = set(range(flavor['vcpus']))
for node in range(nodes):
cpus = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.%d" % node)
mem = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_mem.%d" % node)
# We're expecting both properties set, so
# raise an error if either is missing
if cpus is None or mem is None:
raise exception.ImageNUMATopologyIncomplete()
mem = int(mem)
cpuset = parse_cpu_spec(cpus)
for cpu in cpuset:
if cpu > (flavor['vcpus'] - 1):
raise exception.ImageNUMATopologyCPUOutOfRange(
cpunum=cpu, cpumax=(flavor['vcpus'] - 1))
if cpu not in availcpus:
raise exception.ImageNUMATopologyCPUDuplicates(
cpunum=cpu)
availcpus.remove(cpu)
cells.append(VirtNUMATopologyCell(node, cpuset, mem))
totalmem = totalmem + mem
if availcpus:
raise exception.ImageNUMATopologyCPUsUnassigned(
cpuset=str(availcpus))
if totalmem != flavor['memory_mb']:
raise exception.ImageNUMATopologyMemoryOutOfRange(
memsize=totalmem,
memtotal=flavor['memory_mb'])
return cls(cells)
@classmethod
def _get_constraints_auto(cls, nodes, flavor, image_meta):
if ((flavor['vcpus'] % nodes) > 0 or
(flavor['memory_mb'] % nodes) > 0):
raise exception.ImageNUMATopologyAsymmetric()
cells = []
for node in range(nodes):
cpus = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.%d" % node)
mem = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_mem.%d" % node)
# We're not expecting any properties set, so
# raise an error if there are any
if cpus is not None or mem is not None:
raise exception.ImageNUMATopologyIncomplete()
ncpus = int(flavor['vcpus'] / nodes)
mem = int(flavor['memory_mb'] / nodes)
start = node * ncpus
cpuset = set(range(start, start + ncpus))
cells.append(VirtNUMATopologyCell(node, cpuset, mem))
return cls(cells)
@classmethod
def get_constraints(cls, flavor, image_meta):
nodes = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_nodes")
if nodes is None:
return None
nodes = int(nodes)
# We'll pick what path to go down based on whether
# anything is set for the first node. Both paths
# have logic to cope with inconsistent property usage
auto = cls._get_flavor_or_image_prop(
flavor, image_meta, "numa_cpus.0") is None
if auto:
return cls._get_constraints_auto(
nodes, flavor, image_meta)
else:
return cls._get_constraints_manual(
nodes, flavor, image_meta)
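# Illustrative walk-through, not part of the upstream module: how the automatic
# NUMA path splits a flavor evenly across nodes. The flavor values are
# assumptions.
#
#     flavor: vcpus=4, memory_mb=2048, extra_specs={"hw:numa_nodes": "2"}
#     image_meta: {}  (no hw_numa_* properties)
#
#     get_constraints() -> VirtNUMAInstanceTopology with two cells:
#         cell 0: cpuset {0, 1}, mem 1024
#         cell 1: cpuset {2, 3}, mem 1024
#
# Setting any "hw:numa_cpus.N"/"hw:numa_mem.N" key instead selects the manual
# path, which requires every node to be described explicitly.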
class VirtNUMALimitTopology(VirtNUMATopology):
"""Class to represent the max resources of a compute node used
for checking oversubscription limits.
"""
cell_class = VirtNUMATopologyCellLimit
class VirtNUMAHostTopology(VirtNUMATopology):
"""Class represents the NUMA configuration and utilization
of a compute node. As well as exposing the overall topology
it tracks the utilization of the resources by guest instances
"""
cell_class = VirtNUMATopologyCellUsage
@staticmethod
def can_fit_instances(host, instances):
"""Test if the instance topology can fit into the host
Returns True if all the cells of the all the instance topologies in
'instances' exist in the given 'host' topology. False otherwise.
"""
if not host:
return True
host_cells = set(cell.id for cell in host.cells)
instances_cells = [set(cell.id for cell in instance.cells)
for instance in instances]
return all(instance_cells <= host_cells
for instance_cells in instances_cells)
@classmethod
def fit_instance_to_host(cls, host_topology, instance_topology,
limits_topology=None):
"""Fit the instance topology onto the host topology given the limits
:param host_topology: VirtNUMAHostTopology object to fit an instance on
:param instance_topology: VirtNUMAInstanceTopology object to be fitted
:param limits_topology: VirtNUMALimitTopology that defines limits
Given a host and instance topology and optionally limits - this method
will attempt to fit instance cells onto all permutations of host cells
by calling the fit_instance_cell method, and return a new
        VirtNUMAInstanceTopology with its cell ids set to the host cell ids of
the first successful permutation, or None.
"""
if (not (host_topology and instance_topology) or
len(host_topology) < len(instance_topology)):
return
else:
if limits_topology is None:
limits_topology_cells = itertools.repeat(
None, len(host_topology))
else:
limits_topology_cells = limits_topology.cells
# TODO(ndipanov): We may want to sort permutations differently
# depending on whether we want packing/spreading over NUMA nodes
for host_cell_perm in itertools.permutations(
zip(host_topology.cells, limits_topology_cells),
len(instance_topology)
):
cells = []
for (host_cell, limit_cell), instance_cell in zip(
host_cell_perm, instance_topology.cells):
got_cell = cls.cell_class.fit_instance_cell(
host_cell, instance_cell, limit_cell)
if got_cell is None:
break
cells.append(got_cell)
if len(cells) == len(host_cell_perm):
return VirtNUMAInstanceTopology(cells=cells)
@classmethod
def usage_from_instances(cls, host, instances, free=False):
"""Get host topology usage
:param host: VirtNUMAHostTopology with usage information
:param instances: list of VirtNUMAInstanceTopology
:param free: If True usage of the host will be decreased
Sum the usage from all @instances to report the overall
host topology usage
:returns: VirtNUMAHostTopology including usage information
"""
if host is None:
return
instances = instances or []
cells = []
sign = -1 if free else 1
for hostcell in host.cells:
memory_usage = hostcell.memory_usage
cpu_usage = hostcell.cpu_usage
for instance in instances:
for instancecell in instance.cells:
if instancecell.id == hostcell.id:
memory_usage = (
memory_usage + sign * instancecell.memory)
cpu_usage = cpu_usage + sign * len(instancecell.cpuset)
cell = cls.cell_class(
hostcell.id, hostcell.cpuset, hostcell.memory,
max(0, cpu_usage), max(0, memory_usage))
cells.append(cell)
return cls(cells)
@classmethod
def claim_test(cls, host, instances, limits=None):
"""Test if we can claim an instance on the host with given limits.
:param host: VirtNUMAHostTopology with usage information
:param instances: list of VirtNUMAInstanceTopology
:param limits: VirtNUMALimitTopology with max values set. Should
match the host topology otherwise
:returns: None if the claim succeeds or text explaining the error.
"""
if not (host and instances):
return
if not cls.can_fit_instances(host, instances):
return (_("Requested instance NUMA topology cannot fit "
"the given host NUMA topology."))
if not limits:
return
claimed_host = cls.usage_from_instances(host, instances)
for claimed_cell, limit_cell in zip(claimed_host.cells, limits.cells):
if (claimed_cell.memory_usage > limit_cell.memory_limit or
claimed_cell.cpu_usage > limit_cell.cpu_limit):
return (_("Requested instance NUMA topology is too large for "
"the given host NUMA topology limits."))
# TODO(ndipanov): Remove when all code paths are using objects
def instance_topology_from_instance(instance):
"""Convenience method for getting the numa_topology out of instances
Since we may get an Instance as either a dict, a db object, or an actual
    Instance object, this makes sure we get back either None, or an instance
of objects.InstanceNUMATopology class.
"""
if isinstance(instance, objects.Instance):
# NOTE (ndipanov): This may cause a lazy-load of the attribute
instance_numa_topology = instance.numa_topology
else:
if 'numa_topology' in instance:
instance_numa_topology = instance['numa_topology']
elif 'uuid' in instance:
try:
instance_numa_topology = (
objects.InstanceNUMATopology.get_by_instance_uuid(
context.get_admin_context(), instance['uuid'])
)
except exception.NumaTopologyNotFound:
instance_numa_topology = None
else:
instance_numa_topology = None
if instance_numa_topology:
if isinstance(instance_numa_topology, six.string_types):
instance_numa_topology = VirtNUMAInstanceTopology.from_json(
instance_numa_topology)
elif isinstance(instance_numa_topology, dict):
# NOTE (ndipanov): A horrible hack so that we can use this in the
# scheduler, since the InstanceNUMATopology object is serialized
# raw using the obj_base.obj_to_primitive, (which is buggy and will
# give us a dict with a list of InstanceNUMACell objects), and then
# passed to jsonutils.to_primitive, which will make a dict out of
# those objects. All of this is done by
# scheduler.utils.build_request_spec called in the conductor.
#
# Remove when request_spec is a proper object itself!
dict_cells = instance_numa_topology.get('cells')
if dict_cells:
cells = [VirtNUMATopologyCell(cell['id'],
set(cell['cpuset']),
cell['memory'])
for cell in dict_cells]
instance_numa_topology = VirtNUMAInstanceTopology(cells=cells)
return instance_numa_topology
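# Illustrative sketch, not part of the original module: the dict branch of
# instance_topology_from_instance() above, fed a hypothetical instance dict
# of the shape described in the scheduler comment. All values are made up.
def _example_topology_from_dict_sketch():
    fake_instance = {
        'numa_topology': {
            'cells': [{'id': 0, 'cpuset': [0, 1], 'memory': 512},
                      {'id': 1, 'cpuset': [2, 3], 'memory': 512}],
        }
    }
    # Returns a VirtNUMAInstanceTopology rebuilt from the plain dict.
    return instance_topology_from_instance(fake_instance)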
# TODO(ndipanov): Remove when all code paths are using objects
def host_topology_and_format_from_host(host):
"""Convenience method for getting the numa_topology out of hosts
Since we may get a host as either a dict, a db object, or an actual
ComputeNode object, or an instance of HostState class, this makes sure we
    get back either None, or an instance of the VirtNUMAHostTopology class.
:returns: A two-tuple, first element is the topology itself or None, second
is a boolean set to True if topology was in json format.
"""
was_json = False
try:
host_numa_topology = host.get('numa_topology')
except AttributeError:
host_numa_topology = host.numa_topology
if host_numa_topology is not None and isinstance(
host_numa_topology, six.string_types):
was_json = True
host_numa_topology = VirtNUMAHostTopology.from_json(host_numa_topology)
return host_numa_topology, was_json
# TODO(ndipanov): Remove when all code paths are using objects
def get_host_numa_usage_from_instance(host, instance, free=False,
never_serialize_result=False):
"""Calculate new 'numa_usage' of 'host' from 'instance' NUMA usage
This is a convenience method to help us handle the fact that we use several
different types throughout the code (ComputeNode and Instance objects,
dicts, scheduler HostState) which may have both json and deserialized
versions of VirtNUMATopology classes.
Handles all the complexity without polluting the class method with it.
:param host: nova.objects.ComputeNode instance, or a db object or dict
:param instance: nova.objects.Instance instance, or a db object or dict
    :param free: if True the returned topology will have its usage
decreased instead.
:param never_serialize_result: if True result will always be an instance of
VirtNUMAHostTopology class.
:returns: numa_usage in the format it was on the host or
VirtNUMAHostTopology instance if never_serialize_result was True
"""
instance_numa_topology = instance_topology_from_instance(instance)
if instance_numa_topology:
instance_numa_topology = [instance_numa_topology]
host_numa_topology, jsonify_result = host_topology_and_format_from_host(
host)
updated_numa_topology = (
VirtNUMAHostTopology.usage_from_instances(
host_numa_topology, instance_numa_topology, free=free))
if updated_numa_topology is not None:
if jsonify_result and not never_serialize_result:
updated_numa_topology = updated_numa_topology.to_json()
return updated_numa_topology
|
Mierdin/devstack-odl
|
refs/heads/master
|
files/pip-1.4.1/build/lib/pip/vendor/distlib/database.py
|
79
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import hashlib
import logging
import os
import sys
import zipimport
from . import DistlibException
from .compat import StringIO, configparser, string_types
from .version import get_scheme, UnsupportedVersionError
from .markers import interpret
from .metadata import Metadata
from .util import (parse_requirement, cached_property, get_export_entry,
CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
DIST_FILES = ('INSTALLER', 'METADATA', 'RECORD', 'REQUESTED', 'RESOURCES',
'EXPORTS', 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
for path in self.path:
realpath = os.path.realpath(path)
if not os.path.isdir(realpath):
continue
for dir in os.listdir(realpath):
dist_path = os.path.join(realpath, dir)
if self._include_dist and dir.endswith(DISTINFO_EXT):
yield new_dist_class(dist_path, env=self)
elif self._include_egg and dir.endswith(('.egg-info',
'.egg')):
yield old_dist_class(dist_path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
                         any runs of non-alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
This function only returns the first result found, since no more than
        one value is expected. If the directory is not found, returns ``None``.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if not version is None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_components = p.rsplit(' ', 1)
if len(p_components) == 1 or matcher is None:
if name == p_components[0]:
yield dist
break
else:
p_name, p_ver = p_components
if len(p_ver) < 2 or p_ver[0] != '(' or p_ver[-1] != ')':
raise DistlibException(
'distribution %r has invalid Provides field: %r' %
(dist.name, p))
p_ver = p_ver[1:-1] # trim off the parenthesis
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
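# Illustrative sketch, not part of distlib itself: a minimal use of the
# DistributionPath class above. The helper name is hypothetical; it lists
# whatever is installed on the given path (sys.path by default), including
# legacy egg metadata.
def _example_list_installed_sketch(path=None):
    dist_path = DistributionPath(path, include_egg=True)
    return sorted(dist.name_and_version for dist in dist_path.get_distributions())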
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.md5_digest = None
self.extras = None # additional features requested during installation
@property
def download_url(self):
"""
The download URL for this distribution.
"""
return self.metadata.download_url
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata['Provides-Dist']
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return self.filter_requirements(plist)
@property
def requires(self):
rlist = self.metadata['Requires-Dist']
return self.filter_requirements(rlist)
@property
def setup_requires(self):
rlist = self.metadata['Setup-Requires-Dist']
return self.filter_requirements(rlist)
@property
def test_requires(self):
rlist = self.metadata['Requires-Dist']
return self.filter_requirements(rlist, extras=['test'])
@property
def doc_requires(self):
rlist = self.metadata['Requires-Dist']
return self.filter_requirements(rlist, extras=['doc'])
def filter_requirements(self, rlist, context=None, extras=None):
result = set()
marked = []
for req in rlist:
if ';' not in req:
result.add(req)
else:
marked.append(req.split(';', 1))
if marked:
if context is None:
context = {}
if extras is None:
extras = self.extras
if not extras:
extras = [None]
else:
extras = list(extras) # leave original alone
extras.append(None)
for extra in extras:
context['extra'] = extra
for r, marker in marked:
if interpret(marker, context):
result.add(r.strip())
return result
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
# Note this is similar to code in make_graph - to be refactored
for p in self.provides:
vm = scheme.matcher(p)
if vm.key != name:
continue
version = vm.exact_version
assert version
try:
result = matcher.match(version)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
Return a textual representation of this instance,
"""
if self.download_url:
suffix = ' [%s]' % self.download_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
version and download_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.download_url == other.download_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.download_url)
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
else:
hasher = getattr(hashlib, hasher)
prefix = '%s=' % self.hasher
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
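# Illustrative sketch, not part of distlib itself: reproduce, with the
# standard library alone, the '<hasher>=<urlsafe base64 digest, padding
# stripped>' format that get_hash() above produces when a hasher is set.
def _example_hash_format_sketch(data=b'example file contents'):
    digest = hashlib.sha256(data).digest()
    encoded = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
    return 'sha256=%s' % encoded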
class InstalledDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``METADATA`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used)."""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
metadata_path = os.path.join(path, 'METADATA')
metadata = Metadata(path=metadata_path, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
path = self.get_distinfo_file('REQUESTED')
self.requested = os.path.exists(path)
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
path = self.get_distinfo_file('RECORD')
with CSVReader(path) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
rf = self.get_distinfo_file('EXPORTS')
if os.path.exists(rf):
result = self.read_exports(rf)
return result
def read_exports(self, filename=None):
"""
Read exports data from a file in .ini format.
:param filename: An absolute pathname of the file to read. If not
specified, the EXPORTS file in the .dist-info
directory of the distribution is read.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
rf = filename or self.get_distinfo_file('EXPORTS')
if os.path.exists(rf):
cp = configparser.ConfigParser()
cp.read(rf)
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
entry.dist = self
entries[name] = entry
return result
def write_exports(self, exports, filename=None):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
:param filename: The absolute pathname of the file to write to. If not
specified, the EXPORTS file in the .dist-info
directory is written to.
"""
rf = filename or self.get_distinfo_file('EXPORTS')
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
with open(rf, 'w') as f:
cp.write(f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
path = self.get_distinfo_file('RESOURCES')
with CSVReader(path) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = os.path.join(self.path, 'RECORD')
logger.info('creating %s', record_path)
if dry_run:
return
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = os.path.join(self.path, 'RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: string
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: %r' %
path)
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata['Name'], metadata['Version'])
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata['Name'], metadata['Version'])
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path* must be the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with open(req_path, 'r') as fp:
lines = fp.read().splitlines()
except IOError:
return reqs
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
requires = zipf.get_data('EGG-INFO/requires.txt')
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
path = os.path.join(path, 'PKG-INFO')
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires(req_path)
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
if metadata['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in metadata:
del metadata[field]
metadata['Requires-Dist'] += requires
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, hash, size in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self, local=False):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, local=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter local: If *local* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type local: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if local:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
if not label is None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
comps = p.strip().rsplit(" ", 1)
name = comps[0]
version = None
if len(comps) == 2:
version = comps[1]
if len(version) < 3 or version[0] != '(' or version[-1] != ')':
logger.warning('distribution %r has ill-formed '
'provides field: %r', dist.name, p)
continue
# don't raise an exception. Legacy installed distributions
# could have all manner of metadata
#raise DistlibException('distribution %r has ill-formed '
# 'provides field: %r' % (dist.name, p))
version = version[1:-1] # trim off parenthesis
# Add name in lower case for case-insensitivity
name = name.lower()
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.requires | dist.setup_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
md = Metadata(**kwargs)
md['Name'] = name
md['Version'] = version
return Distribution(md)
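# Illustrative sketch, not part of distlib itself: wire two synthetic
# distributions into a DependencyGraph by hand and topologically sort them.
# The package names and the '>=1.0' label are arbitrary, and make_dist() is
# assumed to work with just a name and version, as its docstring suggests.
def _example_dependency_graph_sketch():
    dist_a = make_dist('pkg-a', '1.0')
    dist_b = make_dist('pkg-b', '2.0')
    graph = DependencyGraph()
    graph.add_distribution(dist_a)
    graph.add_distribution(dist_b)
    graph.add_edge(dist_b, dist_a, '>=1.0')  # pkg-b depends on pkg-a
    ordered, cyclic = graph.topological_sort()
    # ordered lists dependencies first ([pkg-a, pkg-b] here); cyclic is empty.
    return ordered, cyclic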
|
chrisdickinson/nojs
|
refs/heads/master
|
build/android/emma_coverage_stats_test.py
|
6
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=protected-access
import unittest
from xml.etree import ElementTree
import emma_coverage_stats
from pylib.constants import host_paths
with host_paths.SysPath(host_paths.PYMOCK_PATH):
import mock # pylint: disable=import-error
EMPTY_COVERAGE_STATS_DICT = {
'files': {},
'patch': {
'incremental': {
'covered': 0, 'total': 0
}
}
}
class _EmmaHtmlParserTest(unittest.TestCase):
"""Tests for _EmmaHtmlParser.
Uses modified EMMA report HTML that contains only the subset of tags needed
for test verification.
"""
def setUp(self):
self.emma_dir = 'fake/dir/'
self.parser = emma_coverage_stats._EmmaHtmlParser(self.emma_dir)
self.simple_html = '<TR><TD CLASS="p">Test HTML</TD></TR>'
self.index_html = (
'<HTML>'
'<BODY>'
'<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'<TABLE CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'<TABLE CLASS="it" CELLSPACING="0">'
'</TABLE>'
'<TABLE CELLSPACING="0" WIDTH="100%">'
'<TR>'
'<TH CLASS="f">name</TH>'
'<TH>class, %</TH>'
'<TH>method, %</TH>'
'<TH>block, %</TH>'
'<TH>line, %</TH>'
'</TR>'
'<TR CLASS="o">'
'<TD><A HREF="_files/0.html"'
'>org.chromium.chrome.browser</A></TD>'
'<TD CLASS="h">0% (0/3)</TD>'
'</TR>'
'<TR>'
'<TD><A HREF="_files/1.html"'
'>org.chromium.chrome.browser.tabmodel</A></TD>'
'<TD CLASS="h">0% (0/8)</TD>'
'</TR>'
'</TABLE>'
'<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'</BODY>'
'</HTML>'
)
self.package_1_class_list_html = (
'<HTML>'
'<BODY>'
'<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'<TABLE CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'<TABLE CELLSPACING="0" WIDTH="100%">'
'<TR>'
'<TH CLASS="f">name</TH>'
'<TH>class, %</TH>'
'<TH>method, %</TH>'
'<TH>block, %</TH>'
'<TH>line, %</TH>'
'</TR>'
'<TR CLASS="o">'
'<TD><A HREF="1e.html">IntentHelper.java</A></TD>'
'<TD CLASS="h">0% (0/3)</TD>'
'<TD CLASS="h">0% (0/9)</TD>'
'<TD CLASS="h">0% (0/97)</TD>'
'<TD CLASS="h">0% (0/26)</TD>'
'</TR>'
'</TABLE>'
'<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'</BODY>'
'</HTML>'
)
self.package_2_class_list_html = (
'<HTML>'
'<BODY>'
'<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'<TABLE CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'<TABLE CELLSPACING="0" WIDTH="100%">'
'<TR>'
'<TH CLASS="f">name</TH>'
'<TH>class, %</TH>'
'<TH>method, %</TH>'
'<TH>block, %</TH>'
'<TH>line, %</TH>'
'</TR>'
'<TR CLASS="o">'
'<TD><A HREF="1f.html">ContentSetting.java</A></TD>'
'<TD CLASS="h">0% (0/1)</TD>'
'</TR>'
'<TR>'
'<TD><A HREF="20.html">DevToolsServer.java</A></TD>'
'</TR>'
'<TR CLASS="o">'
'<TD><A HREF="21.html">FileProviderHelper.java</A></TD>'
'</TR>'
'<TR>'
'<TD><A HREF="22.html">ContextualMenuBar.java</A></TD>'
'</TR>'
'<TR CLASS="o">'
'<TD><A HREF="23.html">AccessibilityUtil.java</A></TD>'
'</TR>'
'<TR>'
'<TD><A HREF="24.html">NavigationPopup.java</A></TD>'
'</TR>'
'</TABLE>'
'<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
'</TABLE>'
'</BODY>'
'</HTML>'
)
self.partially_covered_tr_html = (
'<TR CLASS="p">'
'<TD CLASS="l" TITLE="78% line coverage (7 out of 9)">108</TD>'
'<TD TITLE="78% line coverage (7 out of 9 instructions)">'
'if (index < 0 || index = mSelectors.size()) index = 0;</TD>'
'</TR>'
)
self.covered_tr_html = (
'<TR CLASS="c">'
'<TD CLASS="l">110</TD>'
'<TD> if (mSelectors.get(index) != null) {</TD>'
'</TR>'
)
self.not_executable_tr_html = (
'<TR>'
'<TD CLASS="l">109</TD>'
'<TD> </TD>'
'</TR>'
)
self.tr_with_extra_a_tag = (
'<TR CLASS="z">'
'<TD CLASS="l">'
'<A name="1f">54</A>'
'</TD>'
'<TD> }</TD>'
'</TR>'
)
def testInit(self):
emma_dir = self.emma_dir
parser = emma_coverage_stats._EmmaHtmlParser(emma_dir)
self.assertEqual(parser._base_dir, emma_dir)
self.assertEqual(parser._emma_files_path, 'fake/dir/_files')
self.assertEqual(parser._index_path, 'fake/dir/index.html')
def testFindElements_basic(self):
read_values = [self.simple_html]
found, _ = MockOpenForFunction(self.parser._FindElements, read_values,
file_path='fake', xpath_selector='.//TD')
self.assertIs(type(found), list)
self.assertIs(type(found[0]), ElementTree.Element)
self.assertEqual(found[0].text, 'Test HTML')
def testFindElements_multipleElements(self):
multiple_trs = self.not_executable_tr_html + self.covered_tr_html
read_values = ['<div>' + multiple_trs + '</div>']
found, _ = MockOpenForFunction(self.parser._FindElements, read_values,
file_path='fake', xpath_selector='.//TR')
self.assertEquals(2, len(found))
def testFindElements_noMatch(self):
read_values = [self.simple_html]
found, _ = MockOpenForFunction(self.parser._FindElements, read_values,
file_path='fake', xpath_selector='.//TR')
self.assertEqual(found, [])
def testFindElements_badFilePath(self):
with self.assertRaises(IOError):
with mock.patch('os.path.exists', return_value=False):
self.parser._FindElements('fake', xpath_selector='//tr')
def testGetPackageNameToEmmaFileDict_basic(self):
expected_dict = {
'org.chromium.chrome.browser.AccessibilityUtil.java':
'fake/dir/_files/23.html',
'org.chromium.chrome.browser.ContextualMenuBar.java':
'fake/dir/_files/22.html',
'org.chromium.chrome.browser.tabmodel.IntentHelper.java':
'fake/dir/_files/1e.html',
'org.chromium.chrome.browser.ContentSetting.java':
'fake/dir/_files/1f.html',
'org.chromium.chrome.browser.DevToolsServer.java':
'fake/dir/_files/20.html',
'org.chromium.chrome.browser.NavigationPopup.java':
'fake/dir/_files/24.html',
'org.chromium.chrome.browser.FileProviderHelper.java':
'fake/dir/_files/21.html'}
read_values = [self.index_html, self.package_1_class_list_html,
self.package_2_class_list_html]
return_dict, mock_open = MockOpenForFunction(
self.parser.GetPackageNameToEmmaFileDict, read_values)
self.assertDictEqual(return_dict, expected_dict)
self.assertEqual(mock_open.call_count, 3)
calls = [mock.call('fake/dir/index.html'),
mock.call('fake/dir/_files/1.html'),
mock.call('fake/dir/_files/0.html')]
mock_open.assert_has_calls(calls)
def testGetPackageNameToEmmaFileDict_noPackageElements(self):
self.parser._FindElements = mock.Mock(return_value=[])
return_dict = self.parser.GetPackageNameToEmmaFileDict()
self.assertDictEqual({}, return_dict)
def testGetLineCoverage_status_basic(self):
line_coverage = self.GetLineCoverageWithFakeElements([self.covered_tr_html])
self.assertEqual(line_coverage[0].covered_status,
emma_coverage_stats.COVERED)
def testGetLineCoverage_status_statusMissing(self):
line_coverage = self.GetLineCoverageWithFakeElements(
[self.not_executable_tr_html])
self.assertEqual(line_coverage[0].covered_status,
emma_coverage_stats.NOT_EXECUTABLE)
def testGetLineCoverage_fractionalCoverage_basic(self):
line_coverage = self.GetLineCoverageWithFakeElements([self.covered_tr_html])
self.assertEqual(line_coverage[0].fractional_line_coverage, 1.0)
def testGetLineCoverage_fractionalCoverage_partial(self):
line_coverage = self.GetLineCoverageWithFakeElements(
[self.partially_covered_tr_html])
self.assertEqual(line_coverage[0].fractional_line_coverage, 0.78)
def testGetLineCoverage_lineno_basic(self):
line_coverage = self.GetLineCoverageWithFakeElements([self.covered_tr_html])
self.assertEqual(line_coverage[0].lineno, 110)
def testGetLineCoverage_lineno_withAlternativeHtml(self):
line_coverage = self.GetLineCoverageWithFakeElements(
[self.tr_with_extra_a_tag])
self.assertEqual(line_coverage[0].lineno, 54)
def testGetLineCoverage_source(self):
self.parser._FindElements = mock.Mock(
return_value=[ElementTree.fromstring(self.covered_tr_html)])
line_coverage = self.parser.GetLineCoverage('fake_path')
self.assertEqual(line_coverage[0].source,
' if (mSelectors.get(index) != null) {')
def testGetLineCoverage_multipleElements(self):
line_coverage = self.GetLineCoverageWithFakeElements(
[self.covered_tr_html, self.partially_covered_tr_html,
self.tr_with_extra_a_tag])
self.assertEqual(len(line_coverage), 3)
def GetLineCoverageWithFakeElements(self, html_elements):
"""Wraps GetLineCoverage so mock HTML can easily be used.
Args:
html_elements: List of strings each representing an HTML element.
Returns:
A list of LineCoverage objects.
"""
elements = [ElementTree.fromstring(string) for string in html_elements]
with mock.patch('emma_coverage_stats._EmmaHtmlParser._FindElements',
return_value=elements):
return self.parser.GetLineCoverage('fake_path')
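# Illustrative sketch, not part of the original test suite: how a single EMMA
# 'covered' table row can be picked apart with ElementTree, mirroring what the
# parser under test does with covered_tr_html above. The HTML literal is a
# trimmed-down, made-up row.
def _example_parse_coverage_row_sketch():
  row = ElementTree.fromstring(
      '<TR CLASS="c"><TD CLASS="l">110</TD><TD>if (selector != null) {</TD></TR>')
  line_td, source_td = row.findall('TD')
  # -> (110, 'c', 'if (selector != null) {')
  return int(line_td.text), row.get('CLASS'), source_td.text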
class _EmmaCoverageStatsTest(unittest.TestCase):
"""Tests for _EmmaCoverageStats."""
def setUp(self):
self.good_source_to_emma = {
'/path/to/1/File1.java': '/emma/1.html',
'/path/2/File2.java': '/emma/2.html',
'/path/2/File3.java': '/emma/3.html'
}
self.line_coverage = [
emma_coverage_stats.LineCoverage(
1, '', emma_coverage_stats.COVERED, 1.0),
emma_coverage_stats.LineCoverage(
2, '', emma_coverage_stats.COVERED, 1.0),
emma_coverage_stats.LineCoverage(
3, '', emma_coverage_stats.NOT_EXECUTABLE, 1.0),
emma_coverage_stats.LineCoverage(
4, '', emma_coverage_stats.NOT_COVERED, 1.0),
emma_coverage_stats.LineCoverage(
5, '', emma_coverage_stats.PARTIALLY_COVERED, 0.85),
emma_coverage_stats.LineCoverage(
6, '', emma_coverage_stats.PARTIALLY_COVERED, 0.20)
]
self.lines_for_coverage = [1, 3, 5, 6]
with mock.patch('emma_coverage_stats._EmmaHtmlParser._FindElements',
return_value=[]):
self.simple_coverage = emma_coverage_stats._EmmaCoverageStats(
'fake_dir', {})
def testInit(self):
coverage_stats = self.simple_coverage
self.assertIsInstance(coverage_stats._emma_parser,
emma_coverage_stats._EmmaHtmlParser)
self.assertIsInstance(coverage_stats._source_to_emma, dict)
def testNeedsCoverage_withExistingJavaFile(self):
test_file = '/path/to/file/File.java'
with mock.patch('os.path.exists', return_value=True):
self.assertTrue(
emma_coverage_stats._EmmaCoverageStats.NeedsCoverage(test_file))
def testNeedsCoverage_withNonJavaFile(self):
test_file = '/path/to/file/File.c'
with mock.patch('os.path.exists', return_value=True):
self.assertFalse(
emma_coverage_stats._EmmaCoverageStats.NeedsCoverage(test_file))
def testNeedsCoverage_fileDoesNotExist(self):
test_file = '/path/to/file/File.java'
with mock.patch('os.path.exists', return_value=False):
self.assertFalse(
emma_coverage_stats._EmmaCoverageStats.NeedsCoverage(test_file))
def testGetPackageNameFromFile_basic(self):
test_file_text = """// Test Copyright
package org.chromium.chrome.browser;
import android.graphics.RectF;"""
result_package, _ = MockOpenForFunction(
emma_coverage_stats._EmmaCoverageStats.GetPackageNameFromFile,
[test_file_text], file_path='/path/to/file/File.java')
self.assertEqual(result_package, 'org.chromium.chrome.browser.File.java')
def testGetPackageNameFromFile_noPackageStatement(self):
result_package, _ = MockOpenForFunction(
emma_coverage_stats._EmmaCoverageStats.GetPackageNameFromFile,
['not a package statement'], file_path='/path/to/file/File.java')
self.assertIsNone(result_package)
def testGetSummaryStatsForLines_basic(self):
covered, total = self.simple_coverage.GetSummaryStatsForLines(
self.line_coverage)
self.assertEqual(covered, 3.05)
self.assertEqual(total, 5)
def testGetSourceFileToEmmaFileDict(self):
package_names = {
'/path/to/1/File1.java': 'org.fake.one.File1.java',
'/path/2/File2.java': 'org.fake.File2.java',
'/path/2/File3.java': 'org.fake.File3.java'
}
package_to_emma = {
'org.fake.one.File1.java': '/emma/1.html',
'org.fake.File2.java': '/emma/2.html',
'org.fake.File3.java': '/emma/3.html'
}
with mock.patch('os.path.exists', return_value=True):
coverage_stats = self.simple_coverage
coverage_stats._emma_parser.GetPackageNameToEmmaFileDict = mock.MagicMock(
return_value=package_to_emma)
coverage_stats.GetPackageNameFromFile = lambda x: package_names[x]
result_dict = coverage_stats._GetSourceFileToEmmaFileDict(
package_names.keys())
self.assertDictEqual(result_dict, self.good_source_to_emma)
def testGetCoverageDictForFile(self):
line_coverage = self.line_coverage
self.simple_coverage._emma_parser.GetLineCoverage = lambda x: line_coverage
self.simple_coverage._source_to_emma = {'/fake/src': 'fake/emma'}
lines = self.lines_for_coverage
expected_dict = {
'absolute': {
'covered': 3.05,
'total': 5
},
'incremental': {
'covered': 2.05,
'total': 3
},
'source': [
{
'line': line_coverage[0].source,
'coverage': line_coverage[0].covered_status,
'changed': True,
'fractional_coverage': line_coverage[0].fractional_line_coverage,
},
{
'line': line_coverage[1].source,
'coverage': line_coverage[1].covered_status,
'changed': False,
'fractional_coverage': line_coverage[1].fractional_line_coverage,
},
{
'line': line_coverage[2].source,
'coverage': line_coverage[2].covered_status,
'changed': True,
'fractional_coverage': line_coverage[2].fractional_line_coverage,
},
{
'line': line_coverage[3].source,
'coverage': line_coverage[3].covered_status,
'changed': False,
'fractional_coverage': line_coverage[3].fractional_line_coverage,
},
{
'line': line_coverage[4].source,
'coverage': line_coverage[4].covered_status,
'changed': True,
'fractional_coverage': line_coverage[4].fractional_line_coverage,
},
{
'line': line_coverage[5].source,
'coverage': line_coverage[5].covered_status,
'changed': True,
'fractional_coverage': line_coverage[5].fractional_line_coverage,
}
]
}
result_dict = self.simple_coverage.GetCoverageDictForFile(
'/fake/src', lines)
self.assertDictEqual(result_dict, expected_dict)
def testGetCoverageDictForFile_emptyCoverage(self):
expected_dict = {
'absolute': {'covered': 0, 'total': 0},
'incremental': {'covered': 0, 'total': 0},
'source': []
}
self.simple_coverage._emma_parser.GetLineCoverage = lambda x: []
self.simple_coverage._source_to_emma = {'fake_dir': 'fake/emma'}
result_dict = self.simple_coverage.GetCoverageDictForFile('fake_dir', {})
self.assertDictEqual(result_dict, expected_dict)
def testGetCoverageDictForFile_missingCoverage(self):
self.simple_coverage._source_to_emma = {}
result_dict = self.simple_coverage.GetCoverageDictForFile('fake_file', {})
self.assertIsNone(result_dict)
def testGetCoverageDict_basic(self):
files_for_coverage = {
'/path/to/1/File1.java': [1, 3, 4],
'/path/2/File2.java': [1, 2]
}
self.simple_coverage._source_to_emma = {
'/path/to/1/File1.java': 'emma_1',
'/path/2/File2.java': 'emma_2'
}
coverage_info = {
'emma_1': [
emma_coverage_stats.LineCoverage(
1, '', emma_coverage_stats.COVERED, 1.0),
emma_coverage_stats.LineCoverage(
2, '', emma_coverage_stats.PARTIALLY_COVERED, 0.5),
emma_coverage_stats.LineCoverage(
3, '', emma_coverage_stats.NOT_EXECUTABLE, 1.0),
emma_coverage_stats.LineCoverage(
4, '', emma_coverage_stats.COVERED, 1.0)
],
'emma_2': [
emma_coverage_stats.LineCoverage(
1, '', emma_coverage_stats.NOT_COVERED, 1.0),
emma_coverage_stats.LineCoverage(
2, '', emma_coverage_stats.COVERED, 1.0)
]
}
expected_dict = {
'files': {
'/path/2/File2.java': {
'absolute': {'covered': 1, 'total': 2},
'incremental': {'covered': 1, 'total': 2},
'source': [{'changed': True, 'coverage': 0,
'line': '', 'fractional_coverage': 1.0},
{'changed': True, 'coverage': 1,
'line': '', 'fractional_coverage': 1.0}]
},
'/path/to/1/File1.java': {
'absolute': {'covered': 2.5, 'total': 3},
'incremental': {'covered': 2, 'total': 2},
'source': [{'changed': True, 'coverage': 1,
'line': '', 'fractional_coverage': 1.0},
{'changed': False, 'coverage': 2,
'line': '', 'fractional_coverage': 0.5},
{'changed': True, 'coverage': -1,
'line': '', 'fractional_coverage': 1.0},
{'changed': True, 'coverage': 1,
'line': '', 'fractional_coverage': 1.0}]
}
},
'patch': {'incremental': {'covered': 3, 'total': 4}}
}
# Return the relevant coverage info for each file.
self.simple_coverage._emma_parser.GetLineCoverage = (
lambda x: coverage_info[x])
result_dict = self.simple_coverage.GetCoverageDict(files_for_coverage)
self.assertDictEqual(result_dict, expected_dict)
def testGetCoverageDict_noCoverage(self):
result_dict = self.simple_coverage.GetCoverageDict({})
self.assertDictEqual(result_dict, EMPTY_COVERAGE_STATS_DICT)
class EmmaCoverageStatsGenerateCoverageReport(unittest.TestCase):
"""Tests for GenerateCoverageReport."""
def testGenerateCoverageReport_missingJsonFile(self):
with self.assertRaises(IOError):
with mock.patch('os.path.exists', return_value=False):
emma_coverage_stats.GenerateCoverageReport('', '', '')
def testGenerateCoverageReport_invalidJsonFile(self):
with self.assertRaises(ValueError):
with mock.patch('os.path.exists', return_value=True):
MockOpenForFunction(emma_coverage_stats.GenerateCoverageReport, [''],
line_coverage_file='', out_file_path='',
coverage_dir='')
def MockOpenForFunction(func, side_effects, **kwargs):
"""Allows easy mock open and read for callables that open multiple files.
Will mock the python open function in a way such that each time read() is
called on an open file, the next element in |side_effects| is returned. This
makes it easier to test functions that call open() multiple times.
Args:
func: The callable to invoke once mock files are setup.
side_effects: A list of return values for each file to return once read.
      Length of the list should be equal to the number of calls to open in |func|.
**kwargs: Keyword arguments to be passed to |func|.
Returns:
A tuple containing the return value of |func| and the MagicMock object used
to mock all calls to open respectively.
"""
mock_open = mock.mock_open()
mock_open.side_effect = [mock.mock_open(read_data=side_effect).return_value
for side_effect in side_effects]
with mock.patch('__builtin__.open', mock_open):
return func(**kwargs), mock_open
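# Hypothetical usage sketch, not part of the original suite: each open()/read()
# inside the wrapped callable consumes the next element of |side_effects|. The
# helper and file names below are illustrative assumptions only.
def _example_mock_open_usage():
  def read_two_files(first_path, second_path):
    with open(first_path) as f1, open(second_path) as f2:
      return f1.read(), f2.read()
  result, mocked_open = MockOpenForFunction(
      read_two_files, ['contents-1', 'contents-2'],
      first_path='a.txt', second_path='b.txt')
  assert result == ('contents-1', 'contents-2')
  assert mocked_open.call_count == 2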
if __name__ == '__main__':
# Suppress logging messages.
unittest.main(buffer=True)
|
tswast/google-cloud-python
|
refs/heads/master
|
dataproc/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py
|
2
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.dataproc_v1.proto import (
jobs_pb2 as google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class JobControllerStub(object):
"""The JobController provides methods to manage jobs.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SubmitJob = channel.unary_unary(
"/google.cloud.dataproc.v1.JobController/SubmitJob",
request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,
)
self.GetJob = channel.unary_unary(
"/google.cloud.dataproc.v1.JobController/GetJob",
request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.GetJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,
)
self.ListJobs = channel.unary_unary(
"/google.cloud.dataproc.v1.JobController/ListJobs",
request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsResponse.FromString,
)
self.UpdateJob = channel.unary_unary(
"/google.cloud.dataproc.v1.JobController/UpdateJob",
request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.UpdateJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,
)
self.CancelJob = channel.unary_unary(
"/google.cloud.dataproc.v1.JobController/CancelJob",
request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.CancelJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,
)
self.DeleteJob = channel.unary_unary(
"/google.cloud.dataproc.v1.JobController/DeleteJob",
request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.DeleteJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class JobControllerServicer(object):
"""The JobController provides methods to manage jobs.
"""
def SubmitJob(self, request, context):
"""Submits a job to a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetJob(self, request, context):
"""Gets the resource representation for a job in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListJobs(self, request, context):
"""Lists regions/{region}/jobs in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateJob(self, request, context):
"""Updates a job in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CancelJob(self, request, context):
"""Starts a job cancellation request. To access the job resource
after cancellation, call
[regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
or
[regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteJob(self, request, context):
"""Deletes the job from the project. If the job is active, the delete fails,
and the response returns `FAILED_PRECONDITION`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_JobControllerServicer_to_server(servicer, server):
rpc_method_handlers = {
"SubmitJob": grpc.unary_unary_rpc_method_handler(
servicer.SubmitJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.SerializeToString,
),
"GetJob": grpc.unary_unary_rpc_method_handler(
servicer.GetJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.GetJobRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.SerializeToString,
),
"ListJobs": grpc.unary_unary_rpc_method_handler(
servicer.ListJobs,
request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsResponse.SerializeToString,
),
"UpdateJob": grpc.unary_unary_rpc_method_handler(
servicer.UpdateJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.UpdateJobRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.SerializeToString,
),
"CancelJob": grpc.unary_unary_rpc_method_handler(
servicer.CancelJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.CancelJobRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.SerializeToString,
),
"DeleteJob": grpc.unary_unary_rpc_method_handler(
servicer.DeleteJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.DeleteJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.dataproc.v1.JobController", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
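# Hedged usage sketch, not part of the generated code above: a client would
# typically wrap a grpc channel in JobControllerStub and call the unary methods
# with request messages from jobs_pb2. The endpoint here is an assumption and
# real Dataproc calls need an authenticated, secure channel.
def _example_list_jobs(project_id, region):
    channel = grpc.insecure_channel("localhost:8080")  # assumed endpoint
    stub = JobControllerStub(channel)
    request = google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsRequest(
        project_id=project_id, region=region
    )
    return stub.ListJobs(request)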
|
Ichag/odoo
|
refs/heads/8.0
|
addons/hw_scanner/__openerp__.py
|
93
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Barcode Scanner Hardware Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
'summary': 'Hardware Driver for Barcode Scanners',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Barcode Scanner Hardware Driver
================================
This module allows the web client to access a remotely installed barcode
scanner, and is used by the posbox to provide barcode scanner support to the
point of sale module.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Lujeni/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_snmp.py
|
22
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_snmp import ApiParameters
from library.modules.bigip_snmp import ModuleParameters
from library.modules.bigip_snmp import ModuleManager
from library.modules.bigip_snmp import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_snmp import ApiParameters
from ansible.modules.network.f5.bigip_snmp import ModuleParameters
from ansible.modules.network.f5.bigip_snmp import ModuleManager
from ansible.modules.network.f5.bigip_snmp import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
agent_status_traps='enabled',
agent_authentication_traps='enabled',
contact='Alice@foo.org',
device_warning_traps='enabled',
location='Lunar orbit',
)
p = ModuleParameters(params=args)
assert p.agent_status_traps == 'enabled'
assert p.agent_authentication_traps == 'enabled'
assert p.device_warning_traps == 'enabled'
assert p.location == 'Lunar orbit'
assert p.contact == 'Alice@foo.org'
def test_module_parameters_disabled(self):
args = dict(
agent_status_traps='disabled',
agent_authentication_traps='disabled',
device_warning_traps='disabled',
)
p = ModuleParameters(params=args)
assert p.agent_status_traps == 'disabled'
assert p.agent_authentication_traps == 'disabled'
assert p.device_warning_traps == 'disabled'
def test_api_parameters(self):
args = dict(
agentTrap='enabled',
authTrap='enabled',
bigipTraps='enabled',
sysLocation='Lunar orbit',
sysContact='Alice@foo.org',
)
p = ApiParameters(params=args)
assert p.agent_status_traps == 'enabled'
assert p.agent_authentication_traps == 'enabled'
assert p.device_warning_traps == 'enabled'
assert p.location == 'Lunar orbit'
assert p.contact == 'Alice@foo.org'
def test_api_parameters_disabled(self):
args = dict(
agentTrap='disabled',
authTrap='disabled',
bigipTraps='disabled',
)
p = ApiParameters(params=args)
assert p.agent_status_traps == 'disabled'
assert p.agent_authentication_traps == 'disabled'
assert p.device_warning_traps == 'disabled'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_update_agent_status_traps(self, *args):
set_module_args(dict(
agent_status_traps='enabled',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(
params=dict(
agent_status_traps='disabled'
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['agent_status_traps'] == 'enabled'
def test_update_allowed_addresses(self, *args):
set_module_args(dict(
allowed_addresses=[
'127.0.0.0/8',
'10.10.10.10',
'foo',
'baz.foo.com'
],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(
params=dict(
allowed_addresses=['127.0.0.0/8']
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert len(results['allowed_addresses']) == 4
assert results['allowed_addresses'] == [
'10.10.10.10', '127.0.0.0/8', 'baz.foo.com', 'foo'
]
def test_update_allowed_addresses_default(self, *args):
set_module_args(dict(
allowed_addresses=[
'default'
],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(
params=dict(
allowed_addresses=['10.0.0.0']
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert len(results['allowed_addresses']) == 1
assert results['allowed_addresses'] == ['127.0.0.0/8']
def test_update_allowed_addresses_empty(self, *args):
set_module_args(dict(
allowed_addresses=[''],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(
params=dict(
allowed_addresses=['10.0.0.0']
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert len(results['allowed_addresses']) == 1
assert results['allowed_addresses'] == ['127.0.0.0/8']
|
jrgdiz/cardwalker
|
refs/heads/master
|
grammar/entities/people/decl.py
|
1
|
from pyparsing import *
import act
your = Forward().setParseAction(act.your)
its = Forward().setParseAction(act.its)
their = Forward().setParseAction(act.their)
his = Forward().setParseAction(act.his)
singleposs = Forward().setParseAction(act.singleposs)
who = Forward().setParseAction(act.who)
person = Forward().setParseAction(act.person)
people = Forward().setParseAction(act.people)
personposs = Forward().setParseAction(act.personposs)
peopleposs = Forward()
peoplecontrol = Forward().setParseAction(act.peoplecontrol)
undercontrol = Forward().setParseAction(act.undercontrol)
|
scalable-networks/gnuradio-3.7.0.1
|
refs/heads/master
|
gr-filter/python/filter/qa_iir_filter.py
|
10
|
#!/usr/bin/env python
#
# Copyright 2004,2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, filter, blocks
class test_iir_filter(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_iir_direct_001(self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8)
fftaps = ()
fbtaps = ()
expected_result = (0, 0, 0, 0, 0, 0, 0, 0)
src = blocks.vector_source_f(src_data)
op = filter.iir_filter_ffd(fftaps, fbtaps)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data)
def test_iir_direct_002(self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8)
fftaps = (2,)
fbtaps = (0,)
expected_result = (2, 4, 6, 8, 10, 12, 14, 16)
src = blocks.vector_source_f(src_data)
op = filter.iir_filter_ffd(fftaps, fbtaps)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data)
def test_iir_direct_003(self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8)
fftaps = (2, 11)
fbtaps = (0, 0)
expected_result = (2, 15, 28, 41, 54, 67, 80, 93)
src = blocks.vector_source_f(src_data)
op = filter.iir_filter_ffd(fftaps, fbtaps)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data)
def test_iir_direct_004(self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8)
fftaps = (2, 11)
fbtaps = (0, -1)
expected_result = (2, 13, 15, 26, 28, 39, 41, 52)
src = blocks.vector_source_f(src_data)
op = filter.iir_filter_ffd(fftaps, fbtaps)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data)
def test_iir_direct_005(self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8)
fftaps = (2, 11, 0)
fbtaps = (0, -1, 3)
expected_result = (2, 13, 21, 59, 58, 186, 68, 583)
src = blocks.vector_source_f(src_data)
op = filter.iir_filter_ffd(fftaps, fbtaps)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data)
def test_iir_direct_006(self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8)
expected_result = (2, 13, 21, 59, 58, 186, 68, 583)
fftaps = (2, 1)
fbtaps = (0, -1)
src = blocks.vector_source_f(src_data)
op = filter.iir_filter_ffd(fftaps, fbtaps)
fftaps = (2, 11, 0)
fbtaps = (0, -1, 3)
op.set_taps(fftaps, fbtaps)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data)
def test_iir_direct_007(self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8)
expected_result = (2,2,5,5,8,8,11,11)
fftaps = (2, 1)
fbtaps = (0, -1)
src = blocks.vector_source_f(src_data)
op = filter.iir_filter_ffd(fftaps, fbtaps)
fftaps = (2,0,1)
fbtaps = (0, -1)
op.set_taps(fftaps, fbtaps)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data)
def test_iir_direct_008(self):
src_data = (1, 2, 3, 4, 5, 6, 7, 8)
expected_result = (2,4,4,10,18,14,26,56)
fftaps = (2,)
fbtaps = (0, 1)
src = blocks.vector_source_f(src_data)
op = filter.iir_filter_ffd(fftaps, fbtaps)
        fftaps_data = (1)  # unused: set_taps below still receives fftaps = (2,) from above
        fbtaps = (0, 0, -1, 3)
op.set_taps(fftaps, fbtaps)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data)
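# Hedged reference sketch, not part of the original tests: the expected results
# above are consistent with the direct-form IIR recurrence
#   y[n] = sum_k fftaps[k] * x[n - k] + sum_{k>=1} fbtaps[k] * y[n - k]
# (fbtaps[0] does not enter the recurrence in these cases).
def _reference_iir(src_data, fftaps, fbtaps):
    out = []
    for n in range(len(src_data)):
        acc = sum(fftaps[k] * src_data[n - k]
                  for k in range(len(fftaps)) if n - k >= 0)
        acc += sum(fbtaps[k] * out[n - k]
                   for k in range(1, len(fbtaps)) if n - k >= 0)
        out.append(acc)
    return out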
if __name__ == '__main__':
gr_unittest.run(test_iir_filter, "test_iir_filter.xml")
|
rghe/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortimanager/fmgr_script.py
|
44
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_script
version_added: "2.5"
author: Andrew Welsh
short_description: Add/Edit/Delete and execute scripts
description: Create/edit/delete scripts and execute the scripts on the FortiManager using jsonrpc API
options:
adom:
description:
      - The administrative domain (adom) the configuration belongs to
required: true
vdom:
description:
- The virtual domain (vdom) the configuration belongs to
host:
description:
- The FortiManager's Address.
required: true
username:
description:
- The username to log into the FortiManager
required: true
password:
description:
- The password associated with the username account.
required: false
state:
description:
- The desired state of the specified object.
- present - will create a script.
      - execute - execute the script.
- delete - delete the script.
required: false
default: present
choices: ["present", "execute", "delete"]
script_name:
description:
- The name of the script.
required: True
script_type:
description:
- The type of script (CLI or TCL).
required: false
script_target:
description:
- The target of the script to be run.
required: false
script_description:
description:
- The description of the script.
required: false
script_content:
description:
- The script content that will be executed.
required: false
script_scope:
description:
      - (datasource) The devices that the script will run on; can include both device members and device group members.
required: false
script_package:
description:
- (datasource) Policy package object to run the script against
required: false
'''
EXAMPLES = '''
- name: CREATE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
script_type: "cli"
script_target: "remote_device"
script_description: "Create by Ansible"
script_content: "get system status"
- name: EXECUTE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
state: "execute"
script_scope: "FGT1,FGT2"
- name: DELETE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
state: "delete"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: string
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.network.fortimanager.fortimanager import AnsibleFortiManager
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
def set_script(fmg, script_name, script_type, script_content, script_desc, script_target, adom):
"""
This method sets a script.
"""
datagram = {
'content': script_content,
'desc': script_desc,
'name': script_name,
'target': script_target,
'type': script_type,
}
url = '/dvmdb/adom/{adom}/script/'.format(adom=adom)
response = fmg.set(url, datagram)
return response
def delete_script(fmg, script_name, adom):
"""
This method deletes a script.
"""
datagram = {
'name': script_name,
}
url = '/dvmdb/adom/{adom}/script/{script_name}'.format(adom=adom, script_name=script_name)
response = fmg.delete(url, datagram)
return response
def execute_script(fmg, script_name, scope, package, adom, vdom):
"""
This method will execute a specific script.
"""
scope_list = list()
scope = scope.replace(' ', '')
scope = scope.split(',')
for dev_name in scope:
scope_list.append({'name': dev_name, 'vdom': vdom})
datagram = {
'adom': adom,
'script': script_name,
'package': package,
'scope': scope_list,
}
url = '/dvmdb/adom/{adom}/script/execute'.format(adom=adom)
response = fmg.execute(url, datagram)
return response
def main():
argument_spec = dict(
adom=dict(required=False, type="str"),
vdom=dict(required=False, type="str"),
host=dict(required=True, type="str"),
password=dict(fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True),
username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
state=dict(choices=["execute", "delete", "present"], type="str"),
script_name=dict(required=True, type="str"),
script_type=dict(required=False, type="str"),
script_target=dict(required=False, type="str"),
script_description=dict(required=False, type="str"),
script_content=dict(required=False, type="str"),
script_scope=dict(required=False, type="str"),
script_package=dict(required=False, type="str"),
)
module = AnsibleModule(argument_spec, supports_check_mode=True,)
# check if params are set
if module.params["host"] is None or module.params["username"] is None:
module.fail_json(msg="Host and username are required for connection")
# check if login failed
fmg = AnsibleFortiManager(module, module.params["host"], module.params["username"], module.params["password"])
response = fmg.login()
if "FortiManager instance connnected" not in str(response):
module.fail_json(msg="Connection to FortiManager Failed")
else:
adom = module.params["adom"]
if adom is None:
adom = "root"
vdom = module.params["vdom"]
if vdom is None:
vdom = "root"
state = module.params["state"]
if state is None:
state = "present"
script_name = module.params["script_name"]
script_type = module.params["script_type"]
script_target = module.params["script_target"]
script_description = module.params["script_description"]
script_content = module.params["script_content"]
script_scope = module.params["script_scope"]
script_package = module.params["script_package"]
# if state is present (default), then add the script
if state == "present":
results = set_script(fmg, script_name, script_type, script_content, script_description, script_target, adom)
if not results[0] == 0:
if isinstance(results[1], list):
module.fail_json(msg="Adding Script Failed", **results)
else:
module.fail_json(msg="Adding Script Failed")
elif state == "execute":
results = execute_script(fmg, script_name, script_scope, script_package, adom, vdom)
if not results[0] == 0:
module.fail_json(msg="Script Execution Failed", **results)
elif state == "delete":
results = delete_script(fmg, script_name, adom)
if not results[0] == 0:
module.fail_json(msg="Script Deletion Failed", **results)
fmg.logout()
# results is returned as a tuple
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
|
ridfrustum/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/bin/unique-messages.py
|
454
|
#!/usr/bin/env python
import os
import sys
def unique_messages():
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print "this script should be run from the django svn tree or your project or app tree"
sys.exit(1)
for (dirpath, dirnames, filenames) in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
cmd = 'msguniq "%s.po"' % pf
stdout = os.popen(cmd)
msg = stdout.read()
open('%s.po' % pf, 'w').write(msg)
if __name__ == "__main__":
unique_messages()
|
indictranstech/focal-erpnext
|
refs/heads/develop
|
setup/doctype/global_defaults/global_defaults.py
|
34
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
"""Global Defaults"""
import frappe
import frappe.defaults
from frappe.utils import cint
from frappe.core.doctype.property_setter.property_setter import make_property_setter
keydict = {
# "key in defaults": "key in Global Defaults"
"print_style": "print_style",
"fiscal_year": "current_fiscal_year",
'company': 'default_company',
'currency': 'default_currency',
"country": "country",
'hide_currency_symbol':'hide_currency_symbol',
'account_url':'account_url',
'disable_rounded_total': 'disable_rounded_total',
}
from frappe.model.document import Document
class GlobalDefaults(Document):
def on_update(self):
"""update defaults"""
for key in keydict:
frappe.db.set_default(key, self.get(keydict[key], ''))
# update year start date and year end date from fiscal_year
year_start_end_date = frappe.db.sql("""select year_start_date, year_end_date
from `tabFiscal Year` where name=%s""", self.current_fiscal_year)
if year_start_end_date:
ysd = year_start_end_date[0][0] or ''
yed = year_start_end_date[0][1] or ''
if ysd and yed:
frappe.db.set_default('year_start_date', ysd.strftime('%Y-%m-%d'))
frappe.db.set_default('year_end_date', yed.strftime('%Y-%m-%d'))
# enable default currency
if self.default_currency:
frappe.db.set_value("Currency", self.default_currency, "enabled", 1)
self.toggle_rounded_total()
# clear cache
frappe.clear_cache()
def get_defaults(self):
return frappe.defaults.get_defaults()
def toggle_rounded_total(self):
self.disable_rounded_total = cint(self.disable_rounded_total)
# Make property setters to hide rounded total fields
for doctype in ("Quotation", "Sales Order", "Sales Invoice", "Delivery Note"):
make_property_setter(doctype, "rounded_total", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "rounded_total", "print_hide", 1, "Check")
make_property_setter(doctype, "rounded_total_export", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "rounded_total_export", "print_hide", self.disable_rounded_total, "Check")
|
dedupeio/dedupe
|
refs/heads/master
|
tests/duplicateCluster_memory_case.py
|
2
|
import random
import dedupe.core
import dedupe.dedupe # noqa: F401
# simulated_candidates = (((1, {'name': 'asdffdsa'}), (2, {'name': 'fdsaasdf'}))
# for _ in xrange(10**6))
# data_model = {"fields": {"name": {"type": "String", "weight": -1.0}},
# "bias": 1.0}
# threshold = 0
# dupes = dedupe.core.scoreDuplicates(simulated_candidates,
# data_model,
# 0)
# simulated_candidates = (((1, {'name': 'asdffdsa'}), (2, {'name': 'fdsaasdf'}))
# for _ in xrange(10**7))
# deduper = dedupe.dedupe.Dedupe({"name": {"type": "String", "weight": -1.0}})
# clusters = deduper.duplicateClusters(simulated_candidates, 0, 0)
def candidates_gen():
candidate_set = set([])
for _ in range(10**5):
block = [((random.randint(0, 1000), 'a'),
(random.randint(0, 1000), 'b'))]
for candidate in block:
pair_ids = (candidate[0][0], candidate[1][0])
if pair_ids not in candidate_set:
yield candidate
candidate_set.add(pair_ids)
del candidate_set
@profile # noqa: F821
def generator_test():
a = sum(candidate[0][0] for candidate in candidates_gen())
print(a)
generator_test()
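# Assumed usage note: the bare @profile decorator above is injected at run time by
# an external profiler, e.g. `python -m memory_profiler duplicateCluster_memory_case.py`
# or `kernprof -l duplicateCluster_memory_case.py`; it is not defined in this module,
# which is why the F821 (undefined name) warning is suppressed.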
|
sergei-maertens/django
|
refs/heads/master
|
tests/flatpages_tests/test_views.py
|
36
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageViewTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view"
response = self.client.get('/flatpage_root/flatpage/')
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
user = User.objects.create_user('testuser', 'test@example.com', 's3krit')
self.client.force_login(user)
response = self.client.get('/flatpage_root/sekrit/')
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage won't be served if the fallback middleware is disabled"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/flatpage_root/some.very_special~chars-here/')
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageViewAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view and should not add a slash"
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled and should not add a slash"
response = self.client.get('/flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_non_existent_flatpage(self):
"A non-existent flatpage won't be served if the fallback middleware is disabled and should not add a slash"
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/flatpage_root/some.very_special~chars-here')
self.assertRedirects(response, '/flatpage_root/some.very_special~chars-here/', status_code=301)
|
MartinThoma/algorithms
|
refs/heads/master
|
Python/structured-logging/pure_example.py
|
1
|
import json
import logging
import os
import sys
from typing import Optional
from pythonjsonlogger import jsonlogger
def is_local(local_str: Optional[str]) -> bool:
if local_str is None or local_str.lower() in ["n", "0", "false", "no"]:
return False
else:
return True
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(logging.INFO)  # assumed fix: without this, the info example below is filtered out by the default WARNING level
if is_local(os.environ.get("LOCAL")):
# https://docs.python.org/2/library/logging.html#logrecord-attributes
formatter = logging.Formatter(
"{asctime} [{levelname:<9}] {message}", "%H:%M:%S", style="{"
)
handler.setFormatter(formatter)
else:
formatter = jsonlogger.JsonFormatter()
handler.setFormatter(formatter)
if __name__ == "__main__":
logger.info("This is an info message")
logger.error("This is an error message", extra={"ip": "10.12.13"})
|